Populate a WTForms SelectField with an SQL query - python

I want to populate a WTForms SelectField with the rows returned by this query:
cur.execute("SELECT length FROM skipakke_alpin_ski WHERE stock > 0")
The query returns rows with the ski lengths of different types of skis. cur.fetchall() returns the following list of tuples:
[(70,), (75,), (82,), (88,), (105,), (115,), (125,), (132,), (140,), (150,), (160,), (170,)]
How would I go about adding these numbers to a SelectField, so that each ski length is its own selectable choice? If I had done this manually, I would have done the following:
ski_size = SelectField('Ski size', choices=[('70', '70'), ('75', '75')])
... And so on for all of the different lengths.

In one of my projects I have used something like the following:
models
class PropertyType(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255), nullable=False)

    def __repr__(self):
        return str(self.id)
and in forms
from wtforms.ext.sqlalchemy.fields import QuerySelectField

class PropertyEditor(Form):
    property_type = QuerySelectField(
        'Property Type',
        query_factory=lambda: models.PropertyType.query,
        allow_blank=False
    )
    # other remaining fields
Hope this helps.
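For completeness, here is a minimal sketch of how that field might be consumed in a Flask view; the route, view name, and template are illustrative assumptions, not part of the answer above. Note also that in WTForms 3 this import moved to the separate wtforms-sqlalchemy package (from wtforms_sqlalchemy.fields import QuerySelectField).
# Hypothetical view using PropertyEditor; `app` and the template name are assumed.
from flask import render_template, request

@app.route('/property/edit', methods=['GET', 'POST'])
def edit_property():
    form = PropertyEditor(request.form)
    if request.method == 'POST' and form.validate():
        # QuerySelectField's .data is the selected PropertyType instance,
        # not its id, so it can be assigned to a relationship directly.
        selected_type = form.property_type.data
        print(selected_type.title)
    return render_template('property_edit.html', form=form)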

The solution might look like the code below.
Let's assume you have two files: routes.py and views.py
In the routes.py file, put this:
from flask_wtf import FlaskForm
from wtforms import SelectField

# Here we have a class to render the ski select field
class SkiForm(FlaskForm):
    ski = SelectField('Ski size')
In the views.py file, put this:
from flask import render_template

# Import your SkiForm class from the `routes.py` file
from routes import SkiForm

# Here you define `cur`
cur = ...

# Now let's define a view that returns the rendered page with the SkiForm
def show_ski_to_user():
    # List of available ski lengths
    cur.execute("SELECT length FROM skipakke_alpin_ski WHERE stock > 0")
    skis = cur.fetchall()

    # Create the form instance
    form = SkiForm()

    # Now add the ski lengths to the options of the select field.
    # It must be a list of (key, value) tuples; fetchall() returns
    # one-element tuples, so unpack the first element of each row.
    form.ski.choices = [(row[0], row[0]) for row in skis]
    # If you want to use an id or other data as the `key`, change it in the list comprehension

    if form.validate():
        # your code goes here
        pass

    return render_template('any_file.html', form=form)
Remember that by default the submitted key value is a string (unicode). If you want int or another data type, use the coerce argument in the SkiForm class, like this:
class SkiForm(FlaskForm):
    ski = SelectField('Ski size', coerce=int)

Solution:
I had quite a similar problem, and here is my workaround:
class SkiForm(FlaskForm):
    ski_size = SelectField("Ski size")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        cur.execute("SELECT length FROM skipakke_alpin_ski WHERE stock > 0")
        skis = cur.fetchall()
        # fetchall() returns one-element tuples, so unpack the length from each row
        self.ski_size.choices = [(row[0], row[0]) for row in skis]
Explanation:
We modified the original FlaskForm so that it executes the database query each time the form is created.
That way the SkiForm's choices always stay up to date.
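A minimal usage sketch under the same assumptions (a Flask app and an existing cur cursor; the route and template names are made up for illustration):
from flask import render_template

@app.route('/ski', methods=['GET', 'POST'])
def choose_ski():
    # Instantiating the form runs the SELECT, so the choices are always fresh
    form = SkiForm()
    if form.validate_on_submit():
        chosen_length = form.ski_size.data  # the submitted ski length
        # ... handle the chosen length here
    return render_template('ski.html', form=form)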

I managed to solve this by doing the following:
def fetch_available_items(table_name, column):
    # NOTE: table_name and column are currently unused; the query is hard-coded
    with sqlite3.connect('database.db') as con:
        cur = con.cursor()
        cur.execute("SELECT length FROM skipakke_alpin_ski WHERE stock > 0")
        return cur.fetchall()

class SkipakkeForm(Form):
    alpin_ski = SelectField('Select your ski size', choices=[])

@app.route('/skipakke')
def skipakke():
    form = SkipakkeForm()
    # Clear the SelectField on page load
    form.alpin_ski.choices = []
    for row in fetch_available_items('skipakke_alpin_ski', 'length'):
        stock = str(row[0])
        form.alpin_ski.choices += [(stock, stock + ' cm')]

Related

django-import-export append column to imported .xlsx [duplicate]

I'm trying to import some data from a CSV file into a Django database using django-import-export, with a foreign key (location). What I want to achieve is that the location_id is passed via the request URL.
value,datetime,location
4.46,2020-01-01,1
4.46,2020-01-02,1
My urls look like this, so I want "location_id" to be passed into the uploaded csv file:
urlpatterns = [
    ...
    ...
    path('..../<int:location_id>/upload', views.simple_upload, name='upload'),
]
My view looks like this:
def simple_upload(request, location_id):
    if request.method == 'POST':
        rainfall_resource = RainfallResource()
        dataset = Dataset()
        new_rainfall = request.FILES['myfile']
        imported_data = dataset.load(new_rainfall.read().decode("utf-8"), format="csv")
        try:
            result = rainfall_resource.import_data(dataset, dry_run=True)  # Test the data import
        except Exception as e:
            return HttpResponse(e, status=status.HTTP_400_BAD_REQUEST)
        if not result.has_errors():
            rainfall_resource.import_data(dataset, dry_run=False)  # Actually import now
    return render(request, '/import.html')
My ModelResource looks like this:
class RainfallResource(resources.ModelResource):
    location_id = fields.Field(
        column_name='location_id',
        attribute='location_id',
        widget=ForeignKeyWidget(Location, 'Location'))

    class Meta:
        model = Rainfall

    def before_import_row(self, row, **kwargs):
        row['location'] = location_id
The manipulation works when I hardcode "location_id" like:
def before_import_row(self, row, **kwargs):
    row['location'] = 123
However, I do not understand how to pass the location_id argument from the "url" to the "before_import_row" function. Help would be highly appreciated:-)
I think you will have to modify your imported_data in memory, before importing.
You can use the tablib API to update the dataset:
# import the data as per your existing code
imported_data = dataset.load(new_rainfall.read().decode("utf-8"), format="csv")
# create an array containing the location_id
location_arr = [location_id] * len(imported_data)
# use the tablib API to add a new column, and insert the location array values
imported_data.append_col(location_arr, header="location")
By using this approach, you won't need to override before_import_row()
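Putting it together, a hedged sketch of how the view from the question could apply this; it reuses the names from the question, and the template path is whatever you already render:
from django.shortcuts import render
from tablib import Dataset

def simple_upload(request, location_id):
    if request.method == 'POST':
        rainfall_resource = RainfallResource()
        new_rainfall = request.FILES['myfile']
        dataset = Dataset()
        imported_data = dataset.load(new_rainfall.read().decode("utf-8"), format="csv")
        # add a constant location column so the resource's ForeignKeyWidget can resolve it
        imported_data.append_col([location_id] * len(imported_data), header="location")
        result = rainfall_resource.import_data(imported_data, dry_run=True)  # test the import
        if not result.has_errors():
            rainfall_resource.import_data(imported_data, dry_run=False)  # actually import
    return render(request, 'import.html')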

Flask Babel convert to original language when insert to database

I have a set of SelectFields built with Flask-WTF, and I translate their default values with Flask-Babel.
Here is the snippet of my code:
from flask_babel import _, lazy_gettext as _l

class PaymentStatus(enum.Enum):
    PENDING = _l('PENDING')
    COMPLETED = _l('COMPLETED')
    EXPIRED = _l('EXPIRED')

    def __str__(self):
        return '{}'.format(self.value)

payment_status = [(str(_l(y)), y) for y in (PaymentStatus)]

class PaymentForm(FlaskForm):
    status_of_payment = SelectField(_l('Payment Status'), choices=payment_status)
    # ...
    # ...
# ...
And here is my model look like:
class Payment(db.Model):
    __tablename__ = 'payment'
    id = db.Column(db.Integer, primary_key=True)
    status_of_payment = db.Column(db.Enum(PaymentStatus, name='status_of_payment'))
    # ...
    # ...
And when I try to insert the value from the Flask-WTF form into my database, I get an error.
Here is the snippet of how I insert it into the database:
if form.validate_on_submit():
    payment = Payment(
        # payment_status=form.status_of_payment.data
        payment_status=PaymentStatus.PENDING.value
        # ...
        # ...
    )
The value of the enum PENDING is also converted to my browser's preferred language, so I get this error message:
sqlalchemy.exc.StatementError: (builtins.LookupError) "MENGUNGGU" is
not among the defined enum values
For more information: "MENGUNGGU" is Indonesian for "PENDING".
So the problem is that when I insert the SelectField value, it is converted to my preferred browser language, and my PostgreSQL database rejects it because that value is not defined in my enum type.
So, the point of my question is: can we exclude the i18n & l10n value from Flask-Babel when inserting the value into a database, or what else should I do to handle this?
You should swap values in your choices because the first element in the tuple is the actual value that will be submitted and the second one is the presentation:
payment_status = [(y.name, _l(str(y.value))) for y in PaymentStatus]
Thanks to this you'll have translated names and proper values submitted.
Enum's name should be stored in your database instead of the value.
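A short sketch of what that can look like end to end; the field and model names follow the question, and this is an illustration rather than the questioner's final code:
from flask_babel import lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import SelectField

# submit the enum *name*, display the translated value
payment_status = [(y.name, _l(str(y.value))) for y in PaymentStatus]

class PaymentForm(FlaskForm):
    status_of_payment = SelectField(_l('Payment Status'), choices=payment_status)

# when saving, look the member up by name so the untranslated enum member
# is stored, regardless of the browser language
if form.validate_on_submit():
    payment = Payment(
        status_of_payment=PaymentStatus[form.status_of_payment.data]
    )
    db.session.add(payment)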

Django JSONField filtering Queryset where filter value is annotated sum value

How do I properly write the filter code so it returns only the Animals that are not sold out?
I'm using POSTGRES db, python3.6 and Django 2.1.7 (currently there are v2.2a1,v2.2b1 pre-release versions)
My question is an extension of Django JSONField filtering,
which filters on a hard-coded value in the filter.
My case requires an annotated value in the filter.
models.py (I know the models could be optimized, but I already have a huge amount of records going back more than 3 years):
from django.db import models
from django.contrib.postgres.fields import JSONField
from django.utils.translation import gettext_lazy as _  # assumed import for the _() calls below

class Animal(models.Model):
    data = JSONField(verbose_name=_('data'), blank=True)

class Sell(models.Model):
    count = models.IntegerField(verbose_name=_('data'), blank=True)
    animal = models.ForeignKey('Animal',
                               on_delete=models.CASCADE,
                               related_name="sales_set",
                               related_query_name="sold"
                               )
In my API I want to return only the animals that still have something left to sell:
animal = Animal(data={'type':'dog', 'bread':'Husky', 'count':20})
What I want to filter on should be similar to animal.data['count'] > sum(animal.sales_set__count)
Animal.objects.annotate(animals_sold=Sum('sales_set__count')) \
    .filter(data__contains=[{'count__gt': F('animals_sold')}])
With the code above I get builtins.TypeError:
TypeError: Object of type 'F' is not JSON serializable
If I remove the F it won't filter on the value of animals_sold but on the text 'animals_sold', which doesn't help:
Animal.objects.annotate(animals_sold=Sum('sales_set__count')) \
    .filter(data__contains=[{'count__gt': 'animals_sold'}])
Edit 1:
There is one more topic here that can be linked:
Postgres: values query on json key with django
Edit 2:
Here is some additional code with custom transform classes, as suggested in the related Django ticket:
from django.db.models.constants import LOOKUP_SEP
from django.db.models import F, Q, Prefetch, Sum, Count  # Count added: it is used by the querysets below
from django.db.models import IntegerField, FloatField, ExpressionWrapper
from django.db.models.functions import Cast
from django.contrib.postgres.fields import JSONField
from django.contrib.postgres.fields.jsonb import KeyTransform, KeyTextTransform

class KeyIntegerTransform(KeyTransform):  # similar to KeyTextTransform
    """ transform the data.count to integer """
    operator = '->>'
    nested_operator = '#>>'
    output_field = IntegerField()

class KeyIntTransformFactory:
    """ helper class for the JSONF() """

    def __init__(self, key_name):
        self.key_name = key_name

    def __call__(self, *args, **kwargs):
        return KeyIntegerTransform(self.key_name, *args, **kwargs)

class JSONF(F):
    """ for filtering on JSON Fields """

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        rhs = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field_list = self.name.split(LOOKUP_SEP)
        for name in field_list[1:]:
            rhs = KeyIntegerTransform(name)(rhs)
        return rhs
queryset filtering I tried so far:
q = q.filter(data__contains={'count__gt':JSONF('sold_count_sum')})
# err: Object of type 'JSONF' is not JSON serializable
q = q.filter(sold_count_sum__lt=Cast(JSONF('data_count'), IntegerField()))
# err: operator does not exist: text ->> unknown
q = q.filter(sold_count_sum__lt=Cast(JSONF('data__count'), IntegerField()))
# err: 'KeyIntegerTransform' takes exactly 1 argument (0 given)
q = q.filter(sold_count_sum__lt=KeyIntegerTransform('count', 'data'))
# err: operator does not exist: text ->> unknown
q = q.filter(sold_count_sum__lt=F('data__count'))
# err: operator does not exist: text ->> unknown
q = q.filter(sold_count_sum__lt=F('data_count'))
# err: operator does not exist: text ->> unknown
q = q.filter(sold_count_sum__lt=JSONF('data_count'))
# err: operator does not exist: text ->> unknown
q = q.filter(sold_count_sum__lt=JSONF('data__count'))
# err: 'KeyIntegerTransform' takes exactly 1 argument (0 given)
q = q.filter(sold_count_sum__lt=JSONF('data', 'count'))
# err: JSONF.__init__() takes 2 params
queryset = Animal.objects.annotate(
    json=Cast(F('data'), JSONField()),
    sold_count_sum=Sum('sold__count'),
    sold_times=Count('sold'),
).filter(
    Q(sold_times=0) | Q(sold_count_sum__lt=Cast(
        KeyTextTransform('count', 'json'), IntegerField())
    ),
    # keyword filtering here ...
    # client = client
)
This is what works for me, but it could probably be optimized with a good JSONF field.
We can also remove the json annotation and use a cast version of data directly (which may give some performance improvement):
queryset = Animal.objects.annotate(
    sold_count_sum=Sum('sold__count'),
    sold_times=Count('sold'),
).filter(
    Q(sold_times=0) | Q(sold_count_sum__lt=Cast(
        KeyTextTransform('count', Cast(
            F('data'), JSONField())), IntegerField()
        )
    ),
    # keyword filtering here ...
    # client = client
)
How about something like this:
from django.db.models import Sum, F
from django.contrib.postgres.fields.jsonb import KeyTransform

Animal.objects.annotate(
    animals_sold=Sum('sales_set__count'),
    data_count=KeyTransform('count', 'data'),
).filter(data_count__gt=F('animals_sold'))
The F class doesn't support a JSONField at this time, but you might try making your own custom expression as described in the related ticket.

SQLAlchemy logging of changes with date and user

This is very similar to another question that's over 3 years old: What's a good general way to look SQLAlchemy transactions, complete with authenticated user, etc?
I'm working on an application where I'd like to log all changes to particular tables. There's currently a really good "recipe" that does versioning, but I need to modify it to instead record a datetime when the change occurred and a user id of who made the change. I took the history_meta.py example that's packaged with SQLAlchemy and made it record times instead of version numbers, but I'm having trouble figuring out how to pass in a user id.
The question I referenced above suggests including the user id in the session object. That makes a lot of sense, but I'm not sure how to do that. I've tried something simple like session.userid = authenticated_userid(request) but in history_meta.py that attribute doesn't seem to be on the session object any more.
I'm doing all of this in the Pyramid framework and the session object that I'm using is defined as DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension())). In a view I do session = DBSession() and then proceed to use session. (I'm not really sure if that's necessary, but that's what's going on)
Here's my modified history_meta.py in case someone might find it useful:
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import mapper, class_mapper, attributes, object_mapper
from sqlalchemy.orm.exc import UnmappedClassError, UnmappedColumnError
from sqlalchemy import Table, Column, ForeignKeyConstraint, DateTime
from sqlalchemy import event
from sqlalchemy.orm.properties import RelationshipProperty
from datetime import datetime
def col_references_table(col, table):
    for fk in col.foreign_keys:
        if fk.references(table):
            return True
    return False
def _history_mapper(local_mapper):
    cls = local_mapper.class_

    # set the "active_history" flag
    # on column-mapped attributes so that the old version
    # of the info is always loaded (currently sets it on all attributes)
    for prop in local_mapper.iterate_properties:
        getattr(local_mapper.class_, prop.key).impl.active_history = True

    super_mapper = local_mapper.inherits
    super_history_mapper = getattr(cls, '__history_mapper__', None)

    polymorphic_on = None
    super_fks = []

    if not super_mapper or local_mapper.local_table is not super_mapper.local_table:
        cols = []
        for column in local_mapper.local_table.c:
            if column.name == 'version_datetime':
                continue

            col = column.copy()
            col.unique = False

            if super_mapper and col_references_table(column, super_mapper.local_table):
                super_fks.append((col.key, list(super_history_mapper.local_table.primary_key)[0]))

            cols.append(col)

            if column is local_mapper.polymorphic_on:
                polymorphic_on = col

        if super_mapper:
            super_fks.append(('version_datetime', super_history_mapper.base_mapper.local_table.c.version_datetime))
            cols.append(Column('version_datetime', DateTime, default=datetime.now, nullable=False, primary_key=True))
        else:
            cols.append(Column('version_datetime', DateTime, default=datetime.now, nullable=False, primary_key=True))

        if super_fks:
            cols.append(ForeignKeyConstraint(*zip(*super_fks)))

        table = Table(local_mapper.local_table.name + '_history', local_mapper.local_table.metadata,
                      *cols
                      )
    else:
        # single table inheritance. take any additional columns that may have
        # been added and add them to the history table.
        for column in local_mapper.local_table.c:
            if column.key not in super_history_mapper.local_table.c:
                col = column.copy()
                col.unique = False
                super_history_mapper.local_table.append_column(col)
        table = None

    if super_history_mapper:
        bases = (super_history_mapper.class_,)
    else:
        bases = local_mapper.base_mapper.class_.__bases__

    versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {})

    m = mapper(
        versioned_cls,
        table,
        inherits=super_history_mapper,
        polymorphic_on=polymorphic_on,
        polymorphic_identity=local_mapper.polymorphic_identity
    )
    cls.__history_mapper__ = m

    if not super_history_mapper:
        local_mapper.local_table.append_column(
            Column('version_datetime', DateTime, default=datetime.now, nullable=False, primary_key=False)
        )
        local_mapper.add_property("version_datetime", local_mapper.local_table.c.version_datetime)
class Versioned(object):
    @declared_attr
    def __mapper_cls__(cls):
        def map(cls, *arg, **kw):
            mp = mapper(cls, *arg, **kw)
            _history_mapper(mp)
            return mp
        return map

def versioned_objects(iter):
    for obj in iter:
        if hasattr(obj, '__history_mapper__'):
            yield obj
def create_version(obj, session, deleted=False):
    obj_mapper = object_mapper(obj)
    history_mapper = obj.__history_mapper__
    history_cls = history_mapper.class_

    obj_state = attributes.instance_state(obj)

    attr = {}

    obj_changed = False

    for om, hm in zip(obj_mapper.iterate_to_root(), history_mapper.iterate_to_root()):
        if hm.single:
            continue

        for hist_col in hm.local_table.c:
            if hist_col.key == 'version_datetime':
                continue

            obj_col = om.local_table.c[hist_col.key]

            # get the value of the
            # attribute based on the MapperProperty related to the
            # mapped column. this will allow usage of MapperProperties
            # that have a different keyname than that of the mapped column.
            try:
                prop = obj_mapper.get_property_by_column(obj_col)
            except UnmappedColumnError:
                # in the case of single table inheritance, there may be
                # columns on the mapped table intended for the subclass only.
                # the "unmapped" status of the subclass column on the
                # base class is a feature of the declarative module as of sqla 0.5.2.
                continue

            # expired object attributes and also deferred cols might not be in the
            # dict. force it to load no matter what by using getattr().
            if prop.key not in obj_state.dict:
                getattr(obj, prop.key)

            a, u, d = attributes.get_history(obj, prop.key)

            if d:
                attr[hist_col.key] = d[0]
                obj_changed = True
            elif u:
                attr[hist_col.key] = u[0]
            else:
                # if the attribute had no value.
                attr[hist_col.key] = a[0]
                obj_changed = True

    if not obj_changed:
        # not changed, but we have relationships. OK
        # check those too
        for prop in obj_mapper.iterate_properties:
            if isinstance(prop, RelationshipProperty) and \
                    attributes.get_history(obj, prop.key).has_changes():
                obj_changed = True
                break

    if not obj_changed and not deleted:
        return

    attr['version_datetime'] = obj.version_datetime
    hist = history_cls()
    for key, value in attr.items():
        setattr(hist, key, value)
    session.add(hist)
    print(dir(session))
    obj.version_datetime = datetime.now()
def versioned_session(session):
    @event.listens_for(session, 'before_flush')
    def before_flush(session, flush_context, instances):
        for obj in versioned_objects(session.dirty):
            create_version(obj, session)
        for obj in versioned_objects(session.deleted):
            create_version(obj, session, deleted=True)
UPDATE:
Okay, it seems that in the before_flush() method the session I get is of type sqlalchemy.orm.session.Session, whereas the session I attached the user_id to was sqlalchemy.orm.scoping.scoped_session. So, at some point an object layer is stripped off. Is it safe to assign the user_id to the Session within the scoped_session? Can I be sure that it won't be there for other requests?
Old question, but still very relevant.
You should avoid trying to place web session information on the database session. It combines unrelated concerns, and each has its own lifecycle (which don't match). Here's an approach I use in Flask with SQLAlchemy (not Flask-SQLAlchemy, but that should work too). I've tried to comment where Pyramid would be different.
from flask import has_request_context  # How to check if in a Flask session
from sqlalchemy import inspect
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm.attributes import get_history
from sqlalchemy.event import listen

from YOUR_SESSION_MANAGER import get_user  # This would be something in Pyramid
from my_project import models  # Where your models are defined

def get_object_changes(obj):
    """ Given a model instance, returns dict of pending
    changes waiting for database flush/commit.

    e.g. {
        'some_field': {
            'before': *SOME-VALUE*,
            'after': *SOME-VALUE*
        },
        ...
    }
    """
    inspection = inspect(obj)
    changes = {}
    for attr in class_mapper(obj.__class__).column_attrs:
        if getattr(inspection.attrs, attr.key).history.has_changes():
            if get_history(obj, attr.key)[2]:
                before = get_history(obj, attr.key)[2].pop()
                after = getattr(obj, attr.key)
                if before != after:
                    if before or after:
                        changes[attr.key] = {'before': before, 'after': after}
    return changes

def my_model_change_listener(mapper, connection, target):
    changes = get_object_changes(target)
    changes.pop("modify_ts", None)  # remove fields you don't want to track

    user_id = None
    if has_request_context():
        # Call your function to get active user and extract id
        user_id = getattr(get_user(), 'id', None)

    if user_id is None:
        # What do you want to do if user can't be determined
        pass

    # You now have the model instance (target), the user_id who is logged in,
    # and a dictionary of changes.

    # Either do something "quick" with it here or call an async task (e.g.
    # Celery) to do something with the information that may take longer
    # than you want the request to take.

# Add the listener
listen(models.MyModel, 'after_update', my_model_change_listener)
After a bunch of fiddling, I seem to be able to set values on the session object within the scoped_session by doing the following:
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
session = DBSession()
inner_session = session.registry()
inner_session.user_id = "test"
versioned_session(session)
Now the session object being passed around in history_meta.py has a user_id attribute on it which I set. I'm a little concerned about whether this is the right way of doing this as the object in the registry is a thread-local one and the threads are being re-used for different http requests.
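An alternative worth mentioning (my suggestion, not from the original answers): SQLAlchemy sessions expose a per-session info dictionary, which avoids putting ad-hoc attributes on the thread-local registry object. A minimal sketch, assuming the same Pyramid helper as the question:
from pyramid.security import authenticated_userid  # as used in the question
from sqlalchemy.orm import scoped_session, sessionmaker

DBSession = scoped_session(sessionmaker())

def tag_session_with_user(request):
    """Attach the authenticated user id to the session's own info dict."""
    session = DBSession()
    session.info['user_id'] = authenticated_userid(request)
    return session

# Inside create_version()/before_flush(), the same dict is reachable on the
# Session instance SQLAlchemy passes to the hook:
#     user_id = session.info.get('user_id')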
I ran into this old question recently. My requirement is to log all changes to a set of tables.
I'll post the code I ended up with here in case anyone finds it useful. It has some limitations, especially around deletes, but works for my purposes. The code supports logging audit records for selected tables to either a log file, or an audit table in the db.
from app import db
import datetime
from flask import current_app, g
# your own session user goes here
# you'll need an id and an email in that model
from flask_user import current_user as user
import importlib
import logging
from sqlalchemy import event, inspect
from sqlalchemy.orm.attributes import get_history
from sqlalchemy.orm import ColumnProperty, class_mapper
from uuid import uuid4
class AuditManager(object):
    config = {'storage': 'log',
              # define class for the Audit model for your project, if saving audit records in the db
              'auditModel': 'app.models.user_models.Audit'}

    def __init__(self, app):
        if 'AUDIT_CONFIG' in app.config:
            app.before_request(self.before_request_handler)
            self.config.update(app.config['AUDIT_CONFIG'])
            event.listen(
                db.session,
                'after_flush',
                self.db_after_flush
            )
            event.listen(
                db.session,
                'before_flush',
                self.db_before_flush
            )
            event.listen(
                db.session,
                'after_bulk_delete',
                self.db_after_bulk_delete
            )
            if self.config['storage'] == 'log':
                self.logger = logging.getLogger(__name__)
            elif self.config['storage'] == 'db':
                # Load the Audit model class at runtime, so that log file users don't need to define it
                module_name, class_name = self.config['auditModel'].rsplit(".", 1)
                self.AuditModel = getattr(importlib.import_module(module_name), class_name)

    # Create a global request id
    # Use this to group transactions together
    def before_request_handler(self):
        g.request_id = uuid4()

    def db_after_flush(self, session, flush_context):
        for instance in session.new:
            if instance.__tablename__ in self.config['tables']:
                # Record the inserts for this table
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',  # Prefer to list auditable fields explicitly in the model's Meta class
                                      self.get_fields(instance))  # or derive them otherwise
                for attr in auditFields:
                    data[attr] = str(getattr(instance, attr, 'not set'))  # Make every value a string in audit
                self.log_it(session, 'insert', instance, data)

    def db_before_flush(self, session, flush_context, instances):
        for instance in session.dirty:
            # Record the changes for this table
            if instance.__tablename__ in self.config['tables']:
                inspection = inspect(instance)
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',
                                      self.get_fields(instance))
                for attr in auditFields:
                    if getattr(inspection.attrs, attr).history.has_changes():  # We only log the new data
                        data[attr] = str(getattr(instance, attr, 'not set'))
                self.log_it(session, 'change', instance, data)

        for instance in session.deleted:
            # Record the deletes for this table
            # for this to be triggered, you must use this session based delete object construct.
            # Eg: session.delete({query}.first())
            if instance.__tablename__ in self.config['tables']:
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',
                                      self.get_fields(instance))
                for attr in auditFields:
                    data[attr] = str(getattr(instance, attr, 'not set'))
                self.log_it(session, 'delete', instance, data)

    def db_after_bulk_delete(self, delete_context):
        instance = delete_context.query.column_descriptions[0]['type']  # only works for single table deletes
        if delete_context.result.returns_rows:
            # Not sure exactly how after_bulk_delete is expected to work, since the context.results is empty,
            # as delete statements return no results
            for row in delete_context.result:
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',
                                      self.get_fields(instance))
                for attr in auditFields:
                    data[attr] = str(getattr(row, attr, 'not set'))  # Make every value a string in audit
                self.log_it(delete_context.session, 'delete', instance, data)
        else:
            # Audit what we can when we don't have individual rows to look at
            self.log_it(delete_context.session, 'delete', instance,
                        {"rowcount": delete_context.result.rowcount})

    def log_it(self, session, action, instance, data):
        if self.config['storage'] == 'log':
            self.logger.info("request_id: %s, table: %s, action: %s, user id: %s, user email: %s, date: %s, data: %s"
                             % (getattr(g, 'request_id', None), instance.__tablename__, action,
                                getattr(user, 'id', None), getattr(user, 'email', None),
                                datetime.datetime.now(), data))
        elif self.config['storage'] == 'db':
            audit = self.AuditModel(request_id=str(getattr(g, 'request_id', None)),
                                    table=str(instance.__tablename__),
                                    action=action,
                                    user_id=getattr(user, 'id', None),
                                    user_email=getattr(user, 'email', None),
                                    date=datetime.datetime.now(),
                                    data=data
                                    )
            session.add(audit)

    def get_fields(self, instance):
        fields = []
        for attr in class_mapper(instance.__class__).column_attrs:
            fields.append(attr.key)
        return fields
Suggested Model, if you want to store audit records in the database.
class Audit(db.Model):
    __tablename__ = 'audit'
    id = db.Column(db.Integer, primary_key=True)
    request_id = db.Column(db.Unicode(50), nullable=True, index=True, server_default=u'')
    table = db.Column(db.Unicode(50), nullable=False, index=True, server_default=u'')
    action = db.Column(db.Unicode(20), nullable=False, server_default=u'')
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='SET NULL'), nullable=True)
    user_email = db.Column(db.Unicode(255), nullable=False, server_default=u'')
    date = db.Column(db.DateTime, default=db.func.now())
    data = db.Column(JSON)
In settings:
AUDIT_CONFIG = {
    "tables": ['user', 'order', 'batch']
}
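For completeness, a hedged sketch of how this might be wired up at application start-up; the module path for AuditManager is an assumption, while the config keys mirror the answer above:
from flask import Flask
from app.audit import AuditManager  # wherever you placed the class above

def create_app():
    app = Flask(__name__)
    app.config['AUDIT_CONFIG'] = {
        'storage': 'db',  # or 'log' to write to the application log
        'auditModel': 'app.models.user_models.Audit',
        'tables': ['user', 'order', 'batch'],
    }
    # constructing the manager registers the before/after-flush listeners
    AuditManager(app)
    return app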

Querying joined tables in SQLAlchemy and displaying in an ObjectListView

I have an ObjectListView that displays information retrieved from an SQLite DB with SQLAlchemy.
def setupOLV(self):
    self.loanResultsOlv.SetEmptyListMsg("No Loan Records Found")
    self.loanResultsOlv.SetColumns([
        ColumnDefn("Date Issued", "left", 100, "date_issued",
                   stringConverter="%d-%m-%y"),
        ColumnDefn("Card Number", "left", 100, "card_id"),
        ColumnDefn("Student Number", "left", 100, "person_id"),
        ColumnDefn("Forename", "left", 150, "person_fname"),
        ColumnDefn("Surname", "left", 150, "person_sname"),
        ColumnDefn("Reason", "left", 150, "issue_reason"),
        ColumnDefn("Date Due", "left", 100, "date_due",
                   stringConverter="%d-%m-%y"),
        ColumnDefn("Date Returned", "left", 100, "date_returned",
                   stringConverter="%d-%m-%y")
    ])
I also have three models, Loan:
class Loan(DeclarativeBase):
    """
    Loan model
    """
    __tablename__ = "loans"

    id = Column(Integer, primary_key=True)
    card_id = Column(Unicode, ForeignKey("cards.id"))
    person_id = Column(Unicode, ForeignKey("people.id"))
    date_issued = Column(Date)
    date_due = Column(Date)
    date_returned = Column(Date)
    issue_reason = Column(Unicode(50))
    person = relation("Person", backref="loans", cascade_backrefs=False)
    card = relation("Card", backref="loans", cascade_backrefs=False)
Person:
class Person(DeclarativeBase):
    """
    Person model
    """
    __tablename__ = "people"

    id = Column(Unicode(50), primary_key=True)
    fname = Column(Unicode(50))
    sname = Column(Unicode(50))
and Card:
class Card(DeclarativeBase):
    """
    Card model
    """
    __tablename__ = "cards"

    id = Column(Unicode(50), primary_key=True)
    active = Column(Boolean)
I am trying to join the tables (loans and people) in order to retrieve and display the information in my ObjectListView. Here is my query method:
def getQueriedRecords(session, filterChoice, keyword):
    """
    Searches the database based on the filter chosen and the keyword
    given by the user
    """
    qry = session.query(Loan)
    if filterChoice == "person":
        result = qry.join(Person).filter(Loan.person_id == '%s' % keyword).all()
    elif filterChoice == "card":
        result = qry.join(Person).filter(Loan.card_id == '%s' % keyword).all()
    return result
I can retrieve and display every field stored in the loans table, but forename and surname (which should be drawn from the people table, joined on person.id) are blank in my ObjectListView. I have SQL output turned on, so I can see the query, and it is not selecting from the people table at all.
How can I modify my query/ObjectListView to retrieve and display this information?
UPDATE: I have created an example script that is runnable here.
You're only querying for a Loan (qry = session.query(Loan)). Why do you expect something else to be in the results besides what's in the SELECT statement?
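One way to make those related columns usable (my suggestion, not part of the answer above) is to keep querying Loan but eager-load the relationship, and point the OLV columns at it with a callable valueGetter:
from sqlalchemy.orm import joinedload

def getQueriedRecords(session, filterChoice, keyword):
    # eager-load the Person relationship so loan.person is populated up front
    qry = session.query(Loan).options(joinedload(Loan.person))
    if filterChoice == "person":
        qry = qry.filter(Loan.person_id == keyword)
    elif filterChoice == "card":
        qry = qry.filter(Loan.card_id == keyword)
    return qry.all()

# ObjectListView's ColumnDefn accepts a callable as the valueGetter, so the
# name columns can read through the relationship:
#     ColumnDefn("Forename", "left", 150, lambda loan: loan.person.fname)
#     ColumnDefn("Surname", "left", 150, lambda loan: loan.person.sname)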
I admit that I am pretty new to SQLAlchemy myself, but I thought I would share what I use to display results from my queries. I have a program that uses a SQLite DB with 4+ tables and I pull data from 2-3 of them in a single query and display this information in an ObjectListView. I owe Mike Driscoll for his in depth tutorials, particularly wxPython and SqlAlchemy: An Intro to MVC and CRUD.
Here is what I would possibly add/change in your code.
In your model section add a "display" class such as:
class OlvDisplay(object):
    def __init__(self, date_issued, card_id, person_id, fname, sname,
                 issue_reason, date_due, date_returned):
        self.date_issued = date_issued
        self.card_id = card_id
        self.person_id = person_id
        self.person_fname = fname
        self.person_sname = sname
        self.issue_reason = issue_reason
        self.date_due = date_due
        self.date_returned = date_returned
This display class is used in the convertResults definition below and assists with making sure the data is formatted properly for the ObjectListView.
The adjustment to your existing query function:
def getQueriedRecords(session, filterChoice, keyword):
    """
    Searches the database based on the filter chosen and the keyword
    given by the user
    """
    qry = session.query(Loan)
    if filterChoice == "person":
        result = qry.join(Person).filter(Loan.person_id == '%s' % keyword).all()
    elif filterChoice == "card":
        result = qry.join(Person).filter(Loan.card_id == '%s' % keyword).all()
    convertedResults = convertResults(result)
    return convertedResults
What we're doing here is creating a local variable that is essentially running the conversion definition and storing the results for the next line, which returns those results.
And the "Convertor" function:
def convertResults(results):
    finalResults = []
    for record in results:
        result = OlvDisplay(
            record.date_issued,
            record.card_id,
            record.person_id,
            record.person.fname,
            record.person.sname,
            record.issue_reason,
            record.date_due,
            record.date_returned
        )
        finalResults.append(result)
    return finalResults
The important part here are the 2 lines:
record.person.fname
record.person.sname
Since we are wanting to pull information from another table using the established relationship it is important to refer to that relationship to actually see the data.
And to populate the ObjectListView Widget:
theOutput = getQueriedRecords(session, filterChoice, keyword)
self.loanResultsOlv.SetObjects(theOutput)
Hope this helps you out.
-MikeS
