I have some tests that were working when I ran them with regular database objects but are broken now that I am using FactoryBoy factories. I think I understand why they are broken but am struggling with the correct way to set this up.
Here are my factories:
@register
class UserFactory(BaseFactory):
    """User factory."""

    username = Sequence(lambda n: 'user{0}'.format(n))
    email = Sequence(lambda n: 'user{0}@example.com'.format(n))
    # PostGeneration hook: hashes the password after the instance is built.
    password = PostGenerationMethodCall('set_password', 'example')
    active = True

    class Meta:
        """Factory configuration."""

        model = User
@register
class ExperimentFactory(BaseFactory):
    """Experiment factory."""

    date = fake.date_this_decade(before_today=True, after_today=False)
    scanner = Iterator(['GE', 'Sie', 'Phi'])

    class Meta:
        """Factory configuration."""

        model = Experiment

    # SubFactory builds (or reuses) an associated User for each Experiment.
    user = factory.SubFactory(UserFactory)
According to this answer and other examples, FactoryBoy is supposed to be handling the foreign key assignment behind the scenes.
But when I try to initialize my ExperimentFactory object in my fixture, I have a problem.
@pytest.fixture(scope='function')
@pytest.mark.usefixtures('db')
def mocked_scan_service(db, mocker, request):
    """Build a ScanService wired to factory-generated data, with XNAT calls mocked."""
    user = UserFactory(password='myprecious')
    db.session.add(user)
    num_exp, num_scans, exp_id, scan_id, exp_uri, scan_uri = request.param
    for i in range(num_exp):
        experiment = ExperimentFactory(user_id=user.id)
        db.session.add(experiment)
    db.session.commit()
    # NOTE(review): uses the last experiment from the loop — assumes num_exp >= 1.
    ss = ScanService(user.id, experiment.id)
    for i in range(num_scans):
        ss._add_scan_to_database()
    ss.xc.upload_scan = mocker.MagicMock()
    ss.xc.upload_scan.return_value = ('/data/archive/subjects/000001', exp_uri, scan_uri)
    mocker.spy(ss, '_generate_xnat_identifiers')
    ss.param = request.param
    return ss
If I don't pass ExperimentFactory a user id, I get this error:
TypeError: __init__() missing 1 required positional argument: 'user_id'
Here's the model; it makes sense to me that the factory needs an argument user_id to initialize:
class Experiment(SurrogatePK, Model):
    """A user's experiment, during which they are scanned."""

    __tablename__ = 'experiment'
    date = Column(db.Date(), nullable=False)
    scanner = Column(db.String(80), nullable=True)
    num_scans = Column(db.Integer(), nullable=True, default=0)
    xnat_experiment_id = Column(db.String(80), nullable=True)
    xnat_uri = Column(db.String(80), nullable=True)
    user_id = reference_col('user', nullable=False)
    scans = relationship('Scan', backref='experiment')

    def __init__(self, date, scanner, user_id, **kwargs):
        """Create instance."""
        db.Model.__init__(self, date=date, scanner=scanner, user_id=user_id, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        return '<Experiment({date})>'.format(date=self.date)
But if, as written, I explicitly create a user and then pass the user id, it looks like the ExperimentFactory eventually overwrites the foreign key with the SubFactory it generated. So later when I initialize an object called ScanService which must be initialized with a user_id and an experiment_id, my tests fail for one of two reasons. Either I initialize it with the user_id of my explicitly created user, and my tests fail because they don't find any sibling experiments to the experiment that experiment_id belongs to, or I initialize it with experiment.user.id, and my tests fail because they expect one user in the database, and in fact there are two. That latter problem would be fairly easy to work around by rewriting my tests, but that seems janky and unclear. How am I supposed to initialize the ExperimentFactory when the Experiment model requires a user_id for initialization?
If anyone has a better solution feel free to comment, but here's what I realized: it really doesn't matter what I pass in for user_id; I just have to pass in something so that model initialization doesn't fail. And passing in user=user at the same time creates the situation I want: all experiments belong to the same user. Now all my tests pass. Here's the modified fixture code; everything else remained the same:
@pytest.fixture(scope='function')
@pytest.mark.usefixtures('db')
def mocked_scan_service(db, mocker, request):
    """Fixture variant: all experiments share one explicitly-created user."""
    num_exp, num_scans, exp_id, scan_id, exp_uri, scan_uri = request.param
    user = UserFactory(password='myprecious')
    for i in range(num_exp):
        # user_id only satisfies Experiment.__init__; passing user=user keeps
        # the SubFactory from creating a second User, so all experiments
        # belong to the same user.
        experiment = ExperimentFactory(user_id=user.id, user=user)
        db.session.add(experiment)
    db.session.commit()
    ss = ScanService(experiment.user.id, experiment.id)
    for i in range(num_scans):
        ss._add_scan_to_database()
    ss.xc.upload_scan = mocker.MagicMock()
    ss.xc.upload_scan.return_value = ('/data/archive/subjects/000001', exp_uri, scan_uri)
    mocker.spy(ss, '_generate_xnat_identifiers')
    ss.param = request.param
    return ss
Related
I have a data model which has a column that depends on other column values, following the instructions in this page I've created a context-sensitive function which is used to determine the value of this particular column on creation, something like this:
def get_column_value_from_context(context):
    """Context-sensitive default: derive the column value from the insert context."""
    # Instructions to produce value (placeholder in the original post).
    return value
class MyModel(db.Model):
    id = db.Column(db.Integer,
                   primary_key=True)
    # Default is a context-sensitive callable, evaluated at INSERT time.
    my_column = db.Column(db.String(64),
                          nullable=False,
                          default=get_column_value_from_context)
    name = db.Column(db.String(32),
                     nullable=False,
                     unique=True,
                     index=True)
    title = db.Column(db.String(128),
                      nullable=False)
    description = db.Column(db.String(256),
                            nullable=False)
This approach works pretty decent, I can create rows without problems from the command line or using a script.
I've also added a ModelView to the app using Flask-Admin:
class MyModelView(ModelView):
    can_view_details = True
    can_set_page_size = True
    can_export = True


admin.add_view(MyModelView(MyModel, db.session))
This also works pretty decent until I click the Create button in the list view. I receive this error:
AttributeError: 'NoneType' object has no attribute 'get_current_parameters'
Because the implementation of the create_model handler in the ModelView is this:
def create_model(self, form):
    """
    Create model from form.

    :param form:
        Form instance
    """
    try:
        # Instantiated with no arguments — there is no SQLAlchemy execution
        # context here, which is what breaks context-sensitive defaults.
        model = self.model()
        form.populate_obj(model)
        self.session.add(model)
        self._on_model_change(form, model, True)
        self.session.commit()
    except Exception as ex:
        if not self.handle_view_exception(ex):
            flash(gettext('Failed to create record. %(error)s', error=str(ex)), 'error')
            log.exception('Failed to create record.')
        self.session.rollback()
        return False
    else:
        self.after_model_change(form, model, True)

    return model
and here there isn't a context when the model is instantiated. So, I've created a custom view where the model instantiation in the creation handler could be redefined:
class CustomSQLAView(ModelView):
    """ModelView whose create handler delegates instantiation to an overridable hook."""

    def __init__(self, *args, **kwargs):
        super(CustomSQLAView, self).__init__(*args, **kwargs)

    def create_model(self, form):
        """
        Create model from form.

        :param form:
            Form instance
        """
        try:
            # Delegates to get_populated_model so subclasses control construction.
            model = self.get_populated_model(form)
            self.session.add(model)
            self._on_model_change(form, model, True)
            self.session.commit()
        except Exception as ex:
            if not self.handle_view_exception(ex):
                flash(gettext('Failed to create record. %(error)s', error=str(ex)), 'error')
                log.exception('Failed to create record.')
            self.session.rollback()
            return False
        else:
            self.after_model_change(form, model, True)

        return model

    def get_populated_model(self, form):
        # Default behavior: mirror the stock flask-admin implementation.
        model = self.model()
        form.populate_obj(model)
        return model
Now I can redefine the get_populated_model method to instantiate the model in the usual way:
class MyModelView(CustomSQLAView):
    can_view_details = True
    can_set_page_size = True
    can_export = True

    def get_populated_model(self, form):
        # Construct through the model's own __init__ instead of populate_obj.
        model = self.model(
            name=form.name.data,
            title=form.title.data,
            description=form.description.data,
        )
        return model
Despite that this works, I suspect it breaks something. Flask-Admin has several implementation of the populate_obj method of forms and fields, so I would like to keep everything safe.
What is the proper way to do this?
So there are several factors which come into play in this question.
But first, a small fully functional app to illustrate:
from flask_admin.contrib.sqla import ModelView
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
from flask import Flask

app = Flask(__name__)
db = SQLAlchemy(app)
admin = Admin(app)
app.secret_key = 'arstartartsar'


def get_column_value_from_context(context):
    """Context-sensitive default; only invoked when a statement executes."""
    print('getting value from context...')
    if context.isinsert:
        return 'its an insert!'
    else:
        return 'aww, its not an insert'


class MyModel(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    my_column = db.Column(db.String(64), nullable=False, default=get_column_value_from_context)
    name = db.Column(db.String(32), nullable=False, unique=True, index=True)


class MyModelView(ModelView):
    form_excluded_columns = ['my_column']


db.create_all()
admin.add_view(MyModelView(MyModel, db.session))

# Demonstrates that the default fires only at commit (between '3' and '4').
print('1')
x = MyModel(name='hey')
print('2')
db.session.add(x)
print('3')
db.session.commit()
print('4')
When we fire up this app, what's printed is:
1
2
3
getting value from context...
4
So, only after committing, does the get_column_value_from_context function fire up. When, in the create view, you 'create' a new model, you don't have a context yet because you're not committing anything to the database yet. You only get the context to set the default when you're committing your instance to the database! That why, in the source code of flask admin, this happens:
if getattr(default, 'is_callable', False):
value = lambda: default.arg(None) # noqa: E731
They check if the default you specified is a function, and if so, they call it without context (because there is no context yet!).
You have several options to overcome this depending on your goals:
1) Only calculate the value of my_column when adding it to the database:
Simply ignore the field in the form, and it will be determined when you add it to the db.
class MyModelView(ModelView):
    # Option 1: exclude the column from the form entirely; the DB default fires on insert.
    form_excluded_columns = ['my_column']
2) Only calculate it when adding it to the db, but making it editable afterwards:
class MyModelView(ModelView):
    # Option 2: computed on insert, but editable afterwards.
    form_edit_rules = ['name', 'my_column']
    form_create_rules = ['name']
I am making a flask restful api, what I'm having trouble with is marshmallow-sqlalchemy, and webargs
in short here is my sqlalchemy model:
class User(Model):
    id = Column(String, primary_key=True)
    name = Column(String(64), nullable=False)
    email = Column(String(120), nullable=False)
    password = Column(String(128))
    creation_date = Column(DateTime, default=datetime.utcnow)
and this is my schema
class UserSchema(ModelSchema):
    class Meta:
        model = User
        strict = True
        sqla_session = db.session


user_schema = UserSchema()
and an example of my routes using flask-classful and webargs:
class UserView(FlaskView):
    trailing_slash = False
    model = User
    schema = user_schema

    @use_kwargs(schema.fields)
    def post(self, **kwargs):
        try:
            entity = self.model()
            # Only set attributes that were actually supplied in the request.
            for d in kwargs:
                if kwargs[d] is not missing:
                    entity.__setattr__(d, kwargs[d])
            db.session.add(entity)
            db.session.commit()
            o = self.schema.dump(entity).data
            return jsonify({'{}'.format(self.model.__table__.name): o})
        except IntegrityError:
            return jsonify({'message': '{} exist in the database. choose another id'
                            .format(self.model.__table__.name)}), 409

    @use_kwargs(schema.fields)
    def put(self, id, **kwargs):
        entity = self.model.query.filter_by(id=id).first_or_404()
        for d in kwargs:
            if kwargs[d] is not missing:
                entity.__setattr__(d, kwargs[d])
        db.session.commit()
        o = self.schema.dump(entity).data
        return jsonify({'{}'.format(self.model.__table__.name): o})


UserView.register(app)
The problem:
as you can see in my sqlalchemy model, some fields are not nullable, thus my marshmallow schema marks them as required. My get, index, delete and post methods all work perfectly. But I included post for one reason:
when I try to post a new user with no name for example, a 422 http code is raised because name field is required, which is something I want and it is done perfectly.
BUT when editing fields with put request, I wish EVERYTHING in my schema becomes optional.. right now if I wanted to update a user, I must provide not only the id.. but ALL other information required by default even if I didn't change them at all.
in short, how to mark all fields as "optional" when the method is "put"?
EDIT: just like the solution provided by @Mekicha, I made the following changes:
change the schema to make the required fields in my model accept the value None. like this:
class UserSchema(ModelSchema):
    class Meta:
        model = User
        ...

    # missing=None lets PUT omit these fields; they deserialize to None,
    # which the view then skips.
    name = fields.Str(missing=None, required=True)
    email = fields.Email(missing=None, required=True)
    ...
change my put and post method condition from this:
if kwargs[d] is not missing:
to this:
if kwargs[d] is not missing and kwargs[d] is not None:
Since you want to make the fields optional during put, how about setting the missing attribute for the fields. From the doc:
missing is used for deserialization if the field is not found in the
input data
I think a combination of missing and allow_none(which defaults to True when missing=None) as pointed out here: https://github.com/marshmallow-code/marshmallow/blob/dev/src/marshmallow/fields.py#L89 should work for you
I've hacked my way to getting my code to work, but I'm pretty sure I'm not doing it as it was intended.
My constraint is I want to have separate DB and UI layers, so I have all the DB-logic encapsulated in SPs/functions that are called from Django's view layer. I tried doing this using the included managers, but kept getting this error:
Manager isn't accessible via %s instances" % cls.__name__)
So, I just removed the manager sub-class and kept going. It works with some extra hacks, but it doesn't feel right. My question is, how do I get my code to work, but still inheriting the stuff from the appropriate managers (i.e. BaseUserManager)? Here's the code:
models.py
from __future__ import unicode_literals
from django.db import models
from UsefulFunctions.dbUtils import *
from django.contrib.auth.models import AbstractBaseUser
class MyUserManager():
    # Bypassing BaseUserManager which includes these methods:
    # normalize_email, make_random_password, get_by_natural_key

    # Create new user
    def create_user(self, password, usertype=None, firstname=None, lastname=None, phonenumber=None, emailaddress=None):
        user = MyUser(  # TO-DO: Replace MyUser with "get_user_model" reference
            userid=None,
            usertype=usertype,
            firstname=firstname,
            lastname=lastname,
            phonenumber=phonenumber,
            emailaddress=emailaddress
        )
        # Hash and save password
        user.set_password(password)
        # Save user data
        user.save()
        return user

    def upsertUser(self, myUser):
        return saveDBData('SP_IGLUpsertUser',
                          (
                              myUser.userid,
                              myUser.usertype,
                              myUser.firstname,
                              myUser.lastname,
                              myUser.phonenumber,
                              myUser.emailaddress,
                              myUser.password,
                              myUser.last_login,
                              None,
                          )
                          )
# Create custom base user
class MyUser(AbstractBaseUser):
# Define attributes (inherited class includes password + other fields)
userid = models.IntegerField(unique=True)
usertype = models.CharField(max_length=2)
firstname = models.CharField(max_length=100)
lastname = models.CharField(max_length=100)
phonenumber = models.CharField(max_length=25)
emailaddress = models.CharField(max_length=250)
# Define data manager
MyUserMgr = MyUserManager()
# Create new constructor
def __init__(self, userid = None, usertype = None, firstname = None, lastname = None, phonenumber = None, emailaddress = None):
super(MyUser, self).__init__() # TO-DO: Convert MyUser to get_user_model()
self.userid = userid
self.usertype = usertype
self.firstname = firstname
self.lastname = lastname
self.phonenumber = phonenumber
self.emailaddress = emailaddress
# Define required fields for AbstractBaseUser class
USERNAME_FIELD = 'userid' # specify how Django recognizes the user
EMAIL_FIELD = 'emailaddress'
REQUIRED_FIELDS = ['usertype','firstname','lastname'] # email and password are required by default
# Define class meta info
class Meta:
managed = False
db_table = 'userprofile'
# Required methods
def get_full_name(self):
return self.firstname + " " + self.lastname + " (" + self.userid + ")"
def get_short_name(self):
return self.userid
def save(self):
return self.MyUserMgr.upsertUser(self)
# Define model managers (interface between DB and objects)
class ItemDataManager():
def getAllItems(self):
return getDBData('SP_IGLGetItem', (None,)) # Use tuple instead of array for input parameters
def getItem(self, myItem):
return getDBData('SP_IGLGetItem', (myItem.itemid,))
def getItemDetail(self, myItem):
return getDBData('SP_IGLGetItemDetail', (myItem.itemid,))
def upsertItem(self, myItem):
return saveDBData('SP_IGLUpsertItem',
(
myItem.itemid,
myItem.itemname,
myItem.itemdescription,
myItem.itemcontactuserid,
)
)
def deleteItem(self, myItem):
return deleteDBData('SP_IGLDeleteItem', (myItem.itemid, None))
# Define data models (i.e. tables)
class Item(models.Model):
# Model properties
itemid = models.IntegerField
itemname = models.CharField(max_length=100)
itemdescription = models.CharField(max_length=5000)
itemcontactuserid = models.IntegerField
# Create Item Data Manager instance
myItemMgr = ItemDataManager()
# Create new constructor
def __init__(self, itemid = None):
super(Item, self).__init__()
self.itemid = itemid
# Define static methods (don't depend on object instance)
#staticmethod
def get_all():
return ItemDataManager().getAllItems()
# Define instance methods
def get(self):
return self.myItemMgr.getItem(self)
# Define instance methods
def get_detail(self):
return self.myItemMgr.getItemDetail(self)
def save(self):
return self.myItemMgr.upsertItem(self)
def delete(self):
return self.myItemMgr.deleteItem(self)
Sample call:
from django.contrib.auth import get_user_model

get_user_model().MyUserMgr.create_user('mypass', 'AD', 'Joe', 'Smith', '1233', 'joe@smith.com')
This is the line that's giving me trouble:
def save(self):
    # Fails once MyUserMgr subclasses BaseUserManager: Django managers are
    # not accessible from instances (self), only from the model class.
    return self.MyUserMgr.upsertUser(self)
Right now, it works fine. But when I subclass BaseUserManager, I can't get it to work. What am I doing wrong? How should I restructure the code/references to properly use the included manager classes?
I've read all the relevant posts. I'm guessing the answer is in there somewhere, but it's all a jumbled mess to me at this point.
I am using:
Django 1.11
Python 2.7
Postgres 9.6
The error is caused by you trying to access the model manager from the instance.
In save() you're dealing with an instance of the model to be saved, so you can't access the manager. self is an instance (object) of the class, not the class itself.
First of all, I'd swap to the standard django approach with your manager which would be objects = MyUserMgr() so then you can do MyUserModel.objects.all(), MyUserModel.objects.upsertUser() etc.
Normally in Django you'd use a model manager to run queries that you want to use a lot so that you don't have to duplicate them in your views/forms etc.
Then you can just stick to saving the instance in the model save() method to start to try to simplify what you're doing because you've got quite complex already.
Have a look at the docs for Managers; https://docs.djangoproject.com/en/1.11/topics/db/managers/#managers
Then have a look at this really simple approach to extending the user model https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html
I am creating a Point of Sales application, with the typical data hierarchy :
Company->branches->Sales->SaleData, this is the model definition (Note that, the User model already prepare and working as a flask-login compatible model) :
from flask_sqlalchemy import SQLAlchemy
from main import db
from collections import OrderedDict
class Users(db.Model, object):
    '''
    Adding object to turn sqlalchemy into json object
    '''
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(60), unique=True)
    firstname = db.Column(db.String(20))
    lastname = db.Column(db.String(20))
    password = db.Column(db.String)
    email = db.Column(db.String(100), unique=True)
    role = db.Column(db.String(20))
    active = db.Column(db.Boolean)
    company_id = db.Column(db.Integer, db.ForeignKey('companies.id'))

    def __init__(self, username=None, password=None, email=None, firstname=None, lastname=None):
        self.username = username
        self.email = email
        self.firstname = firstname
        self.lastname = lastname
        self.password = password
        self.active = True
        self.role = 'Admin'

    # flask-login integration methods
    def is_authenticated(self):
        return True

    def is_active(self):
        return self.active

    def is_anonymous(self):
        return False

    def get_id(self):
        return unicode(self.id)

    def _asdict(self):
        '''
        Thanks to http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask
        '''
        result = OrderedDict()
        for key in self.__mapper__.c.keys():
            result[key] = getattr(self, key)
        return result
class Companies(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True)
    address = db.Column(db.String)
    users = db.relation('Users', backref=db.backref('users'))
    token = db.Column(db.String)  # for identification of client
    # Plain relationship: loads branches as an InstrumentedList (no .filter()).
    branches = db.relationship("Branches")

    def __init__(self, name=None, address=None, token=None):
        self.name = name
        self.address = address
        self.token = token
class Branches(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True)
    address = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    token = db.Column(db.String)  # for identification of client
    company_id = db.Column(db.Integer, db.ForeignKey('companies.id'))
    # lazy='dynamic' makes .sales a Query object, so filter()/all() work here.
    sales = db.relation('Sales',
                        backref=db.backref('sales', lazy='dynamic'),
                        cascade="all, delete-orphan",
                        lazy='dynamic',
                        passive_deletes=True)

    def __init__(self, name=None, address=None, token=None, user_id=None):
        self.name = name
        self.address = address
        self.token = token
        self.user_id = user_id
class Sales(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    day = db.Column(db.Date)
    branch_id = db.Column(db.Integer, db.ForeignKey('branches.id'))
    data = db.relationship("SaleData")

    def __init__(self, day=None):
        self.day = day
class SaleData(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    sale_id = db.Column(db.Integer, db.ForeignKey('sales.id'))
    cash_start_of_day = db.Column(db.Integer)
    cash_end_of_day = db.Column(db.Integer)
    income = db.Column(db.Integer)  # which is end - start

    def __init__(self, cash_start_of_day=None, cash_end_of_day=None, income=None):
        self.cash_start_of_day = cash_start_of_day
        self.cash_end_of_day = cash_end_of_day
        self.income = income
Now, if I try to add a Sales data to the branches, it didn't happen if I do this :
branch1 = company1.branches.filter().all()
I can only do this:
branch1 = company1.branches[0]
If I don't use that [] operator, I get this error message: AttributeError: 'InstrumentedList' object has no attribute 'filter'. I have already browsed another answer here on SO; it has to do with the lazy setting in the backref definition, so I have already modified my current model
But it seems like I am missing something here.. any clue?
Thanks!
EDIT 1 : Unit test added & User Model added too
I already got a concise answer from Mark Hildreth, and it saves me a lot! Because of that, I am going to put here the complete unit test of this model. I am sure it will help newbies out there in their very first step in SQLAlchemy. So, here goes :
import unittest

from main import db
import models
import md5
import helper


class DbTest(unittest.TestCase):
    def setUp(self):
        # Start every test from a clean schema.
        db.drop_all()
        db.create_all()

    def test_user_and_company(self):
        """admin of a company"""
        user1 = models.Users('eko', helper.hash_pass('rahasia'), 'swdev.bali@gmail.com')
        db.session.add(user1)
        db.session.commit()

        """the company"""
        company1 = models.Companies('CDI', 'Glagah Kidul', 'empty')
        db.session.add(company1)
        company1.users.append(user1)
        db.session.commit()
        assert company1.users[0].id == user1.id

        """branches"""
        company1.branches.append(models.Branches(name='Kopjar', address='Penjara Malaysia', token='empty token', user_id=user1.id))
        company1.branches.append(models.Branches(name='Selangor', address='Koperasi Selangor', token='empty token', user_id=user1.id))
        db.session.commit()

        '''sales'''
        # Requires lazy='dynamic' on Companies.branches for .filter() to work.
        branch1 = company1.branches.filter(models.Branches.name == 'Kopjar').first()
        assert branch1.name == 'Kopjar' and branch1.company_id == company1.id
        sale = models.Sales(day='2013-02-02')
        sale.data.append(models.SaleData(cash_start_of_day=0, cash_end_of_day=500000, income=500000))
        branch1.sales.append(sale)
        db.session.commit()
        assert sale.id is not None


if __name__ == '__main__':
    unittest.main()
There may be bad practice in this model or unit test, and I will be delighted if you point that out :)
Thanks!
You may wish to review the "Collection Configuration" section of the documentation. There are a few main, built-in ways to deal with how relationships are handled in SQLAlchemy, and that section of the documentation shows the various ways.
By default, when you have...
class Companies(db.Model):
    ...
    branches = db.relationship("Branches")
    ...
Loading a Company will load all of the branches in (and by default, it loads them into a list). Therefore, after retrieving a company, company.branches returns to you a list of branches. Because it is a list, it doesn't have functions such as filter() or all(). If you are not expecting a large list of branches, this might be preferred since it might make more sense for you to use branches as a list rather than as a query object. Having this as a list allows you to do things such as...
company = session.query(Companies).first()
my_branch = Branches(...)
company.branches.append(my_branch)
session.commit()
This will properly create the new Branches object without needing to add it specifically to the session (which I think is pretty nifty).
As a side note, if you were to do type(company.branches), you would not get <type 'list'>, because in order to pull off this magic, SQLAlchemy will actually set branches to an object type that works LIKE a list, but actually has additional SQLAlchemy-specific info. This object type, if you haven't guessed, is the "InstrumentedList" that you are getting the error message about.
However, you might not want to do this; specifically, this requires you to load in all of the branches at once, and you might only want to load a few in at a time because you have thousands of them (thousands of branches in a company, just imagine the bureaucracy...)
So, you change the relation, as the docs say...
A key feature to enable management of a large collection is the
so-called “dynamic” relationship. This is an optional form of
relationship() which returns a Query object in place of a collection
when accessed. filter() criterion may be applied as well as limits and
offsets, either explicitly or via array slices:
It appears that this is what you want to do if you want to be able to do things like company.branches.filter(...).all(). To do this, you would do as the docs show, by making the lazy attribute of the relationship "dynamic"...
class Companies(db.Model):
    ...
    # 'dynamic' returns a Query instead of a list, enabling .filter()/.all().
    branches = db.relationship("Branches", lazy='dynamic')
    ...
It looks like you've done this already for the branches -> sales relationship, but you haven't for the company -> branches relationship, which is what is giving you the error.
Is it possible to create Column in SQLAlchemy which is going to be automatically populated with time when it inserted/updated last time ?
I created models, inherited from Base class
class Base(object):
    def __tablename__(self):
        return self.__name__.lower()

    id = Column(Integer, primary_key=True)
    # Server-side default: set on INSERT; does not auto-update on UPDATE.
    last_time = Column(TIMESTAMP, server_default=func.now())


Base = declarative_base(cls=Base)


class EntityModel(Base):
    __tablename__ = 'entities'
    settlement_id = Column(Integer, ForeignKey('settlements.id'), nullable=False)
    type = Column(String(20), nullable=False)
    level = Column(Integer, nullable=False, default=0)
    energy = Column(Float, nullable=False, default=0)
    position_x = Column(Integer, default=0)
    position_y = Column(Integer, default=0)

    def __repr__(self):
        return "<Entity('%s')>" % (self.type)
Every time when I update EntityModel I want to the last_time be updated on system function.now(). I can do this on the database level with triggers but I would rather do on application level if is it possible.
In Base class add onupdate in the last statement as follows:
from sqlalchemy.sql import func
last_time = Column(TIMESTAMP, server_default=func.now(), onupdate=func.current_timestamp())
If you use MySQL, I believe you can only have one auto-updating datetime column, so we use SQLAlchemy's event triggers instead.
You just attach a listener to the 'before_insert' and 'before_update' hooks and update as necessary:
from sqlalchemy import event


# NOTE(review): the decorator form of SQLAlchemy's event API is
# event.listens_for (event.listen is the plain function-call form).
@event.listens_for(YourModel, 'before_insert')
def update_created_modified_on_create_listener(mapper, connection, target):
    """ Event listener that runs before a record is inserted, and sets the create/modified fields accordingly."""
    target.created = datetime.utcnow()
    target.modified = datetime.utcnow()


@event.listens_for(YourModel, 'before_update')
def update_modified_on_update_listener(mapper, connection, target):
    """ Event listener that runs before a record is updated, and sets the modified field accordingly."""
    # it's okay if this field doesn't exist - SQLAlchemy will silently ignore it.
    target.modified = datetime.utcnow()
I knew nobody would ever remember to add this to new models, so I tried to be clever and add it for them.
All our models inherit from a base object we cleverly called "DatabaseModel". We check who inherits from this object and dynamically add the triggers to all of them.
It's OK if a model doesn't have the created or modified field - SQLAlchemy appears to silently ignore it.
class DatabaseModel(db.Model):
    __abstract__ = True

    # ...other stuff...

    @classmethod
    def _all_subclasses(cls):
        """ Get all subclasses of cls, descending. So, if A is a subclass of B is a subclass of cls, this
        will include A and B.
        (Does not include cls) """
        children = cls.__subclasses__()
        result = []
        while children:
            next = children.pop()
            subclasses = next.__subclasses__()
            result.append(next)
            for subclass in subclasses:
                children.append(subclass)
        return result


def update_created_modified_on_create_listener(mapper, connection, target):
    """ Event listener that runs before a record is inserted, and sets the create/modified fields accordingly."""
    # it's okay if one of these fields doesn't exist - SQLAlchemy will silently ignore it.
    target.created = datetime.utcnow()
    target.modified = datetime.utcnow()


def update_modified_on_update_listener(mapper, connection, target):
    """ Event listener that runs before a record is updated, and sets the modified field accordingly."""
    # it's okay if this field doesn't exist - SQLAlchemy will silently ignore it.
    target.modified = datetime.utcnow()


# Attach the listeners to every model automatically so nobody has to remember
# to add them to new models.
for cls in DatabaseModel._all_subclasses():
    event.listen(cls, 'before_insert', update_created_modified_on_create_listener)
    event.listen(cls, 'before_update', update_modified_on_update_listener)
It is worth noting that, if you follow Rachel Sanders' recommendation, you should definitely do:
if object_session(target).is_modified(target, include_collections=False):
target.modified = datetime.utcnow()
as part of the update_modified_on_update_listener() event listener, otherwise you'll do tons of redundant database updates. Checkout http://docs.sqlalchemy.org/en/latest/orm/events.html#mapper-events under the section "before_update" for more information.