I'm looking to create a new object from q2, which fails because the Question class expects options to be a list of Option objects, and it's receiving a list of dicts instead.
So, unpacking obviously fails with a nested model.
What is the best approach to handle this? Is there anything equivalent to the elegance of **dict unpacking that works for a nested model?
main.py
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import models.base
from models.question import Question
from models.option import Option
engine = create_engine('sqlite:///:memory:')
models.base.Base.metadata.create_all(engine, checkfirst=True)
Session = sessionmaker(bind=engine)
session = Session()
def create_question(q):
    # The following hard coding works:
    # q = Question(text='test text',
    #              frequency='test frequency',
    #              options=[Option(text='test option')]
    #              )
    question = Question(**q)
    session.add(question)
    session.commit()
q1 = {
    'text': 'test text',
    'frequency': 'test frequency'
}
q2 = {
    'text': 'test text',
    'frequency': 'test frequency',
    'options': [
        {'text': 'test option 123'},
    ]
}
create_question(q1)
# create_question(q2) FAILS
base.py
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
question.py
from sqlalchemy import *
from sqlalchemy.orm import relationship
from .base import Base
class Question(Base):
    __tablename__ = 'questions'

    id = Column(Integer, primary_key=True)
    text = Column(String(120), nullable=False)
    frequency = Column(String(20), nullable=False)
    active = Column(Boolean(), default=True, nullable=False)
    options = relationship('Option', back_populates='question')

    def __repr__(self):
        return "<Question(id={0}, text={1}, frequency={2}, active={3})>".format(
            self.id, self.text, self.frequency, self.active)
option.py
from sqlalchemy import *
from sqlalchemy.orm import relationship
from .base import Base
class Option(Base):
    __tablename__ = 'options'

    id = Column(Integer, primary_key=True)
    question_id = Column(Integer, ForeignKey('questions.id'))
    text = Column(String(20), nullable=False)
    question = relationship('Question', back_populates='options')

    def __repr__(self):
        return "<Option(id={0}, question_id={1}, text={2})>".format(
            self.id, self.question_id, self.text)
I liked the answer provided by @Abdou, but wanted to see if I couldn't make it a bit more generic.
I ended up coming up with the following, which should handle any nested model.
from sqlalchemy import event, inspect

@event.listens_for(Question, 'init')
@event.listens_for(Option, 'init')
def received_init(target, args, kwargs):
    for rel in inspect(target.__class__).relationships:
        rel_cls = rel.mapper.class_
        if rel.key in kwargs:
            kwargs[rel.key] = [rel_cls(**c) for c in kwargs[rel.key]]
This listens for the init event of the specified models, checks for relationships matching the kwargs passed in, and converts those values to the class that the relationship maps.
If anyone knows how to set this up so it can work on all models instead of specifying them, I would appreciate it.
Given that you need to create an Option object whenever there is an options key in the dictionary passed to create_question, you can build the options with a comprehension before passing the result to the Question constructor. I would rewrite the function as follows:
def create_question(q):
    # The following hard coding works:
    # q = Question(text='test text',
    #              frequency='test frequency',
    #              options=[Option(text='test option')]
    #              )
    q = dict((k, [Option(**x) for x in v]) if k == 'options' else (k, v)
             for k, v in q.items())
    print(q)
    question = Question(**q)
    session.add(question)
    session.commit()
The comprehension checks each key in the given dictionary; when it finds options, it creates Option objects from the values, and otherwise it passes the key/value pair through unchanged.
The above function generated the following:
# {'text': 'test text', 'frequency': 'test frequency'}
# {'text': 'test text', 'frequency': 'test frequency', 'options': [<Option(id=None, question_id=None, text=test option 123)>]}
I hope this helps.
For SQLAlchemy objects you can simply use Model.__dict__
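Note that an instance's __dict__ also carries SQLAlchemy's internal _sa_instance_state entry, so a minimal sketch that strips it out:

def to_dict(obj):
    # Everything in __dict__ except SQLAlchemy's internal bookkeeping key
    # is a currently-loaded attribute value.
    return {k: v for k, v in obj.__dict__.items() if k != '_sa_instance_state'}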
Building on @Searle's answer, this avoids needing to directly list all models in the decorators, and also provides handling for when uselist=False (e.g. 1:1, many:1 relationships):
from sqlalchemy import event, inspect
from sqlalchemy.orm import Mapper

@event.listens_for(Mapper, 'init')
def received_init(target, args, kwargs):
    """Allow initializing nested relationships with dict only"""
    for rel in inspect(target).mapper.relationships:
        if rel.key in kwargs:
            if rel.uselist:
                kwargs[rel.key] = [rel.mapper.class_(**c) for c in kwargs[rel.key]]
            else:
                kwargs[rel.key] = rel.mapper.class_(**kwargs[rel.key])
Possible further improvements (sketched after the source link below):
add handling for the case where kwargs[rel.key] is already a model instance (right now this fails if you pass a model instance instead of a dict)
allow relationships to be specified as None (right now this requires empty lists or dicts)
source: SQLAlchemy "event.listen" for all models
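A sketch with both improvements folded in (untested; the None-handling behaviour is an assumption):

from sqlalchemy import event, inspect
from sqlalchemy.orm import Mapper

@event.listens_for(Mapper, 'init')
def received_init(target, args, kwargs):
    """Allow initializing nested relationships from plain dicts."""
    for rel in inspect(target).mapper.relationships:
        if rel.key not in kwargs:
            continue
        value = kwargs[rel.key]
        if value is None:
            # Treat None as "no related objects" instead of failing.
            kwargs[rel.key] = [] if rel.uselist else None
        elif rel.uselist:
            # Pass through items that are already model instances.
            kwargs[rel.key] = [c if isinstance(c, rel.mapper.class_)
                               else rel.mapper.class_(**c) for c in value]
        elif isinstance(value, dict):
            kwargs[rel.key] = rel.mapper.class_(**value)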
Short Description: I want to add insertion_date and last_update columns to a table in a PostgreSQL database using the SQLAlchemy ORM. I followed the documentation and the examples on SO, but it didn't work for me.
Here is my example:
base.py
from sqlalchemy.ext.declarative.api import declarative_base
Base = declarative_base()
movie_model.py
from __future__ import annotations
import datetime
from sqlalchemy import func
from base import Base
from sqlalchemy.sql.schema import Column
from typing import Optional, Dict, Union
from sqlalchemy.sql.sqltypes import String, Date, DateTime, Integer

class Movie(Base):
    __tablename__ = 'movies'

    movie_id = Column(String, primary_key=True)
    movie_name = Column(String)
    release_date = Column(Date)
    movie_revenue = Column(Integer)
    last_update = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    insertion_date = Column(DateTime, server_default=func.now())
    # __mapper_args__ = {"eager_defaults": True}

    @staticmethod
    def from_dict(movie_dict: Dict[str, Union[str, int, datetime.date]]) -> Optional[Movie]:
        if not movie_dict:
            return None
        return Movie(
            movie_id=movie_dict.get('movie_id'),
            movie_name=movie_dict.get('movie_name'),
            release_date=movie_dict.get('release_date'),
            movie_revenue=movie_dict.get('movie_revenue')
        )
db_manager.py
from movie_model import Movie
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.inspection import inspect
from typing import List

class DBManager:
    def __init__(self, session):
        self.session = session
        self.columns = Movie.__table__.columns.keys()
        self.primary_key = inspect(Movie).primary_key[0]
        self.primary_key_name = self.primary_key.name

    def add_many_on_conflict_do_update(self, movies: List[Movie]) -> None:
        # self.columns[:-2] excludes the last two columns (last_update and
        # insertion_date) from the explicit insert, so the ORM handles them
        # on its own as mentioned here:
        # https://docs.sqlalchemy.org/en/14/core/metadata.html#sqlalchemy.schema.Column.params.onupdate
        statement = insert(Movie.__table__).values(
            [{attr: getattr(movie, attr) for attr in self.columns[:-2]} for movie in movies])
        statement = statement.on_conflict_do_update(
            index_elements=[self.primary_key_name],
            set_={attr: getattr(statement.excluded, attr) for attr in self.columns[:-2]}
        )
        self.session.execute(statement)
        self.session.commit()

    def get_all(self) -> List[Movie]:
        return self.session.query(Movie).all()
main.py
from movie_model import Movie
from db_manager import DBManager
import datetime
from base import Base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session

def main():
    db_string = "postgresql+psycopg2://db_user:password@localhost:5432/db_name"
    engine = create_engine(db_string, echo=False)
    session = scoped_session(sessionmaker(bind=engine))
    Base.metadata.create_all(engine, checkfirst=True)
    movie1 = {
        'movie_id': '1',
        'movie_name': 'movie1',
        'release_date': datetime.datetime.strptime('2010-09-23', '%Y-%m-%d').date(),
        'movie_revenue': 1000
    }
    movie2 = {
        'movie_id': '2',
        'movie_name': 'movie2',
        'release_date': datetime.datetime.strptime('2010-09-23', '%Y-%m-%d').date(),
        'movie_revenue': 2000
    }
    movie3 = {
        'movie_id': '3',
        'movie_name': 'movie3',
        'release_date': datetime.datetime.strptime('2010-09-24', '%Y-%m-%d').date(),
        'movie_revenue': 3000
    }
    movie4 = {
        'movie_id': '4',
        'movie_name': 'movie4',
        'release_date': datetime.datetime.strptime('2010-09-24', '%Y-%m-%d').date(),
        'movie_revenue': 4000
    }
    movies = [movie1, movie2, movie3, movie4]
    movies_models = []
    for movie in movies:
        movies_models.append(Movie.from_dict(movie))
    db_manager = DBManager(session=session)
    db_manager.add_many_on_conflict_do_update(movies_models)
    retrieved_movies = db_manager.get_all()
    for movie in retrieved_movies:
        print(movie.last_update)

if __name__ == '__main__':
    main()
Packages:
psycopg2-binary==2.9.1
SQLAlchemy==1.3.24
Postgres version: 12.
When I run python main.py, the insertion_date is inserted properly, even if I run it many times and add a new movie each time. But last_update is only set the first time a movie is inserted, while it is expected to be updated whenever an update is applied to an already inserted movie.
I tried replacing self.columns[:-2] with self.columns[:-1] or with self.columns in the insert statement and/or in the set_ parameter. But that ended up with the last_update column being null, or being updated every time I ran main.py even when there were no updates/insertions to apply.
I also tried replacing onupdate with server_onupdate, and datetime.datetime.now with func.now(), in movie_model.py, but nothing worked.
I even added __mapper_args__ = {"eager_defaults": True} to Movie in movie_model.py as described here, but that still didn't work.
I checked the following questions on SO, but nothing worked either:
- Is possible to create Column in SQLAlchemy which is going to be automatically populated with time when it inserted/updated last time?
- onupdate not overridinig current datetime value
Any suggestions to make it work?
Edit for clarification:
What I am looking for is to make the last_update column get updated with the time of the transaction whenever a row is updated (using on_conflict_do_update).
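One thing I suspect (unverified): onupdate is a client-side default that only fires when an UPDATE statement is emitted through the ORM or the update() construct, while INSERT ... ON CONFLICT DO UPDATE is still an INSERT statement, so the set_ clause would have to carry the timestamp explicitly. A sketch of what that change to add_many_on_conflict_do_update might look like:

from sqlalchemy import func

statement = statement.on_conflict_do_update(
    index_elements=[self.primary_key_name],
    set_={
        # same excluded-column mapping as before ...
        **{attr: getattr(statement.excluded, attr) for attr in self.columns[:-2]},
        # ... plus an explicit timestamp, since onupdate callables
        # are not consulted for the ON CONFLICT branch
        'last_update': func.now(),
    }
)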
I have simple classes mapped to Python dictionaries. When trying to add them to an in-memory SQLite DB using SQLAlchemy, I get into trouble running filtered queries.
As the objects were already in dicts, I added a classmethod to my mapped objects that can create an instance of the mapped class from a dict.
Here is the setup:
from sqlalchemy import Column, Integer, String, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm.session import sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'Users'

    mailbox = Column(String, primary_key=True)
    uid = Column(Integer, primary_key=True)
    name = Column(String)

    @classmethod
    def from_dict(cls, dict):
        cls.mailbox = dict["mailbox"]
        cls.uid = dict["uid"]
        cls.name = dict["name"]
        return cls(mailbox=cls.mailbox,
                   uid=cls.uid,
                   name=cls.name)
The classmethod returns a new User instance, though note that it first assigns the incoming values to class-level attributes. All the other setup is pretty normal.
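For comparison, a version of from_dict that builds the instance without assigning to the class attributes would look like the sketch below (assigning to cls.mailbox as above actually replaces the mapped Column attribute User.mailbox with a plain value):

@classmethod
def from_dict(cls, d):
    # Build the instance directly from the dict, leaving the mapped
    # class attributes (User.mailbox, User.uid, User.name) untouched.
    return cls(mailbox=d["mailbox"], uid=d["uid"], name=d["name"])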
For demonstration I created two lists: one of mapped User objects, one of dictionaries with user data. Despite the types, the info inside each is identical.
def dict_user_list():
    return [{'mailbox': "inbox", 'uid': 1, 'name': "tim"},
            {'mailbox': "inbox", 'uid': 2, 'name': "mark"},
            {'mailbox': "inbox", 'uid': 3, 'name': "susan"},
            {'mailbox': "mybox", 'uid': 1, 'name': "geoff"},
            {'mailbox': "mybox", 'uid': 2, 'name': "kim"},
            {'mailbox': "mybox", 'uid': 3, 'name': "trev"},
            {'mailbox': "mybox", 'uid': 4, 'name': "rick"}]

def user_list():
    return [User(mailbox="inbox", uid=1, name="tim"),
            User(mailbox="inbox", uid=2, name="mark"),
            User(mailbox="inbox", uid=3, name="susan"),
            User(mailbox="mybox", uid=1, name="geoff"),
            User(mailbox="mybox", uid=2, name="kim"),
            User(mailbox="mybox", uid=3, name="trev"),
            User(mailbox="mybox", uid=4, name="rick")]
Here is the rest of the setup:
ENGINE = create_engine('sqlite://')
Base.metadata.create_all(ENGINE)
Base.metadata.bind = ENGINE
session_maker = sessionmaker()
session = session_maker()
Now if I add the Users via the .from_dict classmethod:
session.add_all([User.from_dict(user) for user in dict_user_list()])
Then run a query and filter:
query = session.query(User)
print("inbox filter: ", query.filter(User.mailbox == 'inbox').count())
print("mybox filter: ", query.filter(User.mailbox == 'mybox').count())
print("inbox filter_by: ", query.filter_by(mailbox='inbox').count())
print("mybox filter_by: ", query.filter_by(mailbox='mybox').count())
The output is:
inbox filter: 0
mybox filter: 7
inbox filter_by: 0
mybox filter_by: 7
This is wrong, but I can get the correct answers by using text():
print("inbox filter: ", query.filter(text('Users.mailbox = "inbox"')).count())
print("mybox filter: ", query.filter(text('Users.mailbox = "mybox"')).count())
The output gives the correct results:
inbox filter: 3
mybox filter: 4
If I change my .add_all() argument to create my instances via User(....):
session.add_all([user for user in user_list()])
Then run my queries and filters again without using text():
query = session.query(User)
print("inbox filter: ", query.filter(User.mailbox == 'inbox').count())
print("mybox filter: ", query.filter(User.mailbox == 'mybox').count())
print("inbox filter_by: ", query.filter_by(mailbox='inbox').count())
print("mybox filter_by: ", query.filter_by(mailbox='mybox').count())
The output is correct:
inbox filter: 3
mybox filter: 4
inbox filter_by: 3
mybox filter_by: 4
Obviously the solution is not to create instances of my mapped class from a dict this way. But as I spent so long trying to work out what was happening, I would love to know what is going on here. Is it a lack of understanding of SQLAlchemy? Of classmethods? Or just of Python in general?
I've inherited a SQLAlchemy project and need some assistance figuring out why one relationship does not appear to be behaving like all the others. Our orm models are generated by a script and as such I would assume all would either work or not.
When I try to insert on the table with the FK I receive the below error message:
Foreign key associated with column 'RenewalBatchGroup.renewalbatchfrequencyid' could not find table 'RenewalBatchFrequency' with which to generate a foreign key to target column 'renewalbatchfrequencyid'
In the below code, as it is, I get the NoReferencedTableError exception. However if I change the import from Frequency importing Group to the other way around, it does work.
IE:
Remove the following from RenewalBatchFrequency.py
"from app.orm.LROCustomer.dbo.RenewalBatchGroup import RenewalBatchGroup"
and add the following to RenewalBatchGroup.py
from app.orm.LROCustomer.dbo.RenewalBatchFrequency import RenewalBatchFrequency
I need to know why this relationship is behaving differently from other relationships like the RenewalBatch relationship. It's setup the exact same way.
RenewalBatchGroup.py
from sqlalchemy.dialects.mssql import *
from sqlalchemy import Column, ForeignKey
from sqlalchemy.orm import relationship
from app.DatabaseCore import Base
from Utils.DateTimeUtils import getDateTimeString
from app.orm.LROCustomer.dbo.RenewalBatch import RenewalBatch
from app.orm.LROCustomer.dbo.RenewalBatchGroupProperty import RenewalBatchGroupProperty

class RenewalBatchGroup(Base):
    __tablename__ = 'RenewalBatchGroup'
    __table_args__ = {'implicit_returning': False}  # http://docs.sqlalchemy.org/en/latest/dialects/mssql.html#triggers

    batchleadtime = Column(SMALLINT)
    batchsize = Column(SMALLINT)
    groupname = Column(VARCHAR(400))
    offerstatusid = Column(INTEGER)
    renewalbatchfrequencyid = Column(INTEGER, ForeignKey('RenewalBatchFrequency.renewalbatchfrequencyid'))
    renewalbatchfrequencyvalue = Column(VARCHAR(100))
    renewalbatchgroupid = Column(INTEGER, primary_key=True)
    updateuser = Column(VARCHAR(254))

    RenewalBatches = relationship("RenewalBatch", backref="RenewalBatchGroup")
    RenewalBatchGroupPropertys = relationship("RenewalBatchGroupProperty", backref="RenewalBatchGroup")

    def jsonify(self):
        return {"batchleadtime": self.batchleadtime,
                "batchsize": self.batchsize,
                "groupname": self.groupname,
                "offerstatusid": self.offerstatusid,
                "renewalbatchfrequencyid": self.renewalbatchfrequencyid,
                "renewalbatchfrequencyvalue": self.renewalbatchfrequencyvalue,
                "renewalbatchgroupid": self.renewalbatchgroupid,
                "updateuser": self.updateuser
                }

    def update(self, row):
        self.batchleadtime = row.batchleadtime
        self.batchsize = row.batchsize
        self.groupname = row.groupname
        self.offerstatusid = row.offerstatusid
        self.renewalbatchfrequencyid = row.renewalbatchfrequencyid
        self.renewalbatchfrequencyvalue = row.renewalbatchfrequencyvalue
        self.updateuser = row.updateuser
RenewalBatchFrequency.py
from sqlalchemy.dialects.mssql import *
from sqlalchemy import Column, ForeignKey
from sqlalchemy.orm import relationship
from app.DatabaseCore import Base
from Utils.DateTimeUtils import getDateTimeString
from app.orm.LROCustomer.dbo.RenewalBatchGroup import RenewalBatchGroup

class RenewalBatchFrequency(Base):
    __tablename__ = 'RenewalBatchFrequency'
    __table_args__ = {'implicit_returning': False}  # http://docs.sqlalchemy.org/en/latest/dialects/mssql.html#triggers

    name = Column(VARCHAR(100))
    renewalbatchfrequencyid = Column(INTEGER, primary_key=True)

    RenewalBatchGroups = relationship("RenewalBatchGroup", backref="RenewalBatchFrequency")

    def jsonify(self):
        return {"name": self.name,
                "renewalbatchfrequencyid": self.renewalbatchfrequencyid
                }

    def update(self, row):
        self.name = row.name
If any other information is needed just let me know.
Thank you.
As mentioned above, I can solve the issue by reversing the import statement. That's contrary to all the other relationships. I can only assume there is a circular reference somehow with the other relationships that SQLAlchemy is working out.
The way that I solved this issue is to put all of my model imports into the __init__.py file, so that every model has access to all models. Now when I generate a new model I just need to make sure it's added to the __init__.py file and I should be good to go.
A very frustrating experience.
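For reference, a minimal sketch of what that __init__.py could contain (module paths assumed from the imports above):

# app/orm/LROCustomer/dbo/__init__.py
# Importing the package now registers every mapped class with the shared
# declarative Base, so string-based relationship() and ForeignKey() targets
# can always be resolved, regardless of which model gets imported first.
from app.orm.LROCustomer.dbo.RenewalBatch import RenewalBatch
from app.orm.LROCustomer.dbo.RenewalBatchFrequency import RenewalBatchFrequency
from app.orm.LROCustomer.dbo.RenewalBatchGroup import RenewalBatchGroup
from app.orm.LROCustomer.dbo.RenewalBatchGroupProperty import RenewalBatchGroupProperty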
This is very similar to another question that's over 3 years old: What's a good general way to look SQLAlchemy transactions, complete with authenticated user, etc?
I'm working on an application where I'd like to log all changes to particular tables. There's currently a really good "recipe" that does versioning, but I need to modify it to instead record a datetime when the change occurred and a user id of who made the change. I took the history_meta.py example that's packaged with SQLAlchemy and made it record times instead of version numbers, but I'm having trouble figuring out how to pass in a user id.
The question I referenced above suggests including the user id in the session object. That makes a lot of sense, but I'm not sure how to do that. I've tried something simple like session.userid = authenticated_userid(request) but in history_meta.py that attribute doesn't seem to be on the session object any more.
I'm doing all of this in the Pyramid framework and the session object that I'm using is defined as DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension())). In a view I do session = DBSession() and then proceed to use session. (I'm not really sure if that's necessary, but that's what's going on)
Here's my modified history_meta.py in case someone might find it useful:
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import mapper, class_mapper, attributes, object_mapper
from sqlalchemy.orm.exc import UnmappedClassError, UnmappedColumnError
from sqlalchemy import Table, Column, ForeignKeyConstraint, DateTime
from sqlalchemy import event
from sqlalchemy.orm.properties import RelationshipProperty
from datetime import datetime

def col_references_table(col, table):
    for fk in col.foreign_keys:
        if fk.references(table):
            return True
    return False

def _history_mapper(local_mapper):
    cls = local_mapper.class_

    # set the "active_history" flag
    # on column-mapped attributes so that the old version
    # of the info is always loaded (currently sets it on all attributes)
    for prop in local_mapper.iterate_properties:
        getattr(local_mapper.class_, prop.key).impl.active_history = True

    super_mapper = local_mapper.inherits
    super_history_mapper = getattr(cls, '__history_mapper__', None)

    polymorphic_on = None
    super_fks = []

    if not super_mapper or local_mapper.local_table is not super_mapper.local_table:
        cols = []
        for column in local_mapper.local_table.c:
            if column.name == 'version_datetime':
                continue

            col = column.copy()
            col.unique = False

            if super_mapper and col_references_table(column, super_mapper.local_table):
                super_fks.append((col.key, list(super_history_mapper.local_table.primary_key)[0]))

            cols.append(col)

            if column is local_mapper.polymorphic_on:
                polymorphic_on = col

        if super_mapper:
            super_fks.append(('version_datetime', super_history_mapper.base_mapper.local_table.c.version_datetime))
            cols.append(Column('version_datetime', DateTime, default=datetime.now, nullable=False, primary_key=True))
        else:
            cols.append(Column('version_datetime', DateTime, default=datetime.now, nullable=False, primary_key=True))

        if super_fks:
            cols.append(ForeignKeyConstraint(*zip(*super_fks)))

        table = Table(local_mapper.local_table.name + '_history', local_mapper.local_table.metadata,
                      *cols)
    else:
        # single table inheritance. take any additional columns that may have
        # been added and add them to the history table.
        for column in local_mapper.local_table.c:
            if column.key not in super_history_mapper.local_table.c:
                col = column.copy()
                col.unique = False
                super_history_mapper.local_table.append_column(col)
        table = None

    if super_history_mapper:
        bases = (super_history_mapper.class_,)
    else:
        bases = local_mapper.base_mapper.class_.__bases__
    versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {})

    m = mapper(
        versioned_cls,
        table,
        inherits=super_history_mapper,
        polymorphic_on=polymorphic_on,
        polymorphic_identity=local_mapper.polymorphic_identity
    )
    cls.__history_mapper__ = m

    if not super_history_mapper:
        local_mapper.local_table.append_column(
            Column('version_datetime', DateTime, default=datetime.now, nullable=False, primary_key=False)
        )
        local_mapper.add_property("version_datetime", local_mapper.local_table.c.version_datetime)

class Versioned(object):
    @declared_attr
    def __mapper_cls__(cls):
        def map(cls, *arg, **kw):
            mp = mapper(cls, *arg, **kw)
            _history_mapper(mp)
            return mp
        return map

def versioned_objects(iter):
    for obj in iter:
        if hasattr(obj, '__history_mapper__'):
            yield obj

def create_version(obj, session, deleted=False):
    obj_mapper = object_mapper(obj)
    history_mapper = obj.__history_mapper__
    history_cls = history_mapper.class_

    obj_state = attributes.instance_state(obj)

    attr = {}
    obj_changed = False

    for om, hm in zip(obj_mapper.iterate_to_root(), history_mapper.iterate_to_root()):
        if hm.single:
            continue

        for hist_col in hm.local_table.c:
            if hist_col.key == 'version_datetime':
                continue

            obj_col = om.local_table.c[hist_col.key]

            # get the value of the
            # attribute based on the MapperProperty related to the
            # mapped column. this will allow usage of MapperProperties
            # that have a different keyname than that of the mapped column.
            try:
                prop = obj_mapper.get_property_by_column(obj_col)
            except UnmappedColumnError:
                # in the case of single table inheritance, there may be
                # columns on the mapped table intended for the subclass only.
                # the "unmapped" status of the subclass column on the
                # base class is a feature of the declarative module as of sqla 0.5.2.
                continue

            # expired object attributes and also deferred cols might not be in the
            # dict. force it to load no matter what by using getattr().
            if prop.key not in obj_state.dict:
                getattr(obj, prop.key)

            a, u, d = attributes.get_history(obj, prop.key)

            if d:
                attr[hist_col.key] = d[0]
                obj_changed = True
            elif u:
                attr[hist_col.key] = u[0]
            else:
                # if the attribute had no value.
                attr[hist_col.key] = a[0]
                obj_changed = True

    if not obj_changed:
        # not changed, but we have relationships. OK
        # check those too
        for prop in obj_mapper.iterate_properties:
            if isinstance(prop, RelationshipProperty) and \
                    attributes.get_history(obj, prop.key).has_changes():
                obj_changed = True
                break

    if not obj_changed and not deleted:
        return

    attr['version_datetime'] = obj.version_datetime
    hist = history_cls()
    for key, value in attr.items():
        setattr(hist, key, value)
    session.add(hist)
    print(dir(session))
    obj.version_datetime = datetime.now()

def versioned_session(session):
    @event.listens_for(session, 'before_flush')
    def before_flush(session, flush_context, instances):
        for obj in versioned_objects(session.dirty):
            create_version(obj, session)
        for obj in versioned_objects(session.deleted):
            create_version(obj, session, deleted=True)
UPDATE:
Okay, it seems that in the before_flush() method the session I get is of type sqlalchemy.orm.session.Session where the session I attached the user_id to was sqlalchemy.orm.scoping.scoped_session. So, at some point an object layer is stripped off. Is it safe to assign the user_id to the Session within the scoped_session? Can I be sure that it won't be there for other requests?
Old question, but still very relevant.
You should avoid trying to place web session information on the database session. It's combining unrelated concerns, and each has its own lifecycle (which don't match). Here's an approach I use in Flask with SQLAlchemy (not Flask-SQLAlchemy, but that should work too). I've tried to comment where Pyramid would be different.
from flask import has_request_context  # How to check if in a Flask session
from sqlalchemy import inspect
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm.attributes import get_history
from sqlalchemy.event import listen

from YOUR_SESSION_MANAGER import get_user  # This would be something in Pyramid
from my_project import models  # Where your models are defined

def get_object_changes(obj):
    """ Given a model instance, returns dict of pending
    changes waiting for database flush/commit.

    e.g. {
        'some_field': {
            'before': *SOME-VALUE*,
            'after': *SOME-VALUE*
        },
        ...
    }
    """
    inspection = inspect(obj)
    changes = {}
    for attr in class_mapper(obj.__class__).column_attrs:
        if getattr(inspection.attrs, attr.key).history.has_changes():
            if get_history(obj, attr.key)[2]:
                before = get_history(obj, attr.key)[2].pop()
                after = getattr(obj, attr.key)
                if before != after:
                    if before or after:
                        changes[attr.key] = {'before': before, 'after': after}
    return changes

def my_model_change_listener(mapper, connection, target):
    changes = get_object_changes(target)
    changes.pop("modify_ts", None)  # remove fields you don't want to track

    user_id = None
    if has_request_context():
        # Call your function to get active user and extract id
        user_id = getattr(get_user(), 'id', None)

    if user_id is None:
        # What do you want to do if user can't be determined
        pass

    # You now have the model instance (target), the user_id who is logged in,
    # and a dictionary of changes.

    # Either do something "quick" with it here or call an async task (e.g.
    # Celery) to do something with the information that may take longer
    # than you want the request to take.

# Add the listener
listen(models.MyModel, 'after_update', my_model_change_listener)
After a bunch of fiddling, I seem to be able to set values on the session object within the scoped_session by doing the following:
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
session = DBSession()
inner_session = session.registry()
inner_session.user_id = "test"
versioned_session(session)
Now the session object being passed around in history_meta.py has a user_id attribute on it which I set. I'm a little concerned about whether this is the right way of doing this as the object in the registry is a thread-local one and the threads are being re-used for different http requests.
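A possibly cleaner alternative (an assumption on my part, not something I've verified in history_meta.py): SQLAlchemy sessions expose an info dictionary that is explicitly intended for arbitrary application data, which would avoid setting ad-hoc attributes on a thread-local object:

session = DBSession()
# Session.info is a per-session dict reserved for application data.
session.info['user_id'] = "test"

# Then, inside the before_flush handler in history_meta.py:
# user_id = session.info.get('user_id')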
I ran into this old question recently. My requirement is to log all changes to a set of tables.
I'll post the code I ended up with here in case anyone finds it useful. It has some limitations, especially around deletes, but works for my purposes. The code supports logging audit records for selected tables to either a log file, or an audit table in the db.
from app import db
import datetime
from flask import current_app, g
# your own session user goes here
# you'll need an id and an email in that model
from flask_user import current_user as user
import importlib
import logging
from sqlalchemy import event, inspect
from sqlalchemy.orm.attributes import get_history
from sqlalchemy.orm import ColumnProperty, class_mapper
from uuid import uuid4

class AuditManager(object):
    config = {'storage': 'log',
              # define the class for the Audit model for your project,
              # if saving audit records in the db
              'auditModel': 'app.models.user_models.Audit'}

    def __init__(self, app):
        if 'AUDIT_CONFIG' in app.config:
            app.before_request(self.before_request_handler)
            self.config.update(app.config['AUDIT_CONFIG'])
            event.listen(
                db.session,
                'after_flush',
                self.db_after_flush
            )
            event.listen(
                db.session,
                'before_flush',
                self.db_before_flush
            )
            event.listen(
                db.session,
                'after_bulk_delete',
                self.db_after_bulk_delete
            )
            if self.config['storage'] == 'log':
                self.logger = logging.getLogger(__name__)
            elif self.config['storage'] == 'db':
                # Load the Audit model class at runtime, so that log file users don't need to define it
                module_name, class_name = self.config['auditModel'].rsplit(".", 1)
                self.AuditModel = getattr(importlib.import_module(module_name), class_name)

    # Create a global request id
    # Use this to group transactions together
    def before_request_handler(self):
        g.request_id = uuid4()

    def db_after_flush(self, session, flush_context):
        for instance in session.new:
            if instance.__tablename__ in self.config['tables']:
                # Record the inserts for this table
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',  # prefer to list auditable fields explicitly in the model's Meta class
                                      self.get_fields(instance))  # or derive them otherwise
                for attr in auditFields:
                    data[attr] = str(getattr(instance, attr, 'not set'))  # make every value a string in audit
                self.log_it(session, 'insert', instance, data)

    def db_before_flush(self, session, flush_context, instances):
        for instance in session.dirty:
            # Record the changes for this table
            if instance.__tablename__ in self.config['tables']:
                inspection = inspect(instance)
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',
                                      self.get_fields(instance))
                for attr in auditFields:
                    if getattr(inspection.attrs, attr).history.has_changes():  # we only log the new data
                        data[attr] = str(getattr(instance, attr, 'not set'))
                self.log_it(session, 'change', instance, data)

        for instance in session.deleted:
            # Record the deletes for this table
            # for this to be triggered, you must use the session-based delete object construct.
            # Eg: session.delete({query}.first())
            if instance.__tablename__ in self.config['tables']:
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',
                                      self.get_fields(instance))
                for attr in auditFields:
                    data[attr] = str(getattr(instance, attr, 'not set'))
                self.log_it(session, 'delete', instance, data)

    def db_after_bulk_delete(self, delete_context):
        instance = delete_context.query.column_descriptions[0]['type']  # only works for single-table deletes
        if delete_context.result.returns_rows:
            # Not sure exactly how after_bulk_delete is expected to work, since the context.results is empty,
            # as delete statements return no results
            for row in delete_context.result:
                data = {}
                auditFields = getattr(instance.__class__, 'Meta', None)
                auditFields = getattr(auditFields,
                                      'auditFields',
                                      self.get_fields(instance))
                for attr in auditFields:
                    data[attr] = str(getattr(row, attr, 'not set'))  # make every value a string in audit
                self.log_it(delete_context.session, 'delete', instance, data)
        else:
            # Audit what we can when we don't have individual rows to look at
            self.log_it(delete_context.session, 'delete', instance,
                        {"rowcount": delete_context.result.rowcount})

    def log_it(self, session, action, instance, data):
        if self.config['storage'] == 'log':
            self.logger.info(
                "request_id: %s, table: %s, action: %s, user id: %s, user email: %s, date: %s, data: %s"
                % (getattr(g, 'request_id', None), instance.__tablename__, action,
                   getattr(user, 'id', None), getattr(user, 'email', None),
                   datetime.datetime.now(), data))
        elif self.config['storage'] == 'db':
            audit = self.AuditModel(request_id=str(getattr(g, 'request_id', None)),
                                    table=str(instance.__tablename__),
                                    action=action,
                                    user_id=getattr(user, 'id', None),
                                    user_email=getattr(user, 'email', None),
                                    date=datetime.datetime.now(),
                                    data=data
                                    )
            session.add(audit)

    def get_fields(self, instance):
        fields = []
        for attr in class_mapper(instance.__class__).column_attrs:
            fields.append(attr.key)
        return fields
Suggested Model, if you want to store audit records in the database.
class Audit(db.Model):
    __tablename__ = 'audit'

    id = db.Column(db.Integer, primary_key=True)
    request_id = db.Column(db.Unicode(50), nullable=True, index=True, server_default=u'')
    table = db.Column(db.Unicode(50), nullable=False, index=True, server_default=u'')
    action = db.Column(db.Unicode(20), nullable=False, server_default=u'')
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='SET NULL'), nullable=True)
    user_email = db.Column(db.Unicode(255), nullable=False, server_default=u'')
    date = db.Column(db.DateTime, default=db.func.now())
    data = db.Column(db.JSON)
In settings:
AUDIT_CONFIG = {
    "tables": ['user', 'order', 'batch']
}
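Wiring it up is then presumably just a matter of constructing the manager with the Flask app (a sketch; the import path for AuditManager is hypothetical):

from flask import Flask
from app.audit import AuditManager  # hypothetical location of the class above

app = Flask(__name__)
app.config['AUDIT_CONFIG'] = {"tables": ['user', 'order', 'batch']}
audit_manager = AuditManager(app)  # registers the before/after flush listeners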
Using SQLAlchemy, I am trying to print out all of the attributes of each model that I have in a manner similar to:
SELECT * from table;
However, I would like to do something with each models instance information as I get it. So far the best that I've been able to come up with is:
for m in session.query(model).all():
    print [getattr(m, x.__str__().split('.')[1]) for x in model.__table__.columns]
    # additional code
And this will give me what I'm looking for, but it's a fairly roundabout way of getting it. I was kind of hoping for an attribute along the lines of:
m.attributes
# or
m.columns.values
I feel I'm missing something and there is a much better way of doing this. I'm doing this because I'll be printing everything to .CSV files, and I don't want to have to specify the columns/attributes that I'm interested in, I want everything (there's a lot of columns in a lot of models to be printed).
This is an old post, but I ran into a problem with the actual database column names not matching the mapped attribute names on the instance. We ended up going with this:
from sqlalchemy import inspect
inst = inspect(model)
attr_names = [c_attr.key for c_attr in inst.mapper.column_attrs]
Hope that helps somebody with the same problem!
Probably the shortest solution (see the recent documentation):
from sqlalchemy.inspection import inspect
columns = [column.name for column in inspect(model).c]
The last line might look more readable if rewritten as three lines:
table = inspect(model)
for column in table.c:
    print column.name
Building on Rodney L's answer:
model = MYMODEL
columns = [m.key for m in model.__table__.columns]
Take a look at SQLAchemy's metadata reflection feature.
A Table object can be instructed to load information about itself from the corresponding database schema object already existing within the database. This process is called reflection.
print repr(model.__table__)
Or just the columns:
print str(list(model.__table__.columns))
I believe this is the easiest way:
print [cname for cname in m.__dict__.keys()]
EDIT: The answer above me using sqlalchemy.inspection.inspect() seems to be a better solution.
I put this together and found it helpful:
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('mysql+pymysql://testuser:password@localhost:3306/testdb')
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
metadata.bind = engine
# configure Session class with desired options
Session = sessionmaker()
# associate it with our custom Session class
Session.configure(bind=engine)
# work with the session
session = Session()
And then:
d = {k: metadata.tables[k].columns.keys() for k in metadata.tables.keys()}
Example output print(d):
{'orderdetails': ['orderNumber', 'productCode', 'quantityOrdered', 'priceEach', 'orderLineNumber'],
'offices': ['addressLine1', 'addressLine2', 'city', 'country', 'officeCode', 'phone', 'postalCode', 'state', 'territory'],
'orders': ['comments', 'customerNumber', 'orderDate', 'orderNumber', 'requiredDate', 'shippedDate', 'status'],
'products': ['MSRP', 'buyPrice', 'productCode', 'productDescription', 'productLine', 'productName', 'productScale', 'productVendor', 'quantityInStock'],
'employees': ['employeeNumber', 'lastName', 'firstName', 'extension', 'email', 'officeCode', 'reportsTo', 'jobTitle'],
'customers': ['addressLine1', 'addressLine2', 'city', 'contactFirstName', 'contactLastName', 'country', 'creditLimit', 'customerName', 'customerNumber', 'phone', 'postalCode', 'salesRepEmployeeNumber', 'state'],
'productlines': ['htmlDescription', 'image', 'productLine', 'textDescription'],
'payments': ['amount', 'checkNumber', 'customerNumber', 'paymentDate']}
Or, alternatively:
from sqlalchemy.sql import text
cmd = "SELECT * FROM information_schema.columns WHERE table_schema = :db ORDER BY table_name,ordinal_position"
result = session.execute(
text(cmd),
{"db": "classicmodels"}
)
result.fetchall()
I'm using SQL Alchemy v 1.0.14 on Python 3.5.2
Assuming you can connect to an engine with create_engine(), I was able to display all columns using the following code. Replace "my connection string" and "my table name" with the appropriate values.
from sqlalchemy import create_engine, MetaData, Table, select
engine = create_engine('my connection string')
conn = engine.connect()
metadata = MetaData(conn)
t = Table("my table name", metadata, autoload=True)
columns = [m.key for m in t.columns]
columns
The last line just displays the column names from the previous statement.
You may be interested in what I came up with to do this.
from sqlalchemy.orm import class_mapper, ColumnProperty, RelationshipProperty
import collections

# structure returned by get_metadata function.
MetaDataTuple = collections.namedtuple("MetaDataTuple",
                                       "coltype, colname, default, m2m, nullable, uselist, collection")

def get_metadata_iterator(class_):
    for prop in class_mapper(class_).iterate_properties:
        name = prop.key
        if name.startswith("_") or name == "id" or name.endswith("_id"):
            continue
        md = _get_column_metadata(prop)
        if md is None:
            continue
        yield md

def get_column_metadata(class_, colname):
    prop = class_mapper(class_).get_property(colname)
    md = _get_column_metadata(prop)
    if md is None:
        raise ValueError("Not a column name: %r." % (colname,))
    return md

def _get_column_metadata(prop):
    name = prop.key
    m2m = False
    default = None
    nullable = None
    uselist = False
    collection = None
    proptype = type(prop)
    if proptype is ColumnProperty:
        coltype = type(prop.columns[0].type).__name__
        try:
            default = prop.columns[0].default
        except AttributeError:
            default = None
        else:
            if default is not None:
                default = default.arg(None)
        nullable = prop.columns[0].nullable
    elif proptype is RelationshipProperty:
        coltype = RelationshipProperty.__name__
        m2m = prop.secondary is not None
        nullable = prop.local_side[0].nullable
        uselist = prop.uselist
        if prop.collection_class is not None:
            collection = type(prop.collection_class()).__name__
        else:
            collection = "list"
    else:
        return None
    return MetaDataTuple(coltype, str(name), default, m2m, nullable, uselist, collection)
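Usage might look like the following (a sketch against a hypothetical mapped User class; any declarative model works):

# Iterate metadata for every mapped property the helper considers interesting.
for md in get_metadata_iterator(User):
    print(md.colname, md.coltype, md.nullable)

# Or look up a single property by name.
print(get_column_metadata(User, 'name'))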
I use this because it's slightly shorter:
for m in session.query(*model.__table__.columns).all():
    print m