Does SQLAlchemy have something similar to Django's signal concept? Basically, I'd like to trigger a few functions on pre-save or post-save of some entity objects. Thanks.
Edit: I JUST want the equivalent of django-signals in SQLAlchemy.
I think you are looking for `ORM Events`.
You can find documentation here:
http://docs.sqlalchemy.org/en/latest/orm/events.html
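For example, with the event API you can hook the mapper-level events that correspond to pre-save/post-save. A minimal sketch (the MyModel class and its updated_at column are assumptions for illustration, not from the question):
import datetime
from sqlalchemy import Column, DateTime, Integer, String, event
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class MyModel(Base):
    __tablename__ = 'my_model'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    updated_at = Column(DateTime)

@event.listens_for(MyModel, 'before_insert')
@event.listens_for(MyModel, 'before_update')
def set_updated_at(mapper, connection, target):
    # runs just before the row is written, roughly Django's pre_save
    target.updated_at = datetime.datetime.utcnow()

@event.listens_for(MyModel, 'after_insert')
def after_insert_handler(mapper, connection, target):
    # runs after the INSERT, roughly Django's post_save for new rows
    pass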
You didn't make clear whether you are integrating SQLAlchemy and Django, or you just want an equivalent of django-signals in SQLAlchemy.
If you want the equivalent of Django signals like post_save, pre_save, pre_delete, etc., I would refer you to:
sqlalchemy.orm.interfaces.MapperExtension
You may want to consider sqlalchemy.orm.SessionExtension as well.
Here's some code I threw together that sets an owner id and an update_date on an instance; it gets the job done in a Pylons app. The ORMExt class is where all the magic happens, and init_model is where you wire it up.
import logging
import sqlalchemy as sa
from sqlalchemy import orm
from pylons import session
import datetime
log = logging.getLogger(__name__)
class ORMSecurityException(Exception):
'''
thrown for security violations in orm layer
'''
pass
def _get_current_user():
log.debug('getting current user from session...')
log.debug(session)
return session['user']
def _is_admin(user):
return False
def set_update_date(instance):
if hasattr(instance,'update_date'):
instance.update_date = datetime.datetime.now()
def set_owner(instance):
'''
if owner_id, run it through the rules
'''
log.info('set_owner')
if hasattr(instance, 'owner_id'):
log.info('instance.owner_id=%s' % instance.owner_id)
        u = _get_current_user()
        if not u:
            # anonymous users can't save owned objects
            raise ORMSecurityException()
        log.debug('user: %s' % u.email)
if instance.owner_id==None:
#must be new object thus, owned by current user
log.info('setting owner on object %s for user: %s' % (instance.__class__.__name__,u.email))
instance.owner_id = u.id
elif instance.owner_id!=u.id and not _is_admin(u):
#if owner_id does not match user_id and user is not admin VIOLATION
raise ORMSecurityException()
else:
log.info('object is already owned by this user')
return #good to go
else:
log.info('%s is not an owned object' % instance.__class__.__name__)
return
def instance_policy(instance):
log.info('setting owner for %s' % instance.__class__.__name__)
set_owner(instance)
log.info('setting update_date for %s' % instance.__class__.__name__)
set_update_date(instance)
class ORMExt(orm.SessionExtension):
'''
attempt at managing ownership logic on objects
'''
def __init__(self,policy):
self._policy = policy
def before_flush(self,sqlsess,flush_context,instances):
'''
check all instances for owner_id==user.id
'''
try:
for instance in sqlsess.deleted:
try:
log.info('running policy for deleted %s' % instance.__class__.__name__)
self._policy(instance)
except Exception,ex:
log.error(ex)
raise ex
for instance in sqlsess.new:
try:
log.info('running policy for new %s' % instance.__class__.__name__)
self._policy(instance)
except Exception,ex:
log.error(ex)
raise ex
for instance in sqlsess.dirty:
try:
if sqlsess.is_modified(instance,include_collections=False,passive=True):
log.info('running policy for updated %s' % instance.__class__.__name__)
self._policy(instance)
except Exception, ex:
log.error(ex)
raise ex
except Exception,ex:
sqlsess.expunge_all()
raise ex
def init_model(engine):
"""Call me before using any of the tables or classes in the model"""
sm = orm.sessionmaker(autoflush=True, autocommit=True, bind=engine,extension=ORMExt(instance_policy))
meta.engine = engine
meta.Session = orm.scoped_session(sm)
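With that wiring in place, anything added through meta.Session picks up the policy when the session flushes. A quick sketch of the effect (Widget is a hypothetical mapped class with owner_id and update_date columns):
widget = Widget(name='example')
meta.Session.add(widget)
meta.Session.flush()  # before_flush runs instance_policy: owner_id and update_date get set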
Here's my take on this problem; it uses Louie to dispatch signals:
dispatch.py
"""
Signals dispatching for SQLAlchemy mappers.
"""
import louie
from sqlalchemy.orm.interfaces import MapperExtension
import signals
class LouieDispatcherExtension(MapperExtension):
"""
Dispatch signals using louie on insert, update and delete actions.
"""
def after_insert(self, mapper, connection, instance):
louie.send(signals.after_insert, instance.__class__,
instance=instance)
return super(LouieDispatcherExtension, self).after_insert(mapper,
connection, instance)
def after_delete(self, mapper, connection, instance):
louie.send(signals.after_delete, instance.__class__,
instance=instance)
return super(LouieDispatcherExtension, self).after_delete(mapper,
connection, instance)
def after_update(self, mapper, connection, instance):
louie.send(signals.after_update, instance.__class__,
instance=instance)
return super(LouieDispatcherExtension, self).after_update(mapper,
connection, instance)
def before_delete(self, mapper, connection, instance):
louie.send(signals.before_delete, instance.__class__,
instance=instance)
return super(LouieDispatcherExtension, self).before_delete(mapper,
connection, instance)
def before_insert(self, mapper, connection, instance):
louie.send(signals.before_insert, instance.__class__,
instance=instance)
return super(LouieDispatcherExtension, self).before_insert(mapper,
connection, instance)
def before_update(self, mapper, connection, instance):
louie.send(signals.before_update, instance.__class__,
instance=instance)
return super(LouieDispatcherExtension, self).before_update(mapper,
connection, instance)
signals.py
from louie import Signal
class after_delete(Signal): pass
class after_insert(Signal): pass
class after_update(Signal): pass
class before_delete(Signal): pass
class before_insert(Signal): pass
class before_update(Signal): pass
Sample usage:
class MyModel(DeclarativeBase):
__mapper_args__ = {"extension": LouieDispatcherExtension()}
ID = Column(Integer, primary_key=True)
name = Column(String(255))
def on_insert(instance):
print "inserted %s" % instance
louie.connect(on_insert, signals.after_insert, MyModel)
You can use an inner MapperExtension class:
class YourModel(db.Model):
class BaseExtension(MapperExtension):
        def before_insert(self, mapper, connection, instance):
            # do something here
            pass

        def before_update(self, mapper, connection, instance):
            # do something here
            pass
__mapper_args__ = { 'extension': BaseExtension() }
# ....
Tech stack
Frontend: VueJS
Backend: Flask Python3
Issue:
I am using VueJS on the front end with the Socket.IO client library; when a button is clicked it emits an event on the socket connected to my backend.
Once the event is emitted and received on the backend, the code enters a method with a while-True loop that is only broken out of when a certain value is present in the DB; until that point (which can be 20 minutes or 4 hours) it stays in the loop and checks the status every few seconds.
The error I am receiving (see below) is due to too many connected clients, which I don't get, because control exits the code that performs the query, so the stack should be clear (Postgres client connection closed). The fact that the query is in a with statement makes me say this, but obviously it's not the case.
My question is: how do I return the client connection, and how can I do the same task without the error and more efficiently?
ERROR:
File "/home/kidcode/.cache/pypoetry/virtualenvs/application-backend-p56sp5Ck-py3.8/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) FATAL: sorry, too many clients already
FATAL: sorry, too many clients already
(Background on this error at: https://sqlalche.me/e/14/e3q8)
CODE
filename: io.py
from flask_socketio import SocketIO
socketio = SocketIO(cors_allowed_origins="*")
filename: socket_listener.py
from ..io import socketio
from flask_socketio import emit, disconnect
#socketio.on("connect")
def test_connect():
emit("after connect", {"data": "Client has connected!"})
#socketio.on("system_started")
def test_broadcast_message(message):
socketio.start_background_task(target=SystemMonitorHandler(socketio))
#socketio.on("disconnect_request", namespace="/test")
def disconnect_request():
    @copy_current_request_context
def can_disconnect():
disconnect()
filename: system_monitor.py
class SystemMonitorHandler:
    def __init__(self, socketio):
        self.socketio = socketio
        self.monitor_system_progress()
def monitor_system_progress(self) -> None:
"""[summary]"""
# TODO use values from the query to break this loop
while True:
self.socketio.sleep(2)
            _system_queries: object = SystemQueries()
            _system_status: dict = (
                _system_queries.get_system_job_status(system_id=1, uuid=2)
            )
            self.socketio.emit("message", {"data": _system_status})
def get_system_job_status(self, system_id: str, uuid: str) -> dict:
_system_job_schema: object = SystemJobSchema(many=True)
try:
with SqlAlchemyUnitOfWork(
session=request_session_factory, files=SystemJobRepository
) as uow:
return _system_job_schema.dump(
uow.repositories.files.filter(
system_id=system_id, requested_by=uuid
)
.with_entities(
SystemModel.system_status,
SystemModel.job_end_date,
SystemModel.job_start_date,
)
.all()
)
except (DatabaseTransactionError, DatabaseSessionError) as err:
self.logger.exception(
ConsoleColourHelper.FAIL
+ "Unable to fetch requested resource from database : \
{}".format(
err
)
)
import abc
from functools import cached_property
from types import SimpleNamespace
from typing import Any, Callable, Iterable, Union
from sqlalchemy.orm import Session
class AbstractUnitOfWork(abc.ABC):
repositories: SimpleNamespace
def __enter__(self) -> "AbstractUnitOfWork":
return self
def __exit__(self, *args):
pass
    @abc.abstractmethod
def commit(self):
raise NotImplementedError
    @abc.abstractmethod
def rollback(self):
raise NotImplementedError
============= separate file START ==========================
class AbstractRepository(abc.ABC):
    @abc.abstractmethod
def filter(self, **kwargs) -> Iterable[Any]:
raise NotImplementedError
    @abc.abstractmethod
def save(self, model: Any):
raise NotImplementedError
    @abc.abstractmethod
def get(self, id_: Any) -> Any:
raise NotImplementedError
class SQLAlchemyAbstractRepository(AbstractRepository, abc.ABC):
model = Any
def __init__(self, session: Session):
self.session = session
def filter(self, **kwargs):
return self.session.query(self.model).filter_by(**kwargs)
def save(self, model):
self.session.add(model)
self.session.flush()
def get(self, id_):
        return self.session.get(self.model, id_)
def delete(self):
# TODO
pass
class SqlAlchemyUnitOfWork(AbstractUnitOfWork):
def __init__(
self,
        session: Union[Callable[[], Session], Session],
**kwargs: SQLAlchemyAbstractRepository
):
self._session = session
self._repository_config = kwargs
    @cached_property
def session(self):
return self._session() if callable(self._session) else self._session
def __enter__(self) -> AbstractUnitOfWork:
repositories = {
name: repository(self.session)
for name, repository in self._repository_config.items()
}
self.repositories = SimpleNamespace(**repositories)
return super().__enter__()
def __exit__(self, *args):
self.session.close()
def commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
========== separate file END ==========================
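For illustration only, a sketch (not the asker's actual setup, and the DSN is a placeholder) of the usual way to keep the client count bounded: create the engine and its connection pool once per process, and hand out short-lived sessions inside the polling loop so each iteration borrows a pooled connection and close() returns it:
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

# one engine (and therefore one connection pool) per process
engine = create_engine(
    "postgresql+psycopg2://user:password@localhost/appdb",
    pool_size=5,
    max_overflow=5,
)
SessionFactory = sessionmaker(bind=engine)

def fetch_status():
    # a fresh session per poll; close() returns its connection to the pool
    session = SessionFactory()
    try:
        return session.execute(text("SELECT 1")).scalar()
    finally:
        session.close()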
I'm trying to update a base class with a session token and user id for long polling.
Every time I call my function I create a new instance, which calls a login function; I don't want that to happen.
I only want to call the login() method when the session token is None.
How do I return the instance of APIClient, after the session token is set, for use with the get_balance() function?
client.py
from __future__ import absolute_import, unicode_literals
import requests
import os
from matchbook import endpoints
class BaseClient(object):
def __init__(self, username, password=None, locale=None):
self.username = username
self.password = password
self.locale = locale
self.url_beta = 'https://beta.matchbook.com'
self.urn_main = '/bpapi/rest/'
self.session = requests.Session()
self.session_token = None
self.user_id = None
def set_session_token(self, session_token, user_id):
self.session_token = session_token
self.user_id = user_id
class APIClient(BaseClient):
def __init__(self, username, password=None):
super(APIClient, self).__init__(username, password)
self.login = endpoints.Login(self)
self.account = endpoints.Account(self)
def __repr__(self):
return '<APIClient [%s]>' % self.username
def __str__(self):
return 'APIClient'
get_bal.py
from client import APIClient
from celery import shared_task
def get_client():
apiclient = APIClient(username, password)
if apiclient.session_token is None:
apiclient.login()
session_token = apiclient.session_token
user_id = apiclient.user_id
apiclient.set_session_token(session_token,user_id)
else:
print('session token assigned',apiclient.session_token, apiclient.user_id)
return apiclient
@shared_task
def get_balance():
    apiclient = get_client()  # to call instance after login is set
r = apiclient.account.get_account()
print(r)
You are creating a new instance of APIClient each time you call get_client(), which happens each time get_balance() gets called.
You need to maintain an instance of APIClient outside of the function scope so it carries over across your program, and update get_balance() so it doesn't call get_client() each time:
def get_balance(apiclient):
r = apiclient.account.get_account()
print(r)
def main():
apiclient = get_client()
get_balance(apiclient) # pass instance of APIClient in as an argument
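If get_balance() has to stay a zero-argument Celery task, another option (a sketch, not part of the original answer; username and password are assumed to be defined as in the question) is to cache the client at module level so login() only runs once per worker process:
_apiclient = None  # module-level cache: one client per worker process

def get_client():
    global _apiclient
    if _apiclient is None:
        _apiclient = APIClient(username, password)
        _apiclient.login()  # only happens on the first call
    return _apiclient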
Another note on your get_client() function: since the two branches are opposites of each other, just wrap them in an if... else block:
def get_client():
apiclient = APIClient(username, password)
if apiclient.session_token is None:
apiclient.login()
session_token = apiclient.session_token
user_id = apiclient.user_id
apiclient.set_session_token(session_token,user_id)
else:
print('session token assigned',apiclient.session_token, apiclient.user_id)
return apiclient
All that said, a much more OOP way would be to bake get_balance() into APIClient as an instance method, so you don't even need to worry about passing the instance around:
class APIClient(BaseClient):
...
def get_balance(self):
print(self.account.get_account())
# Then just call the get_balance() anywhere:
apiclient = get_client()
apiclient.get_balance()
I wrote a method in a class that tries to read some data from a database. I've also decorated this method with a decorator which checks whether the connection to the database is open and, if it is not, restarts it.
from collections import namedtuple
from functools import wraps

# `connect` and `TTransportException` come from the Hive client library in use
# (for example impyla's `impala.dbapi.connect` and thrift's TTransportException).

class HiveConnection(object):
def __init__(self, host, user, password):
"""Instantiate a HiveConnector object."""
self.host = host
self.user = user
self.password = password
self.port = 10000
self.auth_mechanism = 'PLAIN'
self._connection = self._connect()
def _connect(self):
"""Start the connection to database."""
try:
return connect(host=self.host, port=self.port,
user=self.user, password=self.password,
auth_mechanism=self.auth_mechanism)
except TTransportException as error:
print('Failed attempt to connect')
self._connect()
def _disconnect(self):
"""Close connection to database."""
self._connection.close()
def hadoop_connection_handler(function):
"""Start a database connection if not already open."""
        @wraps(function)
def wrapper(inst, *args, **kwargs):
if not inst._connection:
inst._connect()
return function(inst, *args, **kwargs)
return wrapper
    @hadoop_connection_handler
def read(self, query):
"""Execute a query to pull the data.
Args:
query: [str] Query to pull the data.
Returns:
A list of namedtuple (`Row`).
"""
columns = self._columns(query)
cursor = self._connection.cursor()
cursor.execute(query)
Record = namedtuple("Record", columns)
data = map(Record._make, cursor.fetchall())
cursor.close()
return data
Now I want to write a unit test to make sure this actually works.
from unittest.mock import patch, MagicMock
from nose.tools import assert_equal, raises
from services.db_connections import HiveConnection
class TestHiveConnection:
"""Integration test suite for HiveConnection class."""
def setUp(self):
self.hive = HiveConnection(user='username', password='password', host='myhost.net')
def test_reconnect(self):
"""If the connection drops, the object should be able to establish a
new connection.
"""
query = 'SELECT * FROM database.table 1'
self.hive._connect = MagicMock()
self.hive._disconnect()
self.hive.read(query)
assert_equal(self.hive._connect.called, True)
The above test always fails: self.hive._connect.called is in fact False. I think this is because _connect() is called from within the decorator. How should I change the test to account for that?
I want to log who is logging in to the app, which uses Flask.
I tried using @app.before_request, but the problem is that I need to access the username, which comes through the Flask globals, and I get None if I use this decorator.
Also, using global variables like below doesn't work. How do I get a global variable in a request context?
import logging
import time
from flask import request, flash
from flask import g
from forms import QueryForm, RequestForm, Approve_Reject_Btn_Form
from query.sqlQuery import SQLQuery
from database.oracle import Database
import datetime
from requests import Session
logger = logging.getLogger(__name__)
login_count = 0
'''
Homepage route - Displays all the tables in the homepage
'''
@app.route('/')
@app.route('/index')
def index():
try:
if not g.identity.is_authenticated():
return render_template('homepage.html')
else:
try:
global login_count
if login_count == 0:
username = g.identity.name
user_ip = request.headers.get('IP_header')
current_time = time.strftime('%c')
db = Database(db_config.username, db_config.password)
query = "INSERT INTO UserIP (username, login_time, ip_address) values ('%s', systimestamp, '%s')" % (username, user_ip)
dml_query(query)
logger.debug('User : %s, ip : %s, noted at time : %s, login_count : %s', username , user_ip, current_time, login_count)
login_count = 1
As far as your question "How to execute a block of code only once in flask?" goes, something like this should work fine:
app = Flask(__name__)
@app.before_first_request
def do_something_only_once():
app.logger.setLevel(logging.INFO)
app.logger.info("Initialized Flask logger handler")
The second part of your question is how to set up a global variable, like the login counter in your example. For something like this I would recommend that you use an external cache. See the example below with the werkzeug SimpleCache class. In production you should replace SimpleCache with Redis or MongoDB.
from werkzeug.contrib.cache import SimpleCache
class Cache(object):
cache = SimpleCache(threshold = 1000, default_timeout = 3600)
    @classmethod
def get(cls, key = None):
return cls.cache.get(key)
    @classmethod
def delete(cls, key = None):
return cls.cache.delete(key)
    @classmethod
def set(cls, key = None, value = None, timeout = 0):
if timeout:
return cls.cache.set(key, value, timeout = timeout)
else:
return cls.cache.set(key, value)
    @classmethod
def clear(cls):
return cls.cache.clear()
You can use the Cache class like this.
from mycache import Cache
@app.route('/')
@app.route('/index')
def index():
if not g.identity.is_authenticated():
return render_template('homepage.html')
else:
login_count = Cache.get("login_count")
if login_count == 0:
# ...
# Your code
login_count += 1
Cache.set("login_count", login_count)
EDIT 1: Added after_request example
Per the Flask documentation, the after_request decorator is used to run a registered function after each request. It can be used to invalidate a cache, alter the response object, or do pretty much anything that requires a specific response modification.
@app.after_request
def after_request_callback(response):
# Do something with the response (invalidate cache, alter response object, etc)
return response
I need to pass a kwarg to the parent class of my equivalent of FingerFactoryFromService using super.
I know I am actually passing the kwarg to IFingerFactory, because that is also where I pass the service that ends up in FingerFactoryFromService's __init__, and I can understand that it is getting tripped up somewhere in the component system, but I cannot think of any other way.
The error I keep getting is
exceptions.TypeError: 'test' is an invalid keyword argument for this function
Versions of code in my virtualenv are:
pip (1.4.1)
setuptools (1.1.6)
Twisted (13.1.0)
wsgiref (0.1.2)
zope.interface (4.0.5)
This is a cut-down example from the finger tutorial demonstrating the issue:
from twisted.protocols import basic
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.python import components
from zope.interface import Interface, implements # #UnresolvedImport
class IFingerService(Interface):
def getUser(user): # #NoSelf
"""
Return a deferred returning a string.
"""
def getUsers(): # #NoSelf
"""
Return a deferred returning a list of strings.
"""
class IFingerFactory(Interface):
def getUser(user): # #NoSelf
"""
Return a deferred returning a string.
"""
def buildProtocol(addr): # #NoSelf
"""
Return a protocol returning a string.
"""
def catchError(err):
return "Internal error in server"
class FingerProtocol(basic.LineReceiver):
def lineReceived(self, user):
d = self.factory.getUser(user)
d.addErrback(catchError)
def writeValue(value):
self.transport.write(value + '\r\n')
self.transport.loseConnection()
d.addCallback(writeValue)
class FingerService(service.Service):
implements(IFingerService)
def __init__(self, filename):
self.filename = filename
self.users = {}
def _read(self):
self.users.clear()
for line in file(self.filename):
user, status = line.split(':', 1)
user = user.strip()
status = status.strip()
self.users[user] = status
self.call = reactor.callLater(30, self._read) # #UndefinedVariable
def getUser(self, user):
print user
return defer.succeed(self.users.get(user, "No such user"))
def getUsers(self):
return defer.succeed(self.users.keys())
def startService(self):
self._read()
service.Service.startService(self)
def stopService(self):
service.Service.stopService(self)
self.call.cancel()
class FingerFactoryFromService(protocol.ServerFactory):
implements(IFingerFactory)
protocol = FingerProtocol
#def __init__(self, srv):
def __init__(self, srv, test=None):
self.service = srv
## I need to call super here because my equivalent of ServerFactory requires
## a kwarg but this cutdown example doesnt so I just assign it to a property
# super(FingerFactoryFromService, self).__init__(test=test)
self.test_thing = test or 'Default Something'
def getUser(self, user):
return self.service.getUser(user)
components.registerAdapter(FingerFactoryFromService,
IFingerService,
IFingerFactory)
application = service.Application('finger')
serviceCollection = service.IServiceCollection(application)
finger_service = FingerService('/etc/passwd')
finger_service.setServiceParent(serviceCollection)
#line_finger_factory = IFingerFactory(finger_service)
line_finger_factory = IFingerFactory(finger_service, test='Something')
line_finger_server = internet.TCPServer(1079, line_finger_factory)
line_finger_server.setServiceParent(serviceCollection)
This has nothing to do with the component system. What you want to do is override the Factory's buildProtocol method, as documented here:
https://twistedmatrix.com/documents/current/core/howto/servers.html#auto9
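For illustration, a sketch of that approach applied to the cut-down example above (what you do with test_thing is an assumption): keep the adapter call exactly as in the tutorial and override buildProtocol so each protocol instance is configured by the factory, instead of trying to push kwargs through IFingerFactory(...):
class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)

    protocol = FingerProtocol

    def __init__(self, srv):
        self.service = srv
        self.test_thing = 'Default Something'

    def buildProtocol(self, addr):
        # let ServerFactory build the protocol as usual...
        proto = protocol.ServerFactory.buildProtocol(self, addr)
        # ...then configure the instance here instead of passing extra
        # kwargs through the adapter registry
        proto.test_thing = self.test_thing
        return proto

    def getUser(self, user):
        return self.service.getUser(user)

components.registerAdapter(FingerFactoryFromService,
                           IFingerService,
                           IFingerFactory)
line_finger_factory = IFingerFactory(finger_service)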