Socket IO and postgresql hangups in while loop - python

Tech stack
Frontend: VueJS
Backend: Flask Python3
Issue:
I am using VueJS on the front end utilizing the socket io client library that when a button is clicked it emits an event on the socket connected to my backend.
Once the event is emitted and received on the backend, the code enters a method that has a while-true loop; it breaks out of the loop when a certain value is present in the DB. Until that point (which may be 20 minutes or 4 hours) it stays in this loop and checks the status every few seconds.
The error I am receiving (see below) is due to too many connected clients, which I don't understand: control exits the code that performs the query, so the stack should be clear (the Postgres client connection closed). The fact that the query is in a with statement makes me say this — but obviously it's not being closed.
My question is - how do I return the client? and allow this to do the same task without the error and more efficiently?
ERROR:
File "/home/kidcode/.cache/pypoetry/virtualenvs/application-backend-p56sp5Ck-py3.8/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) FATAL: sorry, too many clients already
FATAL: sorry, too many clients already
(Background on this error at: https://sqlalche.me/e/14/e3q8)
CODE
filename(io.py)
from flask_socketio import SocketIO
socketio = SocketIO(cors_allowed_origins="*")
filename(socket_listener.py)
from ..io import socketio
from flask_socketio import emit, disconnect
# NOTE: the "#" prefix was "@" in the original post (Stack Overflow
# mangles decorators); restored so the handler actually registers.
@socketio.on("connect")
def test_connect():
    """Acknowledge a newly connected Socket.IO client."""
    emit("after connect", {"data": "Client has connected!"})
@socketio.on("system_started")
def test_broadcast_message(message):
    """Kick off the long-running DB poll in a Socket.IO background task.

    Pass the callable and its argument separately: the original wrote
    ``target=SystemMonitorHandler(socketio)``, which *constructs* the
    handler here — and its __init__ starts the blocking monitor loop —
    so the loop ran inside the event handler, not in a background task.
    """
    socketio.start_background_task(SystemMonitorHandler, socketio)
@socketio.on("disconnect_request", namespace="/test")
def disconnect_request():
    """Handle a client's request to disconnect from the /test namespace."""
    # copy_current_request_context must be imported from flask.
    @copy_current_request_context
    def can_disconnect():
        disconnect()

    # The original defined can_disconnect but never invoked it (usually it
    # is passed as an emit() callback); call it so the disconnect happens.
    can_disconnect()
filename(system_monitor.py)
class SystemMonitorHandler:
    """Polls the system-job status and pushes updates over Socket.IO.

    Fixes over the original:

    * ``socketio = socket.io`` referenced an undefined ``socket`` name and
      stored nothing on the instance — now ``self.socketio = socketio``.
    * ``__init__`` called ``self.monitor_system_status()``, which does not
      exist; the loop method is ``monitor_system_progress``.
    * The loop used undefined names (``_data_migration_history``,
      ``status``) — it now uses the query helper and result it builds.
    * ``SystemQueries()`` is created once *outside* the loop. Creating it
      (and thereby a fresh DB session) every 2 seconds without returning
      connections to the pool is what eventually raises
      "FATAL: sorry, too many clients already".
    """

    def __init__(self, socketio) -> None:
        self.socketio = socketio
        self.monitor_system_progress()

    def monitor_system_progress(self) -> None:
        """Poll the job status every 2 s and emit it to the client."""
        _system_queries = SystemQueries()  # one helper, reused each tick
        # TODO use values from the query to break this loop
        while True:
            self.socketio.sleep(2)
            _system_status: dict = _system_queries.get_system_job_status(
                system_id=1, uuid=2
            )
            self.socketio.emit("message", {"data": _system_status})
def get_system_job_status(self, system_id: str, uuid: str) -> dict:
    """Fetch status/start-date/end-date rows for one system job.

    Opens a unit of work (which closes its session on exit), filters the
    job repository by system and requester, and dumps the selected
    columns through the schema.

    Returns:
        The schema dump (``many=True`` actually yields a *list* of row
        dicts, despite the ``dict`` annotation), or ``None`` when the
        unit of work raises — the except branch only logs.

    Raises:
        Nothing: DatabaseTransactionError / DatabaseSessionError are
        swallowed after logging. Callers must handle a ``None`` result.
    """
    _system_job_schema = SystemJobSchema(many=True)
    try:
        with SqlAlchemyUnitOfWork(
            session=request_session_factory, files=SystemJobRepository
        ) as uow:
            return _system_job_schema.dump(
                uow.repositories.files.filter(
                    system_id=system_id, requested_by=uuid
                )
                .with_entities(
                    SystemModel.system_status,
                    SystemModel.job_end_date,
                    SystemModel.job_start_date,
                )
                .all()
            )
    except (DatabaseTransactionError, DatabaseSessionError) as err:
        self.logger.exception(
            ConsoleColourHelper.FAIL
            + "Unable to fetch requested resource from database : {}".format(err)
        )
import abc
from functools import cached_property
from types import SimpleNamespace
from typing import Any, Callable, Iterable, Union
from sqlalchemy.orm import Session
from types import SimpleNamespace
class AbstractUnitOfWork(abc.ABC):
    """Context-manager interface for a unit of work over repositories."""

    # Namespace of repository instances, populated by concrete __enter__.
    repositories: SimpleNamespace

    def __enter__(self) -> "AbstractUnitOfWork":
        return self

    def __exit__(self, *args):
        pass

    # The "#" prefixes were "@" in the original post; restored so the
    # methods are genuinely abstract and subclasses must implement them.
    @abc.abstractmethod
    def commit(self):
        raise NotImplementedError

    @abc.abstractmethod
    def rollback(self):
        raise NotImplementedError
============= separate file START ==========================
class AbstractRepository(abc.ABC):
    """Minimal repository interface: query, persist, and fetch by id."""

    # The "#" prefixes were "@" in the original post; restored.
    @abc.abstractmethod
    def filter(self, **kwargs) -> Iterable[Any]:
        raise NotImplementedError

    @abc.abstractmethod
    def save(self, model: Any):
        raise NotImplementedError

    @abc.abstractmethod
    def get(self, id_: Any) -> Any:
        raise NotImplementedError
class SQLAlchemyAbstractRepository(AbstractRepository, abc.ABC):
    """Repository implemented on top of a SQLAlchemy Session."""

    # Concrete subclasses set this to their mapped model class.
    # (The original ``model = Any`` *assigned* typing.Any as a value;
    # an annotation is what was meant.)
    model: Any

    def __init__(self, session: Session):
        self.session = session

    def filter(self, **kwargs):
        """Return a Query over the model filtered by column=value pairs."""
        return self.session.query(self.model).filter_by(**kwargs)

    def save(self, model):
        """Stage the model and flush so generated keys are available."""
        self.session.add(model)
        self.session.flush()

    def get(self, id_):
        # Session.get() requires the mapped class as its first argument;
        # the original ``self.session.get(id_)`` raises a TypeError.
        return self.session.get(self.model, id_)

    def delete(self):
        # TODO
        pass
class SqlAlchemyUnitOfWork(AbstractUnitOfWork):
    """Unit of work that builds named repositories around one Session.

    Accepts either a Session or a zero-argument session factory; each
    keyword argument names a repository class to instantiate on enter.
    """

    def __init__(
        self,
        session: Union[Callable[[], Session], Session],
        **kwargs: SQLAlchemyAbstractRepository,
    ):
        # NOTE: the original wrote ``session=Union[...]`` — that makes the
        # typing object the parameter's *default value*; a ":" annotation
        # was intended.
        self._session = session
        self._repository_config = kwargs

    # "#cached_property" was "@cached_property" in the original post;
    # restored so the factory is invoked at most once per unit of work.
    @cached_property
    def session(self) -> Session:
        """Resolve the session, calling the factory if one was given."""
        return self._session() if callable(self._session) else self._session

    def __enter__(self) -> AbstractUnitOfWork:
        repositories = {
            name: repository(self.session)
            for name, repository in self._repository_config.items()
        }
        self.repositories = SimpleNamespace(**repositories)
        return super().__enter__()

    def __exit__(self, *args):
        # Closing here returns the connection to the pool; if this is not
        # reached on every exit path, connections accumulate until
        # Postgres reports "too many clients already".
        self.session.close()

    def commit(self):
        self.session.commit()

    def rollback(self):
        self.session.rollback()
========== separate file END ==========================

Related

using atexit.register with a defined __exit__ function

I'm trying to figure out how best to close a connection when I'm done with a custom object, but I'm not too sure how using atexit.register() will interact with a defined __exit__() function.
import atexit
from pyVim.connect import Disconnect, SmartConnect
class ConnectHostAPI(_ConnectHost):
    """Context manager wrapping a pyVim SmartConnect session.

    NOTE(review): __exit__ only does ``del self``, which deletes the local
    name and does NOT disconnect; actual cleanup relies entirely on the
    ``atexit.register(Disconnect, ...)`` hook, so the connection stays open
    until interpreter shutdown rather than until the ``with`` block ends.
    Calling ``Disconnect(self.service_instance)`` in __exit__ would be the
    usual fix — confirm against the pyVim documentation.
    """

    def __init__(self, host_ip) -> None:
        # assumes host_user / host_password come from _ConnectHost — TODO confirm
        self.host_ip = host_ip

    def __enter__(self):
        # Connect to host
        try:
            self.service_instance = SmartConnect(
                host=self.host_ip,
                user=self.host_user,
                pwd=self.host_password,
                disableSslCertValidation=True
            )
            atexit.register(Disconnect, self.service_instance)
        except IOError as io_error:
            # NOTE(review): on failure we still fall through and return
            # self below, so self.service_instance may be unset.
            print(f"{io_error=}")
        return self

    def __exit__(self, type, value, traceback):
        # See class note: this does not close the connection.
        del self
host_api_connection = ConnectHostAPI(host_ip)
with host_api_connection as con:
# do something with con
pass

Psycopg2 auto reconnect inside a class

I've got class to connect to my Database.
import psycopg2, psycopg2.extensions
from parseini import config
import pandas as pd, pandas.io.sql as sqlio
class MyDatabase:
    """Thin psycopg2 wrapper configured from an .ini file.

    NOTE(review): the connection is opened once in __init__ and never
    re-established, so any psycopg2.OperationalError (server restart,
    dropped connection) permanently breaks the instance — which is
    exactly what the question asks how to fix.
    """

    def __init__(self, name='mydb.ini'):
        self.params = config(filename=name)
        self.my_connection = psycopg2.connect(**self.params)
        self.my_cursor = self.my_connection.cursor()

    def fetch_all_as_df(self, sql_statement):
        # Read the query result straight into a pandas DataFrame.
        return sqlio.read_sql_query(sql_statement, self.my_connection)

    def df_to_sql(self, df):
        table = 'sometable'
        return sqlio.to_sql(df, table, self.my_connection)

    def __del__(self):
        # NOTE(review): __del__ may run during interpreter teardown;
        # close() can raise if the connection is already gone.
        self.my_cursor.close()
        self.my_connection.close()
How could I reconnect to database and handle psycopg2.OperationalError in my case?
You could make a decorator that tries to reconnect when psycopg2.InterfaceError or psycopg2.OperationalError are raised.
That's just an example how it could work and probably needs adjustments:
import time
from functools import wraps
import psycopg2, psycopg2.extensions
def retry(fn):
    """Decorator: reconnect and retry ``fn`` when psycopg2 reports a
    lost connection (InterfaceError / OperationalError).

    Retries up to ``inst._reconnectTries`` times, sleeping
    ``inst._reconnectIdle`` seconds and calling ``inst._connect()``
    between attempts. The original "#wraps" was "@wraps" (mangled by
    Stack Overflow) and the original silently returned None when every
    retry failed; the last error is now re-raised instead.
    """
    @wraps(fn)
    def wrapper(*args, **kw):
        inst = args[0]  # bound method: first positional arg is the instance
        last_error = None
        for attempt in range(inst._reconnectTries):
            print(attempt, inst._reconnectTries)
            try:
                return fn(*args, **kw)
            except (psycopg2.InterfaceError, psycopg2.OperationalError) as e:
                last_error = e
                print("\nDatabase Connection [InterfaceError or OperationalError]")
                print("Idle for %s seconds" % (inst._reconnectIdle))
                time.sleep(inst._reconnectIdle)
                inst._connect()
        # All retries exhausted: surface the failure to the caller.
        raise last_error
    return wrapper
class MyDatabase:
    """Reconnecting variant: methods marked with the retry decorator
    re-establish the connection on InterfaceError / OperationalError.

    NOTE: the "#retry" lines below were "@retry" in the original post —
    Stack Overflow's formatting mangled the decorators.
    """

    _reconnectTries = 5
    _reconnectIdle = 2  # wait seconds before retying

    def __init__(self, name='mydb.ini'):
        self.my_connection = None
        self.my_cursor = None
        self.params = config(filename=name)
        self._connect()

    def _connect(self):
        # _connect itself is NOT decorated, so the initial connect is
        # assumed to succeed (see the note at the end of the answer).
        self.my_connection = psycopg2.connect(**self.params)
        self.my_cursor = self.my_connection.cursor()

    #retry
    def fetch_all_as_df(self, sql_statement):
        return sqlio.read_sql_query(sql_statement, self.my_connection)

    #retry
    def dummy(self):
        # Trivial round-trip used to demonstrate the retry behaviour.
        self.my_cursor.execute('select 1+2 as result')
        return self.my_cursor.fetchone()

    #retry
    def df_to_sql(self, df):
        table = 'sometable'
        return sqlio.to_sql(df, table, self.my_connection)

    def __del__(self):
        # Maybe there is a connection but no cursor, whatever close silently!
        for c in (self.my_cursor, self.my_connection):
            try:
                c.close()
            except:
                pass
db = MyDatabase()
time.sleep(30) # some time to shutdown the database
print(db.dummy())
Output:
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
(3,)
Note: _connect itself is not decorated, so this code assumes an initial connect always works!

unittest.mock: Check if method was called within a decorator

I wrote a method in a class that tries to read some data from a database. I've also decorated this method with a decorator which checks if the connection to the database was open, and in case it was not, restarts it.
class HiveConnection(object):
    """Hive/Impala connection wrapper whose read path auto-reconnects
    via the hadoop_connection_handler decorator."""

    def __init__(self, host, user, password):
        """Instantiate a HiveConnector object."""
        self.host = host
        self.user = user
        self.password = password
        self.port = 10000
        self.auth_mechanism = 'PLAIN'
        self._connection = self._connect()

    def _connect(self):
        """Start the connection to database."""
        try:
            return connect(host=self.host, port=self.port,
                           user=self.user, password=self.password,
                           auth_mechanism=self.auth_mechanism)
        except TTransportException as error:
            print('Failed attempt to connect')
            # BUG fixed: the original recursed without returning, so a
            # failed first attempt made _connect() return None even when
            # the retry succeeded. (Unbounded recursion remains — there
            # is no retry cap.)
            return self._connect()

    def _disconnect(self):
        """Close connection to database."""
        self._connection.close()

    # "#wraps"/"#hadoop_connection_handler" were "@..." in the original.
    def hadoop_connection_handler(function):
        """Start a database connection if not already open."""
        @wraps(function)
        def wrapper(inst, *args, **kwargs):
            if not inst._connection:
                inst._connect()
            return function(inst, *args, **kwargs)
        return wrapper

    @hadoop_connection_handler
    def read(self, query):
        """Execute a query to pull the data.

        Args:
            query: [str] Query to pull the data.
        Returns:
            A list of namedtuple (`Row`).
        """
        # NOTE(review): self._columns is not defined in the visible code —
        # presumably provided elsewhere in the class.
        columns = self._columns(query)
        cursor = self._connection.cursor()
        cursor.execute(query)
        Record = namedtuple("Record", columns)
        data = map(Record._make, cursor.fetchall())
        cursor.close()
        return data
Now I want to write a unit test to make sure this actually works.
from unittest.mock import patch, MagicMock
from nose.tools import assert_equal, raises
from services.db_connections import HiveConnection
class TestHiveConnection:
    """Integration test suite for HiveConnection class."""

    def setUp(self):
        self.hive = HiveConnection(user='username', password='password', host='myhost.net')

    def test_reconnect(self):
        """If the connection drops, the object should be able to establish a
        new connection.
        """
        # NOTE(review): read() is wrapped by hadoop_connection_handler,
        # which calls _connect() only when inst._connection is falsy.
        # _disconnect() closes the connection object but leaves the
        # attribute set (still truthy), so the mocked _connect is never
        # invoked and the final assertion fails — the behaviour the
        # question is asking about.
        query = 'SELECT * FROM database.table 1'
        self.hive._connect = MagicMock()
        self.hive._disconnect()
        self.hive.read(query)
        assert_equal(self.hive._connect.called, True)
The above test always fails: self.hive._connect.called is in fact False. This, I think, is because connect() is called from within the decorator. How should I change the test to account for that?

Twisted python: the correct way to pass a kwarg through the component system to a factory

I need to pass a kwarg to the parent class of my equivalent of FingerFactoryFromService using super.
I know I am actually passing the kwarg to IFingerFactory because that is also where I pass the service that ends up in init FingerFactoryFromService and I can understand that it is getting tripped up somewhere in the component system but I cannot think of any other way.
The error I keep getting is
exceptions.TypeError: 'test' is an invalid keyword argument for this function
Versions of code in my virtualenv are:
pip (1.4.1)
setuptools (1.1.6)
Twisted (13.1.0)
wsgiref (0.1.2)
zope.interface (4.0.5)
This is a cutdown example from the finger tutorial demonstrating the issue:
from twisted.protocols import basic
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.python import components
from zope.interface import Interface, implements # #UnresolvedImport
class IFingerService(Interface):
    """Zope interface for the finger service: user lookup operations."""

    def getUser(user):  # #NoSelf
        """
        Return a deferred returning a string.
        """

    def getUsers():  # #NoSelf
        """
        Return a deferred returning a list of strings.
        """
class IFingerFactory(Interface):
    """Zope interface for a protocol factory that can also look up users."""

    def getUser(user):  # #NoSelf
        """
        Return a deferred returning a string.
        """

    def buildProtocol(addr):  # #NoSelf
        """
        Return a protocol returning a string.
        """
def catchError(err):
    """Errback: swallow the failure and substitute a generic message.

    The error itself is intentionally ignored; the deferred's callback
    chain continues with this string as the value written to the client.
    """
    return "Internal error in server"
class FingerProtocol(basic.LineReceiver):
    """One finger query per connection: look up the user named on the
    received line, write the result, then close the transport."""

    def lineReceived(self, user):
        d = self.factory.getUser(user)
        d.addErrback(catchError)

        def writeValue(value):
            self.transport.write(value + '\r\n')
            self.transport.loseConnection()

        d.addCallback(writeValue)
class FingerService(service.Service):
    """Serves user->status lookups from a colon-separated file.

    Python 2 / old Twisted code (``implements``, ``print`` statement,
    ``file()``) — kept verbatim from the original post.
    """
    implements(IFingerService)

    def __init__(self, filename):
        # Path of a "user:status" file (e.g. /etc/passwd-style).
        self.filename = filename
        self.users = {}

    def _read(self):
        """Reload the user map, then reschedule itself every 30 seconds."""
        self.users.clear()
        for line in file(self.filename):
            user, status = line.split(':', 1)
            user = user.strip()
            status = status.strip()
            self.users[user] = status
        self.call = reactor.callLater(30, self._read)  # #UndefinedVariable

    def getUser(self, user):
        print user
        return defer.succeed(self.users.get(user, "No such user"))

    def getUsers(self):
        return defer.succeed(self.users.keys())

    def startService(self):
        self._read()
        service.Service.startService(self)

    def stopService(self):
        service.Service.stopService(self)
        self.call.cancel()
class FingerFactoryFromService(protocol.ServerFactory):
    """Adapter: wraps an IFingerService so it can act as an IFingerFactory.

    Registered via components.registerAdapter below; note the adapter is
    constructed by the component system with the adaptee only, which is
    why the extra ``test=`` kwarg in the question fails.
    """
    implements(IFingerFactory)
    protocol = FingerProtocol

    # def __init__(self, srv):
    def __init__(self, srv, test=None):
        self.service = srv
        ## I need to call super here because my equivalent of ServerFactory requires
        ## a kwarg but this cutdown example doesnt so I just assign it to a property
        # super(FingerFactoryFromService, self).__init__(test=test)
        self.test_thing = test or 'Default Something'

    def getUser(self, user):
        return self.service.getUser(user)
# Register the adapter so IFingerFactory(finger_service) constructs a
# FingerFactoryFromService around the service. The adapter call accepts
# only the adaptee — which is why passing ``test=`` below raises
# "TypeError: 'test' is an invalid keyword argument for this function".
components.registerAdapter(FingerFactoryFromService,
                           IFingerService,
                           IFingerFactory)
application = service.Application('finger')
serviceCollection = service.IServiceCollection(application)
finger_service = FingerService('/etc/passwd')
finger_service.setServiceParent(serviceCollection)
# line_finger_factory = IFingerFactory(finger_service)
line_finger_factory = IFingerFactory(finger_service, test='Something')
line_finger_server = internet.TCPServer(1079, line_finger_factory)
line_finger_server.setServiceParent(serviceCollection)
This has nothing to do with the component system. What you want to do is override the Factory's buildProtocol method, as documented here:
https://twistedmatrix.com/documents/current/core/howto/servers.html#auto9

signals or triggers in SQLAlchemy

does SQLAlchemy have something similar to Django's signal concept? Basically, I'd like to trigger a few functions when I pre-save or post-save some entity objects. Thanks.
Edit: I JUST want equivalent of django-signals in SQLAlchemy.
I think you are looking for `ORM Events'.
You can find documentation here:
http://docs.sqlalchemy.org/en/latest/orm/events.html
You didn't make clear, whether you are integrating SQLAlchemy and Django, or you JUST want equivalent of django-signals in SQLAlchemy.
If you want equivalent of Django signals like post_save, pre_save, pre_delete etc, i would refer you the page,
sqlalchemy.orm.interfaces.MapperExtension
You may want to consider the sqlalchemy.orm.SessionExtension as well
Here's some code I threw together to set an owner id on an instance and set an update_date that get's the job done in a pylons app. the OrmExt class is where all the magic happens. And init_model is where you wire it up.
import logging
import sqlalchemy as sa
from sqlalchemy import orm
from pylons import session
import datetime
log = logging.getLogger(__name__)
class ORMSecurityException(Exception):
    """Raised for security violations detected in the ORM layer."""
def _get_current_user():
    """Fetch the logged-in user object from the pylons session.

    NOTE(review): raises KeyError when no 'user' key is present; callers
    expecting None for anonymous users would need session.get('user').
    """
    log.debug('getting current user from session...')
    log.debug(session)
    return session['user']
def _is_admin(user):
return False
def set_update_date(instance):
    """Stamp ``instance.update_date`` with the current time, but only
    when the instance actually has that attribute (non-timestamped
    models pass through untouched)."""
    if hasattr(instance, 'update_date'):
        instance.update_date = datetime.datetime.now()
def set_owner(instance):
    '''
    if owner_id, run it through the rules
    '''
    log.info('set_owner')
    if hasattr(instance, 'owner_id'):
        log.info('instance.owner_id=%s' % instance.owner_id)
        u = _get_current_user()
        if not u:
            # anonymous users can't save owned objects
            raise ORMSecurityException()
        # BUG fixed: the original logged u.email *before* the "not u"
        # guard, so an anonymous (None) user raised AttributeError
        # instead of the intended ORMSecurityException.
        log.debug('user: %s' % u.email)
        if instance.owner_id == None:
            # must be new object thus, owned by current user
            log.info('setting owner on object %s for user: %s'
                     % (instance.__class__.__name__, u.email))
            instance.owner_id = u.id
        elif instance.owner_id != u.id and not _is_admin(u):
            # if owner_id does not match user_id and user is not admin VIOLATION
            raise ORMSecurityException()
        else:
            log.info('object is already owned by this user')
            return  # good to go
    else:
        log.info('%s is not an owned object' % instance.__class__.__name__)
        return
def instance_policy(instance):
    """Apply every per-instance flush policy: ownership, then timestamp."""
    log.info('setting owner for %s' % instance.__class__.__name__)
    set_owner(instance)
    log.info('setting update_date for %s' % instance.__class__.__name__)
    set_update_date(instance)
class ORMExt(orm.SessionExtension):
    '''
    attempt at managing ownership logic on objects
    '''
    # NOTE(review): Python 2 code ("except Exception,ex"); SessionExtension
    # was removed from modern SQLAlchemy — session events are the current
    # equivalent.

    def __init__(self, policy):
        # policy: callable applied to each deleted/new/dirty instance.
        self._policy = policy

    def before_flush(self, sqlsess, flush_context, instances):
        '''
        check all instances for owner_id==user.id
        '''
        try:
            for instance in sqlsess.deleted:
                try:
                    log.info('running policy for deleted %s' % instance.__class__.__name__)
                    self._policy(instance)
                except Exception,ex:
                    log.error(ex)
                    raise ex
            for instance in sqlsess.new:
                try:
                    log.info('running policy for new %s' % instance.__class__.__name__)
                    self._policy(instance)
                except Exception,ex:
                    log.error(ex)
                    raise ex
            for instance in sqlsess.dirty:
                try:
                    # Only apply the policy when the instance really changed.
                    if sqlsess.is_modified(instance, include_collections=False, passive=True):
                        log.info('running policy for updated %s' % instance.__class__.__name__)
                        self._policy(instance)
                except Exception, ex:
                    log.error(ex)
                    raise ex
        except Exception,ex:
            # Any policy failure aborts the whole flush.
            sqlsess.expunge_all()
            raise ex
def init_model(engine):
    """Call me before using any of the tables or classes in the model"""
    # Wire instance_policy into every flush via the ORMExt extension;
    # `meta` is presumably the module-level model registry — not shown here.
    sm = orm.sessionmaker(autoflush=True, autocommit=True, bind=engine, extension=ORMExt(instance_policy))
    meta.engine = engine
    meta.Session = orm.scoped_session(sm)
Here's my take at this problem, it uses Louie to dispatch signals:
dispatch.py
"""
Signals dispatching for SQLAlchemy mappers.
"""
import louie
from sqlalchemy.orm.interfaces import MapperExtension
import signals
class LouieDispatcherExtension(MapperExtension):
    """
    Dispatch signals using louie on insert, update and delete actions.
    """
    # Each hook sends the matching signal (sender = the instance's class,
    # mirroring Django's per-model signals), then defers to the default
    # MapperExtension behaviour.

    def after_insert(self, mapper, connection, instance):
        louie.send(signals.after_insert, instance.__class__,
                   instance=instance)
        return super(LouieDispatcherExtension, self).after_insert(
            mapper, connection, instance)

    def after_delete(self, mapper, connection, instance):
        louie.send(signals.after_delete, instance.__class__,
                   instance=instance)
        return super(LouieDispatcherExtension, self).after_delete(
            mapper, connection, instance)

    def after_update(self, mapper, connection, instance):
        louie.send(signals.after_update, instance.__class__,
                   instance=instance)
        return super(LouieDispatcherExtension, self).after_update(
            mapper, connection, instance)

    def before_delete(self, mapper, connection, instance):
        louie.send(signals.before_delete, instance.__class__,
                   instance=instance)
        return super(LouieDispatcherExtension, self).before_delete(
            mapper, connection, instance)

    def before_insert(self, mapper, connection, instance):
        louie.send(signals.before_insert, instance.__class__,
                   instance=instance)
        return super(LouieDispatcherExtension, self).before_insert(
            mapper, connection, instance)

    def before_update(self, mapper, connection, instance):
        louie.send(signals.before_update, instance.__class__,
                   instance=instance)
        return super(LouieDispatcherExtension, self).before_update(
            mapper, connection, instance)
signals.py
from louie import Signal
# One louie Signal subclass per mapper event, mirroring Django's
# pre_save / post_save naming scheme.
class after_delete(Signal): pass
class after_insert(Signal): pass
class after_update(Signal): pass
class before_delete(Signal): pass
class before_insert(Signal): pass
class before_update(Signal): pass
Sample usage:
class MyModel(DeclarativeBase):
    # Attach the dispatching extension so louie signals fire for this model.
    __mapper_args__ = {"extension": LouieDispatcherExtension()}
    ID = Column(Integer, primary_key=True)
    name = Column(String(255))

# Receiver: called after each MyModel INSERT (Python 2 print statement,
# kept verbatim from the original post).
def on_insert(instance):
    print "inserted %s" % instance

louie.connect(on_insert, signals.after_insert, MyModel)
You can use inner MapperExtension class:
class YourModel(db.Model):
    """Model using an inner MapperExtension for before-insert/update hooks."""

    class BaseExtension(MapperExtension):
        """Hooks run by the mapper around this model's INSERT/UPDATE."""

        def before_insert(self, mapper, connection, instance):
            # do something here
            # (``pass`` added: a comment alone is not a valid body)
            pass

        def before_update(self, mapper, connection, instance):
            # do something here
            pass

    __mapper_args__ = { 'extension': BaseExtension() }
    # ....

Categories