Psycopg2 auto reconnect inside a class - python

I've got a class to connect to my database.
import psycopg2, psycopg2.extensions
from parseini import config
import pandas as pd, pandas.io.sql as sqlio

class MyDatabase:
    def __init__(self, name='mydb.ini'):
        self.params = config(filename=name)
        self.my_connection = psycopg2.connect(**self.params)
        self.my_cursor = self.my_connection.cursor()

    def fetch_all_as_df(self, sql_statement):
        return sqlio.read_sql_query(sql_statement, self.my_connection)

    def df_to_sql(self, df):
        table = 'sometable'
        return sqlio.to_sql(df, table, self.my_connection)

    def __del__(self):
        self.my_cursor.close()
        self.my_connection.close()
How could I reconnect to the database and handle psycopg2.OperationalError in my case?

You could make a decorator that tries to reconnect when psycopg2.InterfaceError or psycopg2.OperationalError is raised.
This is just an example of how it could work and probably needs adjustments:
import time
from functools import wraps
import psycopg2, psycopg2.extensions

def retry(fn):
    @wraps(fn)
    def wrapper(*args, **kw):
        cls = args[0]
        for x in range(cls._reconnectTries):
            print(x, cls._reconnectTries)
            try:
                return fn(*args, **kw)
            except (psycopg2.InterfaceError, psycopg2.OperationalError) as e:
                print("\nDatabase Connection [InterfaceError or OperationalError]")
                print("Idle for %s seconds" % (cls._reconnectIdle))
                time.sleep(cls._reconnectIdle)
                cls._connect()
    return wrapper

class MyDatabase:
    _reconnectTries = 5
    _reconnectIdle = 2  # wait seconds before retrying

    def __init__(self, name='mydb.ini'):
        self.my_connection = None
        self.my_cursor = None
        self.params = config(filename=name)
        self._connect()

    def _connect(self):
        self.my_connection = psycopg2.connect(**self.params)
        self.my_cursor = self.my_connection.cursor()

    @retry
    def fetch_all_as_df(self, sql_statement):
        return sqlio.read_sql_query(sql_statement, self.my_connection)

    @retry
    def dummy(self):
        self.my_cursor.execute('select 1+2 as result')
        return self.my_cursor.fetchone()

    @retry
    def df_to_sql(self, df):
        table = 'sometable'
        return sqlio.to_sql(df, table, self.my_connection)

    def __del__(self):
        # Maybe there is a connection but no cursor; whatever, close silently!
        for c in (self.my_cursor, self.my_connection):
            try:
                c.close()
            except:
                pass

db = MyDatabase()
time.sleep(30)  # some time to shut down the database
print(db.dummy())
Output:
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
(3,)
Note: _connect itself is not decorated, so this code assumes an initial connect always works!
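If the initial connection should be resilient as well, _connect itself can loop with the same class attributes. A minimal sketch of that idea (my addition, not part of the original answer):
def _connect(self):
    # Sketch: retry the initial connect too, reusing _reconnectTries/_reconnectIdle.
    for attempt in range(self._reconnectTries):
        try:
            self.my_connection = psycopg2.connect(**self.params)
            self.my_cursor = self.my_connection.cursor()
            return
        except psycopg2.OperationalError:
            print("Connect attempt %s failed, idle for %s seconds"
                  % (attempt + 1, self._reconnectIdle))
            time.sleep(self._reconnectIdle)
    raise psycopg2.OperationalError("could not connect after %s tries" % self._reconnectTries)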


Socket IO and postgresql hangups in while loop

Tech stack
Frontend: VueJS
Backend: Flask Python3
Issue:
I am using VueJS on the front end with the socket.io client library; when a button is clicked, it emits an event on the socket connected to my backend.
Once the event is emitted and received on the backend, the code enters a method with a while-true loop that is only broken out of when a certain value is present in the DB; until that point (which can take 20 minutes to 4 hours) it stays in this loop and checks the status every few seconds.
The error I am receiving (see below) is due to too many connected clients, which I don't get, because control exits the code that performs the query and the stack should be clear (Postgres client connection closed); the fact that the query is in a with statement makes me say this, but obviously it's not.
My question is: how do I return the client, and allow this to do the same task without the error and more efficiently?
ERROR:
File "/home/kidcode/.cache/pypoetry/virtualenvs/application-backend-p56sp5Ck-py3.8/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) FATAL: sorry, too many clients already
FATAL: sorry, too many clients already
(Background on this error at: https://sqlalche.me/e/14/e3q8)
CODE
filename(io.py)
from flask_socketio import SocketIO

socketio = SocketIO(cors_allowed_origins="*")
filename(socket_listener.py)
from ..io import socketio
from flask_socketio import emit, disconnect

@socketio.on("connect")
def test_connect():
    emit("after connect", {"data": "Client has connected!"})

@socketio.on("system_started")
def test_broadcast_message(message):
    socketio.start_background_task(target=SystemMonitorHandler(socketio))

@socketio.on("disconnect_request", namespace="/test")
def disconnect_request():
    @copy_current_request_context
    def can_disconnect():
        disconnect()
filename(system_monitor.py)
class SystemMonitorHandler:
    def __init__(self, socketio):
        self.socketio = socketio
        self.monitor_system_progress()

    def monitor_system_progress(self) -> None:
        """[summary]"""
        # TODO use values from the query to break this loop
        while True:
            self.socketio.sleep(2)
            _system_queries: object = SystemQueries()
            _system_status: dict = (
                _system_queries.get_system_job_status(system_id=1, uuid=2)
            )
            self.socketio.emit("message", {"data": _system_status})

    def get_system_job_status(self, system_id: str, uuid: str) -> dict:
        _system_job_schema: object = SystemJobSchema(many=True)
        try:
            with SqlAlchemyUnitOfWork(
                session=request_session_factory, files=SystemJobRepository
            ) as uow:
                return _system_job_schema.dump(
                    uow.repositories.files.filter(
                        system_id=system_id, requested_by=uuid
                    )
                    .with_entities(
                        SystemModel.system_status,
                        SystemModel.job_end_date,
                        SystemModel.job_start_date,
                    )
                    .all()
                )
        except (DatabaseTransactionError, DatabaseSessionError) as err:
            self.logger.exception(
                ConsoleColourHelper.FAIL
                + "Unable to fetch requested resource from database : {}".format(err)
            )
import abc
from functools import cached_property
from types import SimpleNamespace
from typing import Any, Callable, Iterable, Union
from sqlalchemy.orm import Session

class AbstractUnitOfWork(abc.ABC):
    repositories: SimpleNamespace

    def __enter__(self) -> "AbstractUnitOfWork":
        return self

    def __exit__(self, *args):
        pass

    @abc.abstractmethod
    def commit(self):
        raise NotImplementedError

    @abc.abstractmethod
    def rollback(self):
        raise NotImplementedError
============= separate file START ==========================
class AbstractRepository(abc.ABC):
    @abc.abstractmethod
    def filter(self, **kwargs) -> Iterable[Any]:
        raise NotImplementedError

    @abc.abstractmethod
    def save(self, model: Any):
        raise NotImplementedError

    @abc.abstractmethod
    def get(self, id_: Any) -> Any:
        raise NotImplementedError

class SQLAlchemyAbstractRepository(AbstractRepository, abc.ABC):
    model: Any

    def __init__(self, session: Session):
        self.session = session

    def filter(self, **kwargs):
        return self.session.query(self.model).filter_by(**kwargs)

    def save(self, model):
        self.session.add(model)
        self.session.flush()

    def get(self, id_):
        return self.session.get(id_)

    def delete(self):
        # TODO
        pass

class SqlAlchemyUnitOfWork(AbstractUnitOfWork):
    def __init__(
        self,
        session: Union[Callable[[], Session], Session],
        **kwargs: SQLAlchemyAbstractRepository
    ):
        self._session = session
        self._repository_config = kwargs

    @cached_property
    def session(self):
        return self._session() if callable(self._session) else self._session

    def __enter__(self) -> AbstractUnitOfWork:
        repositories = {
            name: repository(self.session)
            for name, repository in self._repository_config.items()
        }
        self.repositories = SimpleNamespace(**repositories)
        return super().__enter__()

    def __exit__(self, *args):
        self.session.close()

    def commit(self):
        self.session.commit()

    def rollback(self):
        self.session.rollback()
========== separate file END ==========================

How do I initiate a Snowflake connection within a class?

I want to create a class in Python that establishes a connection to Snowflake. I have a user.txt file that specifies an account, warehouse, database, schema, and my user. Here is the code I have so far:
import pandas as pd
import snowflake.connector
import os
from getpass import getpass
import sfcommon.sfdb as sf

class Database:
    def __init__(self):
        self.environ = os.environ['SFPASSWORD'] = getpass(prompt='Password: ', stream=None)
        self._conn = sf.get_connect()
        self._cursor = self._conn.cursor()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @property
    def connection(self):
        return self._conn

    @property
    def cursor(self):
        return self._cursor

    def commit(self):
        self.connection.commit()

    def close(self, commit=True):
        if commit:
            self.commit()
        self.connection.close()

    def execute(self, sql, params=None):
        self.cursor.execute(sql, params or ())

    def fetchall(self):
        return self.cursor.fetchall()

    def fetchone(self):
        return self.cursor.fetchone()

    def query(self, sql, params=None):
        self.cursor.execute(sql, params or ())
        return self.fetchall()
sql = "select * from test_database"
After running this class, I would hope to run this block of code to retrieve all the rows in my database in a pandas dataframe:
with Database() as test:
    resultSet = pd.read_sql(sql, test.connection)
I have managed to create a connection to Snowflake but have been trying to get it into a class for easier readability.
Also, I have this block of code that I'm not sure how to integrate into my Database class
%reload_ext sql_magic
%config SQL.conn_name = 'conn'
You can use the code below to get a dataframe:
import snowflake.connector
import pandas as pd

# creates a connection
def openConn():
    ctx = snowflake.connector.connect(
        account='',
        user='',
        password='',
        database='',
        schema='public',
        warehouse='',
        role='',
    )
    return ctx

def main():
    conn = openConn()
    cs = conn.cursor()
    cs.execute("""<query>""")
    # Put it all into a data frame
    sql_data = pd.DataFrame(cs.fetchall())
    print(sql_data)

if __name__ == "__main__":
    main()
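Note that pd.DataFrame(cs.fetchall()) produces a frame without column names. A small tweak (my suggestion, not part of the original answer) recovers them from the cursor metadata:
# Suggested tweak: keep the column names from the cursor's description.
cs.execute("""<query>""")
sql_data = pd.DataFrame(
    cs.fetchall(),
    columns=[col[0] for col in cs.description],
)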

Python Postgres psycopg2 ThreadedConnectionPool exhausted

I have looked into several 'too many clients' related topics here but still can't solve my problem, so I have to ask again for my specific case.
Basically, I set up my local Postgres server and need to do tens of thousands of queries, so I used the Python psycopg2 package. Here is my code:
import psycopg2
import pandas as pd
import numpy as np
from flashtext import KeywordProcessor
from psycopg2.pool import ThreadedConnectionPool
from concurrent.futures import ThreadPoolExecutor

df = pd.DataFrame({'S': ['California', 'Ohio', 'Texas'], 'T': ['Dispatcher', 'Zookeeper', 'Mechanics']})
# df = pd.concat([df]*10000)  # repeat df 10000 times
DSN = "postgresql://User:password@localhost/db"
tcp = ThreadedConnectionPool(1, 800, DSN)

def do_one_query(inputS, inputT):
    conn = tcp.getconn()
    c = conn.cursor()
    q = """SELECT * from eridata where "State" = 'California' and "Title" = 'Dispatcher' limit 1;"""
    c.execute(q)
    all_results = c.fetchall()
    for row in all_results:
        return row
    tcp.putconn(conn, close=True)

cnt = 0
for idx, row in df.iterrows():
    cnt += 1
    with ThreadPoolExecutor(max_workers=1) as pool:
        ret = pool.submit(do_one_query, row["S"], row["T"])
        print(ret.result())
print(cnt)
The code runs well with a small df. If I repeat df 10000 times, I get an error message saying the connection pool is exhausted. I thought the connections I used had been closed by this line:
tcp.putconn(conn, close=True)
But I guess they actually are not closed? How can I get around this issue?
I've struggled to find really detailed information on how the ThreadedConnectionPool works. https://bbengfort.github.io/observations/2017/12/06/psycopg2-transactions.html ain't bad, but it turns out that its claim that getconn blocks until a connection becomes available is incorrect. Checking the code, all ThreadedConnectionPool adds is a lock around the AbstractConnectionPool methods to prevent race conditions. If more than maxconn connections are requested at any point, the connection pool exhausted PoolError will be raised.
If you want something a bit simpler than the accepted answer, further wrapping the methods in a Semaphore providing the blocking until a connection becomes available should do the trick:
from psycopg2.pool import ThreadedConnectionPool as _ThreadedConnectionPool
from threading import Semaphore

class ThreadedConnectionPool(_ThreadedConnectionPool):
    def __init__(self, minconn, maxconn, *args, **kwargs):
        self._semaphore = Semaphore(maxconn)
        super().__init__(minconn, maxconn, *args, **kwargs)

    def getconn(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            return super().getconn(*args, **kwargs)
        except:
            self._semaphore.release()
            raise

    def putconn(self, *args, **kwargs):
        try:
            super().putconn(*args, **kwargs)
        finally:
            self._semaphore.release()

    # closeall is inherited as is. This means the Semaphore does
    # not get reset, but neither do the core structures for
    # maintaining the pool in the original ThreadedConnectionPool,
    # so a closed pool is not intended to be reused once closed.
Note that ConnectionPools, both standard and threaded, only come with the three putconn, getconn and closeall methods, and nothing fancy like context management. So the above should cover all existing functionality.
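For completeness, here is a minimal usage sketch (my addition, assuming the wrapper above and a valid DSN); the important part is returning the connection in a finally block so the semaphore is always released:
pool = ThreadedConnectionPool(1, 10, "postgresql://user:password@localhost/db")

conn = pool.getconn()  # blocks here if 10 connections are already checked out
try:
    with conn.cursor() as cur:
        cur.execute("SELECT 1")
        print(cur.fetchone())
finally:
    pool.putconn(conn)  # releases the semaphore for waiting threads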
You need to use a queue on top of your pool.
Something like the following should work:
import gevent, sys, random, psycopg2, logging
from contextlib import contextmanager
from gevent.queue import Queue
from gevent.socket import wait_read, wait_write
from psycopg2.pool import ThreadedConnectionPool
from psycopg2 import extensions, OperationalError

logger = logging.getLogger(__name__)

poolsize = 100  # number of max connections
pdsn = ''  # put your dsn here

if sys.version_info[0] >= 3:
    integer_types = (int,)
else:
    import __builtin__
    integer_types = (int, __builtin__.long)

class ConnectorError(Exception):
    """This is a base class for all CONNECTOR related exceptions."""
    pass

# simplified calls etc. db.fetchall(SQL, arg1, arg2...)
def cursor(): return Pcursor()
def fetchone(PSQL, *args): return Pcursor().fetchone(PSQL, *args)
def fetchall(PSQL, *args): return Pcursor().fetchall(PSQL, *args)
def execute(PSQL, *args): return Pcursor().execute(PSQL, *args)

# singleton connection pool, gets reset if a connection is bad or drops
_pgpool = None

def pgpool():
    global _pgpool
    if not _pgpool:
        try:
            _pgpool = PostgresConnectionPool(maxsize=poolsize)
        except psycopg2.OperationalError as exc:
            _pgpool = None
    return _pgpool

class Pcursor(object):
    def __init__(self, **kwargs):
        # in case of a lost connection lets sit and wait till it's online
        global _pgpool
        if not _pgpool:
            while not _pgpool:
                try:
                    pgpool()
                except:
                    logger.debug('Attempting Connection To Postgres...')
                    gevent.sleep(1)

    def fetchone(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                print(sys._getframe().f_back.f_code)
                print(sys._getframe().f_back.f_code.co_name)
                logger.warning(str(exc))
            logger.debug(cursor.query)
            return cursor.fetchone()

    def fetchall(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                print(sys._getframe().f_back.f_code)
                print(sys._getframe().f_back.f_code.co_name)
                logger.warning(str(exc))
            logger.debug(cursor.query)
            return cursor.fetchall()

    def execute(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                print(sys._getframe().f_back.f_code)
                print(sys._getframe().f_back.f_code.co_name)
                logger.warning(str(exc))
            finally:
                logger.debug(cursor.query)
            return cursor.query

    def fetchmany(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            while 1:
                items = cursor.fetchmany()
                if not items:
                    break
                for item in items:
                    yield item

class AbstractDatabaseConnectionPool(object):
    def __init__(self, maxsize=poolsize):
        if not isinstance(maxsize, integer_types):
            raise TypeError('Expected integer, got %r' % (maxsize, ))
        self.maxsize = maxsize
        self.pool = Queue()
        self.size = 0

    def create_connection(self):
        # overridden by PostgresConnectionPool
        raise NotImplementedError()

    def get(self):
        pool = self.pool
        if self.size >= self.maxsize or pool.qsize():
            return pool.get()
        self.size += 1
        try:
            new_item = self.create_connection()
        except:
            self.size -= 1
            raise
        return new_item

    def put(self, item):
        self.pool.put(item)

    def closeall(self):
        while not self.pool.empty():
            conn = self.pool.get_nowait()
            try:
                conn.close()
            except Exception:
                pass

    @contextmanager
    def connection(self, isolation_level=None):
        conn = self.get()
        try:
            if isolation_level is not None:
                if conn.isolation_level == isolation_level:
                    isolation_level = None
                else:
                    conn.set_isolation_level(isolation_level)
            yield conn
        except:
            if conn.closed:
                conn = None
                self.closeall()
            raise
        else:
            if conn.closed:
                raise OperationalError("Cannot commit because connection was closed: %r" % (conn, ))
        finally:
            if conn is not None and not conn.closed:
                if isolation_level is not None:
                    conn.set_isolation_level(isolation_level)
                self.put(conn)

    @contextmanager
    def cursor(self, *args, **kwargs):
        isolation_level = kwargs.pop('isolation_level', None)
        with self.connection(isolation_level) as conn:
            try:
                yield conn.cursor(*args, **kwargs)
            except:
                global _pgpool
                _pgpool = None
                del(self)

class PostgresConnectionPool(AbstractDatabaseConnectionPool):
    def __init__(self, **kwargs):
        try:
            self.pconnect = ThreadedConnectionPool(1, poolsize, dsn=pdsn)
        except:
            global _pgpool
            _pgpool = None
            raise ConnectorError('Database Connection Failed')
        maxsize = kwargs.pop('maxsize', None)
        self.kwargs = kwargs
        AbstractDatabaseConnectionPool.__init__(self, maxsize)

    def create_connection(self):
        self.conn = self.pconnect.getconn()
        self.conn.autocommit = True
        return self.conn

def gevent_wait_callback(conn, timeout=None):
    """A wait callback useful to allow gevent to work with Psycopg."""
    while 1:
        state = conn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            wait_read(conn.fileno(), timeout=timeout)
        elif state == extensions.POLL_WRITE:
            wait_write(conn.fileno(), timeout=timeout)
        else:
            raise ConnectorError("Bad result from poll: %r" % state)

extensions.set_wait_callback(gevent_wait_callback)
Then you can call your connection via this:
import db
db.Pcursor().execute(PSQL, arg1, arg2, arg3)
Basically I borrowed the gevent example of async postgres and modified it to support threadpooling via psycopg2.
https://github.com/gevent/gevent/blob/master/examples/psycopg2_pool.py
I added what psycogreen does inside the module, so all you need to do is import and call the class. Each call to the class stacks a new query on the queue, but only uses the pool at a certain size. This way you don't run out of connections. This is essentially similar to what PGBouncer does, which I think would also eliminate your problem.
https://pgbouncer.github.io/
Your problem here is that you do not actually return the connection to the pool, but close it forever with
tcp.putconn(conn, close=True)
See the documentation here http://initd.org/psycopg/docs/pool.html
If close is True, discard the connection from the pool.
So, if you put 800 connections into your pool, after 801 loops you will get the "exhausted error" because your connection pool size is zero.
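A minimal sketch of the fix (my addition, not part of the original answer): drop close=True and put the connection back in a finally block, so it is returned even when do_one_query returns early or raises. It also passes inputS and inputT as query parameters, which the original hard-coded:
def do_one_query(inputS, inputT):
    conn = tcp.getconn()
    try:
        c = conn.cursor()
        c.execute(
            'SELECT * FROM eridata WHERE "State" = %s AND "Title" = %s LIMIT 1;',
            (inputS, inputT),
        )
        return c.fetchone()
    finally:
        tcp.putconn(conn)  # no close=True: the connection goes back to the pool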
I think the reason you get PoolError("exhausted connections") may be that you return before putting the connection back when all_results is not None, so the connection pool gets exhausted:
def do_one_query(inputS, inputT):
    ...
    for row in all_results:
        return row  # <---- returns before putconn when all_results is not None
    tcp.putconn(conn, close=True)

for idx, row in df.iterrows():
    cnt += 1
    with ThreadPoolExecutor(max_workers=1) as pool:
        ret = pool.submit(do_one_query, row["S"], row["T"])
        print(ret.result())
print(cnt)
I made an ugly implementation: when the pool is exhausted or the connection is lost, it tries to reconnect to get a new connection, like below.
import uuid
from psycopg2 import OperationalError
from psycopg2.pool import ThreadedConnectionPool, PoolError

class PostgresConnectionPool:
    def __init__(self, minconn, maxconn, *args, **kwargs):
        self.pool = ThreadedConnectionPool(minconn=minconn, maxconn=maxconn, *args, **kwargs)

    def get_conn(self):
        try:
            # check if connection lost or pool exhausted
            con = self.pool.getconn()
            cur = con.cursor()
            cur.execute("select 1;")
        except (OperationalError, PoolError) as oe:
            print(f"get pg connection with err:{oe}, reconnect")
            # reconnect via the pool's internal _connect (hence "ugly")
            key = str(uuid.uuid4())
            con = self.pool._connect(key)
        return con

unittest.mock: Check if method was called within a decorator

I wrote a method in a class that tries to read some data from a database. I've also decorated this method with a decorator which checks if the connection to the database was open, and in case it was not, restarts it.
class HiveConnection(object):
    def __init__(self, host, user, password):
        """Instantiate a HiveConnector object."""
        self.host = host
        self.user = user
        self.password = password
        self.port = 10000
        self.auth_mechanism = 'PLAIN'
        self._connection = self._connect()

    def _connect(self):
        """Start the connection to database."""
        try:
            return connect(host=self.host, port=self.port,
                           user=self.user, password=self.password,
                           auth_mechanism=self.auth_mechanism)
        except TTransportException as error:
            print('Failed attempt to connect')
            self._connect()

    def _disconnect(self):
        """Close connection to database."""
        self._connection.close()

    def hadoop_connection_handler(function):
        """Start a database connection if not already open."""
        @wraps(function)
        def wrapper(inst, *args, **kwargs):
            if not inst._connection:
                inst._connect()
            return function(inst, *args, **kwargs)
        return wrapper

    @hadoop_connection_handler
    def read(self, query):
        """Execute a query to pull the data.

        Args:
            query: [str] Query to pull the data.

        Returns:
            A list of namedtuple (`Row`).
        """
        columns = self._columns(query)
        cursor = self._connection.cursor()
        cursor.execute(query)
        Record = namedtuple("Record", columns)
        data = map(Record._make, cursor.fetchall())
        cursor.close()
        return data
Now I want to write a unit test to make sure this actually works.
from unittest.mock import patch, MagicMock
from nose.tools import assert_equal, raises
from services.db_connections import HiveConnection

class TestHiveConnection:
    """Integration test suite for HiveConnection class."""

    def setUp(self):
        self.hive = HiveConnection(user='username', password='password', host='myhost.net')

    def test_reconnect(self):
        """If the connection drops, the object should be able to establish a
        new connection.
        """
        query = 'SELECT * FROM database.table 1'
        self.hive._connect = MagicMock()
        self.hive._disconnect()
        self.hive.read(query)
        assert_equal(self.hive._connect.called, True)
The above test always fails: self.hive._connect.called is in fact False. I think this is because the _connect() method is called within the decorator. How should I change the test to account for that?
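One reading of the code above (a sketch, not an accepted answer): the decorator only calls _connect() when inst._connection is falsy, and _disconnect() closes the socket but leaves self._connection set to the (closed) connection object, so the wrapper never triggers a reconnect. Resetting the attribute makes the decorator fire; the side_effect and the _columns stub below are assumptions I added so read() can run against mocks:
def test_reconnect(self):
    query = 'SELECT * FROM database.table'
    self.hive._disconnect()
    self.hive._connection = None  # the decorator checks this attribute, not the socket state
    self.hive._connect = MagicMock(
        side_effect=lambda: setattr(self.hive, '_connection', MagicMock())
    )
    self.hive._columns = MagicMock(return_value=['col_a', 'col_b'])  # stub read() internals
    self.hive.read(query)
    assert_equal(self.hive._connect.called, True)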

How to do the Hollywood principle between processes in Python?

I was planning to change my project to multiprocessing so I can use more resources. Here's my database module code:
import pymysql
import threading

class tdb:
    def __init__(self):
        self.totalEffected = 0

    def start(self):
        self.conn = pymysql.connect(host='xxxx', port=3306, user='root', passwd='xxxx', db='xxxx', charset='utf8')

    def select(self, sql, args=None):
        cur = self.conn.cursor()
        cur.execute(sql, args)
        result = cur.fetchall()
        cur.close()
        return result

    def execute(self, sql, args=None):
        cur = self.conn.cursor()
        result = cur.execute(sql, args)
        cur.close()
        self.totalEffected += result
        return result

    # def __commit(self, callback):

    def __commitCallback(self, result):
        print('commit result:', result)
        self.conn.close()

    def errorc(self, *args):
        print('error')

    def end(self):
        # init()
        # p.apply_async(self.conn.commit, callback=self.__commitCallback, error_callback=self.errorc)
        if self.totalEffected != 0:
            thread = threading.Thread(target=self.t)
            thread.start()
        else:
            self.conn.close()
        # p.apply(self.conn.commit)
        # self.conn.close()
        # print('result:', result.get())

    def t(self):
        self.conn.commit()
        self.conn.close()
The only operation that really needs special handling is conn.commit(). I use a thread to do it, so I can return immediately. I once used Pool.apply_async(), but it didn't call back, so I want to know how to make the other process call me, so I don't have to spend my time waiting to receive.
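For reference, a minimal sketch of the apply_async callback pattern (my addition, not an accepted answer). One likely reason the original attempt never called back: a pymysql connection holds a socket and cannot be pickled, so self.conn.commit cannot be shipped to another process; the worker has to open its own connection. The host/credential values are placeholders copied from the question:
import multiprocessing
import pymysql

def commit_in_worker(conn_kwargs):
    # Runs in the worker process: opens its own connection there,
    # since connections cannot cross process boundaries.
    conn = pymysql.connect(**conn_kwargs)
    try:
        conn.commit()
        return 'committed'
    finally:
        conn.close()

def on_done(result):
    # Runs in the parent when the worker finishes ("don't call us, we'll call you").
    print('commit result:', result)

def on_error(exc):
    # Without error_callback, worker failures are silently swallowed.
    print('commit failed:', exc)

if __name__ == '__main__':
    pool = multiprocessing.Pool(2)
    pool.apply_async(
        commit_in_worker,
        ({'host': 'xxxx', 'port': 3306, 'user': 'root',
          'passwd': 'xxxx', 'db': 'xxxx', 'charset': 'utf8'},),
        callback=on_done, error_callback=on_error,
    )
    pool.close()
    pool.join()  # callbacks fire on a pool-internal thread before join returns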
