I cannot execute my class because my class has no attribute 'execute'`? - python

I have my class here:
I have my code here:
import logging
import pymysql as pm
import os
import json
class className:
env=None
config=None
def __init__(self, env):
self.config=className.get_config(env)
self.env=env
def get_config(env):
cwd=os.getcwd()
f=open(cwd+"\config_file_name"+".json")
f_config=json.load(f)
f.close()
return f_config
def DB_connection(env):
cnxn = pm.connect(
user="+username+",
password="+password+",
host="+host+",
port="+port+",
database="+database+"
)
def table1():
cnxn=None
cnxn=className.DB_connection
cur=cnxn.cursor()
table1_INSERT_QUERY="CALL sp_table1_Insert_Daily_Records();"
table1_EXCEPTIONS_QUERY="CALL sp_table1_Exceptions();"
try:
cur.execute(table1_INSERT_QUERY)
cnxn.commit()
except pm.Error as e:
print(f"Error: {e}")
except Exception as e:
logging.exception(e)
else:
print("table1 insertion query ran successfully, {} records updated.".format(cur.rowcount))
try:
cur.execute(table1_EXCEPTIONS_QUERY)
cnxn.commit()
except pm.Error as e:
print(f"Error: {e}")
except Exception as e:
logging.exception(e)
else:
print("table1 exceptions query ran successfully, {} exceptions updated.".format(cur.rowcount))
cnxn.close()
def main():
ve=className
ve.execute()
if __name__ == "__main__":
main()
I am trying to execute my class so it can execute my functions. Basically it creates an exceptions report defining what records are missing from yesterday. This function executes a stored procedure. I get the error saying:
AttributeError: type object 'className' has no attribute 'execute'
How do I execute my class and my functions?

The initial problem is that your class method is table1() but you are calling execute(). After that things get a little muddles between class and instance.
I would try what this does for you. Note this is untested.
import json
import logging
import pymysql
class ClassName:
def __init__(self, env_filename):
self.env = env_filename
self.config = self.get_config()
def get_config(self):
with open(self.env) as file_in:
return json.load(file_in)
def DB_connection(self):
return pymysql.connect(
user=self.config["+username+"],
password=self.config["+password+"],
host=self.config["+host+"],
port=self.config["+port+"],
database=self.config["+database+"]
)
def execute(self):
table1_INSERT_QUERY = "CALL sp_table1_Insert_Daily_Records();"
table1_EXCEPTIONS_QUERY = "CALL sp_table1_Exceptions();"
with self.DB_connection() as cnxn:
with cnxn.cursor() as cur:
try:
cur.execute(table1_INSERT_QUERY)
print("table1 insertion query ran successfully, {} records updated.".format(cur.rowcount))
cur.execute(table1_EXCEPTIONS_QUERY)
print("table1 exceptions query ran successfully, {} exceptions updated.".format(cur.rowcount))
except pymysql.Error as e:
print(f"Error: {e}")
except Exception as e:
logging.exception(e)
else:
cur.commit()
def main():
ve = ClassName("<path to env file>")
ve.execute()
if __name__ == "__main__":
main()

Related

psycopg2 - Unkeyed Connection error in multithreading

I am implementing connection pooling with Postgres for a multithreading application, Here I am creating two pools and storing in a singleton instance to be able to access the same instance from the entire application with the class RepositoryConnPool
Then we have the second class that is for context management, this I made to be able to execute multiple query's with the same connection in a with context, I have two pools of connections because when I execute a query i want to be able to execute a select query to replica database and modification of data to the master database. I am getting the following error while executing in a multithreading environment. This is not happening while I run without multithreading. Here is the error log :
ERROR ;2020-06-03 11:05:42,826; ThreadPoolExecutor-0_4; modules.table_modify_module; table_modify_module:insert_host;60: trying to put unkeyed connection
ERROR ;2020-06-03 11:05:42,826; ThreadPoolExecutor-0_21; table_modify_module; table_modify_module:insert_host;60: trying to put unkeyed connection
ERROR ;2020-06-03 11:05:42,885; ThreadPoolExecutor-0_12; table_modify_module; table_modify_module:get_inst_detail;367: trying to put unkeyed connection
class RepositoryConnPool:
__instance = None
#This method overcharge is to make use of singleton pattern
def __new__(cls):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
return cls.__instance
def __init__(self):
try:
#I get connection details from vault
inventory_master = vm.get_secret_by_base_path("xxxxxx")
inventory_repl = vm.get_secret_by_base_path("xxxxx")
self.repo_connection_pool_master = self.setup_conn_pool(inventory_master)
self.repo_connection_pool_repl = self.setup_conn_pool(inventory_repl)
except Exception as e:
logger.error("GET repository address failed: ", exc_info=e)
def setup_conn_pool(self,inventory_data):
repo_dbausername = 'dba_admin'
repo_dbapassword = inventory_data['accounts']['dba_admin']['password']
repo_host = inventory_data['fqdn']
repo_port = inventory_data['port']
try:
repo_connection_pool = psycopg2.pool.ThreadedConnectionPool(10, 30, user=repo_dbausername,
password=repo_dbapassword,
host=repo_host,
port=repo_port,
database="xxxxx")
except Exception as e:
logger.error(e)
return repo_connection_pool
#This method will fetch a connection from pool
def get_master_connection(self):
return self.repo_connection_pool_master.getconn()
def get_repl_connection(self):
return self.repo_connection_pool_repl.getconn()
class RepositoryConnection:
#Gets connection from RepositoryConnPool
def __init__(self):
self.repo_conn_pool = RepositoryConnPool()
self._repo_conn_master = self.repo_conn_pool.get_master_connection()
self._repo_conn_repl = self.repo_conn_pool.get_repl_connection()
def execute_query(self, query, commit=False, call_master = False, retry_count = 0):
cursor = None
try:
if query.strip().upper().startswith("SELECT") and call_master == False:
repo_query = query
cursor = self._repo_conn_repl.cursor()
cursor.execute(repo_query)
if commit:
self._repo_conn_repl.commit()
else:
try:
result = cursor.fetchall()
return result
except:
logging.info(f"Query executed:{query} and no result was returned")
else:
repo_query = query
cursor = self._repo_conn_master.cursor()
cursor.execute(repo_query)
if commit:
self._repo_conn_master.commit()
else:
try:
result = cursor.fetchall()
return result
except:
logging.info(f"Query executed:{query} and no result was returned")
#This needs to change to handle connection error connection in different way
except Exception as e:
if(retry_count < 3):
logger.warning(f"There was an error during execution of query, retry will be done {query} ")
return self.execute_query(query,commit,call_master = True, retry_count= retry_count+1 )
else:
logger.fatal("All 3 connection retry failed")
finally:
if cursor:
cursor.close()
#Checks if connection is up
def __enter__(self):
try:
if self._repo_conn_master and self._repo_conn_repl:
return self
else:
raise Exception()
except Exception as e:
logger.error(e)
#Returns connection to pool
def __exit__(self, exc_type, exc_val, exc_tb):
if self._repo_conn_master:
self.repo_conn_pool.repo_connection_pool_master.putconn(self._repo_conn_master)
if self._repo_conn_repl:
self.repo_conn_pool.repo_connection_pool_repl.putconn(self._repo_conn_repl)

Exception in thread StompReceiverThread-1

I'm having trouble with this error:
Exception in thread StompReceiverThread-1 (most likely raised during
interpreter shutdown):
That is no traceback at all.. just that.
Usualy everything works fine but rarely it happens and then the action does not conclude.
Any tips?
My code:
class Listener(stomp.ConnectionListener):
def __init__(self, conn, request):
self.conn = conn
self.request = request
def on_error(self, headers, message):
global WAITING_RESPONSE
print('received an error: ' + message)
WAITING_RESPONSE = False
def on_message(self, headers, message):
global WAITING_RESPONSE
try:
msg = json.loads(message)
if str(msg.get('transaction_id','')) == str(CURRENT_ID):
printDebugLine('Queue response:'+str(message))
manageQueueResponse(message,self.request)
WAITING_RESPONSE = False
self.conn.ack(headers['message-id'], '11')
except stomp.exception.ConnectFailedException:
print('Stomp error on message')
sys.exit(3)
except Exception as e:
print('ERROR: %s' % str(e))
sys.exit(3)
class Queue(object):
def __init__(self):
self.host = xx
self.port = xx
self.login = xx
self.passwd = xx
self.request = {}
self.start()
def start(self):
try:
self.conn = stomp.Connection(host_and_ports=[(self.host, self.port)])
self.conn.start()
self.conn.connect(self.login, self.passwd, wait=True)
self.conn.set_listener('xx', Listener(self.conn, self.request))
self.conn.subscribe(destination='xx', id='xx', ack='xx')
except stomp.exception.ConnectFailedException:
print('ERROR: unable to connect')
sys.exit(3)
except Exception as e:
print('ERROR: %s' % str(e))
sys.exit(3)
def send(self, data):
global CURRENT_ID
while WAITING_RESPONSE:
time.time(0.1)
try:
CURRENT_ID = str(uuid.uuid4())
data.update({'transaction_id': CURRENT_ID})
b = json.dumps(data)
self.request.update(data)
printDebugLine('Queue request:'+str(data))
self.conn.send(body=b, destination='xx')
timeout(data,self.request,29)
except stomp.exception.ConnectFailedException:
print('ERROR: unable to connect')
except Exception as e:
print('ERROR: %s' % str(e))
It looks like your main program is exiting, the interpreter is cleaning up things, but the stomp receiver thread was not shutdown first. The receiver thread goes to do something but basic modules are no longer available, so it gives an exception message, but cannot print a Traceback because that fuctionality is no longer available due to the program exiting.
Look at why the main program would be exiting.

Python Postgres psycopg2 ThreadedConnectionPool exhausted

I have looked into several 'too many clients' related topic here but still can't solve my problem, so I have to ask this again, for me specific case.
Basically, I set up my local Postgres server and need to do tens of thousands of queries, so I used the Python psycopg2package. Here are my codes:
import psycopg2
import pandas as pd
import numpy as np
from flashtext import KeywordProcessor
from psycopg2.pool import ThreadedConnectionPool
from concurrent.futures import ThreadPoolExecutor
df = pd.DataFrame({'S':['California', 'Ohio', 'Texas'], 'T':['Dispatcher', 'Zookeeper', 'Mechanics']})
# df = pd.concat([df]*10000) # repeat df 10000 times
DSN = "postgresql://User:password#localhost/db"
tcp = ThreadedConnectionPool(1, 800, DSN)
def do_one_query(inputS, inputT):
conn = tcp.getconn()
c = conn.cursor()
q = r"SELECT * from eridata where "State" = 'California' and "Title" = 'Dispatcher' limit 1;"
c.execute(q)
all_results = c.fetchall()
for row in all_results:
return row
tcp.putconn(conn, close=True)
cnt=0
for idx, row in df.iterrows():
cnt+=1
with ThreadPoolExecutor(max_workers=1) as pool:
ret = pool.submit(do_one_query, row["S"], row["T"])
print ret.result()
print cnt
The code runs well with a small df. If I repeat df by 10000 times, I got error message saying connection pool exhausted
. I though the connections I used have been closed by this line:
tcp.putconn(conn, close=True)
But I guess actually they are not closed? How can I get around this issue?
I've struggled to find really detailed information on how the ThreadedConnectionPool works. https://bbengfort.github.io/observations/2017/12/06/psycopg2-transactions.html ain't bad, but it turns out that its claim that getconn blocks until a connection becomes available is incorrect. Checking the code, all ThreadedConnectionPool adds is a lock around the AbstractConnectionPool methods to prevent race conditions. If more than maxconn connections are attempted used at any point, the connection pool exhausted PoolError will be raised.
If you want something a bit simpler than the accepted answer, further wrapping the methods in a Semaphore providing the blocking until a connection becomes available should do the trick:
from psycopg2.pool import ThreadedConnectionPool as _ThreadedConnectionPool
from threading import Semaphore
class ThreadedConnectionPool(_ThreadedConnectionPool):
def __init__(self, minconn, maxconn, *args, **kwargs):
self._semaphore = Semaphore(maxconn)
super().__init__(minconn, maxconn, *args, **kwargs)
def getconn(self, *args, **kwargs):
self._semaphore.acquire()
try:
return super().getconn(*args, **kwargs)
except:
self._semaphore.release()
raise
def putconn(self, *args, **kwargs):
try:
super().putconn(*args, **kwargs)
finally:
self._semaphore.release()
# closeall is inherited as is. This means the Semaphore does
# not get reset, but neither do the core structures for
# maintaining the pool in the original ThreadedConnectionPool
# so a closed pool is not intended to be reused once closed.
Note that ConnectionPools, both standard and threaded, only come with the three putconn, getconn and closeall methods, and nothing fancy like context management. So the above should cover all existing functionality.
You need to use a queue on top of your pool.
Something like the following should work:
import gevent, sys, random, psycopg2, logging
from contextlib import contextmanager
from gevent.queue import Queue
from gevent.socket import wait_read, wait_write
from psycopg2.pool import ThreadedConnectionPool
from psycopg2 import extensions, OperationalError
import sys
logger = logging.getLogger(__name__)
poolsize = 100 #number of max connections
pdsn = '' # put your dsn here
if sys.version_info[0] >= 3:
integer_types = (int,)
else:
import __builtin__
integer_types = (int, __builtin__.long)
class ConnectorError(Exception):
""" This is a base class for all CONNECTOR related exceptions """
pass
#simplified calls etc. db.fetchall(SQL, arg1, arg2...)
def cursor(): return Pcursor()
def fetchone(PSQL, *args): return Pcursor().fetchone(PSQL, *args)
def fetchall(PSQL, *args): return Pcursor().fetchall(PSQL, *args)
def execute(PSQL, *args): return Pcursor().execute(PSQL, *args)
#singleton connection pool, gets reset if a connection is bad or drops
_pgpool = None
def pgpool():
global _pgpool
if not _pgpool:
try:
_pgpool = PostgresConnectionPool(maxsize=poolsize)
except psycopg2.OperationalError as exc:
_pgpool = None
return _pgpool
class Pcursor(object):
def __init__(self, **kwargs):
#in case of a lost connection lets sit and wait till it's online
global _pgpool
if not _pgpool:
while not _pgpool:
try:
pgpool()
except:
logger.debug('Attempting Connection To Postgres...')
gevent.sleep(1)
def fetchone(self, PSQL, *args):
with _pgpool.cursor() as cursor:
try:
cursor.execute(PSQL, args)
except TypeError:
cursor.execute(PSQL, args[0])
except Exception as exc:
print(sys._getframe().f_back.f_code)
print(sys._getframe().f_back.f_code.co_name)
logger.warning(str(exc))
logger.debug(cursor.query)
return cursor.fetchone()
def fetchall(self, PSQL, *args):
with _pgpool.cursor() as cursor:
try:
cursor.execute(PSQL, args)
except TypeError:
cursor.execute(PSQL, args[0])
except Exception as exc:
print(sys._getframe().f_back.f_code)
print(sys._getframe().f_back.f_code.co_name)
logger.warning(str(exc))
logger.debug(cursor.query)
return cursor.fetchall()
def execute(self, PSQL, *args):
with _pgpool.cursor() as cursor:
try:
cursor.execute(PSQL, args)
except TypeError:
cursor.execute(PSQL, args[0])
except Exception as exc:
print(sys._getframe().f_back.f_code)
print(sys._getframe().f_back.f_code.co_name)
logger.warning(str(exc))
finally:
logger.debug(cursor.query)
return cursor.query
def fetchmany(self, PSQL, *args):
with _pgpool.cursor() as cursor:
try:
cursor.execute(PSQL, args)
except TypeError:
cursor.execute(PSQL, args[0])
while 1:
items = cursor.fetchmany()
if not items:
break
for item in items:
yield item
class AbstractDatabaseConnectionPool(object):
def __init__(self, maxsize=poolsize):
if not isinstance(maxsize, integer_types):
raise TypeError('Expected integer, got %r' % (maxsize, ))
self.maxsize = maxsize
self.pool = Queue()
self.size = 0
def create_connection(self):
#overridden by PostgresConnectionPool
raise NotImplementedError()
def get(self):
pool = self.pool
if self.size >= self.maxsize or pool.qsize():
return pool.get()
self.size += 1
try:
new_item = self.create_connection()
except:
self.size -= 1
raise
return new_item
def put(self, item):
self.pool.put(item)
def closeall(self):
while not self.pool.empty():
conn = self.pool.get_nowait()
try:
conn.close()
except Exception:
pass
#contextmanager
def connection(self, isolation_level=None):
conn = self.get()
try:
if isolation_level is not None:
if conn.isolation_level == isolation_level:
isolation_level = None
else:
conn.set_isolation_level(isolation_level)
yield conn
except:
if conn.closed:
conn = None
self.closeall()
raise
else:
if conn.closed:
raise OperationalError("Cannot commit because connection was closed: %r" % (conn, ))
finally:
if conn is not None and not conn.closed:
if isolation_level is not None:
conn.set_isolation_level(isolation_level)
self.put(conn)
#contextmanager
def cursor(self, *args, **kwargs):
isolation_level = kwargs.pop('isolation_level', None)
with self.connection(isolation_level) as conn:
try:
yield conn.cursor(*args, **kwargs)
except:
global _pgpool
_pgpool = None
del(self)
class PostgresConnectionPool(AbstractDatabaseConnectionPool):
def __init__(self,**kwargs):
try:
self.pconnect = ThreadedConnectionPool(1, poolsize, dsn=pdsn)
except:
global _pgpool
_pgpool = None
raise ConnectorError('Database Connection Failed')
maxsize = kwargs.pop('maxsize', None)
self.kwargs = kwargs
AbstractDatabaseConnectionPool.__init__(self, maxsize)
def create_connection(self):
self.conn = self.pconnect.getconn()
self.conn.autocommit = True
return self.conn
def gevent_wait_callback(conn, timeout=None):
"""A wait callback useful to allow gevent to work with Psycopg."""
while 1:
state = conn.poll()
if state == extensions.POLL_OK:
break
elif state == extensions.POLL_READ:
wait_read(conn.fileno(), timeout=timeout)
elif state == extensions.POLL_WRITE:
wait_write(conn.fileno(), timeout=timeout)
else:
raise ConnectorError("Bad result from poll: %r" % state)
extensions.set_wait_callback(gevent_wait_callback)
Then you can call your connection via this:
import db
db.Pcursor().execute(PSQL, arg1, arg2, arg3)
Basically I borrowed the gevent example of async postgres and modified it to support threadpooling via pyscopg2.
https://github.com/gevent/gevent/blob/master/examples/psycopg2_pool.py
I added what psycogreen does inside the module, so all you need to do is import and call the class. Each call to the class stacks a new query on the queue, but only uses the pool at a certain size. This way you don't run out of connections. This is essentially similar to what PGBouncer does, which I think would also eliminate your problem.
https://pgbouncer.github.io/
Your problem here is, that you actually do not return the connection to the pool, but close it forever with
tcp.putconn(conn, close=True)
See the documentation here http://initd.org/psycopg/docs/pool.html
If close is True, discard the connection from the pool.
So, if you put 800 connections into your pool, after 801 loops you will get the "exhausted error" because your connection pool size is zero.
I think the reason why you get PoolError("exhausted connections") maybe you return before close connection when all_results is not None. so, connection pool exhausted
def do_one_query(inputS, inputT):
...
for row in all_results:
return row <---- return row before putconn when all_results is not None,
tcp.putconn(conn, close=True)
for idx, row in df.iterrows():
cnt+=1
with ThreadPoolExecutor(max_workers=1) as pool:
ret = pool.submit(do_one_query, row["S"], row["T"])
print ret.result()
print cnt
I make a ugly implementation with when exhausted or connection lost, try reconnect to get new conn, like below
class PostgresConnectionPool:
def __init__(self, minconn, maxconn, *args, **kwargs):
self.pool = ThreadedConnectionPool(minconn=minconn, maxconn=maxconn, *args, **kwargs)
def get_conn(self):
try:
# check if connection lost or pool exhausted
con = self.pool.getconn()
cur = con.cursor()
cur.execute("select 1;")
except (OperationalError, PoolError) as oe:
print(f"get pg connection with err:{oe}, reconnect")
# reconnect
key = str(uuid.uuid4())
con = self.pool._connect(key)
return con

Using classes inside if statement

I have this kind of code
class disable_file_system_redirection:
if mysystem == "Windows":
_disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
_revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
def __enter__(self):
self.old_value = ctypes.c_long()
self.success = self._disable(ctypes.byref(self.old_value))
def __exit__(self, type, value, traceback):
if self.success:
self._revert(self.old_value)
else:
pass
If test == “yes”:
with disable_file_system_redirection:
try:
“some code”
else:
try:
“same code”
As you can see I wrote the same code twice. I cannot merge those two same codes without getting errors. Is there a possible way to do something like that
If test = = “yes”:
with disable_file_system_redirection:
else:
pass #without disable_file_system_redirection:
“some code”
you can outsource your code into a function:
def code_to_do():
print("code_to_do")
if test == "yes":
with disable_file_system_redirection:
try:
code_to_do()
except Exception as e:
print(str(e))
else:
try:
code_to_do()
except Exception as e:
print(str(e))

Is It safe to use del for SqlAlchemy connection in python

This is my code when I have used self.engine.dispose() It gives me Exception, so should i use this del self.engine it work fine, please give me any advice.
class MSSqlAlchemyConn(object):
def __init__(self, connection_obj):
"""
Constructor for SQLAlchemy.
"""
try:
# connection string
# example: mysql+mysqldb://[mysqluser]:[password]#[host]:[port]/[db_name]
constr = "mssql+pymssql://%s:%s#%s/%s" % (connection_obj['USER'], connection_obj['PASSWORD'],
connection_obj['HOST'],
connection_obj['DATABASE'])
self.engine = create_engine(constr, echo=False)
except Exception as e:
logger.error(e)
def __del__(self):
if self.engine is not None:
del self.engine

Categories