I'm new to Python and I'm trying to build a starter project to get into the language.
I created a SQLite3 DB and managed to make transactions with it.
Everything works fine.
I wanted to dig deeper into Python, so I've been reading and discovered decorators and context managers, and I was trying to apply these concepts to my query-execution functions. However, I'm stuck on a problem.
I've created a class that handles opening and closing the connection.
DB_ContextManager.py class:
import sqlite3

class DB_ContextManager():
    def __init__(self, db_connection):
        self.db_connection = db_connection

    def __enter__(self):
        self.conn = sqlite3.connect(self.db_connection)
        return self.conn

    def __exit__(self, exc_type, exc_val, exc_tb):  # obligatory params
        self.conn.close()
I also created ConnectionDB.py, which is responsible for executing queries.
from Database.DB_ContextManager import DB_ContextManager as DB_CM

# Handles SELECT queries
def ExecuteSelectQuery(self, pQuery):
    try:
        with DB_CM(db_connection_string) as conn:
            cur = conn.cursor()
            cur.execute(pQuery)
            result = cur.fetchall()
            return result
    except Exception as e:
        LH.Handler(log_folder, 'ConnectionDB', 'Queries', 'ExecuteSelectQuery', e)
        raise DE.ConnectionDB_Exception()

# Handles INSERT, UPDATE and DELETE queries
def ExecuteNonQuery(self, pQuery):
    try:
        with DB_CM(db_connection_string) as conn:
            cur = conn.cursor()
            cur.execute(pQuery)
    except Exception as e:
        LH.Handler(log_folder, 'ConnectionDB', 'Queries', 'ExecuteNonQuery', e)
        raise DE.ConnectionDB_Exception()
As you can see,

with DB_CM(db_connection_string) as conn:
    cur = conn.cursor()
    cur.execute(pQuery)

is repeated in each function.
To avoid this situation, I'd like to create a Decorator function that encapsulates this piece of code.
My problem is that the cursor 'dies' inside the context manager, and, for example, ExecuteSelectQuery needs the cursor to fetch the result data after the query has been executed.
I know it's a small project and thinking this far ahead may not be necessary. But remember, it's a starter project and I'm learning to apply new concepts.
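To make the idea concrete, here is a minimal sketch of the decorator I had in mind, assuming the decorated functions receive the open cursor as an argument (the name with_cursor is a placeholder; DB_CM and db_connection_string are the ones from the code above):

import functools

def with_cursor(func):
    """Open a connection via DB_CM, hand the live cursor to the wrapped
    function, and let the context manager close everything afterwards."""
    @functools.wraps(func)
    def wrapper(self, pQuery, *args, **kwargs):
        with DB_CM(db_connection_string) as conn:
            cur = conn.cursor()
            cur.execute(pQuery)
            # the cursor is still alive here, so the wrapped function can fetch from it
            return func(self, cur, *args, **kwargs)
    return wrapper

@with_cursor
def ExecuteSelectQuery(self, cur):
    return cur.fetchall()

@with_cursor
def ExecuteNonQuery(self, cur):
    pass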
SOLUTION
As @blhsing suggested, I return the cursor instead of the connection object in the context manager. I also handle commit() and rollback() in it.
So, summarizing:
ConnectionDB.py
def ExecuteSelectQuery(self, pQuery):
    with DB_CM(db_connection_string, pQuery) as cur:
        result = cur.fetchall()
        return result

def ExecuteSelectNonQuery(self, pQuery):
    with DB_CM(db_connection_string, pQuery) as cur:
        pass
and DB_ContextManager.py:
class DB_ContextManager():
    def __init__(self, db_connection, pQuery):
        self.db_connection = db_connection
        self.query = pQuery

    def __enter__(self):
        try:
            self.conn = sqlite3.connect(self.db_connection)
            cur = self.conn.cursor()
            cur.execute(self.query)
            self.conn.commit()
            return cur
        except Exception as e:
            LH.Handler(log_folder, 'DB_ContextManager', 'DB_ContextManager', '__enter__', e)
            self.conn.rollback()
            raise DE.ConnectionDB_Exception()

    def __exit__(self, exc_type, exc_val, exc_tb):  # obligatory params
        self.conn.close()
You can make the context manager return the cursor instead of the connection object:
class DB_CM():
    def __init__(self, db_connection, pQuery):
        self.db_connection = db_connection
        self.query = pQuery

    def __enter__(self):
        self.conn = sqlite3.connect(self.db_connection)
        cur = self.conn.cursor()
        cur.execute(self.query)
        return cur

    def __exit__(self, exc_type, exc_val, exc_tb):  # obligatory params
        self.conn.close()
so that the try block of ExecuteSelectQuery can be revised as:
with DB_CM(db_connection_string, pQuery) as cur:
    result = cur.fetchall()
    return result
and the try block of ExecuteNonQuery can be simply:
with DB_CM(db_connection_string, pQuery):
    pass
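Note that, just as the final solution above does, INSERT/UPDATE/DELETE statements still need a conn.commit() before the connection is closed; sqlite3 does not commit pending changes automatically when the connection is closed.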
Related
I was trying to create a wrapper class that uses __enter__/__exit__ so a context manager can close a Postgres DB connection.
class ClosingConnection:
    def __init__(self, schema_name: str) -> None:
        """
        :param schema_name: the db schema (i.e. the tenant)
        """
        super().__init__()
        self.schema_name = schema_name

    def __enter__(self):
        try:
            self.conn = psycopg2.connect(
                host=os.environ["DB_HOST"],
                port=os.environ["DB_PORT"],
                database=os.environ["DB_NAME"],
                user=os.environ["DB_USERNAME"],
                password=password,
                options=f"-c search_path={self.schema_name}",
                cursor_factory=psycopg2.extras.DictCursor,
            )
            return self.conn
        except psycopg2.OperationalError:
            pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.conn.close()
Usage
def get_db(schema_name: str):
    return ClosingConnection(schema_name)

with get_db("test") as db:
    cursor = db.cursor()
    cursor.execute("some sql")
    rv = cursor.fetchall()
    cursor.close()
This works fine for retrieving data, but if I do an upsert, only the first upsert is performed. For the remaining ones, rv is returned correctly, but the actual database is never updated.
In contrast, if I get rid of the closing class, this works fine:
Usage
def get_db(schema_name: str):
    try:
        conn = psycopg2.connect(
            host=os.environ["DB_HOST"],
            port=os.environ["DB_PORT"],
            database=os.environ["DB_NAME"],
            user=os.environ["DB_USERNAME"],
            password=password,
            options=f"-c search_path={schema_name}",
            cursor_factory=psycopg2.extras.DictCursor,
        )
        return conn
    except psycopg2.OperationalError:
        pass

conn = get_db("test")
try:
    cursor = conn.cursor()
    cursor.execute("some sql")
    rv = cursor.fetchall()
    cursor.close()
finally:
    conn.close()
For curiosity's sake, here is the upsert command I'm using (I've also tried the older upsert style):
INSERT INTO settings_account (setting_name, setting_value)
VALUES (%(setting_name)s, %(setting_value)s)
ON CONFLICT (setting_name)
DO
UPDATE SET setting_value=EXCLUDED.setting_value
RETURNING *
Why is the wrapper class causing the DB writes to fail? Is the idea of a wrapper class flawed?
Thanks to the comment above, I was able to get this working with:
class ClosingConnection:
    def __init__(self, schema_name: str) -> None:
        """
        :param schema_name: the db schema (i.e. the tenant)
        """
        super().__init__()
        self.schema_name = schema_name

    def __enter__(self):
        try:
            self.conn = psycopg2.connect(
                host=os.environ["DB_HOST"],
                port=os.environ["DB_PORT"],
                database=os.environ["DB_NAME"],
                user=os.environ["DB_USERNAME"],
                password=password,
                options=f"-c search_path={self.schema_name}",
                cursor_factory=psycopg2.extras.DictCursor,
            )
            return self.conn
        except psycopg2.OperationalError:
            pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.conn.commit()
        self.conn.close()
def get_db(schema_name: str):
    return ClosingConnection(schema_name)

with get_db("test") as db:
    with db.cursor() as cursor:
        cursor.execute("some sql")
        rv = cursor.fetchall()
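A possible refinement (just a sketch, not part of the fix above): decide between commit and rollback in __exit__ based on whether the with block raised, so a failed block does not persist half-finished changes:

def __exit__(self, exc_type, exc_val, exc_tb):
    try:
        if exc_type is None:
            self.conn.commit()    # the with block completed normally
        else:
            self.conn.rollback()  # an exception escaped the with block
    finally:
        self.conn.close()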
I am implementing connection pooling with Postgres for a multithreaded application. I create two pools and store them in a singleton, so the same instance can be accessed from the entire application via the class RepositoryConnPool.
The second class handles context management; I made it so I can execute multiple queries with the same connection inside a with block. I have two pools because when I execute a SELECT query I want it to go to the replica database, while data modifications go to the master database. I am getting the following error when executing in a multithreaded environment; it does not happen when I run without multithreading. Here is the error log:
ERROR ;2020-06-03 11:05:42,826; ThreadPoolExecutor-0_4; modules.table_modify_module; table_modify_module:insert_host;60: trying to put unkeyed connection
ERROR ;2020-06-03 11:05:42,826; ThreadPoolExecutor-0_21; table_modify_module; table_modify_module:insert_host;60: trying to put unkeyed connection
ERROR ;2020-06-03 11:05:42,885; ThreadPoolExecutor-0_12; table_modify_module; table_modify_module:get_inst_detail;367: trying to put unkeyed connection
class RepositoryConnPool:
    __instance = None

    # __new__ is overridden to implement the singleton pattern
    def __new__(cls):
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance

    def __init__(self):
        try:
            # I get connection details from Vault
            inventory_master = vm.get_secret_by_base_path("xxxxxx")
            inventory_repl = vm.get_secret_by_base_path("xxxxx")
            self.repo_connection_pool_master = self.setup_conn_pool(inventory_master)
            self.repo_connection_pool_repl = self.setup_conn_pool(inventory_repl)
        except Exception as e:
            logger.error("GET repository address failed: ", exc_info=e)

    def setup_conn_pool(self, inventory_data):
        repo_dbausername = 'dba_admin'
        repo_dbapassword = inventory_data['accounts']['dba_admin']['password']
        repo_host = inventory_data['fqdn']
        repo_port = inventory_data['port']
        try:
            repo_connection_pool = psycopg2.pool.ThreadedConnectionPool(10, 30, user=repo_dbausername,
                                                                        password=repo_dbapassword,
                                                                        host=repo_host,
                                                                        port=repo_port,
                                                                        database="xxxxx")
        except Exception as e:
            logger.error(e)
        return repo_connection_pool

    # These methods fetch a connection from the corresponding pool
    def get_master_connection(self):
        return self.repo_connection_pool_master.getconn()

    def get_repl_connection(self):
        return self.repo_connection_pool_repl.getconn()
class RepositoryConnection:
    # Gets connections from RepositoryConnPool
    def __init__(self):
        self.repo_conn_pool = RepositoryConnPool()
        self._repo_conn_master = self.repo_conn_pool.get_master_connection()
        self._repo_conn_repl = self.repo_conn_pool.get_repl_connection()

    def execute_query(self, query, commit=False, call_master=False, retry_count=0):
        cursor = None
        try:
            if query.strip().upper().startswith("SELECT") and call_master == False:
                repo_query = query
                cursor = self._repo_conn_repl.cursor()
                cursor.execute(repo_query)
                if commit:
                    self._repo_conn_repl.commit()
                else:
                    try:
                        result = cursor.fetchall()
                        return result
                    except:
                        logging.info(f"Query executed:{query} and no result was returned")
            else:
                repo_query = query
                cursor = self._repo_conn_master.cursor()
                cursor.execute(repo_query)
                if commit:
                    self._repo_conn_master.commit()
                else:
                    try:
                        result = cursor.fetchall()
                        return result
                    except:
                        logging.info(f"Query executed:{query} and no result was returned")
        # This needs to change to handle connection errors in a different way
        except Exception as e:
            if retry_count < 3:
                logger.warning(f"There was an error during execution of query, retry will be done {query} ")
                return self.execute_query(query, commit, call_master=True, retry_count=retry_count + 1)
            else:
                logger.fatal("All 3 connection retries failed")
        finally:
            if cursor:
                cursor.close()

    # Checks that the connections are up
    def __enter__(self):
        try:
            if self._repo_conn_master and self._repo_conn_repl:
                return self
            else:
                raise Exception()
        except Exception as e:
            logger.error(e)

    # Returns the connections to the pools
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._repo_conn_master:
            self.repo_conn_pool.repo_connection_pool_master.putconn(self._repo_conn_master)
        if self._repo_conn_repl:
            self.repo_conn_pool.repo_connection_pool_repl.putconn(self._repo_conn_repl)
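One detail worth noting about the singleton as written (an observation about how Python constructs objects, not a confirmed diagnosis of the error above): __new__ returns the cached instance, but __init__ still runs on every RepositoryConnPool() call, so both pools are rebuilt each time the class is instantiated. A connection checked out of an older pool can then end up being passed to putconn() of a newer pool. A minimal sketch of a guard against re-initialization (the _initialized flag is an illustrative name; error handling omitted for brevity):

class RepositoryConnPool:
    __instance = None

    def __new__(cls):
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance

    def __init__(self):
        # __init__ runs on every RepositoryConnPool() call, so only
        # build the pools the first time the singleton is constructed.
        if getattr(self, "_initialized", False):
            return
        self._initialized = True
        inventory_master = vm.get_secret_by_base_path("xxxxxx")
        inventory_repl = vm.get_secret_by_base_path("xxxxx")
        self.repo_connection_pool_master = self.setup_conn_pool(inventory_master)
        self.repo_connection_pool_repl = self.setup_conn_pool(inventory_repl)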
I use pymysql (Python 3.5) to operate on MySQL (version > 5.5), and I want to use it simply, so I wrote a class:
class Mysql(object):
    def __init__(self):
        self.conn = connect(host=HOST, port=PORT, user=USER, passwd=PASSWORD, db=DB, charset=CHARSET)
        """DictCursor: a cursor which returns results as a dictionary"""
        self.cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)

    def __enter__(self):
        return self.cursor

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.cursor.close()
        self.conn.close()
Then, I wrote this:
def do_add(_name, _age):
    with Mysql() as cursor:
        param = (_name, _age)
        sql = """insert into mydb(name, age) values (%s, %s)"""
        cursor.execute(sql, param)
That does not work; I cannot insert data into MySQL.
But when I use a generator-based context manager instead, it works very well:
@contextlib.contextmanager
def mysql():
    conn = connect(host=HOST, port=PORT, user=USER, passwd=PASSWORD, db=DB, charset=CHARSET)
    cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
    try:
        yield cursor
        conn.commit()
    except:
        conn.rollback()
    finally:
        cursor.close()
        conn.close()
I want to know why I cannot use the class version.
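The difference that stands out (a sketch, assuming this is the missing piece): the generator-based manager calls conn.commit() after the yield, while the class version never commits, and pymysql does not autocommit by default. A class-based equivalent that mirrors the commit/rollback behaviour could look like this:

class Mysql(object):
    def __init__(self):
        self.conn = connect(host=HOST, port=PORT, user=USER, passwd=PASSWORD, db=DB, charset=CHARSET)
        self.cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)

    def __enter__(self):
        return self.cursor

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.conn.commit()    # mirror the generator version's commit on success
        else:
            self.conn.rollback()  # and its rollback on error
        self.cursor.close()
        self.conn.close()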
I have a database class like:
rlock = RLock()

class PostgresDB:
    host = Settings.DB_HOST
    name = Settings.DB_NAME
    username = Settings.DB_USERNAME
    pwd = Settings.DB_PWD

    def __init__(self):
        self.con = None

    def _create_connection(self):
        if self.con is None or self.con.closed:
            self.con = psycopg2.connect("host={h} dbname={n} user={u} password={p}".format(
                h=self.host, n=self.name, u=self.username, p=self.pwd,
                cursor_factory=RealDictCursor))

    @contextlib.contextmanager
    def reading_cursor(self, sql, params=None):
        rlock.acquire()
        self._create_connection()
        try:
            cur = self.con.cursor()
            cur.execute(sql, params)
            yield cur
        except DatabaseError as e:
            traceback.print_exc()
            yield Exception()
        finally:
            self.con.close()
            rlock.release()

    @contextlib.contextmanager
    def writing_cursor(self, sql, params=None):
        rlock.acquire()
        self._create_connection()
        try:
            cur = self.con.cursor()
            cur.execute(sql, params)
            yield cur
            self.con.commit()
        except DatabaseError as e:
            traceback.print_exc()
            yield Exception()
        finally:
            self.con.close()
            rlock.release()
Note that I am explicitly asking for RealDictCursor.
However, when any request is made, the result is a list of tuples, not dicts. I am not sure what I am missing.
Ok this is incredibly stupid... If you look closely you will see I passed cursor_factory to .format() instead of passing it to .connect() ...
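For completeness, a sketch of the corrected call, with cursor_factory moved out of .format() and passed to .connect() itself:

self.con = psycopg2.connect(
    "host={h} dbname={n} user={u} password={p}".format(
        h=self.host, n=self.name, u=self.username, p=self.pwd),
    cursor_factory=RealDictCursor)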
I was planning to change my project to multiprocessing so I can use more resources. Here's my database module code:
import pymysql
import threading

class tdb:
    def __init__(self):
        self.totalEffected = 0

    def start(self):
        self.conn = pymysql.connect(host='xxxx', port=3306, user='root', passwd='xxxx', db='xxxx', charset='utf8')

    def select(self, sql, args=None):
        cur = self.conn.cursor()
        cur.execute(sql, args)
        result = cur.fetchall()
        cur.close()
        return result

    def execute(self, sql, args=None):
        cur = self.conn.cursor()
        result = cur.execute(sql, args)
        cur.close()
        self.totalEffected += result
        return result

    # def __commit(self, callback):

    def __commitCallback(self, result):
        print('commit result:', result)
        self.conn.close()

    def errorc(self, *args):
        print('error')

    def end(self):
        # init()
        # p.apply_async(self.conn.commit, callback=self.__commitCallback, error_callback=self.errorc)
        if self.totalEffected != 0:
            thread = threading.Thread(target=self.t)
            thread.start()
        else:
            self.conn.close()
        # p.apply(self.conn.commit)
        # self.conn.close()
        # print('result:', result.get())

    def t(self):
        self.conn.commit()
        self.conn.close()
The only operation that really needs handling is conn.commit(). I use a thread to do it so I can return immediately. I once used Pool.apply_async(), but the callback never fired, so I want to know how to make the other process call me back, so I don't have to spend my time waiting to receive the result.
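As a generic illustration of how Pool.apply_async callbacks behave (a standalone sketch, not tied to pymysql): the callback runs in the parent process once the worker function returns, and both the target and its arguments must be picklable, which a live database connection generally is not, so shipping self.conn.commit to another process is unlikely to work as-is.

from multiprocessing import Pool

def work(x):
    # runs in a worker process; must be a picklable, module-level function
    return x * x

def on_done(result):
    # runs in a background thread of the parent process once the worker returns
    print('commit result:', result)

if __name__ == '__main__':
    with Pool(2) as p:
        p.apply_async(work, (3,), callback=on_done)
        p.close()
        p.join()  # wait for the worker; the callback fires before this returns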