Connection Management When Working with a Database in Python

I have a class that handles db operations, like below:
class DepartmentOperations(DatabaseOperations):
    def __init__(self):
        try:
            self._connection = Database.create_connection()
            self._cursor = self._connection.cursor()
            self.isactive = True
        except ConnectionException as ex:
            print(ex.args)

    def get_id(self, department_name):
        if self.isactive:
            try:
                self._cursor.execute("select BolumId from BOLUMLER where BolumAdi = %s", department_name)
                row = self._cursor.fetchone()
                if row is not None:
                    return row[0]
                else:
                    return 0
            except:
                raise DbException("Error occurred while fetching the record...")
            finally:
                self._cursor.close()
                self._connection.close()
                self.isactive = False
        else:
            try:
                self._connection = Database.create_connection()
                self._cursor = self._connection.cursor()
                self.isactive = True
            except ConnectionException as ex:
                print(ex.args)
            try:
                self._cursor.execute("select BolumId from BOLUMLER where BolumAdi = %s", department_name)
                row = self._cursor.fetchone()
                if row is not None:
                    return row[0]
                else:
                    return 0
            except:
                raise DbException("Error occurred while fetching the record...")
            finally:
                self._cursor.close()
                self._connection.close()
                self.isactive = False

    def add(self, department_name):
        if self.isactive:
            try:
                self._cursor.execute("insert into BOLUMLER values (%s)", (department_name,))
                self._connection.commit()
            except:
                raise DbException("Error occurred while saving the record.")
            finally:
                self._cursor.close()
                self._connection.close()
                self.isactive = False
        else:
            try:
                self._connection = Database.create_connection()
                self._cursor = self._connection.cursor()
                self.isactive = True
            except ConnectionException as ex:
                print(ex.args)
            try:
                self._cursor.execute("insert into BOLUMLER values (%s)", (department_name,))
                self._connection.commit()
            except:
                raise DbException("Error occurred while saving the record.")
            finally:
                self._cursor.close()
                self._connection.close()
                self.isactive = False
When I instantiate this class and use it, it works the first time but not the second, because, as you can see, I close the connection in the finally block. If I delete the finally block the methods work fine, but then I never close the connection. How can I manage connections?

The best way is not to keep the connection open if you are in a web application; instead, use a with statement, like this:
with pymssql.connect(server, user, password, "tempdb") as conn:
    with conn.cursor(as_dict=True) as cursor:
        cursor.execute('SELECT * FROM persons WHERE salesrep=%s', 'John Doe')
        for row in cursor:
            print("ID=%d, Name=%s" % (row['id'], row['name']))
This way, the connection will open and close in the context.
You can also check whether the connection is still active:
use try/except and, if the db connection is closed, reopen it.
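For example, a minimal sketch of that idea, assuming a pymssql-style driver (ensure_connection is a made-up helper name, and server/user/password come from your own config):

import pymssql

def ensure_connection(conn):
    # Hypothetical helper: probe the connection and reopen it if it is dead.
    try:
        with conn.cursor() as cursor:
            cursor.execute("SELECT 1")
        return conn
    except (pymssql.InterfaceError, pymssql.OperationalError):
        # The connection was closed or dropped, so open a fresh one.
        return pymssql.connect(server, user, password, "tempdb")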

You can use a custom pool:
import contextlib
import multiprocessing
import queue

def pool(ctor, limit=None):
    local_pool = multiprocessing.Queue()
    n = multiprocessing.Value('i', 0)

    @contextlib.contextmanager
    def pooled(ctor=ctor, lpool=local_pool, n=n):
        # block iff at limit; otherwise fall through and build a new object
        try:
            i = lpool.get(limit and n.value >= limit)
        except queue.Empty:
            n.value += 1
            i = ctor()
        yield i
        # return the object to the pool for reuse
        lpool.put(i)

    return pooled
Example (using a PyMySQL-style connection; note that the function borrows a pooled connection instead of closing it, so the pool can reuse it):

import pymysql

def do_something(connection):
    with connection.cursor() as cursor:
        # Create a new record
        sql = "INSERT INTO `users` (`email`, `password`) VALUES (%s, %s)"
        cursor.execute(sql, ('webmaster@python.org', 'very-secret'))
    connection.commit()
    with connection.cursor() as cursor:
        # Read a single record
        sql = "SELECT `id`, `password` FROM `users` WHERE `email`=%s"
        cursor.execute(sql, ('webmaster@python.org',))
        result = cursor.fetchone()
        print(result)

and then:

my_pool = pool(lambda: pymysql.connect(host='localhost', user='user',
                                       password='secret', db='db'))
with my_pool() as connection:
    do_something(connection)
pyDAL:
I recommend using pyDAL; extended documentation is here.
General usage is something like this (and is much the same for almost every database):

from pydal import DAL

db = DAL('mysql://username:password@localhost/test', pool_size=150)

class DALHandler(object):
    def __init__(self, db):
        self.db = db

    def on_start(self):
        self.db._adapter.reconnect()

    def on_success(self):
        self.db.commit()

    def on_failure(self):
        self.db.rollback()

    def on_end(self):
        self.db._adapter.close()
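A sketch of how such a handler might be driven around a unit of work (the calling sequence below is illustrative, not pyDAL API; executesql is a real pyDAL method):

handler = DALHandler(db)
handler.on_start()               # reconnect the pooled adapter
try:
    db.executesql('SELECT 1;')   # your actual queries go here
    handler.on_success()         # commit on success
except Exception:
    handler.on_failure()         # roll back on failure
    raise
finally:
    handler.on_end()             # return the connection to the pool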

Related

Having trouble autoincrementing in an API

I built a to-do list API with Flask and SQLite, and now I'm trying to use AUTOINCREMENT to increment the ids for the tasks. However, I get an error ("Error: NOT NULL constraint failed: incomplete.id") when I try to add something to the list. I'm not sure why; I looked at the SQLite documentation and I seem to be following it. I even tried reformatting the CREATE TABLE statements. I'm not sure what else to do; I'd really appreciate some guidance/advice/help. Thanks!
Here is my app.py:
import helper
from flask import Flask, request, jsonify, Response
import json

app = Flask(__name__)

@app.route('/')
def hello_world():
    return 'Hello World!'

@app.route('/tasks/new', methods=['PUT'])
def add_task():
    # global idCount
    # idCount = idCount + 1
    # get the task from the POST body; request parses the HTTP body data, and
    # Response is used to return a JSON response to the client
    req_data = request.get_json()
    task = req_data['task']
    # add task to the list
    res_data = helper.add_to_incomplete(task)
    # return an error if the task can't be added
    if res_data is None:
        response = Response("{'error': 'Task not added - " + task + "'}", mimetype='application/json')
        return response
    response = Response(json.dumps(res_data), mimetype='application/json')
    return response

@app.route('/tasks/all', methods=["GET"])
def get_all_items():
    res_data = helper.get_all_completes(), helper.get_all_incompletes()
    response = Response(json.dumps(res_data), mimetype='application/json')
    return response

@app.route('/tasks/complete', methods=["POST"])
def complete_task():
    req_data = request.get_json()
    inputId = req_data['id']
    res_data = helper.add_to_complete(inputId)
    # find the task matching the input id
    return "completed task" + inputId

@app.route('/tasks/incomplete', methods=["PATCH"])
def uncomplete_task():
    req_data = request.get_json()
    inputId = req_data['id']
    res_data = helper.uncomplete(inputId)
    # find the task matching the input id
    return "un-completed task" + inputId

@app.route('/tasks/remove', methods=["DELETE"])
def delete():
    req_data = request.get_json()
    inputId = req_data['id']
    res_data = helper.delete_task(inputId)
    if res_data is None:
        response = Response("{'error': 'Error deleting task - '" + inputId + "}", status=400, mimetype='application/json')
        return response
    return "deleted task id" + " " + inputId

@app.route('/tasks/empty', methods=["EMPTY"])
def delete_all():
    helper.empty()
    return "you deleted everything"
Here is my helper.py:
import sqlite3
import random  # for ids, because users don't set them

DB_PATH = './todo.db'

# connect to database
conn = sqlite3.connect(DB_PATH)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS complete (id INTEGER PRIMARY KEY, task TEXT NOT NULL);")
c.execute("CREATE TABLE IF NOT EXISTS incomplete (id INTEGER PRIMARY KEY, task TEXT NOT NULL);")
# save the change
conn.commit()

def add_to_incomplete(task):
    try:
        # id = str(random.randrange(100,999))
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('insert into incomplete(task) values(?)', (task,))
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None

def add_to_complete(inputId):
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select task from incomplete where id=?', (inputId,))
        tasks = c.fetchone()[0]
        c.execute('insert into complete values(?,?)', (inputId, tasks))
        delete_task(inputId)
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None

def get_all_completes():
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select * from complete')
        rows = c.fetchall()
        conn.commit()
        return {"complete": rows}
    except Exception as e:
        print('Error: ', e)
        return None

def get_all_incompletes():
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select * from incomplete')
        rows = c.fetchall()
        conn.commit()
        return {"incomplete": rows}
    except Exception as e:
        print('Error: ', e)
        return None

def uncomplete(inputId):
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select task from complete where id=?', (inputId,))
        tasks = c.fetchone()[0]
        c.execute('insert into incomplete values(?,?)', (inputId, tasks))
        delete_task(inputId)
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None

def delete_task(inputId):
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('delete from complete where id=?', (inputId,))
        c.execute('delete from incomplete where id=?', (inputId,))
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None

def empty():
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('delete from complete')
        c.execute('delete from incomplete')
        conn.commit()
        return "you deleted everything mwahaha"
    except Exception as e:
        print('Error: ', e)
        return None
I would suggest changing your SQLite table creation code to:

create table if not exists complete
(
    id integer primary key autoincrement,
    task text not null
);
However, a better option is to use SQLAlchemy.
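For example, a minimal SQLAlchemy sketch of the incomplete table (assuming SQLAlchemy 1.4+; with SQLite, an Integer primary key auto-increments by default):

from sqlalchemy import create_engine, Column, Integer, Text
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Incomplete(Base):
    __tablename__ = 'incomplete'
    id = Column(Integer, primary_key=True)   # auto-increments in SQLite
    task = Column(Text, nullable=False)

engine = create_engine('sqlite:///todo.db')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Incomplete(task='write the docs'))
    session.commit()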

How to use python to query database in parallel

I have two functions which I use to query a database. Assuming two separate queries, how can I run them in parallel against the same database, and wait for both results to return before continuing with the rest of the code?
def query1(param1, param2):
    result = None
    logging.info("Connecting to database...")
    try:
        conn = connect(host=host, port=port, database=db)
        curs = conn.cursor()
        curs.execute(query)
        result = curs
        curs.close()
        conn.close()
    except Exception as e:
        logging.error("Unable to access database %s" % str(e))
    return result

def query2(param1, param2):
    result = None
    logging.info("Connecting to database...")
    try:
        conn = connect(host=host, port=port, database=db)
        curs = conn.cursor()
        curs.execute(query)
        result = curs
        curs.close()
        conn.close()
    except Exception as e:
        logging.error("Unable to access database %s" % str(e))
    return result
Here is multi-threaded code that does what you're trying to accomplish:

import logging
from threading import Thread, Lock
from time import sleep

class DatabaseWorker(Thread):
    __lock = Lock()

    def __init__(self, db, query, result_queue):
        Thread.__init__(self)
        self.db = db
        self.query = query
        self.result_queue = result_queue

    def run(self):
        result = None
        logging.info("Connecting to database...")
        try:
            conn = connect(host=host, port=port, database=self.db)
            curs = conn.cursor()
            curs.execute(self.query)
            result = curs.fetchall()  # fetch the rows before closing the cursor
            curs.close()
            conn.close()
        except Exception as e:
            logging.error("Unable to access database %s" % str(e))
        self.result_queue.append(result)

delay = 1
result_queue = []
worker1 = DatabaseWorker("db1", "select something from sometable",
                         result_queue)
worker2 = DatabaseWorker("db1", "select something from othertable",
                         result_queue)
worker1.start()
worker2.start()

# Wait for both jobs to be done
while len(result_queue) < 2:
    sleep(delay)
worker1.join()
worker2.join()
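Alternatively, a shorter sketch using the standard library's concurrent.futures, which also waits for both results before continuing (query1 and query2 are the functions from the question):

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=2) as executor:
    future1 = executor.submit(query1, param1, param2)
    future2 = executor.submit(query2, param1, param2)
    # result() blocks until the corresponding query has finished
    result1 = future1.result()
    result2 = future2.result()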

Trying to understand cx_Oracle's LOB object

I have a database class in python which I use to query the database.
class Database():
    def __init__(self, user, password, host, port, service_name, mode, *args):
        # mode should be 0 if not cx_Oracle.SYSDBA
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.service_name = service_name
        self.logger = logging.getLogger(__name__)
        self.mode = 0
        self.connection = None
        dsn = cx_Oracle.makedsn(self.host, self.port, self.service_name)
        self.connect_string = self.user + '/' + self.password + '@' + dsn
        try:
            self.connection = cx_Oracle.connect(self.connect_string, mode=self.mode, threaded=True)
            self.connection.stmtcachesize = 1000
            self.connection.client_identifier = 'my_app_scheduler'
            self.cursor = self.connection.cursor()
            self.cursor.arraysize = 10000
            self.idVar = self.cursor.var(cx_Oracle.NUMBER)
        except cx_Oracle.DatabaseError, exc:
            error, = exc
            self.logger.exception('Exception occurred while trying to create database object : %s', error.message)
            raise exc

    def query(self, q):
        try:
            self.cursor.execute(q)
            return self.cursor.fetchall(), self.cursor.rowcount
        except cx_Oracle.DatabaseError, exc:
            raise exc
And this is the code to manipulate the fetched data and convert it.
output, rowcount = db_run_query.query(sql_text)
#self.logger.debug('output : %s, type : %s', output, type(output))
end_time = time.time()
time_taken = end_time - start_time
self.logger.debug('Rowcount : %s, time_taken : %s', rowcount, time_taken)
column_name = [d[0] for d in db_run_query.cursor.description]
result = [dict(zip(column_name, row)) for row in output]
# Convert everything to string, e.g. datetime
try:
    for each_dict in result:
        for key in each_dict:
            if isinstance(each_dict[key], cx_Oracle.LOB):
                self.logger.debug('%s', each_dict[key].size())
                each_dict[key] = each_dict[key].read()
                #self.logger.debug('%s %s %s %s %s %s %s', key, each_dict, type(key), type(each_dict[key]), type(each_dict), temp_each_dict, type(temp_each_dict))
            else:
                each_dict[key] = str(each_dict[key])
except Exception as e:
    self.logger.debug(e)
So, without self.cursor.arraysize = 10000, a query like select clob_value from table fetched the data and logged Rowcount : 4901, time_taken : 0.196296930313, but then failed with an error like:
LOB variable no longer valid after subsequent fetch
When I set the arraysize parameter the error goes away. (Is arraysize only relevant for LOB columns? It works fine for select other_column from table where rownum < 20000, where other_column is a varchar.)
Why does that happen?
Turns out CLOBs and fetchall() don't play nicely together:
Internally, Oracle uses LOB locators which are allocated based on the
cursor array size. Thus, it is important that the data in the LOB
object be manipulated before another internal fetch takes place. The
safest way to do this is to use the cursor as an iterator. In
particular, do not use the fetchall() method.
By avoiding cursor.fetchall() and using the cursor as an iterator (e.g. for row in cursor: ...), I was able to get around this problem.
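A minimal sketch of that pattern (reading each LOB before the iterator's next internal fetch invalidates its locator):

cursor.execute("select clob_value from my_table")
results = []
for row in cursor:
    value = row[0]
    if isinstance(value, cx_Oracle.LOB):
        # Read the LOB now, while its locator is still valid
        value = value.read()
    results.append(value)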

When to use DRCP and sessionpool in cx_oracle python?

For a specific use case: I have 100 databases plus one central database. My app connects to the central database, which spawns connections to any of the 100 databases as the user requests queries against them.
In this case, does using DRCP make sense? I don't want a connection to be killed while the user is running a query, but at the same time I don't want too many connections open to the db. I currently control this by creating a profile on the database that limits the number of active sessions to some low number, say 5, for that specific user (read_only_user) using that specific profile (read_only_profile).
Right now I am using the standard open-a-connection-per-request model, but I'm not sure that's the best way to go about it.
import cx_Oracle
import logging, time

class Database(object):
    '''
    Use this method for DML SQLs:
    Inputs - SQL to be executed. Data related to that SQL.
    Returns - The last inserted, updated, deleted ID.
    '''
    def __init__(self, user, password, host, port, service_name, mode, *args):
        # mode should be 0 if not cx_Oracle.SYSDBA
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.service_name = service_name
        self.logger = logging.getLogger(__name__)
        try:
            self.mode = mode
        except:
            self.mode = 0
            self.logger.info("Mode is not mentioned while creating database object")
        self.connection = None
        dsn = cx_Oracle.makedsn(self.host, self.port, self.service_name)
        self.connect_string = self.user + '/' + self.password + '@' + dsn
        try:
            self.connection = cx_Oracle.connect(self.connect_string, mode=self.mode,
                                                threaded=True)
            self.connection.stmtcachesize = 1000
            self.connection.client_identifier = 'my_app'
            self.cursor = self.connection.cursor()
            self.idVar = self.cursor.var(cx_Oracle.NUMBER)
        except cx_Oracle.DatabaseError, exc:
            error, = exc
            self.logger.exception(
                'Exception occurred while trying to create database object : %s',
                error.message)
            raise exc

    def query(self, q):
        try:
            self.cursor.execute(q)
            return self.cursor.fetchall()
        except cx_Oracle.DatabaseError, exc:
            error, = exc
            self.logger.info(
                "Error occurred while trying to run query: %s, error : %s", q,
                error.message)
            return error.message

    def dml_query(self, sql):
        try:
            self.cursor.execute(sql)
            self.connection.commit()
            return 1
        except Exception as e:
            self.logger.exception(e)
            return 0

    def dml_query_with_data(self, sql, data):
        """
        Use this method for DML SQLs:
        Inputs - SQL to be executed. Data related to that SQL.
        Returns - The last inserted, updated, deleted ID.
        """
        try:
            self.cursor.execute(sql, data)
            self.connection.commit()
            return 1
        except Exception as e:
            self.logger.exception(e)
            return 0

    def update_output(self, clob, job_id, flag):
        try:
            q = "Select output from my_table where job_id=%d" % job_id
            self.cursor.execute(q)
            output = self.cursor.fetchall()
            # Check if we already have some output in the clob for that job_id
            if output[0][0] is None:
                if flag == 1:
                    self.cursor.execute("""UPDATE my_table
                        SET OUTPUT = :p_clob
                        ,job_status=:status WHERE job_id = :p_key""",
                        p_clob=clob, status="COMPLETED", p_key=job_id)
                else:
                    self.cursor.execute("""UPDATE my_table
                        SET OUTPUT = :p_clob
                        ,job_status=:status WHERE job_id = :p_key""",
                        p_clob=clob, status="FAILED", p_key=job_id)
            else:
                self.cursor.execute("""UPDATE my_table
                    SET OUTPUT = OUTPUT || ',' || :p_clob
                    WHERE job_id = :p_key""", p_clob=clob, p_key=job_id)
            self.connection.commit()
            rows_updated = self.cursor.rowcount
            return rows_updated
        except Exception as e:
            self.logger.exception(e)
            return 0

    def __del__(self):
        try:
            if self.connection is not None:
                self.connection.close()
        except Exception as e:
            self.logger.exception(
                "Exception while trying to close database connection object : %s", e)

'''
if __name__ == '__main__':
    db = Database('test', 'test', 'my_host', '1000', 'my_db', 0)
    columns = db.query('select * from my-table')
    print columns
'''
This is my database class; I create an object whenever I need a connection to the DB, and the __init__ and __del__ methods take care of constructing and destructing the object.
Should I be using DRCP / a session pool to improve performance?
What if there are too many users waiting because all the connections in DRCP are taken?
Can I have a session pool per database (for the 100 databases, where each database can take at most 5 connections at a time for that read_only_user)?
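For reference, a minimal sketch of what a per-database session pool could look like with cx_Oracle's SessionPool (the host, port, and credentials are placeholders; min/max mirror the 5-session profile limit from the question):

import cx_Oracle

# One pool per target database, capped at 5 sessions to mirror the profile limit
pools = {}
for db_name in ('db1', 'db2'):  # ... one entry per database
    dsn = cx_Oracle.makedsn('my_host', 1521, db_name)
    pools[db_name] = cx_Oracle.SessionPool('read_only_user', 'password', dsn,
                                           min=1, max=5, increment=1,
                                           threaded=True)

# Borrow a connection, run the query, and release it back to the pool
conn = pools['db1'].acquire()
try:
    cursor = conn.cursor()
    cursor.execute('select * from my_table')
    rows = cursor.fetchall()
finally:
    pools['db1'].release(conn)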

How to use "INSERT" in psycopg2 connection pooling?

I use psycopg2 to connect to PostgreSQL from Python, and I want to use connection pooling.
I don't know what I should do about commit() and rollback() when I execute an INSERT query.
from contextlib import contextmanager
from psycopg2 import pool

db = pool.SimpleConnectionPool(1, 10, host=conf_hostname, database=conf_dbname,
                               user=conf_dbuser, password=conf_dbpass, port=conf_dbport)

# Get Cursor
@contextmanager
def get_cursor():
    con = db.getconn()
    try:
        yield con.cursor()
    finally:
        db.putconn(con)

with get_cursor() as cursor:
    cursor.execute("INSERT INTO table (fields) VALUES (values) RETURNING id")
    id = cursor.fetchone()
I don't get the id of the inserted record without commit().
UPDATE: I cannot test the code, but here are some ideas.
Do the commit on the connection, not on the pool. Yield the connection instead of a cursor:

# Get Connection
@contextmanager
def get_connection():
    con = db.getconn()
    try:
        yield con
    finally:
        db.putconn(con)

with get_connection() as con:
    cursor = con.cursor()
    cursor.execute("INSERT INTO table (fields) VALUES (values) RETURNING id")
    con.commit()
    id = cursor.fetchone()

or commit inside the context manager itself:

# Get Cursor
@contextmanager
def get_cursor():
    con = db.getconn()
    try:
        yield con.cursor()
        con.commit()
    finally:
        db.putconn(con)

with get_cursor() as cursor:
    cursor.execute("INSERT INTO table (fields) VALUES (values) RETURNING id")
    id = cursor.fetchone()
Connection pooling exists because creating a new connection to a db can be expensive, not to avoid commits or rollbacks. So you can commit your data without any issue; committing will not destroy the connection.
Here is my working example:

db = pool.SimpleConnectionPool(1, 10, host=conf_hostname, database=conf_dbname,
                               user=conf_dbuser, password=conf_dbpass, port=conf_dbport)

@contextmanager
def get_connection():
    con = db.getconn()
    try:
        yield con
    finally:
        db.putconn(con)

def write_to_db():
    with get_connection() as conn:
        try:
            cursor = conn.cursor()
            cursor.execute("INSERT INTO table (fields) VALUES (values) RETURNING id")
            id = cursor.fetchone()
            cursor.close()
            conn.commit()
        except:
            conn.rollback()
I think this will be a little more pythonic:

db_pool = pool.SimpleConnectionPool(1, 10,
                                    host=CONF.db_host,
                                    database=CONF.db_name,
                                    user=CONF.db_user,
                                    password=CONF.db_password,
                                    port=CONF.db_port)

@contextmanager
def db():
    con = db_pool.getconn()
    cur = con.cursor()
    try:
        yield con, cur
    finally:
        cur.close()
        db_pool.putconn(con)

if __name__ == '__main__':
    with db() as (connection, cursor):
        try:
            cursor.execute("""INSERT INTO table (fields)
                              VALUES (values) RETURNING id""")
            my_id = cursor.fetchone()
            rowcount = cursor.rowcount
            if rowcount == 1:
                connection.commit()
            else:
                connection.rollback()
        except psycopg2.Error as error:
            print('Database error:', error)
        except Exception as ex:
            print('General error:', ex)
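As a further note, psycopg2 connections can themselves be used as transaction context managers (commit on success, rollback on exception, without closing the connection), which pairs naturally with a pool. A sketch assuming the db_pool above:

con = db_pool.getconn()
try:
    with con:                      # transaction scope: commit or rollback on exit
        with con.cursor() as cur:  # cursor scope: closed on exit
            cur.execute("INSERT INTO table (fields) VALUES (values) RETURNING id")
            my_id = cur.fetchone()
finally:
    db_pool.putconn(con)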
