I'm working on a python program with functionality such as inserting and retrieving values from a postgres database using psycopg2. The issue is that every time I want to create a query I have to connect to the database so the following code snippet is present multiple times throughout the file:
# Question's repeated connection snippet. NOTE(review): this is a fragment of
# a larger function — the bare `return cur` below only works inside a `def`.
# Instantiate Connection
try:
    conn = psycopg2.connect(
        user=userName,      # credentials/host presumably come from module-level
        password=passwrd,   # variables defined elsewhere — not visible here
        host=hostAddr,
        database=dbName
    )
    # Instantiate Cursor
    cur = conn.cursor()
    # NOTE(review): only the cursor is returned; `conn` is dropped, so the
    # caller cannot commit or close the connection afterwards.
    return cur
except psycopg2.Error as e:
    print(f"Error connecting to Postgres Platform: {e}")
    sys.exit(1)
My question is:
Is there a way I could just create a method to call every time I wish to connect to the database? I've tried creating one but I get a bunch of errors since variables cur and conn are not global
Could I just connect to the database once at the beginning of the program and keep the connection open for the entire time that the program is running? This seems like the easiest option but I am not sure if it would be bad practice (for reference the program will be running 24/7 so I assumed it would be better to only connect when a query is being made).
Thanks for the help.
You could wrap your own database handling class in a context manager, so you can manage the connections in a single place:
import psycopg2
import traceback
from psycopg2.extras import RealDictCursor


class Postgres(object):
    """Context manager owning one psycopg2 connection/cursor pair.

    Usage::

        with Postgres('dev') as (con, cur):
            cur.execute(...)

    Both the cursor and the connection are closed on exit.
    """

    def __init__(self, *args, **kwargs):
        # First positional argument selects the target database; default 'prod'.
        self.dbName = args[0] if len(args) > 0 else 'prod'
        self.args = args

    def _connect(self, msg=None):
        """Open the connection and a RealDictCursor for the selected database."""
        if self.dbName == 'dev':
            dsn = 'host=127.0.0.1 port=5556 user=xyz password=xyz dbname=development'
        else:
            dsn = 'host=127.0.0.1 port=5557 user=xyz password=xyz dbname=production'
        try:
            self.con = psycopg2.connect(dsn)
            self.cur = self.con.cursor(cursor_factory=RealDictCursor)
        except psycopg2.Error:
            # fixed: the original bare `except:` swallowed the failure, so
            # __enter__ then died with a confusing AttributeError on self.con.
            # Log the traceback and re-raise the real connection problem.
            traceback.print_exc()
            raise

    def __enter__(self, *args, **kwargs):
        self._connect()
        return (self.con, self.cur)

    def __exit__(self, *args):
        # Close cursor first, then connection; ignore attributes that were
        # never created (failed connect) or objects already closed.
        for c in ('cur', 'con'):
            try:
                getattr(self, c).close()
            except (AttributeError, psycopg2.Error):
                pass  # best-effort cleanup
        self.args, self.dbName = None, None
Usage:
# Usage: the context manager yields (connection, cursor) and closes both on exit.
with Postgres('dev') as (con, cur):
    print(con)
    # fixed: psycopg2's cursor.execute() returns None, so the original
    # print(cur.execute(...)) could never print the rows shown below —
    # execute first, then fetch the result explicitly.
    cur.execute('select 1+1')
    print(cur.fetchall())
print(con)  # verify connection gets closed!
Out:
<connection object at 0x109c665d0; dsn: '...', closed: 0>
[RealDictRow([('sum', 2)])]
<connection object at 0x109c665d0; dsn: '...', closed: 1>
It shouldn't be too bad to keep a connection open. The server itself should be responsible for closing connections it thinks have been around for too long or that are too inactive. We then just need to make our code resilient in case the server has closed the connection:
import psycopg2  # fixed typo: was "import pscyopg2"

# Module-level cached connection, re-created on demand if the server closed it.
CONN = None


def create_or_get_connection():
    """Return the cached connection, reconnecting if it is absent or closed."""
    global CONN
    if CONN is None or CONN.closed:
        CONN = psycopg2.connect(...)  # fill in the real DSN/credentials here
    return CONN
I have been down this road many times before, and you may be reinventing the wheel. If you need to interact with a database, I would highly recommend using an ORM like [Django][1] — it handles all of this for you using best practices. It takes some learning up front, but I promise it pays off.
If you don't want to use Django, you can use the following code to get or create the connection, together with a context manager for cursors, to avoid connection and cursor errors:
import psycopg2  # fixed typo: was "import pscyopg2"

# Module-level cached connection shared by run_sql().
CONN = None


def create_or_get_connection():
    """Return the cached connection, reconnecting if it is absent or closed."""
    global CONN
    if CONN is None or CONN.closed:
        CONN = psycopg2.connect(...)  # fill in the real DSN/credentials here
    return CONN


def run_sql(sql):
    """Run one SQL statement on the shared connection via a short-lived cursor."""
    # fixed: the original assigned to `con` but then used `conn` in the
    # `with` statement — a NameError at runtime.
    conn = create_or_get_connection()
    with conn.cursor() as curs:
        return curs.execute(sql)
This will allow you simply to run sql statements directly to the DB without worrying about connection or cursor issues.
If I wrap your code-fragment into a function definition, I don't get "a bunch of errors since variables cur and conn are not global". Why would they need to be global? Whatever the error was, you removed it from your code fragment before posting it.
Your try-catch doesn't make any sense to me. Catching an error just to hide the calling site and then bail out seems like the opposite of helpful.
When to connect depends on how you structure your transactions, how often you do them, and what you want to do if your database ever restarts in the middle of a program execution.
Related
I am using Flask with a MySQL (MariaDB) database. To handle the sql connection and cursor I use a self-made context manager. I open and close the connection inside each Flask http request handling function, so I can be sure that the number of connections to the db will not exceed a certain number, but it creates overhead. I am sure that the same mysql connections could be reused by other requests; what other approach to handling the sql connection and cursor can I use, if I do not use an ORM?
Context managers to hangle cursor and connection:
from contextlib import contextmanager

import mysql.connector
from mysql.connector.errors import Error


@contextmanager  # fixed: decorator was pasted as the comment "#contextmanager"
def mysql_connection(user, password, host, database, auth_plugin):
    """Yield a MySQL connection; commit on success, roll back on error, always close."""
    _conn = mysql.connector.connect(user=user, password=password, host=host,
                                    database=database, auth_plugin=auth_plugin)
    try:
        yield _conn
    except (Exception, Error):
        # if an error happened, all changes made during the connection are rolled back:
        _conn.rollback()
        # re-raise the error so it can be handled in the outer scope:
        raise
    else:
        # if everything is fine, commit all changes to save them in the db:
        _conn.commit()
    finally:
        # close the connection to the db, do not wait for the timeout release:
        _conn.close()
@contextmanager  # fixed: decorator was pasted as the comment "#contextmanager"
def mysql_curs(user, password, host, database, auth_plugin) -> "curs":
    """Yield a cursor on a managed connection; the cursor is closed afterwards."""
    with mysql_connection(user=user, password=password, host=host,
                          database=database, auth_plugin=auth_plugin) as _conn:
        _curs = _conn.cursor()
        try:
            yield _curs
        finally:
            _curs.close()  # close the cursor when everything is done
Some random Flask http handler function:
@app.route('/admin_panel/repair', methods=["GET"])  # fixed: was "#app.route"
def repair_show_all_menu_webpages():
    """The page exists to repair the menu if a non-existent flask function was added."""
    try:
        with common_db_ops.mysql_curs() as curs:
            left_side_menu = []
            webpages = admin_ops.show_all_menu_webpages_to_repair(curs)
    except (Exception, Error) as err:
        app.logger.error(f"Failed to repair website: {err}")
        abort(500)  # abort() raises, so the render below runs only on success
    return render_template('repair_menu.html', webpages=webpages, left_side_menu=left_side_menu)
Edit: I would like to add that I found the following article which discuss how to use Flask with PostgreSQL and create your customized sql connection context manager, but I have question where in Flask I should declare sql connectors Pool:
Manage RAW database connection pool in Flask
Try to pool connections
From offical docs:
A pool opens a number of connections and handles thread safety when
providing connections to requesters
Implementing connection pooling, you can reuse existing connections
dbconfig = {
    "database": "test",
    "user": "joe"
}

# fixed: mysql.connector.connect(pool_name=...) returns a *pooled connection*
# drawn from an implicit pool, not the pool object itself, so calling
# .get_connection() on it fails. Create the pool explicitly instead:
cnxpool = mysql.connector.pooling.MySQLConnectionPool(
    pool_name="mypool",
    pool_size=3,  # or any number to suit your need
    **dbconfig)
# then to get a connection from the pool use
cnx = cnxpool.get_connection()
For more see: https://dev.mysql.com/doc/connector-python/en/connector-python-connection-pooling.html
If anybody is interested in the approach of handling sql connection without ORM, I made the following steps to combine MySQL Connections Pool, context manager and Flask:
# Module-level pool shared by every request: pool_size connections are kept
# ready up front. NOTE(review): `pooling` (mysql.connector.pooling) and the
# DB_* constants are assumed to be imported/defined elsewhere in the module.
SQL_CONN_POOL = pooling.MySQLConnectionPool(
    pool_name="mysqlpool",
    pool_size=10,
    user=DB_USER,
    password=DB_PASS,
    host=DB_HOST,
    database=DATABASE,
    auth_plugin=DB_PLUGIN
)
@contextmanager  # fixed: decorator was pasted as the comment "#contextmanager"
def mysql_connection_from_pool() -> "conn":
    """Yield a connection borrowed from the module-level pool.

    Commits on success and rolls back on error; close() hands the connection
    back to the pool rather than really closing it.
    """
    conn_pool = SQL_CONN_POOL  # get a connection from the pool, all the rest is the same
    # you can add print(conn_pool) here to be sure that the pool
    # is the same for each http request
    _conn = conn_pool.get_connection()
    try:
        yield _conn
    except (Exception, Error):
        # if an error happened, all changes made during the connection are rolled back:
        _conn.rollback()
        # re-raise the error so it can be handled in the outer scope:
        raise
    else:
        # if everything is fine, commit all changes to save them in the db:
        _conn.commit()
    finally:
        # actually it returns the connection to the pool, rather than closing it
        _conn.close()
@contextmanager  # fixed: decorator was pasted as the comment "#contextmanager"
def mysql_curs_from_pool() -> "curs":
    """Yield a cursor on a pooled connection; the cursor is closed afterwards."""
    with mysql_connection_from_pool() as _conn:
        _curs = _conn.cursor()
        try:
            yield _curs
        finally:
            _curs.close()
I used the following links to answer the question:
Manage RAW database connection pool in Flask
MySQL docs
I am using python to subscribe to one topic, parse JSON and store it in the database. I have problems with losing the connection to MySQL because it can't stay open for too long. The message that I receive is below
_mysql_exceptions.OperationalError: (2006, 'MySQL server has gone away')
I managed to remove it by increasing the timeout, but that is not a good solution, because I can't know how long the system will need to wait for the next message.
Is there a possibility I could create connection only when message is received?
I tried to add the connection details into on_message, and then close the connection there, but I still have the same problem
def on_message(client, userdata, msg):
    """MQTT callback: insert one row per received message, reconnecting if needed."""
    sql = """INSERT INTO data(something) VALUES (%s)"""
    # fixed: ("some value") is just a parenthesized string; a 1-tuple is the
    # portable DB-API form for a single parameter.
    data = ("some value",)
    with db:
        try:
            cursor.execute(sql, data)
        except MySQLdb.Error:
            # the connection may have timed out; ping(True) reconnects, then retry once
            db.ping(True)
            cursor.execute(sql, data)
        except Exception as e:
            # fixed: the bare `except:` hid the real problem — report it
            print("error")
            print(e)
        print(cursor._last_executed)  # NOTE: private MySQLdb attribute, debug only
but then that variable is not visible outside this function. What is the best practice for this?
The part of the code that makes the connection is below
import paho.mqtt.client as mqtt
import MySQLdb
import json
import time

# mysql config — one connection opened once at program start
try:
    db = MySQLdb.connect(host="localhost",  # your host
                         user="admin",      # username
                         passwd="somepass", # password
                         db="mydb")         # name of the database
except MySQLdb.Error as e:
    # fixed: the bare `except:` hid the reason for the failure and let the
    # script continue with `db` undefined — at least report what went wrong
    print("error")
    print(e)
So as you see, I have created one connection to mysql at the beginning, and if there is no message for longer than the defined timeout my script stops working.
Try:
# Reconnect-and-retry pattern: attempt the query once and, if the server has
# dropped the connection, ping(True) re-establishes it before retrying.
cur = db.cursor()
try:
    cur.execute(query, params)
except MySQLdb.Error:
    db.ping(True)  # reconnect if the connection was lost
    cur.execute(query, params)
db.ping(True) tells the driver to reconnect to the DB if the connection was lost. You can also call db.ping(True) right after MySQLdb.connect. But to be on the safe side I'd rather wrap execute() in a try and call db.ping(True) in the except block.
I have a script running, updating certain values in a DB once a second.
At the start of the script I first connect to the DB:
# One connection opened at script start and kept for the whole run (~30 min).
conn = pymysql.connect(host= "server",
                       user="user",
                       passwd="pw",
                       db="db",
                       charset='utf8')
x = conn.cursor()  # single long-lived cursor reused for every update
I leave the connection open for the running time of the script (around 30min)
With this code I update certain values once every second:
# fixed: use driver-side parameter binding instead of %-formatting values
# into the SQL text — the original was an injection risk and broke on quotes.
# Note the placeholders are unquoted: the driver adds the quoting itself.
query = ("UPDATE missionFilesDelDro SET landLat = %s, landLon=%s, landHea=%s "
         "WHERE displayName=%s")
x.execute(query, (lat, lon, heading, mission))
conn.ping(True)  # reconnect if the connection was lost
However now when my Internet connection breaks the script also crashes since It can't update the variables. My connection normally reestablishes within one minute. (the script runs on a vehicle which is moving. Internet connection is established via a GSM Modem)
Is it better to re-open every time the connection to the server prior an update of the variable so I can see if the connection has been established or is there a better way?
You could just ping the connection first, instead of after the query, as that should reconnect if necessary.
Setup:
# Setup: open the connection once at script start.
conn = pymysql.connect(host= "server",
                       user="user",
                       passwd="pw",
                       db="db",
                       charset='utf8')
and every second:
# fixed: parameterized query instead of %-formatting values into the SQL text
# (injection risk, breaks on quotes); placeholders are unquoted on purpose.
query = ("UPDATE missionFilesDelDro SET landLat = %s, landLon=%s, landHea=%s "
         "WHERE displayName=%s")
conn.ping()  # reconnects if necessary, *before* running the query
x = conn.cursor()
x.execute(query, (lat, lon, heading, mission))
Ref https://github.com/PyMySQL/PyMySQL/blob/master/pymysql/connections.py#L872
It's still possible that the connection could drop after the ping() but before the execute(), which would then fail. For handling that you would need to trap the error, something similar to
from time import sleep

MAX_ATTEMPTS = 10

# every second:
# fixed: parameterized query instead of %-formatting values into the SQL text
query = ("UPDATE missionFilesDelDro SET landLat = %s, landLon=%s, landHea=%s "
         "WHERE displayName=%s")
inserted = False
attempts = 0
while (not inserted) and attempts < MAX_ATTEMPTS:
    attempts += 1
    try:
        conn.ping()
        x = conn.cursor()
        x.execute(query, (lat, lon, heading, mission))
        inserted = True
    # fixed: StandardError is Python 2 only — it no longer exists in Python 3.
    # Ideally catch the driver's specific error (e.g. pymysql.err.OperationalError).
    except Exception:
        sleep(10)  # however long is appropriate between tries
        # you could also do a whole re-connection here if you wanted
if not inserted:
    # do something
    # raise RuntimeError("Couldn't insert the record after {} attempts.".format(MAX_ATTEMPTS))
    pass
I'm guessing the script fails with an exception at the line x.execute(query) when the connection drops.
You could trap the exception and retry opening the connection. The following 'pseudo-python' demonstrates the general technique, but will obviously need to be adapted to use real function, method, and exception names:
def open_connection(retries, delay):
    """Pseudo-code: try to connect up to `retries` times, `delay` seconds apart."""
    for x in range(0, retries):  # fixed: C-style "for (x in ...)" is not Python
        conn = pymysql.connection()  # placeholder — substitute the real connect() call
        if conn.isOpen():            # placeholder — substitute the real "is open" check
            return conn
        sleep(delay)
    return None


conn = open_connection(30, 3)
x = conn.cursor()
while conn is not None and more_data:  # fixed: missing colon
    # read data here
    query = ...
    while conn is not None:  # Loop until data is definitely saved
        try:
            x.execute(query)
            break  # data saved, exit inner loop
        except SomeException:  # placeholder for the driver's real exception type
            conn = open_connection(30, 3)
            x = conn.cursor()
The general idea is that you need to loop and retry until either the data is definitely saved, or until you encounter an unrecoverable error.
Hm. If you're sampling or receiving data at a constant rate, but are only able to send it irregularly because of network failures, you've created a classic producer-consumer problem. You'll need one thread to read or receive the data, a queue to hold any backlog, and another thread to store the data. Fun! ;-)
I am trying to write a simple program to connect MySQL and perform some operations
# Module-level connection settings read by create_connection().
# NOTE(review): per the discussion below, `user` appears to get shadowed by a
# later `for user in users:` loop, which is what breaks the connect call.
host = '10.0.106.40'
user = 'ddddd'
port = 3306
passwd = 'DDDDDD'
db = 'bbbbbbb'
''' Creates a MySQL connection and returns the cursor '''
def create_connection(host=host, user=user, passwd=passwd, db=db, port=port):
    """Open a MySQL connection and return (connection, cursor).

    The defaults are bound to the module-level settings *at definition time*,
    so a later ``for user in users:`` loop can no longer shadow them — the
    cause of "connect() argument 2 must be string, not long". Existing
    zero-argument calls keep working unchanged.
    """
    connection = mysql.connect(host, user, passwd, db, port)
    cursor = connection.cursor()
    return connection, cursor
''' Close the connection '''
def close_connection(cursor, connection):
    """Release a (cursor, connection) pair.

    Closes the cursor first, commits any outstanding changes, then closes
    the connection itself.
    """
    cursor.close()       # release the cursor first
    connection.commit()  # persist pending changes
    connection.close()   # then drop the connection
The above functions are my skeletons. Now, when I try to do this
# Per-user reconnect variant — this is the version that fails.
# NOTE(review): `for user in users:` rebinds the module-level `user` that
# create_connection() reads; per the answers below, users holds integers,
# so the connect call receives a long instead of the username string.
for user in users:
    connection, cursor = create_connection()
    ...
    close_connection(cursor, connection)
I get this error
TypeError: connect() argument 2 must be string, not long
However, when I do this
# Single shared-connection variant — works because `user` still holds the
# module-level username string when create_connection() runs.
connection, cursor = create_connection()
for user in users:
    ...
    close_connection(cursor, connection)
The code runs perfectly fine! I'm not sure but why should that be? I really want to run the earlier version of the code as the latter one is too slow for me.
My guess is that the user being passed to create_connection() is from the line
for user in users:
That is why it works outside the loop, because then it is accessing the correct user, namely the one with a String datatype.
An alternative would be to use a while loop instead or change the line to something like:
for u in users:
Or else you should do what warwaruk suggests.
users is a list of integers?
That's why it's bad to deal with globals. Better pass some parameters to create_connection instead of taking values from globals:
def create_connection(host, user, passwd, db, port):
    """Connect to MySQL with explicit parameters; return (connection, cursor)."""
    connection = mysql.connect(host, user, passwd, db, port)
    return connection, connection.cursor()
Using the code below leaves me with an open connection, how do I close?
# Question code: the cursor is closed and deleted, but the connection itself
# is never closed — which is the problem being asked about.
import pyodbc
conn = pyodbc.connect('DRIVER=MySQL ODBC 5.1 driver;SERVER=localhost;DATABASE=spt;UID=who;PWD=testest')
csr = conn.cursor()
csr.close()
del csr  # deleting the cursor object does not touch the underlying connection
Connections have a close method as specified in PEP-249 (Python Database API Specification v2.0):
# Per PEP 249, connection objects expose close(); call it when you are done.
import pyodbc
conn = pyodbc.connect('DRIVER=MySQL ODBC 5.1 driver;SERVER=localhost;DATABASE=spt;UID=who;PWD=testest')
csr = conn.cursor()
csr.close()
conn.close() #<--- Close the connection
Since the pyodbc connection and cursor are both context managers, nowadays it would be more convenient (and preferable) to write this as:
# Context-manager form: the pyodbc connection supports `with` directly.
import pyodbc
conn = pyodbc.connect('DRIVER=MySQL ODBC 5.1 driver;SERVER=localhost;DATABASE=spt;UID=who;PWD=testest')
with conn:
    crs = conn.cursor()
    do_stuff
    # conn.commit() will automatically be called when Python leaves the outer `with` statement
    # Neither crs.close() nor conn.close() will be called upon leaving the `with` statement!!
See https://github.com/mkleehammer/pyodbc/issues/43 for an explanation for why conn.close() is not called.
Note that unlike the original code, this causes conn.commit() to be called. Use the outer with statement to control when you want commit to be called.
Also note that regardless of whether or not you use the with statements, per the docs,
Connections are automatically closed when they are deleted (typically when they go out of scope) so you should not normally need to call [conn.close()], but you can explicitly close the connection if you wish.
and similarly for cursors (my emphasis):
Cursors are closed automatically when they are deleted (typically when they go out of scope), so calling [csr.close()] is not usually necessary.
You can wrap the whole connection in a context manager, like the following:
from contextlib import contextmanager
import pyodbc
import sys


@contextmanager  # fixed: decorator was pasted as the comment "#contextmanager"
def open_db_connection(connection_string, commit=False):
    """Yield a cursor on a fresh connection; COMMIT or ROLLBACK, then close.

    On pyodbc.DatabaseError the transaction is rolled back and the error is
    re-raised. Otherwise COMMIT runs only when commit=True; the default is a
    ROLLBACK, so read-only use leaves no open transaction behind.
    """
    connection = pyodbc.connect(connection_string)
    cursor = connection.cursor()
    try:
        yield cursor
    except pyodbc.DatabaseError as err:
        # fixed: `error, = err.args` raised ValueError (args usually holds
        # (sqlstate, message)) and a str has no .message — report the
        # exception itself instead.
        sys.stderr.write(str(err))
        cursor.execute("ROLLBACK")
        raise err
    else:
        if commit:
            cursor.execute("COMMIT")
        else:
            cursor.execute("ROLLBACK")
    finally:
        connection.close()
Then do something like this where ever you need a database connection:
# Usage: the cursor is valid inside the block; the connection closes on exit.
with open_db_connection("...") as cursor:
    # Your code here
The connection will close when you leave the with block. This will also rollback the transaction if an exception occurs or if you didn't open the block using with open_db_connection("...", commit=True).
You might try turning off pooling, which is enabled by default. See this discussion for more information.
import pyodbc
# Disable pyodbc's connection pooling before the first connect; with pooling
# on, close() may return the connection to the pool instead of closing it.
pyodbc.pooling = False
conn = pyodbc.connect('DRIVER=MySQL ODBC 5.1 driver;SERVER=localhost;DATABASE=spt;UID=who;PWD=testest')
csr = conn.cursor()
csr.close()
del csr
You can define a DB class as below. Also, as andrewf suggested, use a context manager for cursor access.I'd define it as a member function.
This way it keeps the connection open across multiple transactions from the app code and saves unnecessary reconnections to the server.
from contextlib import contextmanager  # fixed: needed for the cursor() helper

import pyodbc


class MS_DB():
    """ Collection of helper methods to query the MS SQL Server database.
    """

    def __init__(self, username, password, host, port=1433, initial_db='dev_db'):
        self.username = username
        self._password = password
        self.host = host
        self.port = str(port)
        self.db = initial_db
        # fixed: the connection string began with "DRIVER=DRIVER=ODBC ..." —
        # the key was duplicated, producing an invalid driver name.
        conn_str = 'DRIVER=ODBC Driver 13 for SQL Server;SERVER=' + \
                   self.host + ';PORT=' + self.port + ';DATABASE=' + \
                   self.db + ';UID=' + self.username + ';PWD=' + \
                   self._password + ';'
        print('Connected to DB:', conn_str)  # NOTE: this echoes the password
        # fixed: pyodbc.pooling must be set *before* the first connect to have
        # any effect; the original set it after pyodbc.connect().
        pyodbc.pooling = False
        self._connection = pyodbc.connect(conn_str)

    def __repr__(self):
        return f"MS-SQLServer('{self.username}', <password hidden>, '{self.host}', '{self.port}', '{self.db}')"

    def __str__(self):
        return f"MS-SQLServer Module for STP on {self.host}"

    def __del__(self):
        self._connection.close()
        print("Connection closed.")

    @contextmanager  # fixed: decorator was pasted as the comment "#contextmanager"
    def cursor(self, commit: bool = False):
        """
        A context manager style of using a DB cursor for database operations.
        This function should be used for any database queries or operations that
        need to be done.

        :param commit:
            A boolean value that says whether to commit any database changes to the database. Defaults to False.
        :type commit: bool
        """
        cursor = self._connection.cursor()
        try:
            yield cursor
        except pyodbc.DatabaseError as err:
            print("DatabaseError {} ".format(err))
            cursor.rollback()
            raise err
        else:
            if commit:
                cursor.commit()
        finally:
            cursor.close()
ms_db = MS_DB(username='my_user', password='my_secret', host='hostname')
with ms_db.cursor() as cursor:
    # fixed: "##version" was a markdown-mangled "@@version" (the SQL Server
    # version variable)
    cursor.execute("SELECT @@version;")
    print(cursor.fetchall())  # fixed: was `cur.fetchall()` — a NameError
According to pyodbc documentation, connections to the SQL server are not closed by default. Some database drivers do not close connections when close() is called in order to save round-trips to the server.
To close your connection when you call close() you should set pooling to False:
import pyodbc
pyodbc.pooling = False
The most common way to handle connections, if the language does not have a self closing construct like Using in .NET, then you should use a try -> finally to close the objects. Its possible that pyodbc does have some form of automatic closing but here is the code I do just in case:
# try/finally guarantees cleanup even when the "do stuff" section raises.
conn = cursor = None
try:
    conn = pyodbc.connect('DRIVER=MySQL ODBC 5.1 driver;SERVER=localhost;DATABASE=spt;UID=who;PWD=testest')
    cursor = conn.cursor()
    # ... do stuff ...
finally:
    # Best-effort cleanup: either object may be None if connect() failed.
    # fixed: bare `except:` also swallowed KeyboardInterrupt/SystemExit —
    # catch Exception instead while keeping the deliberate ignore-on-close.
    try:
        cursor.close()
    except Exception:
        pass
    try:
        conn.close()
    except Exception:
        pass