2013, 'Lost connection to MySQL server during query' - python

This is my MySQLConnector code.
import logging
import pymysql

logger = logging.getLogger(__name__)

class MySQLConnector:
    def __init__(self, config):
        self.connection = None
        self._connect(config=config)

    def _connect(self, config):
        config["charset"] = "utf8mb4"
        config["cursorclass"] = pymysql.cursors.DictCursor
        self.connection = pymysql.connect(**config)

    def read(self, query, params=None):
        result = None
        try:
            with self.connection.cursor() as cursor:
                cursor.execute(query, params)
                result = cursor.fetchall()
            self.connection.commit()
        except Exception as e:
            logger.error(e)
        return result
And I use it like this:
import time

connector = MySQLConnector(config=config)

while True:
    query = "SELECT * FROM my_table"
    print(connector.read(query=query))
    time.sleep(30)
This works well, but after a few hours the process raises (2013, 'Lost connection to MySQL server during query'), and I can't find the reason.
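One common cause of this error is the server's wait_timeout closing connections that sit idle between polls. A minimal defensive sketch (not part of the original post), assuming the class above, uses PyMySQL's Connection.ping(reconnect=True) to re-establish a dropped connection before each query:

def read(self, query, params=None):
    result = None
    try:
        # Reconnect transparently if the server closed the idle connection.
        self.connection.ping(reconnect=True)
        with self.connection.cursor() as cursor:
            cursor.execute(query, params)
            result = cursor.fetchall()
        self.connection.commit()
    except Exception as e:
        logger.error(e)
    return result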

Related

AttributeError: 'NoneType' object has no attribute 'fetchall'

I tried to return the result of an SQL query.
This works perfectly with pyodbc but not with mysql-connector.
Error Output
File "/mySQLDB.py", line 17, in execute_and_fetch
result = conn.cursor().execute(query, params).fetchall()
AttributeError: 'NoneType' object has no attribute 'fetchall'
Code
import mysql.connector

class MYSQLDB:
    def __init__(self):
        self.host = 'xx'
        self.database = 'xx$xx'
        self.user = 'xx'
        self.password = 'xx'

    def execute(self, query, *params):
        mysql.connector.connect(host=self.host, database=self.database,
                                user=self.user, password=self.password).cursor().execute(query, params).commit()

    def execute_and_fetch(self, query, *params):
        conn = mysql.connector.connect(host=self.host, database=self.database,
                                       user=self.user, password=self.password)
        result = conn.cursor().execute(query, params).fetchall()
        conn.commit()
        return result
PEP 249, the DBAPI spec, says that the return value of execute is undefined. Quite a few DBAPI implementations return None, meaning that you cannot (portably) chain execute().fetchall() as you are trying to do.
Try this code. If you are still having problems, check that the connection to the database is successful.
import mysql.connector

class MYSQLDB:
    def __init__(self):
        self.host = 'xx'
        self.database = 'xx$xx'
        self.user = 'xx'
        self.password = 'xx'

    def execute(self, query, *params):
        conn = mysql.connector.connect(host=self.host, database=self.database,
                                       user=self.user, password=self.password)
        conn.cursor().execute(query, params)
        conn.commit()

    def execute_and_fetch(self, query, *params):
        conn = mysql.connector.connect(host=self.host, database=self.database,
                                       user=self.user, password=self.password)
        cursor = conn.cursor()
        cursor.execute(query, params)  # execute first...
        result = cursor.fetchall()     # ...then fetch from the cursor
        cursor.close()
        conn.commit()
        return result
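Hypothetical usage of the fixed class (the table and column names are placeholders; %s is mysql-connector's parameter placeholder):

db = MYSQLDB()
rows = db.execute_and_fetch("SELECT id, name FROM users WHERE id = %s", 42)
print(rows)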

How to modularize code containing 'with' in Python

I have this code that I am using to get information from a MySQL database:
def query_result_connect(_query):
    with SSHTunnelForwarder((ssh_host, ssh_port),
                            ssh_password=ssh_password,
                            ssh_username=ssh_user,
                            remote_bind_address=('127.0.0.1', 3306)) as server:
        connection = mdb.connect(user=sql_username,
                                 passwd=sql_password,
                                 db=sql_main_database,
                                 host='127.0.0.1',
                                 port=server.local_bind_port)
        cursor = connection.cursor()
        cursor.execute(_query)
        connection.commit()
        try:
            y = pd.read_sql(_query, connection)
            return y
        except TypeError as e:
            x = cursor.fetchall()
            return x
I would like to create a function that includes the following part:
with SSHTunnelForwarder((ssh_host, ssh_port),
                        ssh_password=ssh_password,
                        ssh_username=ssh_user,
                        remote_bind_address=('127.0.0.1', 3306)) as server:
    connection = mdb.connect(user=sql_username,
                             passwd=sql_password,
                             db=sql_main_database,
                             host='127.0.0.1',
                             port=server.local_bind_port)
and execute it in the query_result_connect() function. The problem is that I don't know how to include more code within the 'with' statement. The code should look something like this:
# Maybe introduce some arguments
def db_connection():
    with SSHTunnelForwarder((ssh_host, ssh_port),
                            ssh_password=ssh_password,
                            ssh_username=ssh_user,
                            remote_bind_address=('127.0.0.1', 3306)) as server:
        connection = mdb.connect(user=sql_username,
                                 passwd=sql_password,
                                 db=sql_main_database,
                                 host='127.0.0.1',
                                 port=server.local_bind_port)
        # Maybe return something

def query_result_connect(_query):
    # Call the db_connection() function somehow.
    # Write the following code in a way that is within the 'with' statement
    # of the db_connection() function.
    cursor = connection.cursor()
    cursor.execute(_query)
    connection.commit()
    try:
        y = pd.read_sql(_query, connection)
        return y
    except TypeError as e:
        x = cursor.fetchall()
        return x
Thank you
What about making "do_connection" a context manager itself?
from contextlib import contextmanager

@contextmanager
def do_connection():
    connection = ...          # prepare connection (tunnel + connect)
    try:
        yield connection
        connection.commit()   # perhaps you even want to call "commit" here
    finally:
        connection.close()    # close connection (__exit__)
Then, you will use it like this:
with do_connection() as connection:
    cursor = connection.cursor()
    ...
It is a common approach to use context managers for creating DB connections.
You could make your own Connection class that works as a context manager.
__enter__ sets up the SSH tunnel and the DB connection.
__exit__ tries to close the cursor, the DB connection and the SSH tunnel.
from sshtunnel import SSHTunnelForwarder
import psycopg2, traceback

class MyDatabaseConnection:
    def __init__(self):
        self.ssh_host = '...'
        self.ssh_port = 22
        self.ssh_user = '...'
        self.ssh_password = '...'
        self.local_db_port = 59059

    def _connect_db(self, dsn):
        try:
            self.con = psycopg2.connect(dsn)
            self.cur = self.con.cursor()
        except:
            traceback.print_exc()

    def _create_tunnel(self):
        try:
            self.tunnel = SSHTunnelForwarder(
                (self.ssh_host, self.ssh_port),
                ssh_password=self.ssh_password,
                ssh_username=self.ssh_user,
                remote_bind_address=('localhost', 5959),
                local_bind_address=('localhost', self.local_db_port)
            )
            self.tunnel.start()
            if self.tunnel.local_bind_port == self.local_db_port:
                return True
        except:
            traceback.print_exc()

    def __enter__(self):
        if self._create_tunnel():
            self._connect_db(
                "dbname=mf port=%s host='localhost' user=mf_usr" %
                self.local_db_port
            )
        return self

    def __exit__(self, *args):
        for c in ('cur', 'con', 'tunnel'):
            try:
                obj = getattr(self, c)
                obj.close()
                obj = None
                del obj
            except:
                pass

with MyDatabaseConnection() as db:
    print(db)
    db.cur.execute('Select count(*) from platforms')
    print(db.cur.fetchone())
Out:
<__main__.MyDatabaseConnection object at 0x1017cb6d0>
(8,)
Note:
I am connecting to Postgres, but this should work with MySQL as well. You will probably need to adjust it to match your own needs.
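If you do swap in MySQL, only the connect step changes. A hypothetical variant of _connect_db using PyMySQL (user, password and database name are placeholders):

import pymysql

def _connect_db(self):
    try:
        # Placeholders: adjust user/password/db to your setup.
        self.con = pymysql.connect(host='localhost',
                                   port=self.local_db_port,
                                   user='my_user',
                                   password='...',
                                   db='my_db')
        self.cur = self.con.cursor()
    except Exception:
        traceback.print_exc()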

How to use python to query database in parallel

I have two functions that I use to query a database. Assuming two separate queries, how do I run these in parallel against the same database, and wait for both results to return before continuing execution of the rest of the code?
def query1(param1, param2):
    result = None
    logging.info("Connecting to database...")
    try:
        conn = connect(host=host, port=port, database=db)
        curs = conn.cursor()
        curs.execute(query)
        result = curs
        curs.close()
        conn.close()
    except Exception as e:
        logging.error("Unable to access database %s" % str(e))
    return result

def query2(param1, param2):
    result = None
    logging.info("Connecting to database...")
    try:
        conn = connect(host=host, port=port, database=db)
        curs = conn.cursor()
        curs.execute(query)
        result = curs
        curs.close()
        conn.close()
    except Exception as e:
        logging.error("Unable to access database %s" % str(e))
    return result
Here is multi-threaded code that does what you're trying to accomplish:
import logging
from threading import Thread, Lock
from time import sleep

class DatabaseWorker(Thread):
    __lock = Lock()

    def __init__(self, db, query, result_queue):
        Thread.__init__(self)
        self.db = db
        self.query = query
        self.result_queue = result_queue

    def run(self):
        result = None
        logging.info("Connecting to database...")
        try:
            conn = connect(host=host, port=port, database=self.db)
            curs = conn.cursor()
            curs.execute(self.query)
            result = curs
            curs.close()
            conn.close()
        except Exception as e:
            logging.error("Unable to access database %s" % str(e))
        self.result_queue.append(result)

delay = 1
result_queue = []
worker1 = DatabaseWorker("db1", "select something from sometable",
                         result_queue)
worker2 = DatabaseWorker("db1", "select something from othertable",
                         result_queue)
worker1.start()
worker2.start()

# Wait for the job to be done
while len(result_queue) < 2:
    sleep(delay)
worker1.join()
worker2.join()
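As an alternative sketch (not from the original answer), the standard library's concurrent.futures waits for both results without the polling loop, reusing query1 and query2 from the question:

from concurrent.futures import ThreadPoolExecutor

# Submit both queries to worker threads; result() blocks until each finishes.
with ThreadPoolExecutor(max_workers=2) as executor:
    future1 = executor.submit(query1, param1, param2)
    future2 = executor.submit(query2, param1, param2)
    result1 = future1.result()
    result2 = future2.result()
# Execution continues here only after both queries have returned.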

When to use DRCP and sessionpool in cx_oracle python?

For a specific use case: I have 100 databases plus 1 central database. My app connects to the central database, which spawns connections to any of the 100 databases as per the user's request to run some query on one of them.
In this case, does using DRCP make sense? I don't want a connection to be killed while the user is running a query, but at the same time I don't want too many connections open to the database. I control that by creating a profile on the database which limits the number of active sessions to some low number, say 5, for that specific user (read_only_user) using that specific profile (read_only_profile).
Right now I am using the standard open-a-connection-per-request model, but I'm not sure if that's the best way to go about it.
import cx_Oracle
import logging, time

class Database(object):
    '''
    Use this method for DML SQLs:
    Inputs - SQL to be executed. Data related to that SQL
    Returns - The last inserted, updated, deleted ID.
    '''
    def __init__(self, user, password, host, port, service_name, mode, *args):
        # mode should be 0 if not cx_Oracle.SYSDBA
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.service_name = service_name
        self.logger = logging.getLogger(__name__)
        try:
            self.mode = mode
        except:
            self.mode = 0
            self.logger.info("Mode is not mentioned while creating database object")
        self.connection = None
        dsn = cx_Oracle.makedsn(self.host, self.port, self.service_name)
        self.connect_string = self.user + '/' + self.password + '@' + dsn
        try:
            self.connection = cx_Oracle.connect(self.connect_string, mode=self.mode,
                                                threaded=True)
            self.connection.stmtcachesize = 1000
            self.connection.client_identifier = 'my_app'
            self.cursor = self.connection.cursor()
            self.idVar = self.cursor.var(cx_Oracle.NUMBER)
        except cx_Oracle.DatabaseError, exc:
            error, = exc
            self.logger.exception(
                'Exception occurred while trying to create database object : %s',
                error.message)
            raise exc

    def query(self, q):
        try:
            self.cursor.execute(q)
            return self.cursor.fetchall()
        except cx_Oracle.DatabaseError, exc:
            error, = exc
            self.logger.info(
                "Error occurred while trying to run query: %s, error : %s", q,
                error.message)
            return error.message

    def dml_query(self, sql):
        try:
            self.cursor.execute(sql)
            self.connection.commit()
            return 1
        except Exception as e:
            self.logger.exception(e)
            return 0

    def dml_query_with_data(self, sql, data):
        """
        Use this method for DML SQLs:
        Inputs - SQL to be executed. Data related to that SQL
        Returns - The last inserted, updated, deleted ID.
        """
        try:
            self.cursor.execute(sql, data)
            self.connection.commit()
            return 1
        except Exception as e:
            self.logger.exception(e)
            return 0

    def update_output(self, clob, job_id, flag):
        try:
            q = "Select output from my_table where job_id=%d" % job_id
            self.cursor.execute(q)
            output = self.cursor.fetchall()
            # Checking if we already have some output in the clob for that job_id
            if output[0][0] is None:
                if flag == 1:
                    self.cursor.execute("""UPDATE my_table
                        SET OUTPUT = :p_clob
                        ,job_status=:status WHERE job_id = :p_key""",
                        p_clob=clob, status="COMPLETED", p_key=job_id)
                else:
                    self.cursor.execute("""UPDATE my_table
                        SET OUTPUT = :p_clob
                        ,job_status=:status WHERE job_id = :p_key""",
                        p_clob=clob, status="FAILED", p_key=job_id)
            else:
                self.cursor.execute("""UPDATE my_table
                    SET OUTPUT = OUTPUT || ',' || :p_clob
                    WHERE job_id = :p_key""", p_clob=clob, p_key=job_id)
            self.connection.commit()
            rows_updated = self.cursor.rowcount
            return rows_updated
        except Exception as e:
            self.logger.exception(e)
            return 0

    def __del__(self):
        try:
            if self.connection is not None:
                self.connection.close()
        except Exception as e:
            self.logger.exception(
                "Exception while trying to close database connection object : %s", e)

'''
if __name__ == '__main__':
    db = Database('test', 'test', 'my_host', '1000', 'my_db', 0)
    columns = db.query('select * from my-table')
    print columns
'''
This is my database class; I create an object whenever I need a connection to the DB, and the __init__ and __del__ methods take care of constructing and destroying the object.
Should I be using DRCP / a session pool to improve performance?
What if there are too many users waiting because all the connections in DRCP are taken?
Can I have a session pool per database (for the 100 databases, each database can take at most 5 connections at a time for that read_only_user)?
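For reference, a minimal cx_Oracle session-pool sketch, with placeholder credentials and the pool capped at 5 sessions to mirror the read_only_profile limit described above:

import cx_Oracle

# Placeholder host/port/service name; one pool like this per database.
dsn = cx_Oracle.makedsn('my_host', 1521, 'my_db')
pool = cx_Oracle.SessionPool(user='read_only_user', password='secret',
                             dsn=dsn, min=1, max=5, increment=1,
                             threaded=True)

conn = pool.acquire()       # borrow a session instead of opening a new one
cursor = conn.cursor()
cursor.execute('select count(*) from my_table')
print(cursor.fetchone())
cursor.close()
pool.release(conn)          # return the session to the pool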

Python 2.7 connection string to PostgreSQL (OOP method)

I'm new to Python and I'm trying to make this work. I'm using Python 2.7 and PostgreSQL 9.3:
#! F:\Python2.7.6\python
import psycopg2

class Database:
    host = "192.168.56.101"
    user = "testuser"
    passwd = "passwd"
    db = "test"

    def __init__(self):
        self.connection = psycopg2.connect(host=self.host,
                                           user=self.user,
                                           password=self.passwd,
                                           dbname=self.db)
        self.cursor = self.connection.cursor

    def query(self, q):
        cursor = self.cursor
        cursor.execute(q)
        return cursor.fetchall()

    def __del__(self):
        self.connection.close()

if __name__ == "__main__":
    db = Database()
    q = "DELETE FROM testschema.test"
    db.query(q)
However, I am getting the error "AttributeError: 'builtin_function_or_method' object has no attribute 'execute'". I figure I should put something like self.execute = something in the Database class, but I can't figure out what exactly I need to put there. Any suggestions?
You are missing the parentheses at the end:
self.cursor = self.connection.cursor()
or
cursor = self.cursor()
but not both.
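In context, the first option looks like this (only the affected methods shown; the rest of the class is unchanged):

def __init__(self):
    self.connection = psycopg2.connect(host=self.host,
                                       user=self.user,
                                       password=self.passwd,
                                       dbname=self.db)
    self.cursor = self.connection.cursor()  # note the parentheses

def query(self, q):
    cursor = self.cursor   # self.cursor is now a cursor object
    cursor.execute(q)
    return cursor.fetchall()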
