Influence of forking on existing mysql-connection - python

I have a server which is/should be able to handle a lot of connections at the same time.
Now sometimes I have an error: “Lost connection to MySQL server during query”. At the time I am fixing this with
def mysql_handling(string):
    """Execute *string* on the shared MySQL connection, retrying on failure.

    The cursor is a module-level global.  On any MySQLdb error the cursor
    is discarded and rebuilt via get_cursor(), then the statement is
    retried (up to 5 attempts total).  Non-SELECT statements are
    committed.  After 5 failed attempts the process exits with status 1.
    """
    global cursor
    tries = 0
    # Bounded retry loop instead of `while True` + inner counter check.
    while tries < 5:
        try:
            cursor.execute(string)
            # Writes need an explicit commit; SELECTs do not.
            if 'SELECT' not in string:
                db.commit()
            return
        except MySQLdb.Error as e:  # `as e` works on Python 2.6+ and 3.x
            print("Error %d: %s" % (e.args[0], e.args[1]))
            cursor.close()
            time.sleep(0.1)
            cursor = get_cursor()  # setting up a new mysql connection
            # mysql_error_tracking(string)
            tries += 1
    # All retries exhausted: give up rather than loop forever.
    sys.exit(1)
So now I am able to handle a lot of connections at the same time, but when I get this error ("Lost connection"), I want to know at which query it happens, and whether there is some kind of pattern in it. So to track this, I also want to store it in the MySQL database with a script somewhat like this:
def mysql_error_tracking(string):
    """Record the failing query *string* in the query_errors table.

    Forks so the bookkeeping does not block the caller.  NOTE(review):
    the work runs in the branch where os.fork() returned non-zero, i.e.
    in the PARENT process, and ends with sys.exit(0) -- confirm this is
    intended (normally the child would do the work and exit).
    """
    # Clone/fork
    try:
        error_serv = os.fork()
    except OSError:
        # Previously a bare `except:` that fell through with error_serv
        # unbound, raising NameError on the next line.
        print("error_FORK failed!")
        return
    if error_serv:
        print('there is a MYSQL-ERROR, SAVE!!!')
        # Parameterized queries: the original string concatenation was an
        # SQL-injection vector and broke on quotes in the query text.
        cursor.execute("SELECT count FROM query_errors WHERE string = %s",
                       (str(string),))
        data = cursor.fetchone()
        print(data)
        if data is None:
            cursor.execute("INSERT INTO query_errors (string) VALUES (%s)",
                           (str(string),))
            db.commit()
            print('insert in queries')
        else:
            cursor.execute("UPDATE query_errors SET count = count + 1 WHERE string = %s",
                           (str(string),))
            db.commit()
            print('count ploes 1')
        sys.exit(0)
This does work, but forking seems to put a lot of pressure on the already-existing MySQL connection — would it be better not to fork at all?

Related

psycopg2.errors.InFailedSqlTransaction: current transaction is aborted, commands ignored until end of transaction block — don't know how to fix it

I am trying to insert data into my database using psycopg2 and I get this weird error. I tried some things but nothing works. This is my code:
def insert_transaction():
    """Insert a new row into the transactie table for the current user.

    Looks up the employee id for the (global) username, computes the next
    transaction number, and inserts the record.  Every failed statement is
    rolled back: without conn.rollback() PostgreSQL keeps the transaction
    in an aborted state and rejects all later statements with
    InFailedSqlTransaction -- the error this code originally hit.
    """
    global username
    now = datetime.now()
    date_checkout = datetime.today().strftime('%d-%m-%Y')
    time_checkout = now.strftime("%H:%M:%S")
    username = "Peter1"
    connection_string = "host='localhost' dbname='Los Pollos Hermanos' user='postgres' password='******'"
    conn = psycopg2.connect(connection_string)
    cursor = conn.cursor()
    try:
        try:
            query_check_1 = """(SELECT employeeid FROM employee WHERE username = %s);"""
            cursor.execute(query_check_1, (username,))
            employeeid = cursor.fetchone()[0]
            conn.commit()
        except Exception as e:
            # Reset the aborted transaction, otherwise every following
            # statement fails; without an employee id we cannot insert,
            # so bail out (originally employeeid stayed unbound here).
            conn.rollback()
            print("Employee error: {}".format(e))
            return
        try:
            query_check_2 = """SELECT MAX(transactionnumber) FROM Transaction"""
            cursor.execute(query_check_2)
            transactionnumber = cursor.fetchone()[0] + 1
            conn.commit()
        except Exception:
            # No transactions yet (or MAX returned NULL): start at 1, and
            # roll back so the INSERT below runs in a clean transaction.
            conn.rollback()
            transactionnumber = 1
        # ---------INSERT INTO TRANSACTION------------
        query_insert_transaction = """INSERT INTO transactie (transactionnumber, date, time, employeeemployeeid)
        VALUES (%s, %s, %s, %s);"""
        data = (transactionnumber, date_checkout, time_checkout, employeeid)
        cursor.execute(query_insert_transaction, data)
        conn.commit()
    finally:
        # Close the connection even when an error aborts the function.
        conn.close()
this is the error:
", line 140, in insert_transaction
cursor.execute(query_insert_transaction, data) psycopg2.errors.InFailedSqlTransaction: current transaction is aborted, commands ignored until end of transaction block
The error message means that one of the preceding SQL statements has resulted in an error. If an exception occurs while executing an SQL statement you need to call the connection's rollback method (conn.rollback()) to reset the transaction's state. PostgreSQL will not permit further statement execution otherwise.
Ideally you want to record the actual error for later analysis, so your code ought to be structured like this:
# Canonical psycopg2 write pattern: commit on success, roll back on any
# error so the connection leaves the "aborted transaction" state.
try:
cursor.execute(sql, values)
conn.commit()
except Exception as e:
# Log the real cause for later analysis...
print(f'Error {e}')
print('Anything else that you feel is useful')
# ...then reset the transaction: PostgreSQL refuses further statements
# until rollback() is called.
conn.rollback()

How to keep continuously write to Oracle table even if table is not accessible?

I am trying to insert multiple records into the one Oracle table continuously. For which I have written below python script.
import cx_Oracle
import config

connection = None
try:
    # Make a connection
    connection = cx_Oracle.connect(
        config.username,
        config.password,
        config.dsn,
        encoding=config.encoding)
    # show the version of the Oracle Database
    print(connection.version)
    # Insert 20000 records.  One cursor reused for every row (the original
    # created a fresh, never-closed cursor per iteration), and a bind
    # variable instead of string concatenation -- avoids re-parsing a new
    # statement each time and any chance of SQL injection.
    cursor = connection.cursor()
    sql = "INSERT into SCHEMA.ABC (EVENT_ID, EVENT_TIME) VALUES (:1, CURRENT_TIMESTAMP)"
    for i in range(1, 20001):
        cursor.execute(sql, [i])
        connection.commit()
    cursor.close()
except cx_Oracle.Error as error:
    print(error)
finally:
    # Close the connection even when an error aborts the loop.
    if connection:
        connection.close()
So, During the insert, when I change the table name it just create an exception and comes out of script(as the table is not available and cannot write). What I want is, Even if when I do the rename and table is not available, the script needs to keep continuously trying insert. Is there a way this can be possible?
Here's an example of what Ptit Xav was talking about. I added some code to quit after a max number of retries, since that's often desirable.
# Insert 20000 records
for i in range(1, 20001):
# Per-record retry state: keep retrying this row until it is inserted
# or the retry budget (100 attempts) is exhausted.
retry_count = 0
data_inserted = False
while not data_inserted:
try:
cursor = connection.cursor()
sql = "INSERT into SCHEMA.ABC (EVENT_ID, EVENT_TIME) VALUES( "+ str(i)+" , CURRENT_TIMESTAMP)"
cursor.execute(sql)
connection.commit()
data_inserted = True
except cx_Oracle.Error as error:
print(error)
time.sleep(5) # wait for 5 seconds between retries
retry_count += 1
if retry_count > 100:
print(f"Retry count exceeded on record {i}, quitting")
# Breaking out of the while skips its else clause below, so
# control falls through to the final break of the for loop.
break
else:
# while...else: runs only when the while ended normally (row
# inserted), not via break.
# continue to next record if the data was inserted
continue
# retry count was exceeded; break the for loop.
break
See this answer for more explanation of the while... else logic.
You may want to encapsulate the insert logic in a function that catches the possible exception and performs the retry.
def safe_insert(con, i):
    """Insert one row into ABC, retrying after any cx_Oracle error.

    Keeps retrying with a one-second pause between attempts; once more
    than ten consecutive attempts have failed, the last error is
    re-raised to the caller.
    """
    statement = "insert into ABC(EVENT_ID, EVENT_TIME) VALUES(:EVENT_ID,CURRENT_TIMESTAMP) "
    failed_attempts = 0
    while True:
        try:
            with con.cursor() as cursor:
                cursor.execute(statement, [i])
                con.commit()
            return
        except cx_Oracle.Error as exc:
            print(f'error on inserting row {i}')
            print(exc)
            time.sleep(1)
            failed_attempts += 1
            if failed_attempts > 10:
                raise exc
Similar to #kfinity's answer I also added a limit on retry - if this limit is exceeded the function raise an exception.
Note also that the function uses bind variables in the INSERT statement which is preferable to the concatenation of the values in the statement.
The usage is as simple as
# Drive the retrying insert once for each of the 20000 ids.
for i in range(1, 20001):
safe_insert(con, i)

MySQL Connector could not process parameters

I'm trying to loop through an array and insert each element into a table. As far as I can see my syntax is correct and I took this code straight from Microsoft Azure's documentation.
# Connect, then (only if the connection succeeded -- try/else) insert
# each element of `data` as one row.
try:
    conn = mysql.connector.connect(**config)
    print("Connection established")
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        print("Something is wrong with the user name or password")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        print("Database does not exist")
    else:
        print(err)
else:
    cursor = conn.cursor()
    data = ['1','2','3','4','5']
    for x in data:
        # Fix for "Could not process parameters": (x) is just a
        # parenthesized string, not a tuple.  The connector requires a
        # sequence/dict of parameters, so pass the 1-tuple (x,).
        cursor.execute("INSERT INTO test (serial) VALUES (%s)", (x,))
        print("Inserted",cursor.rowcount,"row(s) of data.")
    conn.commit()
    cursor.close()
    conn.close()
    print("Done.")
When I run this, it gets to cursor.execute(...) and then fails. Here is the stack trace.
Traceback (most recent call last):
File "test.py", line 29, in
cursor.execute("INSERT INTO test (serial) VALUES (%s)",("test"))
File "C:\Users\AlexJ\AppData\Local\Programs\Python\Python37\lib\site-packages\mysql\connector\cursor_cext.py", line 248, in execute
prepared = self._cnx.prepare_for_mysql(params)
File "C:\Users\AlexJ\AppData\Local\Programs\Python\Python37\lib\site-packages\mysql\connector\connection_cext.py", line 538, in prepare_for_mysql
raise ValueError("Could not process parameters")
ValueError: Could not process parameters
Try this:
# Parameters must be passed as a sequence: note the 1-tuple (value,) --
# a bare (value) would be just a parenthesized string and the driver
# would reject it.
for x in data:
value = "test"
query = "INSERT INTO test (serial) VALUES (%s)"
cursor.execute(query,(value,))
print("Inserted",cursor.rowcount,"row(s) of data.")
Since you are using mysql module, cursor.execute requires a sql query and a tuple as parameters
Nice answer from #lucas, but maybe this helps others, since I think it is cleaner:
sql = "INSERT INTO your_db (your_table) VALUES (%s)"
# NOTE(review): [("data could be array")] is a one-element list holding a
# plain string -- the parentheses do NOT create a tuple here.  It works
# only because execute() accepts any sequence of parameters.
val = [("data could be array")]
cursor = cnx.cursor()
cursor.execute(sql, val)
print("Inserted",cursor.rowcount,"row(s) of data.")
cnx.commit()
cnx.close()
This is useful for my purpose of inserting multiple rows of data.
I'm facing the same issue, but instead of an array I'm looping through a set and inserting each item into a MySQL DB, and I got this error: mysql.connector.errors.ProgrammingError: Could not process parameters: str(Data_Tokens), it must be of type list, tuple or dict.
The uniqueTokenSet includes string data type, but as error shows that it must be list, tuple or dict. By converting item to list of tuple [(item)] work for me.
# NOTE(review): uniqueTokenSet is created empty right here, so the loop
# body never runs as written -- presumably it is populated elsewhere.
uniqueTokenSet = set()
for item in uniqueTokenSet:
tokenSql = "insert into tokens(token) values (%s)"
# [(item)] is a one-element list (the inner parentheses are a no-op),
# which satisfies the driver's "list, tuple or dict" requirement.
data = [(item)]
mycursor.execute(tokenSql, data)
print('data inserted')
mydb.commit()

MySQL update does not update

I have a problem where the MySQL update part sometimes does not update a value, and it appears to be random — I can't find the cause. It takes MySQL entries with the key value "TRANSFER" and should set that key value to "EXECUTED". Sometimes I have 20 processes that all work fine; other times I have 10 processes and half of them didn't get updated.
# Connect to the local MySQL server; autocommit(True) makes every UPDATE
# permanent immediately -- there is no explicit commit below.
try:
db = MySQLdb.connect(
host='localhost',
user='root',
passwd='pw',
db='db',
)
db.autocommit(True)
except Exception as e:
sys.exit("Can't connect to db")
cur = db.cursor()
setstatus = "EXECUTED"
# For every file in the current directory: mark the matching row as
# EXECUTED, upload the file via FTP, then archive (or quarantine) it.
for fn in os.listdir('.'):
if os.path.isfile(fn):
# The file name minus its ".ac" extension is the row's UUID key.
# If no row matches this UUID, the UPDATE silently affects 0 rows.
UUID = fn.replace(".ac", "")
try:
cur.execute("""
UPDATE olorequest
SET status = %s
WHERE UUID = %s
""", (setstatus, UUID))
except Exception as e:
raise IOError(e)
ftp.storlines('STOR ' + fn, open(fn, 'r+'))
# Move the processed file; on failure park it in error_files instead
# and re-raise so the problem is visible.
try:
shutil.move(fn, executed_ac_files)
except Exception as e:
shutil.move(fn, error_files)
raise IOError(e)
time.sleep(5)
Basically, the reason a row is not updated by an UPDATE statement is that the predicate of the WHERE clause is not met. Additionally, since you perform this action through a program, also check its logic and reliability.

Python: MySQL: Handling timeouts

I am using Python and mySQL, and there is a long lag between queries. As a result, I get an 'MySQL connection has gone away' error, that is wait_timeout is exceeded.
This has been discussed e.g. in
Gracefully handling "MySQL has gone away"
but this does not specifically answer my query.
So my approach to handling this -
I have wrapped all my sql execute statements in a method as -
# Run *sql* on *cursor*; on MySQL error 2006 ("server has gone away",
# i.e. wait_timeout exceeded) restart the database connection.
# Python 2 `except X, e` syntax.  Note: the failed statement itself is
# NOT re-executed here -- that is the question being asked.
def __execute_sql(self,sql,cursor):
try:
cursor.execute(sql)
except MySQLdb.OperationalError, e:
# 2006 = CR_SERVER_GONE_ERROR; other operational errors are
# silently swallowed by this branch structure.
if e[0] == 2006:
self.logger.do_logging('info','DB', "%s : Restarting db" %(e))
self.start_database()
I have several places in the code which calls this query. The thing is, I also have several cursors, so the method invocations look like-
self.__execute_sql(sql,self.cursor_a)
self.__execute_sql(sql,self.cursor_b)
and so on
I need a way to gracefully re-execute the query after the db has been started. I could wrap the calls in an if statement, and re-execute so it would be
# Variant that reports the outcome: returns 1 when the statement ran,
# 0 after restarting the connection, so the caller can retry once.
def __execute_sql(self,sql,cursor):
try:
cursor.execute(sql)
return 1
except MySQLdb.OperationalError, e:
# 2006 = "MySQL server has gone away" (wait_timeout exceeded).
if e[0] == 2006:
self.logger.do_logging('info','DB', "%s : Restarting db" %(e))
self.start_database()
return 0
and then
# Caller-side retry: if the first attempt reported failure (0), run the
# same statement once more on the freshly restarted connection.
if (self.__execute_sql(sql,self.cursor_a) == 0):
self.__execute_sql(sql,self.cursor_a)
But this is clunky. Is there a better way to do this?
Thanks!!!
I tried Crasched's approach, which got me to a new OperationalError:
OperationalError: (2013, 'Lost connection to MySQL server during query')
My final solution was to first try the ping, and if another OperationalError was raised, to reconnect and recreate the cursor with the new connection, like so:
# Keep the connection alive before a query: ping with automatic
# reconnect, and fully rebuild connection + cursor if even the ping
# fails.
try:
    self.connection.ping(True)
except MySQLdb.OperationalError:
    self.connection = MySQLdb.connect(
        self.db_host,
        self.db_user,
        self.db_passwd,
        self.db_dbase,
        self.db_port)
    # Recreate the cursor from the new connection.  The original called
    # self.connection(...) directly, which raises TypeError -- a
    # connection object is not callable; cursor() is the factory method.
    self.cursor = self.connection.cursor(
        MySQLdb.cursors.DictCursor)
Back in business!
Python 2.7, MySQL 5.5.41
I had the same problem and wanted to wrap the exception to capture it but instead I solved it by using the following.
Before calling the execute, call
self.con.ping(TRUE)
http://www.neotitans.com/resources/python/mysql-python-connection-error-2006.html
http://mysql-python.sourceforge.net/MySQLdb.html
I can no longer find the original source material I found this out from, but this solved the problem immediately.
I was running into mystifying "MySQL server has gone away" errors, and here is my solution.
This solution will let you retry through MySQL errors, handle pretty much any type of query, include query variables in the query str or in a separate tuple, and collect and return all of the success and error messages you encounter along the way:
def execute_query(query_str, values=None):
    """Run one query on the shared MySQL connection.

    Returns a (success, num_affected_rows, result_rows, message) tuple
    and never raises for driver/runtime errors: failures are captured in
    *message* so callers (e.g. execute_query_with_retry) can decide what
    to do.
    """
    # defaults
    num_affected_rows = 0
    result_rows = None
    success = False
    message = "Error executing query: {}".format(query_str)
    # run the query
    try:
        mysql_conn = get_existing_mysql_connection()
        cur = mysql_conn.cursor()
        # `is None` instead of `== None` (identity test for the sentinel).
        if values is None or len(values) < 1:
            num_affected_rows = cur.execute(query_str)
        else:
            num_affected_rows = cur.execute(query_str, values)
        result_rows = cur.fetchall()  # only relevant to select, but safe to run with others
        cur.close()
        mysql_conn.commit()
        success = True
        message = "Mysql success for query: {}".format(query_str)
    except Exception as e:
        # Exception, not BaseException: KeyboardInterrupt / SystemExit
        # must propagate instead of being folded into a failure tuple.
        message = "Mysql error: {}; for query: {}".format(repr(e), query_str)
    return (success, num_affected_rows, result_rows, message)
def execute_query_with_retry(query_str, values=None, num_tries=3, message=""):
    """Run execute_query, reconnecting and retrying up to *num_tries* times.

    Each attempt's status is appended to *message*; the return value has
    the same (success, num_affected_rows, result_rows, message) shape as
    execute_query.  Recurses with num_tries - 1 after each failure.
    """
    # defaults
    success = False
    num_affected_rows = 0
    result_rows = None
    this_message = "Error executing query: {}".format(query_str)
    # should we still try?
    if num_tries < 1:
        this_message = "Ran out of tries for query: {}".format(query_str)
        return (False, 0, None, message + '; ' + this_message)
    num_tries_after_this = num_tries - 1
    # try to execute query
    try:
        (success, num_affected_rows, result_rows, this_message) = execute_query(query_str, values)
    except Exception as e:
        # Exception, not BaseException (let KeyboardInterrupt propagate),
        # and record WHY the attempt blew up -- the original discarded
        # the error and kept the generic default message.
        success = False
        this_message = "Exception from execute_query: {}".format(repr(e))
    # handle success or failure
    if success == True:
        return (True, num_affected_rows, result_rows, message + '; ' + this_message)
    else:
        open_new_mysql_connection()  # reconnect using password etc.
        return execute_query_with_retry(query_str, values=values, num_tries=num_tries_after_this, message=(message + '; ' + this_message))

Categories