How to upload logging to mysql using pymysql - python

I want to collect and check what errors are occurring, so I am trying to upload log in the database. I wrote the code to upload the log to mysql by referring to this page. python logging to database.
However, I get the following error. Which part is wrong? Also, if there is another way to easily upload logs in mysql, please let me know.
import logging
import time
import pymysql
user = 'test'
passw = '******'
host = 'db'
port = ****
database = '****'
db_tbl_log = 'log'
log_file_path = 'C:\\Users\\Desktop\\test_log.txt'
log_error_level = 'DEBUG' # LOG error level (file)
log_to_db = True # LOG to database?
class LogDBHandler(logging.Handler):
'''
Customized logging handler that puts logs to the database.
pymssql required
'''
def __init__(self, sql_conn, sql_cursor, db_tbl_log):
logging.Handler.__init__(self)
self.sql_cursor = sql_cursor
self.sql_conn = sql_conn
self.db_tbl_log = db_tbl_log
def emit(self, record):
# Set current time
tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
# Clear the log message so it can be put to db via sql (escape quotes)
self.log_msg = record.msg
self.log_msg = self.log_msg.strip()
self.log_msg = self.log_msg.replace('\'', '\'\'')
# Make the SQL insert
sql = 'INSERT INTO ' + self.db_tbl_log + ' (log_level, ' + \
'log_levelname, log, created_at, created_by) ' + \
'VALUES (' + \
'' + str(record.levelno) + ', ' + \
'\'' + str(record.levelname) + '\', ' + \
'\'' + str(self.log_msg) + '\', ' + \
'(convert(datetime2(7), \'' + tm + '\')), ' + \
'\'' + str(record.name) + '\')'
try:
self.sql_cursor.execute(sql)
self.sql_conn.commit()
# If error - print it out on screen. Since DB is not working - there's
# no point making a log about it to the database :)
except pymysql.Error as e:
print("error: ", e)
# print(sql)
# print('CRITICAL DB ERROR! Logging to database not possible!')
# Main settings for the database logging use
if (log_to_db):
# Make the connection to database for the logger
log_conn = pymysql.connect(host=host,
port=port,
user=user,
password=passw,
database=database,
charset='utf8')
log_cursor = log_conn.cursor()
logdb = LogDBHandler(log_conn, log_cursor, db_tbl_log)
# Set logger
logging.basicConfig(filename=log_file_path)
# Set db handler for root logger
if (log_to_db):
logging.getLogger('').addHandler(logdb)
# Register MY_LOGGER
log = logging.getLogger('MY_LOGGER')
log.setLevel(log_error_level)
# Example variable
test_var = 'This is test message'
# Log the variable contents as an error
log.error('This error occurred: %s' % test_var)
error: (1064, "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2021-02-22 16:52:06')), 'MY_LOGGER')' at line 1")

Don't format SQL statements by yourself, you will miss a lot of cases. Just pass them as the second parameter:
sql = f'INSERT INTO {self.db_tbl_log} (log_level, log_levelname, log, created_at, created_by) VALUES (%s, %s, %s, %s, %s)'
self.sql_cursor.execute(sql, (record.levelno, record.levelname, self.log_msg, tm, record.name))
%s is placeholder, pymysql will convert given params to valid formats one by one.

Related

Extract java.sql.SQLException from execute call in Python - AWS Glue

I am running a AWS Glue job to execute stored procedures in an oracle database. I want to be able to catch the sql exception when a stored procedure fails. I am using 'from py4j.java_gateway import java_import' to set up the connection and execute sql commands on the connection.
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import boto3
############################################
### UPDATE THE STORE PROCEDURE NAME HERE ###
sp_names = [
'DEV_WF_POC1',
'DEV_WF_POC2'
]
############################################
#Set the conection name (Will be replaced for FT and PROD by powershell deployment script)
glue_connection_name = 'dw-dev-connection'
#Use systems args to return job name and pass to local variable
args = getResolvedOptions(sys.argv, ['JOB_NAME','WORKFLOW_NAME', 'WORKFLOW_RUN_ID'])
workflow_name = args['WORKFLOW_NAME']
workflow_run_id = args['WORKFLOW_RUN_ID']
glue_job_name = args['JOB_NAME']
#Create spark handler and update status of glue job
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(glue_job_name, args)
job.commit()
logger = glueContext.get_logger()
glue_client = boto3.client('glue')
#Extract connection details from Data Catelog
source_jdbc_conf = glueContext.extract_jdbc_conf(glue_connection_name)
#Import Python for Java Java.sql libs
from py4j.java_gateway import java_import
java_import(sc._gateway.jvm,"java.sql.Connection")
java_import(sc._gateway.jvm,"java.sql.DatabaseMetaData")
java_import(sc._gateway.jvm,"java.sql.DriverManager")
java_import(sc._gateway.jvm,"java.sql.SQLException")
#Extract the URL from the JDBC connection
oracleurl = source_jdbc_conf.get('url')
# Update connection string to expected Oracle format
oracleurl = oracleurl.replace("oracle://", "oracle:thin:#")
oracleurl = oracleurl + ':orcl'
#Create the connection to the Oracle database with java.sql
conn = sc._gateway.jvm.DriverManager.getConnection(oracleurl, source_jdbc_conf.get('user'), source_jdbc_conf.get('password'))
#Change autocommit to false to avoid Table lock error
conn.setAutoCommit(False);
# error dict
errs = {}
err = ''
try:
for sp_name in sp_names:
#Prepare call storeproc statement and execute
cstmt = conn.prepareCall("{call reporting." + sp_name + "}");
results = cstmt.execute();
conn.commit();
# capture error
except Exception as e: # work on python 3.x
##errs['msg'] = str(sc._gateway.jvm.SQLException.getMessage())- doesn't work
errs['error'] = str(e)
errs['sp_name'] = sp_name
errs['error_type'] = str(type(e)).replace("<class '","").replace("'>","")
if len(errs) != 0:
stmt = conn.createStatement();
sql = "insert into dev_workflow_errors (timestamp, workflow_id, workflow_name, job_name, sp_name, error_type, error) values (current_timestamp, '" + workflow_run_id + "', '" + workflow_name + "', '" + glue_job_name + "', '" + errs['sp_name'] + "', '" + errs['error_type'] + "', '" + errs['msg'] + "')"
rs = stmt.executeUpdate(sql);
conn.commit();
#sys.exit(1)
#Close down the connection
conn.close();
#Update Logger
logger.info("Finished")
I tried the pythonic 'Try' and 'Except' method but for the base exception i just get the full 'py4j.protocol.Py4JJavaError' error message. Inside this message conatins the database specific error messages i want to extract.
Can I use 'java_import(sc._gateway.jvm,"java.sql.SQLException")' in any way to extract database specific errors from the execute function?

Why Does Empty MySQL Database Claim to Have Duplicates (Python 3)?

So I have a quick function that's supposed to upload data (stored in a Python dictionary) to a MySQL database.
def uploadData(of_item):
global DB_HOST
global DB_USER
global DB_PASSWORD
global DB_DATABASE
my_db = connector.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, database=DB_DATABASE, port=3306)
my_db.autocommit = True
my_cursor = my_db.cursor()
print("\rThe DB pipeline is now connected.")
slots_text = ", ".join([a[0] for a in of_item.items()])
values_text = ", ".join(["'" + a[1].replace("'", "\\'") + "'" for a in of_item.items()])
set_portion_text = ", ".join([a[0] + " = " + "'" + a[1].replace("'", "\\'") + "'" for a in of_item.items()])
sql = 'INSERT INTO UsersData ({0}) VALUES ({1})'.format(slots_text, values_text)
try:
my_cursor.execute(sql)
row_cnt = my_cursor.rowcount
my_db.commit()
my_cursor.close()
my_db.close()
print("\r" + str(row_cnt) + " is now in UsersData.")
return [True, str(row_cnt)]
except Exception as exception:
print("\n".join(["The update failed for profileID: " + of_item['UniqueId'],
str(exception),
str(sql),
"*",
'Item:',
str(of_item),
"*"]))
my_cursor.close()
my_db.close()
return [False, 0]
Currently, the row_cnt sits at -1, so it should be entirely empty. However, when I execute the function, I'm constantly getting this thrown error:
1062 (23000): Duplicate entry 'ABCDEFGHIJKLMNOPQRSTUVWXYZ-123' for key 'profileId_2'
Now, profileId_2 is just this:
...
UNIQUE KEY `profileId_2` (`profileId`,`companyId`),
...
profileId is whatever the user's unique ID is, and companyId is just a preset (in this case, 123). It's odd that there would be a claimed duplicate, since there's nothing in the database yet.
First, what might be causing this error? Second, how can I get through it and successfully append new entries to UsersData?

PEP8 Cleanup - Access to a protected member _last_executed of a class

I have the block of code below that traps warnings a few different query types and writes them to a lot and sends me a google chat message if they fail/warn. I get a pep8 error on str(cursor._last_executed):
Access to a protected member _last_executed of a class
Do I need to make changes on this? It does do what I want - sends mysql for failures to the log
db = MySQLdb.connect("localhost", "root", "1234", "search")
cursor = db.cursor()
try:
with warnings.catch_warnings(record=True) as w:
if warn_type == 2:
cursor.execute(query_string, kargs['field_split'] + kargs['user'])
elif warn_type == 3:
cursor.execute(query_string, (
kargs['reportid'], kargs['timestamp'], kargs['reportid'], kargs['reportid'], kargs['timestamp']))
else:
cursor.execute(query_string, kargs['field_split'])
db.commit()
if w:
logger.warning('Mysql Warning : %s', w[-1])
logger.warning('Statement : %s', str(cursor._last_executed))
logger.warning(kargs['field_split'])
# noinspection PyUnresolvedReferences
string_google = 'Warning - ' + str(w[-1].message) + ' - ' + str(cursor._last_executed)
googlechat(string_google)

nginx uwsgi bottle postgresql psycopg2

My requirement:
About 300 (field sensor-like) clients report their status (json string) once every X (say 10) minutes. They can only use "curl" to do the reporting . The API/handler on the server needs to parse and dump all those values into the master table.
A few users (usually less than 10) connect to the app from their browsers to check the status of the sensor-reports and may linger around checking a few pages (main status page, detailed report page etc.). There are less than 10 pages (think reports) that the users want to see.
My setup:
Web Server: nginx
App Server: uwsgi
Framework: Bottle
Database: PostgreSQL
Python DB Driver: psycopg2
Frontend: Bootstrap
My code:
Please note that I did not include a lot of error checking and other security measures that we have in code, simply because they do not contribute to this discussion.
import os
from bottle import route, post, run, request, template, install, static_file
import psycopg2
import customemailservice
#route('/static/<filepath:path>')
def server_static(filepath):
return static_file(filepath, root='/webapp/ss/static')
# The URL that the sensors will hit to pass on their status report
#post('/logger')
def dolog():
sensor_id = request.json['id']
sensor_ts = request.json['ts']
sensor_ut = request.json['main']['ut']
sensor_ploss = request.json['main']['ploss']
sensor_s1 = request.json['main']['s1']
sensor_s2 = request.json['main']['s2']
sensor2_status = request.json['aux']['status']
sensor2_rts = request.json['aux']['rts']
try:
conn = psycopg2.connect('dbname=<dbnane> user=<username> password= <password> host=<dbhost> port=<dbport>')
except psycopg2.Error as pe:
print pe.pgerror
curr = conn.cursor()
if conn != None and curr != None:
curr.execute('''INSERT INTO tbllog (id, ts, ut, ploss, s1, s2, status, rts) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)''',(sensor_id, sensor_ts, sensor_ut, sensor_ploss, sensor_s1, sensor_s2, sensor2_status, sensor2_rts))
conn.commit()
curr.close()
conn.close()
else:
pass
# The code here is irrelevant
return template('Ok from {{sid}} at {{ts}}', sid=sid,ts=ts)
#route('/')
def index():
try:
conn = psycopg2.connect('dbname=<dbnane> user=<username> password=<password> host=<dbhost> port=<dbport>')
except psycopg2.Error as pe:
conn = None
print pe.pgerror
curr = conn.cursor()
if conn != None and curr != None:
sql = 'select t1.* from tbllog t1 where t1.ts = (select max(t2.ts) from tbllog t2 where t2.id=t1.id) order by id;'
curr.execute(sql)
rs=curr.fetchall()
html=""
for row in rs:
html = html + '<tr><td class="warning">' + row[0] + '</td><td class="warning">' + str(row[1]) + '</td><td class="success">' + str(row[2]) + '</td><td class="success">' + str(row[3]) + '</td><td class="success">' + str(row[4]) + '</td><td class="info">' + str(row[5]) + '</td><td class="info">' + str(row[6]) + '</td></tr>'
curr.close()
conn.close()
# Pass the raw html table that will be inserted into the index template.
return template('index',tdata=html)
#route('/status/<sensor_id>')
def getsensorid(sensor_id):
try:
conn = psycopg2.connect('dbname=<dbnane> user=<username> password=<password> host=<dbhost> port=<dbport>')
except psycopg2.Error as pe:
conn = None
print pe.pgerror
curr = conn.cursor()
if conn != None and curr != None:
sql = 'select * from tbllog where id=\'' + sensor_id + '\' order by ts;'
curr.execute(sql)
rs=curr.fetchall()
html=""
for row in rs:
html = html + '<tr class="info"><td>' + row[0] + '</td><td>' + str(row[1]) + '</td><td>' + str(row[2]) + '</td><td>' + str(row[3]) + '</td><td>' + str(row[4]) + '</td><td>' + str(row[5]) + '</td><td>' + str(row[6]) + '</td></tr>'
curr.close()
conn.close()
if __name__ == '__main__':
run (host="0.0.0.0", port=8080, debug=True)
else:
app = application = bottle.default_app()
My Question:
Given the requirements, is this a reasonable approach? Or, do you recommend I use DB connection pooling? I am a little confused about using pooling since I am not sure at what level (nginx, uwsgi or bottle) does my app code get duplicated (to serve concurrent clients) and how should I go about creating the pool that I can use across different threads/processes (each of which contain a copy of this app code).
Obviously, this is my initial foray into web apps (and even serious Python for that matter) and would like to hear back from you if you think there is a better (I'm assuming there are many) ways to skin the cat.

How do I solve the ORA-01704: string literal too long error in Python cx_oracle?

I am trying to update an entry in a table usinig Python cx_oracle. The column is named "template" and it has a data type of CLOB.
This is my code:
dsn = cx_Oracle.makedsn(hostname, port, sid)
orcl = cx_Oracle.connect(username + '/' + password + '#' + dsn)
curs = orcl.cursor()
sql = "update mytable set template='" + template + "' where id='6';"
curs.execute(sql)
orcl.close()
When I do this, I get an error saying the string literal too long. The template variable contains about 26000 characters. How do I solve this?
Edit:
I found this: http://osdir.com/ml/python.db.cx-oracle/2005-04/msg00003.html
So I tried this:
curs.setinputsizes(value = cx_Oracle.CLOB)
sql = "update mytable set template='values(:value)' where id='6';"
curs.execute(sql, value = template)
and I get a "ORA-01036: illegal variable name/number error"
Edit2:
So this is my code now:
curs.setinputsizes(template = cx_Oracle.CLOB)
sql = "update mytable set template= :template where id='6';"
print sql, template
curs.execute(sql, template=template)
I get an ORA-00911: invalid character error now.
Inserting values in sql statements is a very bad practice. You should use parameters instead:
dsn = cx_Oracle.makedsn(hostname, port, sid)
orcl = cx_Oracle.connect(username + '/' + password + '#' + dsn)
curs = orcl.cursor()
curs.setinputsizes(template = cx_Oracle.CLOB)
sql = "update mytable set template= :template where id='6'"
curs.execute(sql, template=template)
orcl.close()
Use IronPython
import sys
sys.path.append(r"...\Oracle\odp.net.11g.64bit")
import clr
clr.AddReference("Oracle.DataAccess")
from Oracle.DataAccess.Client import OracleConnection, OracleCommand, OracleDataAdapter
connection = OracleConnection('userid=user;password=hello;datasource=database_1')
connection.Open()
command = OracleCommand()
command.Connection = connection
command.CommandText = "SQL goes here"
command.ExecuteNonQuery()
Change your table definition. A varchar2 field can store up to 32767 bytes; so, if you're using an 8-bit encoding, you have a bit of room left to play with before resorting to LOBs.

Categories