PEP8 Cleanup - Access to a protected member _last_executed of a class - python

I have the block of code below that traps warnings for a few different query types, writes them to a log, and sends me a Google Chat message if they fail/warn. I get a PEP 8 warning on str(cursor._last_executed):
Access to a protected member _last_executed of a class
Do I need to change anything here? It does do what I want: it sends the offending MySQL statement for failures to the log.
db = MySQLdb.connect("localhost", "root", "1234", "search")
cursor = db.cursor()
try:
    with warnings.catch_warnings(record=True) as w:
        if warn_type == 2:
            cursor.execute(query_string, kargs['field_split'] + kargs['user'])
        elif warn_type == 3:
            cursor.execute(query_string, (
                kargs['reportid'], kargs['timestamp'], kargs['reportid'],
                kargs['reportid'], kargs['timestamp']))
        else:
            cursor.execute(query_string, kargs['field_split'])
        db.commit()
        if w:
            logger.warning('Mysql Warning : %s', w[-1])
            logger.warning('Statement : %s', str(cursor._last_executed))
            logger.warning(kargs['field_split'])
            # noinspection PyUnresolvedReferences
            string_google = 'Warning - ' + str(w[-1].message) + ' - ' + str(cursor._last_executed)
            googlechat(string_google)
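For what it's worth, the warning is an IDE inspection complaint about relying on a private driver attribute, not a functional problem. A minimal sketch of one way around it, assuming the query string and parameters are still in scope (the helper name below is made up for illustration):

import logging

logger = logging.getLogger(__name__)

def log_statement(cursor, query_string, params):
    # Prefer the data we control; _last_executed is an implementation detail
    # of MySQLdb/pymysql and may be absent or renamed in other drivers.
    executed = getattr(cursor, '_last_executed', None)
    if executed is not None:
        logger.warning('Statement : %s', executed)
    else:
        logger.warning('Statement : %s -- params: %r', query_string, params)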

Related

How to upload logging to mysql using pymysql

I want to collect and check what errors are occurring, so I am trying to write logs to the database. I wrote the code to upload the logs to MySQL by referring to this page: python logging to database.
However, I get the following error. Which part is wrong? Also, if there is an easier way to store logs in MySQL, please let me know.
import logging
import time
import pymysql

user = 'test'
passw = '******'
host = 'db'
port = ****
database = '****'
db_tbl_log = 'log'
log_file_path = 'C:\\Users\\Desktop\\test_log.txt'
log_error_level = 'DEBUG'   # LOG error level (file)
log_to_db = True            # LOG to database?

class LogDBHandler(logging.Handler):
    '''
    Customized logging handler that puts logs to the database.
    pymssql required
    '''
    def __init__(self, sql_conn, sql_cursor, db_tbl_log):
        logging.Handler.__init__(self)
        self.sql_cursor = sql_cursor
        self.sql_conn = sql_conn
        self.db_tbl_log = db_tbl_log

    def emit(self, record):
        # Set current time
        tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
        # Clear the log message so it can be put to db via sql (escape quotes)
        self.log_msg = record.msg
        self.log_msg = self.log_msg.strip()
        self.log_msg = self.log_msg.replace('\'', '\'\'')
        # Make the SQL insert
        sql = 'INSERT INTO ' + self.db_tbl_log + ' (log_level, ' + \
              'log_levelname, log, created_at, created_by) ' + \
              'VALUES (' + \
              '' + str(record.levelno) + ', ' + \
              '\'' + str(record.levelname) + '\', ' + \
              '\'' + str(self.log_msg) + '\', ' + \
              '(convert(datetime2(7), \'' + tm + '\')), ' + \
              '\'' + str(record.name) + '\')'
        try:
            self.sql_cursor.execute(sql)
            self.sql_conn.commit()
        # If error - print it out on screen. Since DB is not working - there's
        # no point making a log about it to the database :)
        except pymysql.Error as e:
            print("error: ", e)
            # print(sql)
            # print('CRITICAL DB ERROR! Logging to database not possible!')

# Main settings for the database logging use
if (log_to_db):
    # Make the connection to database for the logger
    log_conn = pymysql.connect(host=host,
                               port=port,
                               user=user,
                               password=passw,
                               database=database,
                               charset='utf8')
    log_cursor = log_conn.cursor()
    logdb = LogDBHandler(log_conn, log_cursor, db_tbl_log)

# Set logger
logging.basicConfig(filename=log_file_path)

# Set db handler for root logger
if (log_to_db):
    logging.getLogger('').addHandler(logdb)

# Register MY_LOGGER
log = logging.getLogger('MY_LOGGER')
log.setLevel(log_error_level)

# Example variable
test_var = 'This is test message'

# Log the variable contents as an error
log.error('This error occurred: %s' % test_var)
error: (1064, "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2021-02-22 16:52:06')), 'MY_LOGGER')' at line 1")
Don't format SQL statements yourself; you will miss a lot of edge cases. Pass the values as the second argument to execute():
sql = f'INSERT INTO {self.db_tbl_log} (log_level, log_levelname, log, created_at, created_by) VALUES (%s, %s, %s, %s, %s)'
self.sql_cursor.execute(sql, (record.levelno, record.levelname, self.log_msg, tm, record.name))
%s is a placeholder; pymysql will convert each of the given parameters to a valid SQL literal for you.
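For context, a full emit() in that style might look like the sketch below. It is illustrative only: it keeps the question's table and column names, drops the manual quoting (the driver escapes each value), and lets MySQL parse the plain timestamp string instead of the SQL Server-specific convert(datetime2(7), ...) call that triggered the original 1064 error.

def emit(self, record):
    tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
    log_msg = str(record.msg).strip()
    sql = ('INSERT INTO ' + self.db_tbl_log +
           ' (log_level, log_levelname, log, created_at, created_by)'
           ' VALUES (%s, %s, %s, %s, %s)')
    try:
        # Parameters are escaped by pymysql, so quotes inside log_msg are safe.
        self.sql_cursor.execute(sql, (record.levelno, record.levelname,
                                      log_msg, tm, record.name))
        self.sql_conn.commit()
    except pymysql.Error as e:
        print("error: ", e)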

SQLITE - NOT NULL Constraint Failed - transferring a row from one table to another

I'm building a to-do list API with sqlite and flask. I've got a main.py file, a todo.db, and a helper.py.
I have a function (which is my POST request). I have two database tables called incomplete and complete. My function is supposed to "complete" an action by deleting the corresponding task from incomplete, copying it, and putting it in complete.
I tried a lot of different things and kept getting different errors, from "database is locked" to "error binding parameter 1", and now this:
Error: NOT NULL constraint failed: complete.task
I've gotten the function to at least delete the corresponding task from incomplete, and add something to complete, but it adds it incorrectly. Instead of something like this output when I use the GET request:
[{"complete": [["678", "do dishes" ]]}, {"incomplete": [["756", "Setting up API"]]}]
I get something like this
[{"complete": [["678", "[]"]}, {"incomplete": [["756", "Setting up API"]]}]
I'm not sure what I'm doing wrong. I'm also getting an error when I'm un-completing items (I get "Internal Server Error") :(
Here's the helper file, with parts omitted. This snippet includes the relevant parts, and the part I'm focusing on is add_to_complete().
import sqlite3
import random
# for id's because users dont set them

DB_PATH = './todo.db'

# connect to database
conn = sqlite3.connect(DB_PATH)
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS "complete" ("id" TEXT NOT NULL, "task" TEXT NOT NULL, PRIMARY KEY("id"));""")
# save the change
c.execute("""CREATE TABLE IF NOT EXISTS "incomplete" ("id" TEXT NOT NULL, "task" TEXT NOT NULL, PRIMARY KEY("id"));""")
conn.commit()

def add_to_incomplete(task):
    id = random.randint(100, 999)
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('insert into incomplete(id, task) values(?,?)', (id, task))
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None

def add_to_complete(inputId):
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select task from incomplete where id=?', (inputId,))
        tasks = c.fetchone()
        c.execute('insert into complete values(?,?)', (inputId, tasks))
        delete_task(inputId)
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None

def get_all_completes():
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select * from complete')
        rows = c.fetchall()
        conn.commit()
        return {"complete": rows}
    except Exception as e:
        print('Error: ', e)
        return None

def get_all_incompletes():
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select * from incomplete')
        rows = c.fetchall()
        conn.commit()
        return {"incomplete": rows}
    except Exception as e:
        print('Error: ', e)
        return None

def uncomplete(inputId):
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('select from complete where id=?', (inputId,))
        row = c.fetchall()
        c.execute('delete from complete where id=?', (inputId,))
        # you have that format (item,) because we need to pass execute() a tuple even if theres only one thing in the tuple
        add_to_incomplete(row)
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None

def delete_task(inputId):
    try:
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('delete from complete where id=?', (inputId,))
        c.execute('delete from incomplete where id=?', (inputId,))
        conn.commit()
        return {"id": id}
    except Exception as e:
        print('Error: ', e)
        return None
Here are the relevant parts of my main.py file:
import helper
from flask import Flask, request, Response
import json

app = Flask(__name__)

...

@app.route('/tasks/all', methods=["GET"])
def get_all_items():
    res_data = helper.get_all_completes(), helper.get_all_incompletes()
    response = Response(json.dumps(res_data), mimetype='application/json')
    return response

@app.route('/tasks/complete', methods=["POST"])
def complete_task():
    req_data = request.get_json()
    inputId = req_data['id']
    res_data = helper.add_to_complete(inputId)
    # find matching task to input id
    return "completed task" + inputId

@app.route('/tasks/incomplete', methods=["PATCH"])
def uncomplete_task():
    req_data = request.get_json()
    inputId = req_data['id']
    res_data = helper.uncomplete(inputId)
    if res_Data is None:
        response = Response("{'error': 'Error uncompleting task - '" + task + ", " + status + "}", status=400, mimetype='application/json')
    response = Response(json.dumps(res_data), mimetype='application/json')
    return "uncompleted task" + " " + inputId

...

@app.route('/tasks/remove', methods=["DELETE"])
def delete():
    req_data = request.get_json()
    inputId = req_data['id']
    res_data = helper.delete_task(inputId)
    if res_data is None:
        response = Response("{'error': 'Error deleting task - '" + task + "}", status=400, mimetype='application/json')
    return "deleted task id" + " " + inputId

Why Does Empty MySQL Database Claim to Have Duplicates (Python 3)?

So I have a quick function that's supposed to upload data (stored in a Python dictionary) to a MySQL database.
def uploadData(of_item):
    global DB_HOST
    global DB_USER
    global DB_PASSWORD
    global DB_DATABASE
    my_db = connector.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD, database=DB_DATABASE, port=3306)
    my_db.autocommit = True
    my_cursor = my_db.cursor()
    print("\rThe DB pipeline is now connected.")
    slots_text = ", ".join([a[0] for a in of_item.items()])
    values_text = ", ".join(["'" + a[1].replace("'", "\\'") + "'" for a in of_item.items()])
    set_portion_text = ", ".join([a[0] + " = " + "'" + a[1].replace("'", "\\'") + "'" for a in of_item.items()])
    sql = 'INSERT INTO UsersData ({0}) VALUES ({1})'.format(slots_text, values_text)
    try:
        my_cursor.execute(sql)
        row_cnt = my_cursor.rowcount
        my_db.commit()
        my_cursor.close()
        my_db.close()
        print("\r" + str(row_cnt) + " is now in UsersData.")
        return [True, str(row_cnt)]
    except Exception as exception:
        print("\n".join(["The update failed for profileID: " + of_item['UniqueId'],
                         str(exception),
                         str(sql),
                         "*",
                         'Item:',
                         str(of_item),
                         "*"]))
        my_cursor.close()
        my_db.close()
        return [False, 0]
Currently, row_cnt sits at -1, so the table should be entirely empty. However, when I execute the function, I constantly get this error thrown:
1062 (23000): Duplicate entry 'ABCDEFGHIJKLMNOPQRSTUVWXYZ-123' for key 'profileId_2'
Now, profileId_2 is just this:
...
UNIQUE KEY `profileId_2` (`profileId`,`companyId`),
...
profileId is whatever the user's unique ID is, and companyId is just a preset (in this case, 123). It's odd that there would be a claimed duplicate, since there's nothing in the database yet.
First, what might be causing this error? Second, how can I get through it and successfully append new entries to UsersData?
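As a hedged illustration (not the asker's code) of how to both parameterize the statement and survive re-running the same profile, the sketch below builds the insert with %s placeholders and an ON DUPLICATE KEY UPDATE clause keyed on the (profileId, companyId) unique index; the helper name and its arguments are made up for the example.

def upload_data_upsert(my_cursor, of_item):
    # Column names come straight from the dictionary keys, values are bound as
    # parameters so the driver handles quoting and escaping.
    columns = list(of_item.keys())
    placeholders = ", ".join(["%s"] * len(columns))
    updates = ", ".join(f"{col} = VALUES({col})" for col in columns)
    sql = (f"INSERT INTO UsersData ({', '.join(columns)}) "
           f"VALUES ({placeholders}) "
           f"ON DUPLICATE KEY UPDATE {updates}")
    # Rows that collide on the (profileId, companyId) unique key are updated
    # instead of raising error 1062.
    my_cursor.execute(sql, list(of_item.values()))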

nginx uwsgi bottle postgresql psycopg2

My requirement:
About 300 (field sensor-like) clients report their status (json string) once every X (say 10) minutes. They can only use "curl" to do the reporting. The API/handler on the server needs to parse and dump all those values into the master table.
A few users (usually less than 10) connect to the app from their browsers to check the status of the sensor-reports and may linger around checking a few pages (main status page, detailed report page etc.). There are less than 10 pages (think reports) that the users want to see.
My setup:
Web Server: nginx
App Server: uwsgi
Framework: Bottle
Database: PostgreSQL
Python DB Driver: psycopg2
Frontend: Bootstrap
My code:
Please note that I did not include a lot of the error checking and other security measures that we have in the code, simply because they do not contribute to this discussion.
import os
from bottle import route, post, run, request, template, install, static_file
import psycopg2
import customemailservice

@route('/static/<filepath:path>')
def server_static(filepath):
    return static_file(filepath, root='/webapp/ss/static')

# The URL that the sensors will hit to pass on their status report
@post('/logger')
def dolog():
    sensor_id = request.json['id']
    sensor_ts = request.json['ts']
    sensor_ut = request.json['main']['ut']
    sensor_ploss = request.json['main']['ploss']
    sensor_s1 = request.json['main']['s1']
    sensor_s2 = request.json['main']['s2']
    sensor2_status = request.json['aux']['status']
    sensor2_rts = request.json['aux']['rts']
    try:
        conn = psycopg2.connect('dbname=<dbnane> user=<username> password=<password> host=<dbhost> port=<dbport>')
    except psycopg2.Error as pe:
        print pe.pgerror
    curr = conn.cursor()
    if conn != None and curr != None:
        curr.execute('''INSERT INTO tbllog (id, ts, ut, ploss, s1, s2, status, rts) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)''', (sensor_id, sensor_ts, sensor_ut, sensor_ploss, sensor_s1, sensor_s2, sensor2_status, sensor2_rts))
        conn.commit()
        curr.close()
        conn.close()
    else:
        pass
    # The code here is irrelevant
    return template('Ok from {{sid}} at {{ts}}', sid=sid, ts=ts)

@route('/')
def index():
    try:
        conn = psycopg2.connect('dbname=<dbnane> user=<username> password=<password> host=<dbhost> port=<dbport>')
    except psycopg2.Error as pe:
        conn = None
        print pe.pgerror
    curr = conn.cursor()
    if conn != None and curr != None:
        sql = 'select t1.* from tbllog t1 where t1.ts = (select max(t2.ts) from tbllog t2 where t2.id=t1.id) order by id;'
        curr.execute(sql)
        rs = curr.fetchall()
        html = ""
        for row in rs:
            html = html + '<tr><td class="warning">' + row[0] + '</td><td class="warning">' + str(row[1]) + '</td><td class="success">' + str(row[2]) + '</td><td class="success">' + str(row[3]) + '</td><td class="success">' + str(row[4]) + '</td><td class="info">' + str(row[5]) + '</td><td class="info">' + str(row[6]) + '</td></tr>'
        curr.close()
        conn.close()
        # Pass the raw html table that will be inserted into the index template.
        return template('index', tdata=html)

@route('/status/<sensor_id>')
def getsensorid(sensor_id):
    try:
        conn = psycopg2.connect('dbname=<dbnane> user=<username> password=<password> host=<dbhost> port=<dbport>')
    except psycopg2.Error as pe:
        conn = None
        print pe.pgerror
    curr = conn.cursor()
    if conn != None and curr != None:
        sql = 'select * from tbllog where id=\'' + sensor_id + '\' order by ts;'
        curr.execute(sql)
        rs = curr.fetchall()
        html = ""
        for row in rs:
            html = html + '<tr class="info"><td>' + row[0] + '</td><td>' + str(row[1]) + '</td><td>' + str(row[2]) + '</td><td>' + str(row[3]) + '</td><td>' + str(row[4]) + '</td><td>' + str(row[5]) + '</td><td>' + str(row[6]) + '</td></tr>'
        curr.close()
        conn.close()

if __name__ == '__main__':
    run(host="0.0.0.0", port=8080, debug=True)
else:
    app = application = bottle.default_app()
My Question:
Given the requirements, is this a reasonable approach, or do you recommend I use DB connection pooling? I am a little confused about pooling because I am not sure at what level (nginx, uwsgi, or bottle) my app code gets duplicated to serve concurrent clients, and how I should go about creating a pool that can be shared across the different threads/processes (each of which contains a copy of this app code).
Obviously, this is my initial foray into web apps (and serious Python, for that matter), and I would like to hear back from you if you think there is a better way (I'm assuming there are many) to skin this cat.
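As a hedged illustration of what app-level pooling could look like (the DSN and pool sizes are placeholders, not values from the question): psycopg2 ships a ThreadedConnectionPool that each uwsgi worker process would create once at module import time and share among its own threads; separate worker processes each get their own independent pool.

from psycopg2.pool import ThreadedConnectionPool

# One module-level pool per worker process, shared by that process's threads.
pool = ThreadedConnectionPool(minconn=1, maxconn=5,
                              dsn='dbname=<dbname> user=<username> '
                                  'password=<password> host=<dbhost> port=<dbport>')

def latest_status_rows():
    conn = pool.getconn()
    try:
        with conn.cursor() as curr:
            curr.execute('select t1.* from tbllog t1 '
                         'where t1.ts = (select max(t2.ts) from tbllog t2 where t2.id = t1.id) '
                         'order by id')
            return curr.fetchall()
    finally:
        pool.putconn(conn)   # return the connection to the pool instead of closing it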

When to use DRCP and sessionpool in cx_oracle python?

For a specific use case: I have 100 databases and 1 central database. My app connects to that one central database, which then spawns connections to any of the 100 databases, as requested by the user, to run some query on them.
In this case, does using DRCP make sense? I don't want the connection to be killed while the user is running a query, but at the same time I don't want too many connections to be opened to the database. I currently control that by creating a profile on the database which limits the number of active sessions to some low number, say 5, for that specific user (read_only_user) using that specific profile (read_only_profile).
Right now I am using the standard open-a-connection-per-request model, but I'm not sure if that's the best way to go about it.
import cx_Oracle
import logging, time

class Database(object):
    '''
    Use this method to for DML SQLS :
    Inputs - Sql to be executed. Data related to that sql
    Returns - The last inserted, updated, deleted ID.
    '''
    def __init__(self, user, password, host, port, service_name, mode, *args):
        # mode should be 0 if not cx_Oracle.SYSDBA
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.user = user
        self.service_name = service_name
        self.logger = logging.getLogger(__name__)
        try:
            self.mode = mode
        except:
            self.mode = 0
            self.logger.info(" Mode is not mentioned while creating database object")
        self.connection = None
        dsn = cx_Oracle.makedsn(self.host, self.port, self.service_name)
        self.connect_string = self.user + '/' + self.password + '@' + dsn
        try:
            self.connection = cx_Oracle.connect(self.connect_string, mode=self.mode,
                                                threaded=True)
            self.connection.stmtcachesize = 1000
            self.connection.client_identifier = 'my_app'
            self.cursor = self.connection.cursor()
            self.idVar = self.cursor.var(cx_Oracle.NUMBER)
        except cx_Oracle.DatabaseError, exc:
            error, = exc
            self.logger.exception(
                'Exception occured while trying to create database object : %s',
                error.message)
            raise exc

    def query(self, q):
        try:
            self.cursor.execute(q)
            return self.cursor.fetchall()
        except cx_Oracle.DatabaseError, exc:
            error, = exc
            self.logger.info(
                "Error occured while trying to run query: %s, error : %s", q,
                error.message)
            return error.message

    def dml_query(self, sql):
        try:
            self.cursor.execute(sql)
            self.connection.commit()
            return 1
        except Exception as e:
            self.logger.exception(e)
            return 0

    def dml_query_with_data(self, sql, data):
        """
        Use this method to for DML SQLS :
        Inputs - Sql to be executed. Data related to that sql
        Returns - The last inserted, updated, deleted ID.
        """
        try:
            self.cursor.execute(sql, data)
            self.connection.commit()
            return 1
        except Exception as e:
            self.logger.exception(e)
            return 0

    def update_output(self, clob, job_id, flag):
        try:
            q = "Select output from my_table where job_id=%d" % job_id
            self.cursor.execute(q)
            output = self.cursor.fetchall()
            # Checking if we already have some output in the clob for that job_id
            if output[0][0] is None:
                if flag == 1:
                    self.cursor.execute("""UPDATE my_table
                                           SET OUTPUT = :p_clob
                                           ,job_status=:status WHERE job_id = :p_key""",
                                        p_clob=clob, status="COMPLETED", p_key=job_id)
                else:
                    self.cursor.execute("""UPDATE my_table
                                           SET OUTPUT = :p_clob
                                           ,job_status=:status WHERE job_id = :p_key""",
                                        p_clob=clob, status="FAILED", p_key=job_id)
            else:
                self.cursor.execute("""UPDATE my_table
                                       SET OUTPUT = OUTPUT || ',' || :p_clob
                                       WHERE job_id = :p_key""", p_clob=clob, p_key=job_id)
            self.connection.commit()
            rows_updated = self.cursor.rowcount
            return rows_updated
        except Exception as e:
            self.logger.exception(e)
            return 0

    def __del__(self):
        try:
            if self.connection is not None:
                self.connection.close()
        except Exception as e:
            self.logger.exception(
                "Exception while trying to close database connection object : %s", e)

'''
if __name__ == '__main__':
    db = Database('test', 'test', 'my_host', '1000', 'my_db', 0)
    columns = db.query('select * from my-table')
    print columns
'''
This is my database class; I create an object whenever I need to connect to the DB, and the __init__ and __del__ methods take care of constructing and destroying the object.
Should I be using DRCP/SessionPool to improve performance?
What if there are too many users waiting because all the connections in DRCP are taken?
Can I have a SessionPool per database (for the 100 databases, where each database can take at most 5 connections at a time for that read_only_user)?
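For comparison, a client-side session pool in cx_Oracle looks roughly like the sketch below; it is illustrative only (host, credentials, and pool sizes are placeholders), and one such pool could be created per target database, capped at the 5 sessions the read_only_profile allows. With a wait getmode, acquire() blocks when all sessions are busy instead of opening more.

import cx_Oracle

dsn = cx_Oracle.makedsn('my_host', 1521, service_name='my_db')
pool = cx_Oracle.SessionPool(user='read_only_user', password='secret', dsn=dsn,
                             min=1, max=5, increment=1, threaded=True,
                             getmode=cx_Oracle.SPOOL_ATTRVAL_WAIT)

def run_query(q):
    conn = pool.acquire()          # waits if all 5 sessions are in use
    try:
        cur = conn.cursor()
        cur.execute(q)
        return cur.fetchall()
    finally:
        pool.release(conn)         # hand the session back to the pool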
