I'm trying to develop a Job program which in turn calls a Service program. ChangeDetectorService does have a method named process, so I don't know why I get this error:
AttributeError: 'ChangeDetectorService' object has no attribute 'process'
The source code of "change_detector_job.py" and ChangeDetectorService is attached.
# change_detector_job.py
import argparse
import datetime
# import multiprocessing
import os
# import sys
# import traceback
import sys
import traceback
from common.utils.config import Config, log_start, log_end
from ReplicationService.module.ChangeDetectorService import ChangeDetectorService
args = None
main_config = None
main_logger = None
app_name = os.path.basename(sys.argv[0]).replace('.py', '') + '_main'
class ChangeDetectorJob:
def __init__(self):
self.config = None
self.logger = None
self.env = 'test'
def setup(self):
self.env = args.env
self.config = Config(args.config, app_name)
self.logger = self.config.get_logger()
def process(self):
self.setup()
t1 = log_start("ChangeDetector job started (UTC): %s" % (datetime.datetime.utcnow()), with_memory=True,
logger=self.logger)
service = ChangeDetectorService(args.config, app_name)
success = service.process()
log_end("ChangeDetector job completed (UTC): %s" % (datetime.datetime.utcnow()), success=success, start_time=t1,
logger=self.logger)
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser(description="Data ChangeDetector Job")
parser.add_argument("-e", "--env", default="test", choices=['test', 'dev', 'uat', 'prd'],
help="environment this job to be deployed (default %default)")
parser.add_argument("-c", "--config", default="../config/cmo_backend_local_config.ini",
help="config file (test only default %default)")
args = parser.parse_args()
ini_file = args.config
main_config = Config(ini_file, app_name)
main_logger = main_config.get_logger()
# multiprocessing.set_start_method('spawn')
main_logger.info("**** ChangeDetector job started (UTC): %s" % datetime.datetime.utcnow())
# service = ChangeDetectorService(args.config, app_name)
# success = service.process()
ChangeDetectorJob().process()
main_logger.info("**** ChangeDetector job completed (UTC): %s" % datetime.datetime.utcnow())
except Exception as error:
if main_logger is not None:
main_logger.error("Exception encountered: " + str(error))
main_logger.error(traceback.format_exc())
traceback.print_exc()
print("FINAL EXCEPTION OUT .... ")
# ChangeDetectorService.py
# import mysql.connector
from datetime import datetime
from datetime import timedelta
from common.database.mysql_manager import MysqlManager
from common.utils.config import Config, log_start, log_end
# mysql.connector.connect(host='localhost',port="3306",user='user',password='pass',database='dbname')
import mysql.connector
# rows_inserted = 0;
class ChangeDetectorService:
#
def __init__(self, ini_file, app_name):
self.config_object = Config(ini_file, app_name)
self.logger = self.config_object.get_logger()
self.mysql_manager = MysqlManager(self.config_object)
try:
def get_connection(self):
connection = mysql.connector.connect(user='user', password='zzzz',
host='host',
database='dbname'
)
return connection
def init_change_detector(self):
print("Change Detector started at " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
mysqlManager = MysqlManager
# conn = self.get_connection()
conn = self.mysqlManager.get_db_connection()
cursor = conn.cursor()
query = ('SELECT table_name, column_name, key_name '
'FROM cmooptimizationsvc.csj_chngdtct_tab_col '
'WHERE is_active=1 '
'ORDER BY table_name, column_name')
cursor.execute(query)
# get all records
records = cursor.fetchall()
for record in records:
self.process_col_tab_chg(record[0], record[1], record[2])
if conn.is_connected():
conn.close()
cursor.close()
def insert_change_log(self, table_name, key_name, attr_name, old_attr_value, new_attr_value):
# global rows_inserted;
insert_query = """INSERT INTO csj_shipment_changelog(table_name, key_name,
attr_name, old_attr_value,
new_attr_value)
VALUES (%s, %s, %s, %s, %s)"""
conn = self.get_connection()
cursor = conn.cursor()
tuple1 = (table_name, key_name, attr_name, old_attr_value, new_attr_value)
# tuples.append(tuple1)
cursor.execute(insert_query, tuple1)
# rows_inserted += 1
# print( rows_inserted );
# if (rows_inserted%10==0):
# cursor.executemany(insert_query, tuples)
conn.commit()
rows_inserted = 0
# tuples = []
# quit()
cursor.close()
conn.close()
# Look for Shipment, in past date
def find_past_shipment(self,
table_name,
key_name,
column_name,
before_date,
curr_key
):
saved_col_name = column_name
saved_key_name = key_name
conn = self.get_connection()
cursor = conn.cursor()
query = 'SELECT ' + saved_key_name + ' , ' + saved_col_name + ' FROM ' + table_name \
+ ' where rec_cre_dt_utc < ' + "'" + before_date.strftime('%Y-%m-%d 00:00:00') + "'" \
+ ' and shipment_num = ' + "'" + curr_key + "'" + ' order by rec_cre_dt_utc desc LIMIT 1'
cursor.execute(query)
records = cursor.fetchone()
cursor.close()
conn.close()
if records is not None:
past_attr_val = records[1]
return past_attr_val
else:
return 0
def process_col_tab_chg(self,table_name, column_name, key_name):
saved_key_name = key_name
saved_col_name = column_name
old_val = 0
ini_time_for_now = datetime.now()
date_before_1day = ini_time_for_now - timedelta(days=1)
query = 'SELECT ' + key_name + ' , ' + saved_col_name + ' , ' + ' rec_cre_dt_utc FROM ' + table_name \
+ ' where rec_cre_dt_utc >= ' + "'" + date_before_1day.strftime('%Y-%m-%d 00:00:00') + "'"
conn = self.get_connection()
cursor = conn.cursor()
cursor.execute(query)
for (key_name, column_name, rec_cre_dt_utc) in cursor:
curr_attr_val = column_name
curr_key_val = key_name
old_val = self.find_past_shipment(table_name,
saved_key_name,
saved_col_name,
rec_cre_dt_utc,
curr_key_val
)
if curr_attr_val != old_val \
and old_val != 0:
self.insert_change_log(table_name, key_name, saved_col_name, old_val, curr_attr_val )
else:
continue
cursor.close
conn.close()
def cleanup():
print("Change Detector stopped " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
def process():
start = datetime.now()
init_change_detector()
# cleanup()
# mysqlManager = MysqlManager
# conn = get_connection(self)
# conn = self.mysql_manager.get_db_connection()
# cursor = conn.cursor()
# query = ('SELECT table_name, column_name, key_name '
# 'FROM cmooptimizationsvc.csj_chngdtct_tab_col '
# 'WHERE is_active=1 '
# 'ORDER BY table_name, column_name')
# cursor.execute(query)
# # get all records
# records = cursor.fetchall()
# for record in records:
# self.process_col_tab_chg(record[0], record[1], record[2])
# if conn.is_connected():
# conn.close()
# cursor.close()
end = datetime.now()
time_diff = (end - start)
execution_time = time_diff.total_seconds()
print("Elapsed time(secs): " + str(execution_time))
except Exception as e:
print("Exception " + e)
finally:
cleanup()
# if __name__ == "__main__":
# main()
You get an AttributeError when you try to use an attribute (a method or property) that does not exist on your object (class). In this case, the error states that the ChangeDetectorService class is the one in question:
AttributeError: 'ChangeDetectorService' object has no attribute 'process'
In your error message, right above that line, you should see the line of code that caused the error:
success = service.process()
service was assigned the result of this expression:
service = ChangeDetectorService(args.config, app_name)
So service is of type (class) ChangeDetectorService. Inspecting its source code, we see this:
class ChangeDetectorService:
#
def __init__(self, ini_file, app_name):
self.config_object = Config(ini_file, app_name)
self.logger = self.config_object.get_logger()
self.mysql_manager = MysqlManager(self.config_object)
try:
...
try has the same indentation as class, so the try statement has closed the ChangeDetectorService class definition. This means your ChangeDetectorService class has only the constructor (__init__) defined; it has no other methods, so it doesn't have a process method either. That's why you get the error. If you want ChangeDetectorService to have the process method, make sure try (or anything else) does not interfere with the method definitions of the class.
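Here is a minimal, self-contained sketch of the problem and the fix (BrokenService and FixedService are placeholder names for illustration):
# Broken: this `try:` is dedented to the same column as `class`,
# so it ends the class body. `process` is then defined inside the
# try block at module level, not as a method of the class.
class BrokenService:
    def __init__(self):
        self.name = "broken"
try:
    def process(self):
        return True
except Exception:
    pass

# BrokenService().process()
# -> AttributeError: 'BrokenService' object has no attribute 'process'

# Fixed: keep every def indented inside the class body, and move the
# try/except inside the method that actually needs it.
class FixedService:
    def __init__(self):
        self.name = "fixed"

    def process(self):
        try:
            return True
        except Exception as error:
            print("Exception encountered:", error)
            return False

print(FixedService().process())  # True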
Related
# missing imports added so the snippet runs; pyrogram is assumed,
# since Client("my_account") matches its API
import traceback
from datetime import datetime
from time import sleep
import mysql.connector
import tzlocal
from pyrogram import Client
def check_user(user_id):
conn = mysql.connector.connect(host='localhost', database='online', user='root1', password='rootRRR111_')
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS online(id INT, last_online_date TEXT)')
conn.commit()
select = "SELECT * FROM online WHERE id = %s LIMIT 0, 1"
result = cur.execute(select, (user_id,))
if result is None:
insert = ('INSERT INTO online (id, last_online_date) VALUES (%s, %s)')
cur.reset()
cur.execute(insert, (user_id, online_time))
conn.commit()
def update_online_status(user_id, online_time):
conn = mysql.connector.connect(host='localhost', database='online', user='root1', password='rootRRR111_')
cursor = conn.cursor()
select = 'SELECT last_online_date FROM online WHERE id = %s'
result = cursor.execute(select, (user_id,))
old_online = result
online_time = f'{old_online},{online_time}'
cursor.reset()
cursor.execute('UPDATE online SET last_online_date = %s WHERE id = %s', (online_time, user_id))
conn.commit()
app = Client("my_account")
app.start()
while True:
try:
with open('ids.ini', 'r') as file:
users = file.read().splitlines()
for user in users:
result = app.get_users(user)
user_id = result['id']
if result['status'] == 'offline':
unix_timestamp = float(result['last_online_date'])
local_timezone = tzlocal.get_localzone()
local_time = datetime.fromtimestamp(unix_timestamp, local_timezone)
online_time = local_time.strftime("%Y/%m/%d %H:%M:%S")
elif result['status'] == 'online':
now = datetime.now()
online_time = now.strftime("%Y/%m/%d %H:%M:%S")
check_user(user_id)
update_online_status(user_id, online_time)
# sleep(300)
except Exception:
traceback.print_exc()
continue
app.stop()
I am writing a program that reads the online status of a user in Telegram. But instead of the row for an existing user being updated, a huge number of identical rows appear in the database.
Example:
[screenshot: table with repeated rows]
When I try to fix something, I get a lot of errors, such as:
mysql.connector.errors.ProgrammingError: Not all parameters were used in the SQL statement
mysql.connector.errors.InternalError: Unread result found
and others. Please help!
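One likely cause, offered as an assumption rather than a verified fix: in mysql.connector, cursor.execute() returns None rather than the rows, so result = cur.execute(select, (user_id,)) is always None, if result is None: always inserts a new row, and the never-fetched SELECT result then raises "Unread result found" on the next execute. A minimal sketch of check_user with the result actually fetched (same table and credentials as the question):
def check_user(user_id, online_time):
    # online_time is now a parameter; in the original it was an undefined name
    conn = mysql.connector.connect(host='localhost', database='online',
                                   user='root1', password='rootRRR111_')
    cur = conn.cursor()
    cur.execute('CREATE TABLE IF NOT EXISTS online(id INT, last_online_date TEXT)')
    conn.commit()
    cur.execute('SELECT * FROM online WHERE id = %s LIMIT 0, 1', (user_id,))
    row = cur.fetchone()  # consume the result; avoids "Unread result found"
    if row is None:       # only insert when the user really is absent
        cur.execute('INSERT INTO online (id, last_online_date) VALUES (%s, %s)',
                    (user_id, online_time))
        conn.commit()
    cur.close()
    conn.close()
The call site in the loop would then become check_user(user_id, online_time).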
I used the following code to get items from a sqlite3 database:
def get(self, item_name, attrs=True): #get attr from item and return as dict, if attr==True: get all items
conn = self.conn
if attrs: #all
return conn.execute('SELECT * FROM %s WHERE __item_key__ = "%s";' %(self.table, item_name))
else:
command = 'SELECT '
for attr in attrs:
command+= attr+' '
command+='FROM %s WHERE __item_key__ = "%s";' %(self.table, item_name)
return conn.execute(command)
print(get('name1'))
the code print the following:
<sqlite3.Cursor at 0x213d4c0f490>
instead of the values from the table.
When I try this:
get('name1')[0]
it returns:
TypeError: 'sqlite3.Cursor' object is not subscriptable
Full code:
import sqlite3 as sql
class db:
'''
This class turns dicts into sqlite databases
and output sqlite databases as dicts
'''
def __init__(self, db_name, table_name): #open or create a database
conn = sql.connect(db_name).cursor()
self.table = table_name
self.conn = conn
def create(self, table_name, cols):
command = "CREATE TABLE %s(_item_key_ TEXT," % table_name
for key, value in cols.items():
command+="%s %s," %(key, value)
command=command[:-1]
command+=");"
self.conn.execute(command)
self.table = table_name
def get(self, item_name, attrs=True): #get attr from item and return as dict, if attr==True: get all items
conn = self.conn
if attrs: #all
return conn.execute('SELECT * FROM %s WHERE _item_key_ = "%s";' %(self.table, item_name))
else:
command = 'SELECT '
for attr in attrs:
if type(attr) == str:
attr = '"'+attr+'"'
command+= str(attr)+' '
command+='FROM %s WHERE _item_key_ = "%s";' %(self.table, item_name)
return conn.execute(command).fetchall()
def change(self, item_name, attrs): #change certain attrs of item
command = 'UPDATE %s SET ' %self.table
for key, value in attrs:
command += '%s=%s,'%(key, value)
command = command[:-1]+' WHERE _item_name_ = "'+item_name+'";'
def add(self, item_name, attrs): #add an item with attrs to database
command = 'INSERT INTO %s VALUES ("%s",' %(self.table, item_name)
for attr in attrs:
if type(attr) == str:
attr = '"'+attr+'"'
command += str(attr)+','
command = command[:-1]+');'
#print(command)
self.conn.execute(command)
def close(self): #close database
self.conn.close()
The table is supposed to look like the following (although I never saw it):
__item_name__ A B
---------------------------
'name1' 123 'hi'
'name2' 344 'bye'
Does anyone know how this works?
Edit: I realized there are some bugs in create() and add(). However, after fixing them, get() still prints the same thing.
Your get() is returning the cursor object itself because no results were ever fetched from it. If you want the results, you need to add these lines:
cur = conn.cursor() # create a cursor to your connection
cur.execute(your_query) # execute your query
results = cur.fetchall() # fetch the results
Also don't forget to iterate over the results after results = cur.fetchall():
for row in results:
A = row[0]
B = row[1]
You should also revise the code to call self.conn.commit() after each self.conn.execute(...):
self.conn.execute(command)
self.conn.commit()  # <-- new line, added after .execute()
self.table = table_name
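Putting both suggestions together, the asker's get() might look like the sketch below. The ? parameter binding for item_name is my addition (table and column names cannot be bound in sqlite3, so they stay interpolated), and attrs is compared with `is True` so that a list of column names takes the else branch:
def get(self, item_name, attrs=True):
    # return fetched rows instead of the raw cursor
    if attrs is True:  # all columns
        cur = self.conn.execute(
            'SELECT * FROM %s WHERE _item_key_ = ?;' % self.table,
            (item_name,))
    else:
        cols = ', '.join(attrs)
        cur = self.conn.execute(
            'SELECT %s FROM %s WHERE _item_key_ = ?;' % (cols, self.table),
            (item_name,))
    return cur.fetchall()  # the missing step: fetch before returning
The commit() advice applies to the writing methods (create, add, change); a SELECT needs no commit.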
I am trying to query a dataset in an S3 bucket, using an Athena query run from a Python script with boto3.
I am using start_query_execution() to run my query, and this executes perfectly. Next, to get access to the result of the query in my Python script, I use get_query_results().
If I run these two functions separately (one script after another), I get the data output by the Athena query. I want them in a single script: fetch the data from S3 and start manipulating the query output in Python code.
Since the query is asynchronous in nature, I am using a pool so that the script waits until the Athena query has executed. But if I run the code below, the status shown for the query is still RUNNING.
I think I am making some silly mistake, because if I run the steps separately I get the desired output. In short: I want to query the data in S3 using Athena, then do some processing on the fetched data in the Python script. Please help.
Here is the sample code:
#!/usr/bin/env python3
import boto3
import time
from functools import partial
from multiprocessing.dummy import Pool
pool = Pool(processes=1)
# def async_function(name):
# time.sleep(1)
# return name
#
# def callback_function(name, age):
# print(name, age)
def run_query(query, database, s3_output):
client = boto3.client('athena')
response = client.start_query_execution(
QueryString=query,
QueryExecutionContext={
'Database': database
},
ResultConfiguration={
'OutputLocation': s3_output,
}
)
print('Execution ID: ' + response['QueryExecutionId'])
return response
def show_res(res, q):
client = boto3.client('athena')
print("Executing query: %s" % (q))
print('Execution ID: ' + res['QueryExecutionId'])
# response = client.stop_query_execution(
# QueryExecutionId=res['QueryExecutionId']
# )
response = client.get_query_results(
# QueryExecutionId='f3642735-d9d9-4246-ade4-7453eaed0717'
QueryExecutionId=res['QueryExecutionId']
)
print("Executing query: %s" % (q))
print('Execution ID: ' + res['QueryExecutionId'])
print('Response: ' + ', '.join(str(x) for x in response['ResultSet']['Rows']))
return response
# for age, name in enumerate(['jack', 'jill', 'james']):
# new_callback_function = partial(callback_function, age=age)
# pool.apply_async(
# async_function,
# args=[name],
# callback=new_callback_function
# )
#Athena configuration
s3_input = 's3://dummy/'
s3_ouput = 's3://dummy/results/'
database = 'dummy'
table = 'dummy'
#Query definitions
query_1 = "SELECT * FROM %s.%s where sex = 'F';" % (database, table)
query_2 = "SELECT * FROM %s.%s where age > 30;" % (database, table)
#Execute all queries
queries = [ query_1 ]
for q in queries:
print("Executing query: %s" % (q))
new_callback_function = partial(show_res, q=q)
pool.apply_async(
run_query,
args=[q, database, s3_ouput],
callback=new_callback_function
)
pool.close()
pool.join()
Instead of using apply_async, try pool.map, which blocks until all the results are ready (a minimal sketch; func would be your query-running function and cores the number of workers):
import pandas as pd
from multiprocessing.dummy import Pool  # thread pool, as in the question

pool = Pool(cores)
df = pd.concat(pool.map(func, [value_1, ..., value_n]))
pool.close()
pool.join()
Here is my code; it works well and I expect you can reuse some lines. Basically, I run multiple queries in Athena at the "same" time (I parallelized over the array named endpoints) and store each result in a row of a Pandas dataframe. You can also fetch the data for each query, and I added a status print so you can see the status of each query. Remember that Athena has a limit on the number of queries you can run concurrently.
import time
import boto3
import pandas as pd
from multiprocessing import Pool
class QueryAthena:
def __init__(self, endpoint, init_date, end_date):
self.s3_input = 's3://my_bucket/input'
self.s3_output = 's3://my_bucket/output'
self.database = 'datalake'
self.table = 'my_table'
self.endpoint = "'" + endpoint + "'"
self.init_date = "'" + init_date + "'"
self.end_date = "'" + end_date + "'"
self.year = self.init_date[1:5]
self.month = self.init_date[6:8]
self.day = self.init_date[9:11]
self.region_name = 'us-east-1'
self.aws_access_key_id = "my_id"
self.aws_secret_access_key = "my_key"
def load_conf(self, q):
self.client = boto3.client('athena',
region_name = self.region_name,
aws_access_key_id = self.aws_access_key_id,
aws_secret_access_key= self.aws_secret_access_key)
try:
response = self.client.start_query_execution(
QueryString = q,
QueryExecutionContext={
'Database': self.database
},
ResultConfiguration={
'OutputLocation': self.s3_output,
}
)
print('Execution ID: ' + response['QueryExecutionId'])
except Exception as e:
print(e)
return response
def query(self):
self.query = "SELECT count(*) as total_requests, SUM(CASE WHEN count_endpoints > 1 THEN 1 ELSE 0 END) as total_repeated, AVG(CASE WHEN count_endpoints > 1 THEN count_endpoints END) as TRAFFIC_QUALITY FROM (SELECT * from (SELECT domain, size, device_id, ip, array_join(array_agg(distinct endpoint), ',') as endpoints_all, count(distinct endpoint) as count_endpoints FROM %s.%s WHERE year=%s and month=%s and day=%s and ts between timestamp %s and timestamp %s and status = '2' GROUP BY domain, size, device_id, ip) l1 where endpoints_all LIKE '%%' || %s || '%%') l2;" % (self.database, self.table, self.year, self.month, self.day, self.init_date, self.end_date, self.endpoint)
def run_query(self):
self.query()
queries = [self.query]
for q in queries:
#print("Executing query: %s" % (q))
res = self.load_conf(q)
try:
query_status = None
while query_status == 'QUEUED' or query_status == 'RUNNING' or query_status is None:
query_status = self.client.get_query_execution(QueryExecutionId=res["QueryExecutionId"])['QueryExecution']['Status']['State']
print(query_status + " " + self.endpoint)
if query_status == 'FAILED' or query_status == 'CANCELLED':
raise Exception('Athena query with the string "{}" failed or was cancelled'.format(q))
time.sleep(20)
print("Query %s finished." % (self.endpoint))
response = self.client.get_query_results(QueryExecutionId=res['QueryExecutionId'])
df = self.results_to_df(response)
df = pd.DataFrame(df)
df["endpoint"] = str(self.endpoint)
try:
df["percentaje_repeated"] = str(int(df["total_repeated"].iloc[0]) * 100 / int(df["total_requests"].iloc[0]))
except Exception as e:
print(self.endpoint + " here")
df["date"] = str(self.init_date + "-" + self.end_date)
return df
except Exception as e:
print(str(e) + " " + self.endpoint)
print(df["total_repeated"].iloc[0])
print(df["total_requests"].iloc[0])
def results_to_df(self, results):
columns = [
col['Label']
for col in results['ResultSet']['ResultSetMetadata']['ColumnInfo']
]
listed_results = []
for res in results['ResultSet']['Rows'][1:]:
values = []
for field in res['Data']:
try:
values.append(list(field.values())[0])
except:
values.append(list(' '))
listed_results.append(
dict(zip(columns, values))
)
return listed_results
def func(end):
qa = QueryAthena(end, "2018-10-09 00:00:00", "2018-10-09 05:59:59")
result = qa.run_query()
return result
endpoints = ["677SRI149821","V14509674","1426R"]
if __name__ == '__main__':
pool = Pool(15)
df = pd.concat(pool.map(func, endpoints))
pool.close()
pool.join()
Using MySQL and Python, I have created a table within PyQt that lets a user frequently update and track their meeting sessions. The only problem is that I do not know how to code a button that lets me delete an individual row of data.
My table looks like this:
[screenshot of the table]
What would be the simplest way to create a button, appearing beside each row of data when new data is inserted, that lets the user delete that entire row?
Edit:
def deleteRows(self):
items = self.treeWidget.selectedItems()
current = self.treeWidget.currentItem()
for item in items:
sip.delete(current)
self.UpdateTree()
Edit 2:
import mysql.connector
from mysql.connector import errorcode
from datetime import datetime
class DatabaseUtility:
def __init__(self, database, tableName):
self.db = database
self.tableName = tableName
f = open('C:\\Users\\Vlad\\Desktop\\Myfiles\\EnterprisePassport\\password.txt', 'r')
p = f.read(); f.close();
self.cnx = mysql.connector.connect(user = 'root',
password = p,
host = '127.0.0.1')
self.cursor = self.cnx.cursor()
self.ConnectToDatabase()
self.CreateTable()
def ConnectToDatabase(self):
try:
self.cnx.database = self.db
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
self.CreateDatabase()
self.cnx.database = self.db
else:
print(err.msg)
def CreateDatabase(self):
try:
self.RunCommand("CREATE DATABASE %s DEFAULT CHARACTER SET 'utf8';" %self.db)
except mysql.connector.Error as err:
print("Failed creating database: {}".format(err))
def CreateTable(self):
cmd = (" CREATE TABLE IF NOT EXISTS " + self.tableName + " ("
" `ID` int(5) NOT NULL AUTO_INCREMENT,"
" `date` date NOT NULL,"
" `time` time NOT NULL,"
" `message` char(50) NOT NULL,"
" PRIMARY KEY (`ID`)"
") ENGINE=InnoDB;")
self.RunCommand(cmd)
def GetTable(self):
self.CreateTable()
return self.RunCommand("SELECT * FROM %s;" % self.tableName)
def GetColumns(self):
return self.RunCommand("SHOW COLUMNS FROM %s;" % self.tableName)
def RunCommand(self, cmd):
print ("RUNNING COMMAND: " + cmd)
try:
self.cursor.execute(cmd)
except mysql.connector.Error as err:
print ('ERROR MESSAGE: ' + str(err.msg))
print ('WITH ' + cmd)
try:
msg = self.cursor.fetchall()
except:
msg = self.cursor.fetchone()
return msg
def AddEntryToTable(self, message):
date1 = datetime.now().strftime("%y-%m-%d")
time = datetime.now().strftime("%H:%M")
cmd = " INSERT INTO " + self.tableName + " (date, time, message)"
cmd += " VALUES ('%s', '%s', '%s' );" % (date1, time, message)
self.RunCommand(cmd)
def __del__(self):
self.cnx.commit()
self.cursor.close()
self.cnx.close()
if __name__ == '__main__':
db = 'enterprisepassport'
tableName = 'session'
dbu = DatabaseUtility(db, tableName)
Just add a button at the top that deletes the selected rows. It looks like you're using a QTreeWidget:
def __init__(...)
...
self.deleteButton.clicked.connect(self.deleteRows)
def deleteRows(self):
items = self.tree.selectedItems()
for item in items:
# Code to delete items in database
self.refreshTable()
#Randomator, why not explain it as if you're explaining to a beginner? Even I don't understand what you're suggesting.
The asker posted his code; make the corrections in his code so he can easily run the file.
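To make that concrete, here is a sketch of the database part. It assumes column 0 of each top-level item displays the table's ID primary key, and that the DatabaseUtility instance from the question is available as self.dbu (both are my assumptions, not code from the asker):
def deleteRows(self):
    items = self.tree.selectedItems()
    for item in items:
        row_id = item.text(0)  # assumes column 0 shows the `ID` primary key
        # reuse the question's RunCommand wrapper to delete from MySQL
        self.dbu.RunCommand("DELETE FROM %s WHERE ID = %s;" % (self.dbu.tableName, row_id))
        # remove the row from the QTreeWidget itself
        self.tree.takeTopLevelItem(self.tree.indexOfTopLevelItem(item))
    self.refreshTable()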
I have the class below to handle my Postgres DB, and I'm running into trouble with multiple inserts where foreign keys are involved. If I insert first into a parent table and then into a child table, I get a foreign-key violation error, although I think I have all the deferrable settings in place. (Autocommit is not enabled.)
The constraint on the foreign key is set as follows:
CONSTRAINT tblorganisations_status_tblorganisations_fkey FOREIGN KEY (org_id)
REFERENCES organisations.tblorganisations (org_id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY IMMEDIATE;
The code that calls the class:
postgres = Postgresql("organisations")
r = postgres.insert(self.db_table, data, return_cols='org_id')
self.org_id = r['org_id']
postgres.insert('tblorganisations_status',
{'org_id': self.org_id,
'org_status_id': 'NEW_CGM'})
postgres.commit()
And the class:
class Postgresql():
conn = None
cur = None
last_result = None
def __init__(self, schema=None):
reload(sys) # Reload does the trick!
sys.setdefaultencoding("utf-8")
self.log = Log()
self.connect()
if schema is not None:
self.schema = schema
self.set_default_schema(schema)
def connection_string(self):
return 'host=%s port=%s dbname=%s user=%s password=%s' % \
(get_config('DATABASE', 'host'),
get_config('DATABASE', 'port'),
get_config('DATABASE', 'dbname'),
get_config('DATABASE', 'user'),
get_config('DATABASE', 'password'))
def connect(self):
try:
self.conn = psycopg2.connect(self.connection_string())
self.conn.set_session(isolation_level='read uncommitted', deferrable=True)
self.cur = self.conn.cursor(cursor_factory=RealDictCursor)
except Exception, e:
self.log.error(e.message)
raise
def set_default_schema(self, schema):
try:
self.cur.execute("SET search_path TO %s,public;", (schema, ))
except Exception, e:
self.log.error(e.message)
raise
def commit(self):
self.conn.commit()
self.close()
def rollback(self):
self.conn.rollback()
self.close()
def close(self):
self.cur.close()
self.conn.close()
def insert(self, table, data, return_cols=None, **kwargs):
data = self.cleanup_data(table, data)
fields = data.keys()
if self.schema is not None:
table = self.schema + '.' + table
sql = "INSERT INTO " + table + " ("
sql += ",".join(fields) + ") VALUES (" + ",".join(["%s"]*len(fields)) + ")"
if return_cols:
sql += " RETURNING " + return_cols
sql += ";"
if 'debug' in kwargs:
raise Exception(sql % tuple(data.values()))
try:
self.log.event('POSTGRES: ' + (sql % tuple(data.values())))
self.cur.execute(sql, data.values())
if return_cols:
result = self.cur.fetchone()
return result
except Exception, e:
self.log.error(e.message)
self.conn.rollback()
self.close()
raise
I figured it out myself. Apparently psycopg2 behaved this way because I declared the connection and cursor as class variables, outside __init__, so they were shared across instances rather than created per instance.
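For anyone hitting the same thing, a minimal sketch of the pitfall; nothing here is psycopg2-specific, since class attributes are shared by every instance of the class:
class SharedState:
    conn = None  # class attribute: a single slot shared by all instances

class PerInstanceState:
    def __init__(self):
        self.conn = None  # instance attribute: one slot per object

a, b = SharedState(), SharedState()
SharedState.conn = "connection-A"
print(a.conn, b.conn)  # connection-A connection-A -> both instances see it

c, d = PerInstanceState(), PerInstanceState()
c.conn = "connection-A"
print(c.conn, d.conn)  # connection-A None -> independent state
Separately, note that DEFERRABLE INITIALLY IMMEDIATE still checks the foreign key at the end of each statement; the check only moves to commit time if the transaction defers it explicitly, e.g. by executing SET CONSTRAINTS tblorganisations_status_tblorganisations_fkey DEFERRED on the cursor before the inserts.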