Writing to db from python durables engine - python

I have been trying to write business rules using durables engine in python. The source code is as below :
import pymysql
import pandas as pd
import datetime
from durable.lang import *
from tickets import *
con = pymysql.connect(host='localhost', user='XXXX', password='XXXX', database='nms')
#df_30min = pd.read_sql('select * from recentalarms where dbinserttime > now() - interval 30 minute', con)
df = pd.read_sql('select * from recentalarms', con)
df_tt = pd.read_sql('select tt_id from ticket', con)
format = "%Y%m%d%H%M%S"
time_before_5min = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(minutes=5), format)
time_before_15min = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(minutes=15), format)
with ruleset('nms'):
#when_all((m.status == 'open') & ( m.instance >= 3) & (m.tt_id == None) & (m.readflag == None ))
def rule_action1(c):
tt_id = Ticket.create(domain_prefix,entity=entity,start_time=down_since,severity=severity)
cursor = con.cursor()
sql_query = f"UPDATE recentalarm SET read_flag = 'true' , tt_id = '{tt_id}' WHERE id = '{alarm_id}';"
cursor.execute(sql_query)
con.commit()
print(f"1. Create ticket Run Completed")
#when_all((m.status == 'close') & ( m.uptime <= time_before_15min) & (m.uptime != None))
def rule_action2(c):
Ticket.resolve(tt_id,time_stmp)
cursor = con.cursor()
sql_query = f"UPDATE recentalarm SET read_flag = 'true' , tt_id = '{tt_id}' WHERE id = '{alarm_id}';"
cursor.execute(sql_query)
con.commit()
print(f"2. Resolve ticket Run Completed")
############some more rules
for i, row in df.iterrows():
stat = row.status.lower()
olt = row.oltname
result = df.iloc[0:i+1]['oltname'] == f'{row.oltname}'
instance_count = int(result.value_counts()[True])
ONTPort = row.ONTParentPort
alarm_id = row.id
entity = row.entity
severity = row.severity
up_since = str(row.eventclosetime)
down_since = str(row.eventime)
try:
post('gponnms', { 'status' : stat , 'instance' : instance_count , 'readflag' : row.read_flag , 'uptime' : up_since , 'downtime' : down_since})
except:
pass
con.close()
I have 2 queries.
In the rule_action I require to update the field back in the sql table for read_flag. This is being done through sql query. Is there another way to update the database other than writing sql query in the action function
My second question is in the last there is post message. I need to understand if this is really required.But if I delete this I get error message. The documentation on durables does not give much insight on it.

Related

MySQL: I don't understand why this is happening?

import sqlite3
import traceback
from time import sleep
import mysql.connector
def check_user(user_id):
conn = mysql.connector.connect(host='localhost', database='online', user='root1', password='rootRRR111_')
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS online(id INT, last_online_date TEXT)')
conn.commit()
select = "SELECT * FROM online WHERE id = %s LIMIT 0, 1"
result = cur.execute(select, (user_id,))
if result is None:
insert = ('INSERT INTO online (id, last_online_date) VALUES (%s, %s)')
cur.reset()
cur.execute(insert, (user_id, online_time))
conn.commit()
def update_online_status(user_id, online_time):
conn = mysql.connector.connect(host='localhost', database='online', user='root1', password='rootRRR111_')
cursor = conn.cursor()
select = 'SELECT last_online_date FROM online WHERE id = %s'
result = cursor.execute(select, (user_id,))
old_online = result
online_time = f'{old_online},{online_time}'
cursor.reset()
cursor.execute('UPDATE online SET last_online_date = %s WHERE id = %s', (online_time, user_id))
conn.commit()
app = Client("my_account")
app.start()
while True:
try:
with open('ids.ini', 'r') as file:
users = file.read().splitlines()
for user in users:
result = app.get_users(user)
user_id = result['id']
if result['status'] == 'offline':
unix_timestamp = float(result['last_online_date'])
local_timezone = tzlocal.get_localzone()
local_time = datetime.fromtimestamp(unix_timestamp, local_timezone)
online_time = local_time.strftime("%Y/%m/%d %H:%M:%S")
elif result['status'] == 'online':
now = datetime.now()
online_time = now.strftime("%Y/%m/%d %H:%M:%S")
check_user(user_id)
update_online_status(user_id, online_time)
# sleep(300)
except Exception:
traceback.print_exc()
continue
app.stop()
I am writing a program that would read the online status of a user in telegram.
Instead of writing online to an existing user, a huge number of identical rows appear in the database.
Example:
Table with repetitions
When I try to fix something, there are a lot of errors.
mysql.connector.errors.programmingerror: not all parameters were used in the sql statement
mysql.connector.errors.internalerror: unread result found
and other...
Pls help!!

Can't merge table using cx_oracle in python

I'm trying to merge two tables by using cx_oracle.
The following sql language works at sql developer tools:
MERGE INTO ASBODS_SDDM_SCHEMA.EULA_DATA E
USING (SELECT * FROM ASBODS_SDDM_SCHEMA.EULA_STG_DATA) ES
ON (E.MASTER_METER_NBR = ES.MASTER_METER_NBR AND E.MASTER_ACCOUNT_NBR = ES.MASTER_ACCOUNT_NBR AND
E.METER_NBR = ES.METER_NBR AND E.ACCOUNT_NBR = ES.ACCOUNT_NBR AND E.EULA_DATA_TYPE_CD = ES.EULA_STG_DATA_TYPE_CD)
WHEN MATCHED THEN
UPDATE SET
E.ACCEPTED_DT = ES.ACCEPTED_DT ,
E.DECLN_OF_CONSENT_FLG = ES.DECLN_OF_CONSENT_FLG ,
E.EULA_VERSION_NBR = ES.EULA_VERSION_NBR,
E.SOFTWARE_VERSION_NBR = ES.SOFTWARE_VERSION_NBR,
E.COMPANY_NM = ES.COMPANY_NM ,
E.MACH_ID = ES.MACH_ID ,
E.LOAD_DT = ES.LOAD_DT
WHERE
E.MASTER_METER_NBR = ES.MASTER_METER_NBR AND E.MASTER_ACCOUNT_NBR = ES.MASTER_ACCOUNT_NBR AND
E.METER_NBR = ES.METER_NBR AND E.ACCOUNT_NBR = ES.ACCOUNT_NBR AND E.EULA_DATA_TYPE_CD = ES.EULA_STG_DATA_TYPE_CD
But when I insert this sql into cx_Orcale:
import cx_Oracle
try:
con = cx_Oracle.connect('username/pwd#server')
cursor = con.cursor()
cursor.execute(''' MERGE INTO ASBODS_SDDM_SCHEMA.EULA_DATA E
USING (SELECT * FROM ASBODS_SDDM_SCHEMA.EULA_STG_DATA) ES
ON (E.MASTER_METER_NBR = ES.MASTER_METER_NBR AND E.MASTER_ACCOUNT_NBR = ES.MASTER_ACCOUNT_NBR AND
E.METER_NBR = ES.METER_NBR AND E.ACCOUNT_NBR = ES.ACCOUNT_NBR AND E.EULA_DATA_TYPE_CD = ES.EULA_STG_DATA_TYPE_CD)
WHEN MATCHED THEN
UPDATE SET
E.ACCEPTED_DT = ES.ACCEPTED_DT ,
E.DECLN_OF_CONSENT_FLG = ES.DECLN_OF_CONSENT_FLG ,
E.EULA_VERSION_NBR = ES.EULA_VERSION_NBR,
E.SOFTWARE_VERSION_NBR = ES.SOFTWARE_VERSION_NBR,
E.COMPANY_NM = ES.COMPANY_NM ,
E.MACH_ID = ES.MACH_ID ,
E.LOAD_DT = ES.LOAD_DT
WHERE
E.MASTER_METER_NBR = ES.MASTER_METER_NBR AND E.MASTER_ACCOUNT_NBR = ES.MASTER_ACCOUNT_NBR AND
E.METER_NBR = ES.METER_NBR AND E.ACCOUNT_NBR = ES.ACCOUNT_NBR AND E.EULA_DATA_TYPE_CD = ES.EULA_STG_DATA_TYPE_CD
''')
con.commit()
except cx_Oracle.DatabaseError as e:
print('There is problem with sql', e)
finally:
if cursor:
con.commit()
cursor.close()
if con:
con.close()
It has long time no feedback in linux, seems it is running all the time,and not work.
Any friend can help?
Did you commit in the tool session? Maybe the tables are locked.

Python return error when trying to send a MySQL query that contains MySQL variables

I am trying to retrieve data from a MySQL database by sending a MySQL query using Python.
When I send the MySQL Query in MySQL workbench, it runs perfectly fine.
When I try the same using Python (in a Jupyter Notebook), it returns an error.
Python Code:
import pymysql
import pandas as pd
def run_mysql(SQLQ):
conn = pymysql.connect(host='IP address', user='username', passwd='password', db='database name')
df = pd.read_sql(SQLQ, conn)
conn.close()
return df
mysql_query = '''set #Yesterday = curdate() -1 ;
SELECT * FROM mt4_daily
where date(time) = date(#Yesterday)
'''
df = run_mysql(mysql_query)
display(df)
Error:
DatabaseError: Execution failed on sql 'set #Yesterday = curdate() -1 ;
SELECT * FROM mt4_daily
where date(time) = date(#Yesterday)
': (1064, "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'SELECT * FROM mt4_daily\n where date(time) = date(#Yesterday)' at line 2")
If I remove the variable in the MySQL Query it runs fine:
import pymysql
import pandas as pd
def run_mysql(SQLQ):
conn = pymysql.connect(host='IP address', user='username', passwd='password', db='database name')
df = pd.read_sql(SQLQ, conn)
conn.close()
return df
mysqlquery = '''SELECT * FROM mt4_daily
where date(time) = date(curdate() -1)
'''
df = run_mysql(mysqlquery)
display(df)
What am I doing wrong?
Final Solution:
Thank you Prashant Sharma for the solution.
I tweaked it a bit so it returns a pandas dataframe and allows for a list of variables to be passed prior to the Select query.
Here is the code:
import pymysql
import pandas as pd
def run_mysql(SQLQ,MySQL_Variable_List=''):
try:
conn = pymysql.connect(host='Server IP', user='UserName', passwd='Password', db='Database name')
cursor = conn.cursor()
for i in MySQL_Variable_List:
cursor.execute(i)
df = pd.read_sql(SQLQ, conn)
except Exception as e:
print(str(e))
finally:
cursor.close()
conn.close()
return df
MySQL_Variable_List = ["set #Yesterday = curdate() -1 ;"]
SQLQ = "SELECT * FROM mt4_daily where date(time) = date(#Yesterday) limit 10"
df1 = run_mysql(MySQL_Variable_List,SQLQ)
display(df1)
The below code does the job, have tested it. You might have to rectify some indentation issue incase if something pops up.
import pymysql
def run_mysql(query1, query2):
try:
conn = pymysql.connect(host='localhost', user='root', passwd='', db='data_new_es')
cursor = conn.cursor()
cursor.execute(query1)
cursor.execute(query2)
row = cursor.fetchone()
print(row)
except Exception as e:
print(str(e))
finally:
cursor.close()
conn.close()
mysqlquery1 = "set #Yesterday = curdate() -1 ;"
mysqlquery2 = "select * from abcde where date(accrual_date) =
date(#Yesterday)"
df1 = run_mysql(mysqlquery1,mysqlquery2)
Try to run them as two separate queries.
mysql_query = '''set #Yesterday = curdate() -1 ;'''
df = run_mysql(mysql_query)
mysql_query = '''SELECT * FROM mt4_daily
where date(time) = date(#Yesterday)
'''
df = run_mysql(mysql_query)
I think because there are two statements and this function only allows to read and execute one at the same time. According to pandas read_sql documentetation you can use read_sql "params" keyword parameter to solve this problem and move #Yesterday value calculation to python side:
import pymysql
import pandas as pd
from datetime import datetime, timedelta
def run_mysql(SQLQ, params):
conn = pymysql.connect(host='IP address', user='username', passwd='password', db='database name')
df = pd.read_sql(SQLQ, conn, params=params)
conn.close()
return df
mysqlquery = '''SELECT * FROM mt4_daily
where date(time) = date(%(yesterday)s)
'''
yesterday = datetime.date(datetime.now())- timedelta(days=1)
params = {'yesterday': yesterday}
df = run_mysql(mysqlquery, params)
display(df)
I could not execute the code, but the idea is this.

Updating results from a mysql-connector fetchall

I'm trying to select certain records from the civicrm_address table and update the geocode columns. I use fetchall to retrieve the rows then, within the same loop, I try to update with the results of the geocoder API, passing the civicrm_address.id value in the update_sql statement.
The rowcount after the attempted update and commit is always -1 so I am assuming it failed for some reason but I have yet to figure out why.
import geocoder
import mysql.connector
mydb = mysql.connector.connect(
[redacted]
)
mycursor = mydb.cursor(dictionary=True)
update_cursor = mydb.cursor()
sql = """
select
a.id
, street_address
, city
, abbreviation
from
civicrm_address a
, civicrm_state_province b
where
location_type_id = 6
and
a.state_province_id = b.id
and
street_address is not null
and
city is not null
limit 5
"""
mycursor.execute(sql)
rows = mycursor.fetchall()
print(mycursor.rowcount, "records selected")
for row in rows:
address_id = int(row["id"])
street_address = str(row["street_address"])
city = str(row["city"])
state = str(row["abbreviation"])
myaddress = street_address + " " + city + ", " + state
g = geocoder.arcgis(myaddress)
d = g.json
latitude = d["lat"]
longitude = d["lng"]
update_sql = """
begin work;
update
civicrm_address
set
geo_code_1 = %s
, geo_code_2 = %s
where
id = %s
"""
var=(latitude, longitude, address_id)
print(var)
update_cursor.execute(update_sql, var, multi=True)
mydb.commit()
print(update_cursor.rowcount)
mycursor.close()
update_cursor.close()
mydb.close()
Here is a simpler script:
I have executed the update_sql statement directly in the MySQL workbench and it succeeds. It is not working from Python.
import geocoder
import mysql.connector
try:
mydb = mysql.connector.connect(
[redacted]
)
mycursor = mydb.cursor(dictionary=True)
update_cursor = mydb.cursor()
update_sql = """
begin work;
update
civicrm_address
set
geo_code_1 = 37.3445
, geo_code_2 = -118.5366074
where
id = 65450;
"""
update_cursor.execute(update_sql, multi=True)
mydb.commit()
print(update_cursor.rowcount, "row(s) were updated")
except mysql.connector.Error as error:
print("Failed to update record to database: {}".format(error))
mydb.rollback()
finally:
# closing database connection.
if (mydb.is_connected()):
mydb.close()
I have it working now. I did remove the "begin work" statement but not the multi=True and it wouldn't work. Later I removed the multi=True statement and it works.

How to change the cursor to the next row using pyodbc in Python

I am trying to fetch records after a regular interval from a database table which growing with records. I am using Python and its pyodbc package to carry out the fetching of records. While fetching, how can I point the cursor to the next row of the row which was read/fetched last so that with every fetch I can only get the new set of records inserted.
To explain more,
my table has 100 records and they are fetched.
after an interval the table has 200 records and I want to fetch rows from 101 to 200. And so on.
Is there a way with pyodbc cursor?
Or any other suggestion would be very helpful.
Below is the code I am trying:
#!/usr/bin/python
import pyodbc
import csv
import time
conn_str = (
"DRIVER={PostgreSQL Unicode};"
"DATABASE=postgres;"
"UID=userid;"
"PWD=database;"
"SERVER=localhost;"
"PORT=5432;"
)
conn = pyodbc.connect(conn_str)
cursor = conn.cursor()
def fetch_table(**kwargs):
qry = kwargs['qrystr']
try:
#cursor = conn.cursor()
cursor.execute(qry)
all_rows = cursor.fetchall()
rowcnt = cursor.rowcount
rownum = cursor.description
#return (rowcnt, rownum)
return all_rows
except pyodbc.ProgrammingError as e:
print ("Exception occured as :", type(e) , e)
def poll_db():
for i in [1, 2]:
stmt = "select * from my_database_table"
rows = fetch_table(qrystr = stmt)
print("***** For i = " , i , "******")
for r in rows:
print("ROW-> ", r)
time.sleep(10)
poll_db()
conn.close()
I don't think you can use pyodbc, or any other odbc package, to find "new" rows. But if there is a 'timestamp' column in your database, or if you can add such a column (some databases allow for it to be automatically populated as the time of insertion so you don't have to change the insert queries) then you can change your query to select only the rows whose timestamp is greater than the previous timestamp. And you can keep changing the prev_timestamp variable on each iteration.
def poll_db():
prev_timestamp = ""
for i in [1, 2]:
if prev_timestamp == "":
stmt = "select * from my_database_table"
else:
# convert your timestamp str to match the database's format
stmt = "select * from my_database_table where timestamp > " + str(prev_timestamp)
rows = fetch_table(qrystr = stmt)
prev_timestamp = datetime.datetime.now()
print("***** For i = " , i , "******")
for r in rows:
print("ROW-> ", r)
time.sleep(10)

Categories