Cannot get updated DB data with pyodbc and Flask in Python

I seem to be having issues querying a pyodbc connection from a Flask route in Python. Initially the entire file shared a single connection, but I noticed that hitting the URL for the GET request would not return the most recent data. So I adjusted the script to open and close the connection each time a GET request comes in, but it still returns the same stale data. Occasionally the data does update, but most of the time it does not.
Please help me clarify what is going on with this process.
def getChg(s, cursor, arr):
    getIncResolved = "SELECT COUNT(incident.number) FROM SCHEMA.OAUSER.incident incident WHERE incident.dv_resolved_by = '" + str(s) + "' AND incident.resolved_at BETWEEN '" + str(past) + "' AND '" + str(current) + "' AND incident.dv_opened_by != '" + str(s) + "';"
    getTaskResolved = "SELECT COUNT(sc_task.number) FROM SCHEMA.OAUSER.sc_task sc_task WHERE sc_task.dv_closed_by = '" + str(s) + "' AND sc_task.closed_at BETWEEN '" + str(past) + "' AND '" + str(current) + "' AND sc_task.dv_opened_by != '" + str(s) + "';"
    getCallStarted = "SELECT COUNT(new_call.number) FROM SCHEMA.OAUSER.new_call new_call WHERE new_call.opened_at BETWEEN '" + str(past) + "' AND '" + str(current) + "' AND new_call.dv_opened_by = '" + str(s) + "';"
    i = 0
    t = 0
    c = 0
    cursor.execute(getIncResolved)
    for row in cursor.fetchall():
        i = row[0]
    cursor.execute(getTaskResolved)
    for row in cursor.fetchall():
        t = row[0]
    cursor.execute(getCallStarted)
    for row in cursor.fetchall():
        c = row[0]
    if c > -1:
        test = {'Agent': str(s), 'Calls': c, 'Inc': i, 'Task': t}
        arr.append(test)

@app.route('/data', methods=['GET', 'POST'])
def api_root():
    cnxn = pyodbc.connect('DSN=ServiceNow;Uid=ServiceOps;Pwd=********;', autocommit=True)
    cursor = cnxn.cursor()
    data = []
    staffjson = request.get_json(force=True)
    staff = staffjson['users']
    print(staff)
    del data[:]
    for s in staff:
        getChg(s, cursor, data)
    print(data)
    cnxn.close()
    return json.dumps(data)

The current variable was only being created once, when the script first started, so it never updated to the current time after that.
I now create the variable on each request and it is working as I expected.
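A minimal sketch of that fix; the exact time window is not shown in the original script, so the 24-hour past value and the extra getChg parameters here are assumptions:

import datetime
import json

import pyodbc
from flask import Flask, request

app = Flask(__name__)

@app.route('/data', methods=['GET', 'POST'])
def api_root():
    # Recompute the time window on every request instead of once at import time
    current = datetime.datetime.now()
    past = current - datetime.timedelta(hours=24)  # assumed reporting window

    cnxn = pyodbc.connect('DSN=ServiceNow;Uid=ServiceOps;Pwd=********;', autocommit=True)
    cursor = cnxn.cursor()
    data = []
    staff = request.get_json(force=True)['users']
    for s in staff:
        # pass the window in explicitly rather than reading module-level globals
        getChg(s, cursor, data, past, current)
    cnxn.close()
    return json.dumps(data)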

Related

Storing Flow Log connections in Lambda

I'm currently writing a Python script in AWS Lambda to transform stateless VPC Flow Logs into stateful logs.
But I'm having some difficulty saving connections inside Lambda.
The variable "stock" is the list that is supposed to save connections:
def init():
    global stock
    stock = []

def lambda_handler(event, context):
    # retrieve bucket name and file_key from the S3 event
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    file_key = event['Records'][0]['s3']['object']['key']
    logger.info('Reading {} from {}'.format(file_key, bucket_name))
    # get the object
    obj = s3.get_object(Bucket=bucket_name, Key=file_key)
    if obj['ContentType'] == 'application/x-gzip' or file_key.endswith('.gz'):
        lines_read = gzip.decompress(obj['Body'].read())
    else:
        lines_read = obj['Body'].read()
    lines_read = lines_read[3:-3]
    lines = lines_read.decode().split("\n")
    for line in lines:
        lines_split = line.split(" ")
        if lines_split[17] == "3" or lines_split[17] == "19":
            ts = str(time.time())
            line_whrite = ts + " " + "Short Connection " + lines_split[5] + " " + lines_split[6] + " " + lines_split[7] + " " + lines_split[8] + " " + lines_split[16]
        elif lines_split[17] == "1":
            i = 0
            ts = str(time.time())
            json2 = {"time": lines_split[15], "srcaddr": lines_split[5], "dstaddr": lines_split[6], "srcport": lines_split[7], "dstport": lines_split[8]}
            for connection in stock:
                if json2["srcaddr"] == connection["srcaddr"] or connection["dstaddr"] and json2["dstaddr"] == connection["srcaddr"] or connection["dstaddr"] and json2["srcport"] == connection["srcport"] or connection["dstport"] and json2["dstport"] == connection["srcport"] or connection["dstport"]:
                    tpsstart = int(json2["time"])
                    tpsend = int(connection["time"])
                    converted_tpsstart = datetime.datetime.fromtimestamp(round(tpsstart / 1000))
                    converted_tpsend = datetime.datetime.fromtimestamp(round(tpsend / 1000))
                    tpsdiff = converted_tpsend - converted_tpsstart
                    line_whrite = "Connection Start :" + str(converted_tpsstart) + " Connection End:" + str(converted_tpsend) + " Connection Duration:" + str(tpsdiff) + " " + json2["srcaddr"] + " " + json2["dstaddr"] + " " + json2["srcport"] + " " + json2["dstport"]
                    del stock[i]
                    print(line_whrite)
                i = i + 1
The problem, I think, is that my variable "stock" is always reset, so it is empty...
Do you have any ideas?
Thanks :)

Python: MQTT broker messages bulk insert into mysql database

I have subscribed to multiple topics using the paho MQTT client. On receiving messages from the broker, I want to store them in a MySQL database. I want to gather the messages together before inserting them into the DB, so I have set a threshold of, say, 1000 messages; only when the threshold is reached should the messages be inserted into the DB all at once. I am checking rowcount after cursor.execute(), but it always shows the count as 1, so the bulk insert is not happening. Here is my sample code snippet:
# main.py
# mysql database class
db = MySQLDBClass()
# mqtt client class with subscription, connection to broker, some callbacks
mqttclient = MyMQTTClient()
mqttclient.on_message = db.onMessage
mqttclient.loop_forever()

# MySQLDBClass.py
def __init__(self):
    self.insertcounter = 0
    self.insertStatement = ''
    self.bulkpayload = ''
    self.maxInsert = 1000

def onMessage(self, client, userdata, msg):
    if msg.topic.startswith("topic1/"):
        self.bulkpayload += "(" + msg.payload.decode("utf-8") + "," + datetime + "),"
    elif msg.topic.startswith("topic2/"):
        self.insertStatement += "INSERT INTO mydatabase.table1 VALUES (" + msg.payload.decode("utf-8") + "," + datetime + ");"
    elif msg.topic.startswith("topic3/"):
        self.insertStatement += "INSERT INTO mydatabase.table2 VALUES (" + msg.payload.decode("utf-8") + "," + datetime + ");"
    elif msg.topic.startswith("messages"):
        self.insertStatement += "INSERT INTO mydatabase.table3 VALUES ('" + msg.topic + "'," + msg.payload.decode("utf-8") + "," + datetime + ");"
    else:
        return  # do not store in DB
    self.insertcounter += 1
    if self.insertcounter > self.maxInsert:
        if self.bulkpayload != '':
            self.insertStatement += "INSERT INTO mydatabase.table4 VALUES" + self.bulkpayload + ";"
            self.bulkpayload = ''
        cursor.execute(self.insertStatement)
        cursor.commit()
        print(cursor.rowcount)  # always prints 1, expecting the bulk count
        self.insertcounter = 0
        self.insertStatement = ''
With the pymysql module, execute() can only run one statement at a time, whereas with mysql-connector-python you can pass multi=True to execute() to run multiple statements.
https://dev.mysql.com/doc/connector-python/en/connector-python-api-mysqlcursor-execute.html
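A minimal sketch of the multi=True approach with mysql-connector-python; the connection settings and the two literal INSERT statements below are placeholders, not taken from the original code:

import mysql.connector

# assumed connection settings; replace with real credentials
cnx = mysql.connector.connect(host="localhost", user="user", password="secret", database="mydatabase")
cursor = cnx.cursor()

# several statements concatenated into one string, as in the question's insertStatement
statements = (
    "INSERT INTO mydatabase.table1 VALUES (1, '2019-01-01 00:00:00');"
    "INSERT INTO mydatabase.table2 VALUES (2, '2019-01-01 00:00:01');"
)

# with multi=True, execute() returns an iterator with one result object per statement
for result in cursor.execute(statements, multi=True):
    print(result.statement, result.rowcount)

cnx.commit()
cursor.close()
cnx.close()

For the VALUES-list case (table4), cursor.executemany() with a single parameterized INSERT is the more usual way to do a bulk insert and works with both drivers.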

How to check if sql insert added anything and select got something with db class in python

I have this database class written for Python 2.7 and MariaDB, and I want to print a message if an insert was successful, or a warning if the element doesn't exist and the insert failed. Should I modify the database class, or is it alright with the return values it provides? This is my try:
def insertProduct(db, name, attrName, attrValue):
    sql = 'INSERT IGNORE `product` SET ' + attrName + ' = %s WHERE `name` = %s'
    sql2 = """SELECT * FROM `product` WHERE `name` = %s"""
    if db.execute(sql, (attrValue, name))['retval']:
        print "Successfully added ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table"
    elif not db.execute(sql, (attrValue, name))['retval']:
        print "ERROR: Couldn't add ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table"
    if db.execute(sql, (attrValue, name))['retval']:
        print "Successfully added ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table"
    elif not db.execute(sql, (attrValue, name))['retval']:
        print "ERROR: Couldn't add ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table"
Here, you are calling db.execute() with sql in both the if and the elif. Suppose the first db.execute() returns True; then it prints the "successfully added" message. But if it returns False, the elif runs db.execute() again, which can itself return True or False: if it returns True the insertion succeeded but no success message is printed, and only if it returns False do you get "ERROR: Couldn't add".
    if db.execute(sql, (attrValue, name))['retval']:
        print "Successfully added ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table"
    elif not db.execute(sql2, (name,))['retval']:
        print "ERROR: Couldn't add ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table"
In the elif block it should be the select query (sql2) instead, so the second call checks whether the row exists rather than repeating the insert.
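A minimal sketch of the whole reworked function, assuming db.execute() returns a dict with a 'retval' key as in the original class (Python 2 syntax to match the question):

def insertProduct(db, name, attrName, attrValue):
    sql = 'INSERT IGNORE `product` SET ' + attrName + ' = %s WHERE `name` = %s'
    sql2 = """SELECT * FROM `product` WHERE `name` = %s"""
    # run the insert once and keep the result instead of executing it again in the elif
    inserted = db.execute(sql, (attrValue, name))['retval']
    if inserted:
        print "Successfully added ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table"
    elif not db.execute(sql2, (name,))['retval']:
        print "ERROR: Couldn't add ('" + name + "', '" + attrName + "': " + unicode(attrValue) + ") to `product` table, '" + name + "' does not exist"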

How to change records on BigQuery from different rows to one row?

I have managed to insert values into BigQuery from a JSON file, but my JSON file has multiple objects.
Eg:
{"A":{"queryID": "newId", "newCol": "newCol"}}
{"B":{"date":"2013-05-31 20:56:41", "device":"pc"}}
{"C":{"keyword": ["new", "ict"]}}
The result in BigQuery is one row per object, with the columns belonging to the other objects left empty. How can I get everything into one row with different columns?
def loadTable(http, service):
    url = "https://www.googleapis.com/upload/bigquery/v2/projects/" + projectId + "/jobs"
    newresource = ('--xxx\n' +
                   'Content-Type: application/json; charset=UTF-8\n' + '\n' +
                   '{\n' +
                   '  "configuration": {\n' +
                   '    "load": {\n' +
                   '      "sourceFormat": "NEWLINE_DELIMITED_JSON",\n' +
                   '      "autodetect": "' + "True" + '",\n' +
                   '      "destinationTable": {\n' +
                   '        "projectId": "' + projectId + '",\n' +
                   '        "datasetId": "' + datasetId + '",\n' +
                   '        "tableId": "' + tableId + '"\n' +
                   '      }\n' +
                   '    }\n' +
                   '  }\n' +
                   '}\n' +
                   '--xxx\n' +
                   'Content-Type: application/octet-stream\n' +
                   '\n')
    f = open('samplejson.json', 'r')
    newresource += f.read().replace('\n', '\r\n')
    newresource += ('--xxx--\n')
    print newresource
    headers = {'Content-Type': 'multipart/related; boundary=xxx'}
    resp, content = http.request(url, method="POST", body=newresource, headers=headers)
    if not resp.status == 200:
        print resp
        print content
    else:
        jsonResponse = json.loads(content)
        jobReference = jsonResponse['jobReference']['jobId']
        while True:
            jobCollection = service.jobs()
            getJob = jobCollection.get(projectId=projectId, jobId=jobReference).execute()
            currentStatus = getJob['status']['state']
            if 'DONE' == currentStatus:
                print "Done Loading!"
                return
            else:
                print 'Waiting to load...'
                print 'Current status: ' + currentStatus
                print time.ctime()
                time.sleep(10)

def main(argv):
    credentials = ServiceAccountCredentials.from_json_keyfile_name("samplecredentials.json")
    scope = ['https://www.googleapis.com/auth/bigquery']
    credentials = credentials.create_scoped(scope)
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build('bigquery', 'v2', http=http)
    loadTable(http, service)
I would recommend doing that final "assembling" into one row using the query below (BigQuery Standard SQL):
#standardSQL
SELECT
ARRAY_AGG(A IGNORE NULLS) AS A,
ARRAY_AGG(B IGNORE NULLS) AS B,
ARRAY_AGG(C IGNORE NULLS) AS C
FROM `yourtable`
If you had some extra field indicating which rows to combine/group together into one row, for example some id, the query could look like this:
#standardSQL
SELECT
id,
ARRAY_AGG(A IGNORE NULLS) AS A,
ARRAY_AGG(B IGNORE NULLS) AS B,
ARRAY_AGG(C IGNORE NULLS) AS C
FROM `yourtable`
GROUP BY id

Save MySQL data as CSV/Excel using Flask (Python) when a button is clicked

I'm new to exporting data. I've researched all over the net, but it was really hard for me to understand; can someone help me learn the basics?
This is my main problem: I want to download specific data from MySQL based on the date range I choose in my client. When I click the download button, I want that data from MySQL to be saved on my computer, and the user should have the option to save it as CSV/Excel. I'm using Python for my web service. Thank you.
This is my code right now in my web service:
@api.route('/export_file/', methods=['GET', 'POST'])
def export_file():
    if request.method == 'POST':
        selectAttendance = """SELECT * FROM attendance"""
        db.session.execute(selectAttendance)
        db.session.commit()
        f = csv.writer(open("file.csv", "w"))
        for row in selectAttendance:
            f.writerow([str(row)])
    return jsonify({'success': True})
In general:
Set the "Content-Type" part of the HTTP header to the MIME type matching your data. This tells the browser what type of data the web server is going to send.
Send the actual data in the body.
With Flask, see:
Forcing application/json MIME type in a view (Flask)
http://flask.pocoo.org/docs/0.10/patterns/streaming/
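A minimal sketch of that idea applied to the route above, assuming Flask-SQLAlchemy's db.session as in the question; the date-range filter is left as a placeholder:

import csv
import io

from flask import Response

@api.route('/export_file/', methods=['GET', 'POST'])
def export_file():
    # build the CSV in memory instead of writing file.csv on the server
    rows = db.session.execute("""SELECT * FROM attendance""")  # add the date-range WHERE clause here
    output = io.StringIO()
    writer = csv.writer(output)
    writer.writerow(rows.keys())  # header row from the result columns
    for row in rows:
        writer.writerow(list(row))
    # Content-Type (mimetype) plus Content-Disposition makes the browser download the body as a CSV file
    return Response(
        output.getvalue(),
        mimetype='text/csv',
        headers={'Content-Disposition': 'attachment; filename=attendance.csv'},
    )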
def get(self):
    try:
        os.stat(BACKUP_PATH)
    except:
        os.mkdir(BACKUP_PATH)
    now = datetime.now()  # current date and time
    year = now.strftime("%Y")
    month = now.strftime("%m")
    day = now.strftime("%d")
    time = now.strftime("%H:%M:%S")
    date_time = now.strftime("%d_%m_%Y_%H:%M:%S")
    TODAYBACKUPPATH = BACKUP_PATH + '/' + date_time
    try:
        os.stat(TODAYBACKUPPATH)
    except:
        os.mkdir(TODAYBACKUPPATH)
    print ("checking for databases names file.")
    if os.path.exists(DB_NAME):
        file1 = open(DB_NAME)
        multi = 1
        print ("Databases file found...")
        print ("Starting backup of all dbs listed in file " + DB_NAME)
    else:
        print ("Databases file not found...")
        print ("Starting backup of database " + DB_NAME)
        multi = 0
    if multi:
        in_file = open(DB_NAME, "r")
        flength = len(in_file.readlines())
        in_file.close()
        p = 1
        dbfile = open(DB_NAME, "r")
        while p <= flength:
            db = dbfile.readline()  # reading database name from file
            db = db[:-1]  # deletes extra line
            dumpcmd = "mysqldump -h " + DB_HOST + " -u " + DB_USER + " -p" + DB_USER_PASSWORD + " " + db + " > " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
            os.system(dumpcmd)
            gzipcmd = "gzip " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
            os.system(gzipcmd)
            p = p + 1
        dbfile.close()
    else:
        db = DB_NAME
        dumpcmd = "mysqldump -h " + DB_HOST + " -u " + DB_USER + " -p" + DB_USER_PASSWORD + " " + db + " > " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
        os.system(dumpcmd)
        gzipcmd = "gzip " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
        os.system(gzipcmd)
    # t = ("Your backups have been created in '" + TODAYBACKUPPATH + "' directory")
    return "Your folder has been created in '" + TODAYBACKUPPATH + "'."
