Raspberry Pi loses MySQL connection? - python

I am trying to insert data into a MySQL database with a keypad connected to a Raspberry Pi using Python.
The code:
#!/usr/bin/python
import os
import MySQLdb
import RPi.GPIO as GPIO
import time

# Open db conn
db = MySQLdb.connect("remote_server.com", "user", "password", "database")
# prep cursor
cursor = db.cursor()

GPIO.setmode(GPIO.BOARD)

MATRIX = [ [1,2,3],
           [4,5,6],
           [7,8,9],
           ['*',0,'#'] ]

ROW = [7,11,13,15]
COL = [23,21,19]

for j in range(3):
    GPIO.setup(COL[j], GPIO.OUT)
    GPIO.output(COL[j], 1)

for i in range(4):
    GPIO.setup(ROW[i], GPIO.IN, pull_up_down=GPIO.PUD_UP)

try:
    while(True):
        for j in range(3):
            GPIO.output(COL[j], 0)
            for i in range(4):
                if GPIO.input(ROW[i]) == 0:
                    mysql_code = MATRIX[i][j]
                    print mysql_code
                    try:
                        cursor.execute('''Insert into Rasperry_Codes (Code, insertTS) VALUES (%s, NOW())''', (mysql_code))
                    except MySQLdb.Error, e:
                        try:
                            print "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
                        except IndexError:
                            print "MySQL Error: %s" % str(e)
                    db.commit()
                    time.sleep(0.2)
                    while(GPIO.input(ROW[i]) == 0):
                        pass
            GPIO.output(COL[j], 1)
except KeyboardInterrupt:
    GPIO.cleanup()
Sometimes the data is inserted into the database, sometimes it is not.
No error is reported; the MySQLdb.Error handler never fires.
print mysql_code always prints the correct pressed number.
Does anybody see a problem that could cause this random malfunction?
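Two things may be worth ruling out here: MySQLdb expects query parameters as a sequence (a one-element tuple rather than a bare value), and a connection to a remote server that sits idle between key presses can be dropped by the server without any client-side error until it is next used. A minimal sketch of the insert step, assuming an idle-timeout is the culprit:
# Sketch only: assumes the remote server may drop an idle connection between key presses.
db.ping(True)  # MySQLdb: reconnect automatically if the server has gone away

try:
    cursor.execute("INSERT INTO Rasperry_Codes (Code, insertTS) VALUES (%s, NOW())",
                   (mysql_code,))   # one-element tuple, note the trailing comma
    db.commit()                     # commit inside the try so a failed insert is visible
except MySQLdb.Error, e:
    print "MySQL Error: %s" % str(e)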

Related

SQL Data is fetched only once

I am having trouble fetching data from a SQL database. The program gets data from a PHP site and plays the sound if it is a YouTube link. Currently, it has trouble when the database is empty. My goal is that it exits the for loop (which it does) and then keeps fetching until data is inserted by the site, but after the first pass it never receives any new data.
import mysql.connector
import pafy
import vlc
import time
from mysql.connector.errors import Error

url = "NULL"
try:
    connection = mysql.connector.connect(host='localhost',
                                         database='musicbox',
                                         user='',
                                         password='')
    if connection.is_connected():
        db_Info = connection.get_server_info()
        print("Connected to MySQL Server version ", db_Info)
        cursor = connection.cursor()
        while True:
            db_Info = connection.get_server_info()
            print("Connected to MySQL Server version ", db_Info)
            cursor = connection.cursor()
            print "test2"
            cursor.execute("SELECT * FROM playlist")
            record = cursor.fetchall()
            print record
            time.sleep(1)
            for x in record:
                print x[0]
                if "youtube" in x[0]:
                    if not all(x):
                        print "empty"
                    else:
                        print "true"
                        url = x[0]
                        video = pafy.new(url)
                        best = video.getbestaudio()
                        playurl = best.url
                        Instance = vlc.Instance()
                        player = Instance.media_player_new()
                        Media = Instance.media_new(playurl)
                        Media.get_mrl()
                        player.set_media(Media)
                        player.play()
                        time.sleep(1.5)
                        duration = player.get_length() / 1000
                        time.sleep(duration)
                        cursor.execute("DELETE FROM playlist LIMIT 1")
                        connection.commit()
except Error as e:
    print("Error while connecting to MySQL", e)
finally:
    if (connection.is_connected()):
        cursor.close()
        connection.close()
        print("MySQL connection is closed")

Python calling one function from another

I have written a little Python script to get the files in a directory, compute a hash for each, and then write them to a table.
The first part, getting the files and calculating the hash, was easy. But then I added a function (write_record) to store the filename, log date and hash in a database, and I am struggling with how to call it from the get_files function and write a record for each file in the directory.
from datetime import datetime
from os import scandir
import os
import hashlib
import psycopg2

BLOCKSIZE = 65536
hasher = hashlib.sha256()
basepath = '.'

def convert_date(timestamp):
    d = datetime.utcfromtimestamp(timestamp)
    formated_date = d.strftime('%d%m%Y%H%M%S')
    return formated_date

def get_hash(entry):
    with open(entry, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(BLOCKSIZE)
    # print(hasher.hexdigest())

def get_files():
    dir_entries = scandir('.')
    for entry in dir_entries:
        if entry.is_file():
            info = entry.stat()
            print(' %s %s %s' % (entry.name, convert_date(info.st_mtime), hasher.hexdigest()))
            log_filename = entry.name
            log_hashvalue = hasher.hexdigest()
            log_date = convert_date(info.st_mtime)
            return log_filename, log_hashvalue, log_date
            # write_record()

def write_record():
    log_filename, log_hashvalue, log_date = get_files()
    try:
        print(log_filename, log_hashvalue, log_date)
        connection = psycopg2.connect(user="postgres", password="xxxxxxxx", host="xxx.xxx.xxx.xxx", port="5432", database="evidence_logging")
        cursor = connection.cursor()
        postgres_insert_query = """ INSERT INTO logfiles (log_name,log_date,log_hashvalue) VALUES (%s,%s,%s)"""
        record_to_insert = (log_filename, log_date, log_hashvalue)
        print(postgres_insert_query, record_to_insert)
        cursor.execute(postgres_insert_query, record_to_insert)
        connection.commit()
        count = cursor.rowcount
        print(count, "Record inserted successfully into logfiles table")
    except (Exception, psycopg2.Error) as error:
        if(connection):
            print("Failed to insert record into logfiles table", error)
    finally:
        # closing database connection.
        if(connection):
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")

write_record()
Thanks in advance
Regards
Georg
In your code you are calling the write_record() method, which will insert only one file, because get_files() returns after the first file rather than processing all of them.
Instead, call get_files() first, and inside it, rather than returning, call write_record() with the values you were returning from get_files().
Also, do not close the connection after inserting every record; close it once after all the records have been inserted.
Try this:
from datetime import datetime
from os import scandir
import os
import hashlib
import psycopg2

BLOCKSIZE = 65536
hasher = hashlib.sha256()
basepath = '.'
connection = None

def convert_date(timestamp):
    d = datetime.utcfromtimestamp(timestamp)
    formated_date = d.strftime('%d%m%Y%H%M%S')
    return formated_date

def get_hash(entry):
    with open(entry, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(BLOCKSIZE)
    # print(hasher.hexdigest())

def get_files():
    dir_entries = scandir('.')
    for entry in dir_entries:
        if entry.is_file():
            info = entry.stat()
            print(' %s %s %s' % (entry.name, convert_date(info.st_mtime), hasher.hexdigest()))
            log_filename = entry.name
            log_hashvalue = hasher.hexdigest()
            log_date = convert_date(info.st_mtime)
            write_record(log_filename, log_hashvalue, log_date)
    # close the connection after writing all records
    close_connection()

def write_record(log_filename, log_hashvalue, log_date):
    global connection  # reuse one connection for all records
    try:
        print(log_filename, log_hashvalue, log_date)
        if connection is None:
            connection = psycopg2.connect(user="postgres", password="xxxxxxxx", host="xxx.xxx.xxx.xxx", port="5432", database="evidence_logging")
        cursor = connection.cursor()
        postgres_insert_query = """ INSERT INTO logfiles (log_name,log_date,log_hashvalue) VALUES (%s,%s,%s)"""
        record_to_insert = (log_filename, log_date, log_hashvalue)
        print(postgres_insert_query, record_to_insert)
        cursor.execute(postgres_insert_query, record_to_insert)
        connection.commit()
        count = cursor.rowcount
        print(count, "Record inserted successfully into logfiles table")
    except (Exception, psycopg2.Error) as error:
        if(connection):
            print("Failed to insert record into logfiles table", error)
    finally:
        cursor.close()

def close_connection():
    if(connection):
        connection.close()
        print("PostgreSQL connection is closed")

get_files()

Python/MySQL, No errors but data not inserting

I have some code, shown below, that reads four lines of data over a serial connection. The data is assigned to variables and then an attempt is made to insert them into a local database. However, once the code has run, there is no new data in the database.
I have inserted print commands to check that the data is definitely being received over the serial connection, and it is. I have also successfully inserted data into the database via the terminal, but those were static values such as 10.0, 10.0, 0, 10.
import MySQLdb
import serial
import time

ser = serial.Serial('/dev/ttyACM0', 115200)
conn = MySQLdb.connect(host="localhost", user="JP", passwd="password", db="serialdb")
cursor = conn.cursor()

while 1:
    print "waiting for data"
    print ""
    xs = ser.readline()
    print xs
    time.sleep(1)
    ys = ser.readline()
    print ys
    time.sleep(1)
    zs = ser.readline()
    print zs
    time.sleep(1)
    vs = ser.readline()
    print vs
    time.sleep(1)
    try:
        x = float(xs)
    except ValueError:
        pass
    try:
        y = float(xs)
    except ValueError:
        pass
    try:
        z = float(xs)
    except ValueError:
        pass
    v = int(vs)
    print "inserting into database"
    print ""
    time.sleep(1)
    sql = "INSERT INTO Arduino_Data(Temperature, Humidity, RPM, Distance) VALUES (%f, %f, %f, %d)" % (x, y, z, v)
    cursor.execute(sql)
    conn.commit
    break
Commit is a function, you are not calling it :)
conn.commit()
That should do it
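As a side note, the insert itself can be left to the driver's parameter binding instead of Python %-formatting; a small sketch assuming the same table layout:
# Sketch only: MySQLdb fills in %s placeholders itself, whatever the value types are.
sql = "INSERT INTO Arduino_Data (Temperature, Humidity, RPM, Distance) VALUES (%s, %s, %s, %s)"
cursor.execute(sql, (x, y, z, v))
conn.commit()   # note the parentheses: commit must be called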

"String indices must be integers" in one place of the code in Python

import pymongo
import sys

client = pymongo.MongoClient('localhost', 27017)
db = client.rop
try:
    cntcur = db.albums.aggregate([{"$unwind":"$images"},{"$group":{"_id":"null","count":{'$sum':1}}}])
    cursor1 = db.images.find()
    for im in cursor1:
        id1 = int(im['_id'])
        cnt = 0
        cursor = db.albums.aggregate([{"$unwind":"$images"}])
        print id1
        for image in cursor:
            print "moving to images collection"
            ig = image['images']
            if (id1 == ig):
                break;
            else:
                cnt = cnt + 1
        if (cnt == cntr):
            print "removing"
            db.images.remove({'_id':id1})
except Exception as e:
    print "unexpected error", type(e), e
I get the error on the line ig=image['images']. I tried changing it to ig=int(image['images']), but I get the same error.
import pymongo
import sys

client = pymongo.MongoClient('localhost', 27017)
db = client.rop
try:
    cursor1 = db.images.find()
    for im in cursor1:
        id1 = int(im['_id'])
        print id1
        cnt = 0
        cursor = db.albums.find()
        for image in cursor:
            ig = image['images']
            for data in ig:
                if (id1 == data):
                    cnt = 1
                    break
        if cnt == 0:
            print "removing"
            db.images.remove({'_id':id1})
except Exception as e:
    print "unexpected error", type(e), e

Best practice for UDP python server handling many connections

I have a small server in Python that receives UDP packets from GPS devices.
Once data is received, it is decoded and then stored in a PostgreSQL database.
How do I monitor the UDP packets?
I think that for some reason the queue is filling up too quickly and some of the information is being discarded.
For some reason, if I restart the daemon, the queue gets processed, as it seems there is still data there. Is there a better way to handle this?
Maybe I should run multiple servers to decrease the load?
Or is there any setting to increase the size of the queue?
I am new to socket programming, so please bear with me!
Code as follows:
#!/usr/bin/python
import sys, os, time

def main():
    import socket
    import struct
    import sys, time
    import psycopg2
    import logging
    try:
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_socket.bind(("myip", "udpport"))
        con = psycopg2.connect("conn info")
        cur = con.cursor()
        while True:
            data = server_socket.recv(65535)
            #print data
            #print sys.getsizeof(data)
            logging.info(data)
            if len(data) is 34:
                # extract info
                info = struct.unpack("<hhBBchfflccccccccccccc", data)
                tid = ''.join([info[9], info[10], info[11], info[12], info[13], info[14], info[15], info[16], info[17], info[18]])
                alarm = info[4]
                direction = info[5]
                longt = info[6]
                lat = info[7]
                speed = ord(info[4])
                fuel = ord(info[21])*2
                datasize = sys.getsizeof(data)
                messagetype = info[1]
                if messagetype == 8:
                    dbmes = "cycle"
                    keystatus = int(info[2])
                elif messagetype == 3:
                    dbmes = "alarm"
                    keystatus = int(info[3])
                try:
                    # Update info on DB
                    cur.execute("update in database")
                except Exception, e:
                    pass
                # Commit Changes to DB
                con.commit()
        cur.close()
        con.close()
    except Exception, e:
        print "\n\nNothing to do. Daemon Already Running!!!!\n"

if __name__ == "__main__":
    # do the UNIX double-fork magic, see Stevens' "Advanced
    # Programming in the UNIX Environment" for details (ISBN 0201563177)
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)
    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent, print eventual PID before
            print "GREENCITY TANZANIA LTD GPRS SERVER PID %d" % pid
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)
    # start the daemon main loop
    main()
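If the kernel's per-socket receive buffer overflows while the loop is busy writing to PostgreSQL, excess datagrams are silently dropped. Two common mitigations are asking for a larger receive buffer and keeping the receive loop as light as possible, for example by handing packets to a worker thread. A rough sketch; the buffer size, port and queue-based hand-off are assumptions, not part of the original code:
# Sketch only: enlarge the socket buffer and decouple decoding/DB work from the receive loop.
import socket
import threading
import Queue  # Python 2; use queue on Python 3

packet_queue = Queue.Queue()

def receiver(sock):
    # Keep this loop tight so the kernel buffer drains quickly.
    while True:
        packet_queue.put(sock.recv(65535))

def db_worker():
    # Decode and store packets without blocking the receive loop.
    while True:
        data = packet_queue.get()
        # ... struct.unpack and the psycopg2 insert/update would go here ...
        packet_queue.task_done()

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Ask the kernel for a larger receive buffer (capped by net.core.rmem_max on Linux).
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 4 * 1024 * 1024)
sock.bind(("0.0.0.0", 9999))  # placeholder address/port

threading.Thread(target=db_worker).start()
receiver(sock)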
