SQL Data is fetched only once - python

I am having trouble fetching data from a SQL database. The program gets data from a PHP site and plays the audio if it is a YouTube link. Currently it misbehaves when the database is empty. My goal is that it exits the for loop (which it does), receives no data, and keeps trying to fetch again until data is inserted by the site.
import mysql.connector
import pafy
import vlc
import time
from mysql.connector.errors import Error

url = "NULL"
try:
    connection = mysql.connector.connect(host='localhost',
                                         database='musicbox',
                                         user='',
                                         password='')
    if connection.is_connected():
        db_Info = connection.get_server_info()
        print("Connected to MySQL Server version ", db_Info)
        cursor = connection.cursor()
        while True:
            db_Info = connection.get_server_info()
            print("Connected to MySQL Server version ", db_Info)
            cursor = connection.cursor()
            print "test2"
            cursor.execute("SELECT * FROM playlist")
            record = cursor.fetchall()
            print record
            time.sleep(1)
            for x in record:
                print x[0]
                if "youtube" in x[0]:
                    if not all(x):
                        print "empty"
                    else:
                        print "true"
                        url = x[0]
                        video = pafy.new(url)
                        best = video.getbestaudio()
                        playurl = best.url
                        Instance = vlc.Instance()
                        player = Instance.media_player_new()
                        Media = Instance.media_new(playurl)
                        Media.get_mrl()
                        player.set_media(Media)
                        player.play()
                        time.sleep(1.5)
                        duration = player.get_length() / 1000
                        time.sleep(duration)
                        cursor.execute("DELETE FROM playlist LIMIT 1")
                        connection.commit()
except Error as e:
    print("Error while connecting to MySQL", e)
finally:
    if connection.is_connected():
        cursor.close()
        connection.close()
        print("MySQL connection is closed")

Related

How to request url and download folders with python?

I'm new to Python. I'm trying to request a URL (built from an id stored in a PostgreSQL database) which sends me zip archives with several files inside.
import psycopg2
import requests

url = "https://myurl/"
conn = psycopg2.connect(user="XXX", password="XXX", database="XXX", host="localhost", port="5432")
print("Successfully connected!")
cur = conn.cursor()
sql = "select id from public.base"
cur.execute(sql)
row = [item[0] for item in cur.fetchall()]
for d in row:
    requests.post(url+d)
The requests.post(url+d) call works, I get a 200 response.
But I don't know how to do the next step, that is, how to get these zip archives into my workspace...
You could use the zipfile and io libraries for that, specifying your download location in extractall(), like so :)
from psycopg2 import (
    connect,
    OperationalError,
)
from zipfile import (
    BadZipFile,
    ZipFile,
)
from io import BytesIO
import requests

def download_zip(url):
    response = requests.get(url)
    if response.ok:
        try:
            z = ZipFile(BytesIO(response.content))
            z.extractall("/path/to/destination_directory")
        except BadZipFile as ex:
            print('Error: {}'.format(ex))
        print('Download succeeded: {}'.format(url))
    else:
        print('Connection failed: {}'.format(url))

def main():
    conn = connect(
        user='XXX',
        password='XXX',
        database='XXX',
        host='localhost',
        port='5432',
    )
    try:
        cur = conn.cursor()
        cur.execute('select id from public.base')
    except OperationalError:
        exit(0)
    row = [item[0] for item in cur.fetchall()]
    for id in row:
        download_zip('https://myurl/{}'.format(id))
    print('Download completed')

if __name__ == '__main__':
    main()
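If the goal is to keep the archives as .zip files in a workspace directory rather than extract them, a small variant of download_zip (my own sketch; the destination path is a placeholder) writes response.content straight to disk:

import os
import requests

def save_zip(url, dest_dir="/path/to/workspace"):
    response = requests.get(url)
    if response.ok:
        # Name the file after the last path segment of the URL.
        filename = url.rstrip('/').split('/')[-1] + '.zip'
        with open(os.path.join(dest_dir, filename), 'wb') as f:
            f.write(response.content)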

Python calling one function from another

I have written a little Python script that gets the files in a directory, computes a hash for each, and then writes them to a table.
The first part, getting the files and calculating the hash, was easy. But now I have added a function (write_record) to store the filename, log date and hash in a database, and I am struggling with how to call it from the get_files function and write a record for each file in the directory.
from datetime import datetime
from os import scandir
import os
import hashlib
import psycopg2

BLOCKSIZE = 65536
hasher = hashlib.sha256()
basepath = '.'

def convert_date(timestamp):
    d = datetime.utcfromtimestamp(timestamp)
    formated_date = d.strftime('%d%m%Y%H%M%S')
    return formated_date

def get_hash(entry):
    with open(entry, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(BLOCKSIZE)
    # print(hasher.hexdigest())

def get_files():
    dir_entries = scandir('.')
    for entry in dir_entries:
        if entry.is_file():
            info = entry.stat()
            print(' %s %s %s' % (entry.name, convert_date(info.st_mtime), hasher.hexdigest()))
            log_filename = entry.name
            log_hashvalue = hasher.hexdigest()
            log_date = convert_date(info.st_mtime)
            return log_filename, log_hashvalue, log_date
            # write_record()

def write_record():
    log_filename, log_hashvalue, log_date = get_files()
    try:
        print(log_filename, log_hashvalue, log_date)
        connection = psycopg2.connect(user="postgres", password="xxxxxxxx", host="xxx.xxx.xxx.xxx", port="5432", database="evidence_logging")
        cursor = connection.cursor()
        postgres_insert_query = """ INSERT INTO logfiles (log_name,log_date,log_hashvalue) VALUES (%s,%s,%s)"""
        record_to_insert = (log_filename, log_date, log_hashvalue)
        print(postgres_insert_query, record_to_insert)
        cursor.execute(postgres_insert_query, record_to_insert)
        connection.commit()
        count = cursor.rowcount
        print(count, "Record inserted successfully into logfiles table")
    except (Exception, psycopg2.Error) as error:
        if connection:
            print("Failed to insert record into logfiles table", error)
    finally:
        # closing database connection.
        if connection:
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")

write_record()
Thanks in advance
Regards
Georg
In your code you are calling the write_record() method, which inserts only one file, because get_files() returns after the first file rather than all the files.
Instead, call get_files() first, and rather than returning from it, have it call write_record() with the values you were returning.
Also, do not close the connection after inserting every record; close it once all the records have been inserted.
Try this:
from datetime import datetime
from os import scandir
import os
import hashlib
import psycopg2

BLOCKSIZE = 65536
hasher = hashlib.sha256()
basepath = '.'
connection = None

def convert_date(timestamp):
    d = datetime.utcfromtimestamp(timestamp)
    formated_date = d.strftime('%d%m%Y%H%M%S')
    return formated_date

def get_hash(entry):
    with open(entry, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(BLOCKSIZE)
    # print(hasher.hexdigest())

def get_files():
    dir_entries = scandir('.')
    for entry in dir_entries:
        if entry.is_file():
            info = entry.stat()
            print(' %s %s %s' % (entry.name, convert_date(info.st_mtime), hasher.hexdigest()))
            log_filename = entry.name
            log_hashvalue = hasher.hexdigest()
            log_date = convert_date(info.st_mtime)
            write_record(log_filename, log_hashvalue, log_date)
    # close the connection after writing all records
    close_connection()

def write_record(log_filename, log_hashvalue, log_date):
    global connection
    try:
        print(log_filename, log_hashvalue, log_date)
        # reuse one connection for all records instead of reconnecting per record
        if connection is None:
            connection = psycopg2.connect(user="postgres", password="xxxxxxxx", host="xxx.xxx.xxx.xxx", port="5432", database="evidence_logging")
        cursor = connection.cursor()
        postgres_insert_query = """ INSERT INTO logfiles (log_name,log_date,log_hashvalue) VALUES (%s,%s,%s)"""
        record_to_insert = (log_filename, log_date, log_hashvalue)
        print(postgres_insert_query, record_to_insert)
        cursor.execute(postgres_insert_query, record_to_insert)
        connection.commit()
        count = cursor.rowcount
        print(count, "Record inserted successfully into logfiles table")
    except (Exception, psycopg2.Error) as error:
        if connection:
            print("Failed to insert record into logfiles table", error)
    finally:
        cursor.close()

def close_connection():
    if connection:
        connection.close()
        print("PostgreSQL connection is closed")

get_files()
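A side note of my own, separate from the fix above: the posted code never calls get_hash() inside the loop and reuses one module-level hasher, so every row ends up with the same (cumulative) digest. A small per-file variant could look like this, with the helper called once per directory entry:

import hashlib

BLOCKSIZE = 65536

def get_hash(path):
    # Fresh hasher per file so each file gets its own digest.
    hasher = hashlib.sha256()
    with open(path, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        while buf:
            hasher.update(buf)
            buf = afile.read(BLOCKSIZE)
    return hasher.hexdigest()

# inside get_files():
#     log_hashvalue = get_hash(entry.path)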

execute a large sql file sqlite

I need to execute a 178 MB SQL file (database RED: Rich_Epinions_Dataset_anonym).
I get this error message: MemoryError.
I am using sqlite and Python; here is my code:
import sqlite3
import ctypes

def import_sql():  # 'import' is a reserved word in Python, so the function needs another name
    print("Opened database ...")
    conn = sqlite3.connect('F:\\PROJECT\\testDict.db')
    print("Opened database successfully")
    qry = open('F:\\PROJECT\\epinions_anonym.sql', 'r').read()
    sqlite3.complete_statement(qry)
    cursor = conn.cursor()
    try:
        cursor.executescript(qry)
    except Exception as e:
        MessageBoxW = ctypes.windll.user32.MessageBoxW
        errorMessage = databaseFile + ': ' + str(e)
        MessageBoxW(None, errorMessage, 'Error', 0)
        cursor.close()
        raise

import_sql()
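Since no answer is posted here, one way to avoid the MemoryError (a sketch of my own, assuming the dump is a plain text file of semicolon-terminated statements) is to feed the statements to SQLite incrementally instead of reading all 178 MB into one string:

import sqlite3

def import_large_sql(db_path, sql_path):
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    buffer = ""
    with open(sql_path, 'r', encoding='utf-8') as f:
        for line in f:
            buffer += line
            # Once the buffered text ends in a complete statement,
            # execute it and start a new buffer; only a few lines are
            # ever held in memory at a time.
            if sqlite3.complete_statement(buffer):
                cursor.executescript(buffer)
                buffer = ""
    if buffer.strip():
        cursor.executescript(buffer)
    conn.commit()
    conn.close()

This trades speed for bounded memory use; wrapping batches of statements in explicit transactions would speed it up, but the point of the sketch is simply not to hold the whole file in memory.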

AWS Lambda RDS MySQL DB Connection InterfaceError

When I try to connect to AWS RDS (MySQL), most of the time I receive an InterfaceError. When I edit the Lambda code and re-run, it will work fine the first time, but then the same error occurs.
My code:
import sys
import logging
import pymysql
import json
import traceback

rds_host = "*****.rds.amazonaws.com"
name = "*****"
password = "****"
db_name = "myDB"

logger = logging.getLogger()
logger.setLevel(logging.INFO)

try:
    conn = pymysql.connect(rds_host, user=name, passwd=password, db=db_name, connect_timeout=5)
except:
    logger.error("ERROR: Unexpected error: Could not connect to MySql instance.")
    sys.exit()

logger.info("SUCCESS: Connection to RDS mysql instance succeeded")

def handler(event, context):
    sub = event['sub']
    username = event['username']
    givenname = event['givenname']
    isAdmin = event['isAdmin']
    print(sub)
    print(username)
    print(givenname)
    print(isAdmin)

    data = {}
    cur = conn.cursor()
    try:
        cmd = "SELECT AuthState FROM UserInfo WHERE UserName=" + "\'" + username + "\'"
        rowCnt = cur.execute(cmd)
        print(cmd)
    except:
        print("ERROR: DB Query Execution failed.")
        traceback.print_exc()
        data['errorMessage'] = 'Internal server error'
        response = {}
        response['statusCode'] = 500
        response['body'] = data
        return response

    if rowCnt <= 0:
        print(username)
        data['errorMessage'] = 'No User Name Found'
        response = {}
        response['statusCode'] = 400
        response['body'] = data
        conn.close()
        return response

    for row in cur:
        print(row[0])
        if int(row[0]) == 0:    # NOT_AUTHORIZED
            ret = "NOT_AUTHORIZED"
        elif int(row[0]) == 1:  # PENDING
            ret = "PENDING"
        elif int(row[0]) == 2:  # AUTHORIZED
            ret = "AUTHORIZED"
        else:                   # BLOCKED
            ret = "BLOCKED"

    data['state'] = ret
    response = {}
    response['statusCode'] = 200
    response['body'] = data
    conn.close()
    return response
The stacktrace:
Traceback (most recent call last):
  File "/var/task/app.py", line 37, in handler
  File "/var/task/pymysql/connections.py", line 851, in query
    self._execute_command(COMMAND.COM_QUERY, sql)
  File "/var/task/pymysql/connections.py", line 1067, in _execute_command
    raise err.InterfaceError("(0, '')")
InterfaceError: (0, '')
Read Understanding Container Reuse in Lambda.
It was written about Node but is just as accurate for Python.
Your code doesn't run from the top with each invocation. Sometimes it starts with the handler.
Why? It's faster.
How do you know when this will happen? You don't... except that each time you redeploy the function you will get a fresh container on the first invocation, because the old containers will have been abandoned by the redeploy.
If you're going to do your DB connection outside the handler, don't call conn.close(), because on the next invocation of the function, you might find your container is still alive, and the handler is invoked with an already-closed database handle.
You have to write Lambda functions so that they neither fail if a container is reused, nor fail if a container is not reused.
The simpler solution is to open the DB connection inside the handler. The more complex but more efficient solution (in terms of runtime) is to never close it, so that it can potentially be reused.
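As an illustration of the second approach, here is a sketch of my own (not the poster's code): keep the connection at module level but reconnect lazily inside the handler, using pymysql's conn.open flag to detect a dead connection in a reused container. The parameterized query is a side benefit over building the SQL string by hand.

import pymysql

rds_host = "*****.rds.amazonaws.com"   # same placeholders as in the question
name = "*****"
password = "****"
db_name = "myDB"

conn = None

def get_connection():
    """Reconnect lazily so a reused (warm) container gets a live connection."""
    global conn
    if conn is None or not conn.open:
        conn = pymysql.connect(host=rds_host, user=name, password=password,
                               db=db_name, connect_timeout=5)
    return conn

def handler(event, context):
    cur = get_connection().cursor()
    cur.execute("SELECT AuthState FROM UserInfo WHERE UserName = %s",
                (event['username'],))
    # ... build the response as before, but do not call conn.close()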

sqlite3.OperationalError: database is locked - How to avoid this?

I'm using an open source piece of Python code that basically pulls in the location of an entity and saves these details to a DB in real time; let's call it the scanner program. The DB file it saves to is an SQLite file: db.sqlite.
While this is happening, my code searches the DB file every 45 seconds, performing a SELECT statement to find a certain value. This works a few times, but after running concurrently with the scanner program for a couple of minutes it runs into a DB lock error:
sqlite3.OperationalError: database is locked
So what can I do in my code to ensure this lock does not happen? I cannot change how the scanner program accesses the DB, only my own program.
Any help here would be great. I've seen timeouts mentioned along with threading, but I am not sure about either.
from datetime import datetime
import sqlite3
import time
import json
import tweepy

def get_api(cfg):
    auth = tweepy.OAuthHandler(cfg['consumer_key'], cfg['consumer_secret'])
    auth.set_access_token(cfg['access_token'], cfg['access_token_secret'])
    return tweepy.API(auth)

# Fill in the values noted in previous step here
cfg = {
    "consumer_key": "X",
    "consumer_secret": "X",
    "access_token": "X",
    "access_token_secret": "X"
}

with open('locales/pokemon.en.json') as f:
    pokemon_names = json.load(f)

currentid = 1
pokemonid = 96  # test

while 1 == 1:
    conn = sqlite3.connect('db.sqlite')
    print "Opened database successfully"
    print "Scanning DB...."
    time.sleep(1)
    cur = conn.execute("SELECT * FROM sightings WHERE pokemon_id = ? and id > ?", (pokemonid, currentid))
    row = cur.fetchone()
    if row is None:
        print "No Pokemon Found \n "
        time.sleep(1)
    while row is not None:
        # get pokemon name
        name = pokemon_names[str(pokemonid)]
        # create expiry time
        datestr = datetime.fromtimestamp(row[3])
        dateoutput = datestr.strftime("%H:%M:%S")
        # create location
        location = "https://www.google.com/maps/place/%s,%s" % (row[5], row[6])
        # inform user
        print "%s found! - Building tweet! \n" % (name)
        time.sleep(1)
        # create tweet
        buildtweet = "a wild %s spawned in #Dublin - It will expire at %s. %s #PokemonGo \n " % (name, dateoutput, location)
        # print tweet
        # log
        print buildtweet
        currentid = row[0]
        time.sleep(1)
        # send tweet
        api = get_api(cfg)
        tweet = buildtweet
        try:
            status = api.update_status(status=tweet)
            print "sent!"
        except:
            pass
            print "this tweet failed \n"
            time.sleep(30)
        row = cur.fetchone()
    cur.close()
    conn.close()
    print "Waiting..... \n "
    time.sleep(45)
conn.close()
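One approach (a sketch of my own, not a posted answer): pass a timeout to sqlite3.connect() and keep the connection open only for the duration of the query, so the reader never holds the database while it sleeps or tweets. The timeout makes sqlite3 wait for the scanner's write lock to clear instead of raising "database is locked" immediately.

import sqlite3

def fetch_new_sightings(pokemon_id, last_id):
    # Wait up to 20 seconds for the scanner's lock to clear.
    conn = sqlite3.connect('db.sqlite', timeout=20)
    try:
        cur = conn.execute(
            "SELECT * FROM sightings WHERE pokemon_id = ? AND id > ?",
            (pokemon_id, last_id))
        return cur.fetchall()  # read everything up front, then release the lock
    finally:
        conn.close()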
