Python SQLite3 - cursor.execute - no error

This is a piece of code which needs to perform the following functionality:
Dump all table names in a database
From each table, search for a column with either Latitude or Longitude in the name
Store these co-ords as a JSON file
The code was tested and working on a single database. However, once it was put into another piece of code that calls it with different databases, it is no longer entering line 49. There is no error either, so I am struggling to see what the issue is, as I have not changed anything.
Code snippet (line 48 is the bottom line):
cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
print (cursor)
for tablerow in cursor.fetchall():
I am running this in the /tmp/ dir due to an earlier error with SQLite not working outside the temp directory.
Any questions please ask them.
Thanks!!
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
import os
import sys

filename = sys.argv[1]

def validateFile(filename):
    filename, fileExt = os.path.splitext(filename)
    print ("[Jconsole] Python: Filename being tested - " + filename)
    if fileExt == '.db':
        databases(filename)
    elif fileExt == '.json':
        jsons(fileExt)
    elif fileExt == '':
        blank()
    else:
        print ('Unsupported format')
        print (fileExt)

def validate(number):
    try:
        number = float(number)
        if -90 <= number <= 180:
            return True
        else:
            return False
    except ValueError:
        pass

def databases(filename):
    dbName = sys.argv[2]
    print (dbName)
    idCounter = 0
    mainList = []
    lat = 0
    lon = 0
    with sqlite3.connect(filename) as conn:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
        print (cursor)
        for tablerow in cursor.fetchall():
            print ("YAY1")
            table = tablerow[0]
            cursor.execute('SELECT * FROM {t}'.format(t=table))
            for row in cursor:
                print(row)
                print ("YAY")
                tempList = []
                for field in row.keys():
                    tempList.append(str(field))
                    tempList.append(str(row[field]))
                for i in tempList:
                    if i in ('latitude', 'Latitude'):
                        index = tempList.index(i)
                        if validate(tempList[index + 1]):
                            idCounter += 1
                            tempList.append(idCounter)
                            (current_item, next_item) = \
                                (tempList[index], tempList[index + 1])
                            lat = next_item
                    if i in ('longitude', 'Longitude'):
                        index = tempList.index(i)
                        if validate(tempList[index + 1]):
                            (current_item, next_item) = \
                                (tempList[index], tempList[index + 1])
                            lon = next_item
                result = '{ "id": ' + str(idCounter) \
                    + ', "content": "' + dbName + '", "title": "' \
                    + str(lat) + '", "className": "' + str(lon) \
                    + '", "type": "box"},'
                mainList.append(result)
    file = open('appData.json', 'a')
    for item in mainList:
        file.write('%s\n' % item)
    file.close()

# {
#     "id": 1,
#     "content": "<a class='thumbnail' href='./img/thumbs/thumb_IMG_20161102_151122.jpg'>IMG_20161102_151122.jpg</><span><img src='./img/thumbs/thumb_IMG_20161102_151122.jpg' border='0' /></span></a>",
#     "title": "50.7700721944444",
#     "className": "-0.8727045",
#     "start": "2016-11-02 15:11:22",
#     "type": "box"
# },

def jsons(filename):
    print ('JSON')

def blank():
    print ('blank')

validateFile(filename)

Fixed.
The issue was up here:
filename, fileExt = os.path.splitext(filename)
The filename variable was being overwritten without the file extension, so when SQLite went looking for the file it didn't find it.
Strangely, no error appeared, but it is fixed now by changing the filename var to filename1.
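Not from the original answer: a minimal sketch of the failure mode, with a hypothetical path, showing why no error ever appears:

import os
import sqlite3

path = '/tmp/places.db'   # hypothetical input path

# Bug: rebinding `filename` drops the extension, so the later
# sqlite3.connect(filename) call points at '/tmp/places', and
# connect() silently creates that file as a brand-new empty database.
# An empty sqlite_master means fetchall() returns no tables, so the
# loop body is never entered and no exception is raised.
filename, fileExt = os.path.splitext(path)
print(filename)           # /tmp/places

# Fix (as in the answer above): bind the stem to a new name so the
# original path survives for the connect call.
filename1, fileExt = os.path.splitext(path)
conn = sqlite3.connect(path)   # opens the existing database, tables intact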

Related

How to improve the performance of a Python script extracting a big (3-4 GB) Oracle table

I'm connecting to an Oracle database using a Python script and extracting around 10 tables. One table has 3 GB of data and took around 4 hours to extract and upload to S3 with the code below. How can I improve the performance of this script?
Would a different file format than CSV, such as Parquet, improve performance?
Any suggestions or solutions will be highly appreciated.
Below is the code I tried:
def extract_handler():
    # Parameters defined in cloudwatch event
    env = os.environ['Environment'] if 'Environment' in os.environ else 'sit'
    # FTP parameters
    host = f"/{env}/connet_HOSTNAME"
    username = f"/{env}/connect_USERNAME"
    password = f"/{env}/connect_PASSWORD"
    host = get_parameters(host)
    username = get_parameters(username)
    password = get_parameters(password)
    today = date.today()
    current_date = today.strftime("%Y%m%d")
    con = None
    cur = None
    tables = ["table1", "table2","table3"........."table10"]
    bucket = "bucket_name"
    for table in tables:
        try:
            con = cx_Oracle.connect(username, password, host, encoding="UTF-8")
            cur = con.cursor()
            logging.info('Successfully established the connection to Oracle db')
            table_name = table.split(".")[1]
            logging.info("######## Table name:" + table + " ###### ")
            logging.info("****** PROCESSING:" + table_name + " *********")
            cur.execute("SELECT count(*) FROM {}".format(table))
            count = cur.fetchone()[0]
            logging.info("Count: %s", count)
            if count > 0:
                cur1 = con.cursor()
                # Define the desired timestamp format
                timestamp_format = '%Y/%m/%d %H:%M:%S'
                # Execute a query to read a table
                cur1.execute("select * from {} where TRUNC(DWH_CREATED_ON)=TRUNC(SYSDATE)-1".format(table))
                batch_size = 10000
                rows = cur1.fetchmany(batch_size)
                csv_file = f"/tmp/{table_name}.csv"
                with open(csv_file, "w", newline="") as f:
                    # Add file_date column as the first column
                    writer = csv.DictWriter(f, fieldnames=['file_date'] + [col[0] for col in cur1.description],
                                            delimiter='\t')
                    writer.writeheader()
                logging.info("Header added to the table:" + table + "######")
                while rows:
                    for row in rows:
                        row_dict = {'file_date': current_date}
                        for i, col in enumerate(cur1.description):
                            if col[1] == cx_Oracle.DATETIME:
                                if row[i] is not None:
                                    row_dict[col[0]] = row[i].strftime(timestamp_format)
                                else:
                                    row_dict[col[0]] = ""
                            else:
                                row_dict[col[0]] = row[i]
                        with open(csv_file, "a", newline="") as f:
                            # Add file_date column as the first column
                            writer = csv.DictWriter(f, fieldnames=['file_date'] + [col[0] for col in cur1.description],
                                                    delimiter='\t')
                            writer.writerow(row_dict)
                    # Fetch the next batch of rows
                    rows = cur1.fetchmany(batch_size)
                logging.info("Records written to the temp file for the table :" + table + "######")
                s3_path = "NorthernRegion" + '/' + table_name + '/' + current_date + '/' + table_name + '.csv'
                s3_client = boto3.client('s3', region_name='region-central-1')
                s3_client.upload_file('/tmp/' + table_name + '.csv', bucket, s3_path)
                logging.info(table + "File uploaded to S3 ######")
            else:
                logging.info('Table not having data')
                return 'Data is not refreshed yet, Hence quitting..'
            if cur1:
                cur1.close()
        except Exception as err:
            # Handle or log other exceptions such as bucket doesn't exist
            logging.error(err)
        finally:
            if cur:
                cur.close()
            if con:
                con.close()
    return "Successfully processed"

How to run an .sql file full of commands as a command line argument to a python program?

I have a .sql file full of commands that I want to run through my python program. When I enter each line into the terminal individually, my program works as expected, as it has methods of parsing through the individual line entry to do what it needs to do. However, when I run the program (pa_2.py) by typing it into the terminal as such:
python3 pa_2.py < PA2_test.sql
the program doesn't read each line correctly and malfunctions, my guess being that it is unable to parse through the sql file correctly. The expected output is:
-- Database CS457_PA2 created.
-- Using database CS457_PA2.
-- Table Product created.
-- 1 new record inserted.
-- 1 new record inserted.
-- 1 new record inserted.
-- 1 new record inserted.
-- 1 new record inserted.
-- pid int|name varchar(20)|price float
-- 1|Gizmo|19.99
-- 2|PowerGizmo|29.99
-- 3|SingleTouch|149.99
-- 4|MultiTouch|199.99
-- 5|SuperGizmo|49.99
-- 1 record modified.
-- 2 records modified.
-- pid int|name varchar(20)|price float
-- 1|Gizmo|14.99
-- 2|PowerGizmo|29.99
-- 3|SingleTouch|149.99
-- 4|MultiTouch|199.99
-- 5|Gizmo|14.99
-- 2 records deleted.
-- 1 record deleted.
-- pid int|name varchar(20)|price float
-- 2|PowerGizmo|29.99
-- 3|SingleTouch|149.99
-- name varchar(20)|price float
-- SingleTouch|149.99
When I type each command as an individual line after typing in the terminal:
python3 pa_2.py
I get the expected output. However, when I run the script in the command line as:
python3 pa_2.py < PA2_test.sql
the output I get is:
created.CS457_PA2
because it does not exist.7_PA2
Created table Product.
1 new record inserted.
1 new record inserted.
1 new record inserted.
1 new record inserted.
1 new record inserted.
because it does not exist.uct
0 records modified.
0 records modified.
because it does not exist.uct
0 records modified.
Traceback (most recent call last):
File "/Users/isaac_reilly/Desktop/College/UNR/Classes/Year 3 2022-2023/Semester 1 2022/CS 457 Database Managemant Systems/Project 3/pa_2.py", line 79, in <module>
tablefunctions.deleteData(user_input, currentdb)
File "/Users/isaac_reilly/Desktop/College/UNR/Classes/Year 3 2022-2023/Semester 1 2022/CS 457 Database Managemant Systems/Project 3/tablefunctions.py", line 114, in deleteData
if float(splitter[4]) > float(searchText):
ValueError: could not convert string to float: '19.99)'
I want it to know that the end of each statement is a semicolon ";". How would I use PA2_test.sql as a command line argument and run each line as expected? Below is the .sql file, as well as the rest of my program (a sketch of one way to split the input on semicolons follows the listings).
PA2_test.sql:
CREATE DATABASE CS457_PA2;
USE CS457_PA2;
CREATE TABLE Product (pid int, name varchar(20), price float);
INSERT INTO Product values(1, 'Gizmo', 19.99);
INSERT INTO Product values(2, 'PowerGizmo', 29.99);
INSERT INTO Product values(3, 'SingleTouch', 149.99);
INSERT INTO Product values(4, 'MultiTouch', 199.99);
INSERT INTO Product values(5, 'SuperGizmo', 49.99);
SELECT * FROM Product;
UPDATE Product set name = 'Gizmo' where name = 'SuperGizmo';
UPDATE Product set price = 14.99 where name = 'Gizmo';
SELECT * FROM Product;
DELETE FROM Product where name = 'Gizmo';
DELETE FROM Product where price > 150;
SELECT * FROM Product;
SELECT name, price FROM Product where pid != 2;
.EXIT
pa_2.py:
import fileinput
import sys
import dbfunctions
import tablefunctions
import selections

currentdb = None
user_input = None
TableList = [None]

# Loop continuously prompts the terminal for an input from the user and then decides what to do based on input.
while (user_input != ".EXIT"):
    user_input = input()
    #print(user_input)
    # States that all commands must end with a ';' if user types invalid command
    if ";" not in user_input and user_input != ".EXIT":
        print("Invalid command, all commands must end with ';'")
    # Creates a database
    if "CREATE DATABASE" in user_input:
        dbName = dbfunctions.inputCleaner("CREATE DATABASE ", user_input)
        dbfunctions.create_db(dbName)
    # Deletes a database
    if "DROP DATABASE" in user_input:
        dbName = dbfunctions.inputCleaner("DROP DATABASE ", user_input)
        dbfunctions.remove_db(dbName)
    # Creates a table using attributes inputted by user
    if "CREATE TABLE" in user_input:
        tInput = dbfunctions.inputCleaner("CREATE TABLE ", user_input)
        tableName = tInput.split()[0]
        tablefunctions.createTable(tInput, tableName, currentdb)
    # Deletes a table
    if "DROP TABLE" in user_input:
        tableName = dbfunctions.inputCleaner("DROP TABLE ", user_input)
        tablefunctions.dropTable(tableName, currentdb)
    # Modifies a table using attributes inputted by the user
    if "ALTER TABLE" in user_input:
        rawInput = dbfunctions.inputCleaner("ALTER TABLE ", user_input)
        tablefunctions.alterTable(rawInput, currentdb)
    # Sets current working database
    if "USE" in user_input:
        dbName = dbfunctions.inputCleaner("USE ", user_input)
        #print(dbName)
        currentdb = dbName
        dbfunctions.finddb(currentdb)
        #print("Using database " + currentdb)
        #elif dbfunctions.finddb(currentdb) == 0:
        #    print("Unable to use database " + dbName + " because it does not exist.")
    # Selects data from a user specified table and prints contents to terminal
    if "SELECT" in user_input:
        selections.selectSpecified(user_input, currentdb)
    # Inserts given data into a specified table
    if "INSERT INTO" in user_input:
        dataInput = dbfunctions.inputCleaner("INSERT INTO ", user_input)
        tableName = dataInput.split()[0]
        tablefunctions.insertData(dataInput, tableName, currentdb)
    # Changes data in table as specified
    if "UPDATE" in user_input:
        tablefunctions.updateData(user_input, currentdb)
    # Deletes data from table as specified
    if "DELETE FROM" in user_input:
        tablefunctions.deleteData(user_input, currentdb)
dbfunctions.py:
import os
import subprocess
import shlex
import shutil

# Removes semicolon and given phrase from input
def inputCleaner(removePhrase, input):
    cleaned = input.replace(";", "")
    return cleaned.replace(removePhrase, "")

# Function used to create specified database (local directory)
def create_db(dbName):
    try:
        # Tries making directory
        os.makedirs(dbName)
        print("Database " + dbName + " created.")
    except FileExistsError:
        # Checks to see if directory already exists, throws exception if it does
        print("!Failed to create database " + dbName + " because it already exists.")

# Function used to remove specified database (local directory)
def remove_db(dbName):
    # Checks to see if specified directory exists and deletes if it does
    if os.path.exists(dbName):
        shutil.rmtree(dbName)
        print("Database " + dbName + " deleted.")
    # If selected directory does not exist, prints an error message to the screen
    else:
        print("!Failed to delete " + dbName + " because it does not exist.")

# Checks to make sure that specified database exists
def finddb(dbName):
    if dbName in subprocess.run(['ls', '|', 'grep', dbName], capture_output=True, text=True).stdout:
        print("Using database " + dbName)
    else:
        print("Unable to use database", dbName, "because it does not exist.")

def getOperand(op):
    operand = None
    if (op == '='):
        operand = 0
    elif (op == '!='):
        operand = -3
    elif (op == '<'):
        operand = -1
    elif (op == '>'):
        operand = 1
    return operand
tablefunctions.py:
import subprocess
import os

# Checks to make sure that specified table exists
def findtable(tableName, currentdb):
    if tableName in subprocess.run(['ls', currentdb, '|', 'grep', tableName], capture_output=True, text=True).stdout:
        return 1
    else:
        return 0

# Creates table with specified headers
def createTable(dataInput, tableName, currentdb):
    unformattedAttributes = dataInput.replace(tableName, "")
    tableAttributes1 = unformattedAttributes[2:]
    tableAttributes2 = tableAttributes1[:-1]
    formattedAttributes = tableAttributes2.split(",")
    if (currentdb != None):
        if findtable(tableName, currentdb) == 0:
            os.system(f'touch {currentdb}/{tableName}.txt')
            filename = currentdb + '/' + tableName + '.txt'
            fedit = open(filename, 'w')
            fedit.write(" |".join(formattedAttributes))
            fedit.close()
            print(f"Created table {tableName}.")
        else:
            print("!Failed to create table " + tableName + " because it already exists.")
    else:
        print("Please specify which database to use.")

# Deletes specified table
def dropTable(tableName, currentdb):
    if (currentdb != None):
        if findtable(tableName, currentdb) != 0:
            os.system(f'rm {currentdb}/{tableName}.txt')
            print("Table " + tableName + " deleted.")
        else:
            print("!Failed to delete " + tableName + " because it does not exist.")
    else:
        print("No specified database, enter 'USE <database_name>;'")

# Inserts data into specified table
def insertData(dataInput, tableName, currentdb):
    unformattedInput = dataInput.replace(tableName, "")
    cleanedInput1 = unformattedInput.replace("'", "")
    cleanedInput2 = cleanedInput1.replace(" ", "")
    unformattedAttributes = cleanedInput2[7:-1]
    formattedAttributes = unformattedAttributes.split(",")
    if (currentdb != None):
        if findtable(tableName, currentdb):
            fedit = open(f'{currentdb}/{tableName}.txt', 'a')
            fedit.write("\n" + " | ".join(formattedAttributes))
            fedit.close()
            print("1 new record inserted.")
        else:
            print("!Failed to insert data into " + tableName + " because it does not exist.")
    else:
        print("No specified database, enter 'USE <database_name>;'")

# Modifies a table using attributes inputted by the user
def alterTable(rawInput, currentdb):
    tableName = rawInput.split()[0]
    alterCmd = rawInput.split()[1]
    alterAttribute1 = rawInput.replace(tableName, "")
    alterAttribute2 = alterAttribute1.replace(alterCmd, "")
    newAttr = alterAttribute2[2:]
    if (currentdb != None):
        if findtable(tableName, currentdb):
            fedit = open(f'{currentdb}/{tableName}.txt', 'a')
            fedit.write(f" | {newAttr}")
            fedit.close()
            print("Table " + tableName + " modified.")
        else:
            print("!Failed to modify " + tableName + " because it does not exist.")
    else:
        print("No specified database, enter 'USE <database_name>;'")

# Removes data from specified table
def deleteData(user_input, currentdb):
    if (currentdb != None):
        cleanedInput1 = user_input[12:-1]
        cleanedInput2 = cleanedInput1.replace("'", "")  # Cleans input
        tableName = cleanedInput2.split()[0]
        if findtable(tableName, currentdb) != 0:
            replaceText = ""
            searchText = cleanedInput2.split()[4]
            searchCategory = cleanedInput2.split()[2]
            with open(f'{currentdb}/{tableName}.txt', 'r') as file:
                count = 0
                replacement = ""
                if cleanedInput2.split()[3] == "=":
                    # Loops line by line for keywords
                    for line in file:
                        line = line.strip()
                        splitter = line.split()  # Puts line into list elements
                        if searchText == splitter[2]:  # If element matches search text
                            updatedLine = ""  # delete it
                            count += 1  # Keeps track of number of edits
                        else:
                            updatedLine = line + "\n"  # Keeps line unchanged if specified data is not present
                        replacement = replacement + updatedLine
                if cleanedInput2.split()[3] == ">":
                    lineCount = 0
                    for line in file:
                        line = line.strip()
                        splitter = line.split()
                        if lineCount == 0:
                            lineCount += 1
                            updatedLine = line + "\n"
                        else:
                            if float(splitter[4]) > float(searchText):
                                updatedLine = ""
                                count += 1
                            else:
                                updatedLine = line + "\n"
                        replacement = replacement + updatedLine
                if cleanedInput2.split()[3] == "<":
                    lineCount = 0
                    for line in file:
                        line = line.strip()
                        splitter = line.split()
                        if lineCount == 0:
                            lineCount += 1
                            updatedLine = line + "\n"
                        else:
                            if float(splitter[4]) < float(searchText):
                                updatedLine = ""
                                count += 1
                            else:
                                updatedLine = line + "\n"
                        replacement = replacement + updatedLine
                file.close()
            with open(f'{currentdb}/{tableName}.txt', 'w') as file:
                file.write(replacement)
                file.close()
            if count == 1:
                print(str(count) + " record modified.")
            else:
                print(str(count) + " records modified.")
        else:
            print("!Failed to update " + tableName + " table because it does not exist.")
    else:
        print("No specified database, enter 'USE <database_name>;'")

def updateData(user_input, currentdb):
    if (currentdb != None):
        cleanedInput1 = user_input[7:-1]
        cleanedInput2 = cleanedInput1.replace("'", "")
        tableName = cleanedInput2.split()[0]
        if findtable(tableName, currentdb) != 0:
            replaceText = cleanedInput2.split()[4]
            searchText = cleanedInput2.split()[8]
            replaceCategory = cleanedInput2.split()[2]
            searchCategory = cleanedInput2.split()[6]
            with open(f'{currentdb}/{tableName}.txt', 'r') as file:
                count = 0
                replacement = ""
                if (replaceCategory == searchCategory):  # if both columns being referenced are the same
                    for line in file:
                        line = line.strip()
                        if searchText in line:
                            updatedLine = line.replace(searchText, replaceText)
                            count += 1
                        else:
                            updatedLine = line
                        replacement = replacement + updatedLine + "\n"
                else:
                    for line in file:
                        splitter = line.split()
                        splitter[4] = replaceText
                        line = line.strip()
                        if searchText == splitter[2]:
                            updatedLine = " ".join(splitter)
                            count += 1
                        else:
                            updatedLine = line
                        replacement = replacement + updatedLine + "\n"
                file.close()
            with open(f'{currentdb}/{tableName}.txt', 'w') as file:
                file.write(replacement)
                file.close()
            if count == 1:
                print(str(count) + " record modified.")
            else:
                print(str(count) + " records modified.")
        else:
            print("!Failed to update " + tableName + " table because it does not exist.")
    else:
        print("No specified database, enter 'USE <database_name>;'")
selections.py:
import tablefunctions
import dbfunctions

def selectAll(tableName, currentdb):
    if currentdb == None:
        print("No specified database, enter 'USE <database_name>;'")
    else:
        if tablefunctions.findtable(tableName, currentdb):
            fedit = open(f'{currentdb}/{tableName}.txt', 'r')
            print(fedit.read())
            fedit.close()
        else:
            print("!Failed to query table " + tableName + " because it does not exist.")

def selectSpecified(user_input, currentdb):
    if "SELECT * FROM" in user_input:
        tableName = dbfunctions.inputCleaner("SELECT * FROM ", user_input)
        selectAll(tableName, currentdb)
    else:
        if "SELECT" in user_input:
            selLower = user_input[7:-1]
            selection = user_input[7:-1]
        elif "select" in user_input:
            selection = user_input[7:-1]
        # Gathering list of variables
        selectColumns = selection.replace(",", "").split()
        selectColumns = selectColumns[:selectColumns.index("FROM")]
        # Table name
        tableName = selection.split()[len(selectColumns) + 1]
        # Gathering what to filter by
        whereColumn = selection.split()[len(selectColumns) + 3]
        whereRecord = selection.split()[len(selectColumns) + 5]
        operand = dbfunctions.getOperand(selection.split()[len(selectColumns) + 4])
        if currentdb != None:
            if tablefunctions.findtable(tableName, currentdb):
                f = open(f'{currentdb}/{tableName}.txt', 'r')
                file = f.readlines()
                f.close()
                selectColumnNums = []
                columnNameString = ""
                listToReturn = []
                count = 0
                for line in file:
                    if (count == 0):  # Headers
                        # Finding the indexes of select and where columns
                        columnList = line.split()
                        columnListWithTypes = columnList.copy()
                        del columnListWithTypes[2::3]
                        del columnList[1::3]
                        columnCount = 0
                        # If variable is found in table, record its index
                        for word in columnList:
                            if word in selectColumns:
                                selectColumnNums.append(columnCount)
                            if (word == whereColumn):
                                whereColumnNum = columnCount
                            columnCount += 1
                        # Creating a custom table header for the selected columns
                        for index in selectColumnNums:
                            columnNameString += f"{columnListWithTypes[index]} {columnListWithTypes[index+1]} | "
                        queryHeader = columnNameString[:-3]
                        listToReturn.append(queryHeader)
                    if (count > 0):  # Values
                        tupleDetails = line.split()
                        # Determines what to do with each row
                        def querySpecificHelper():
                            # Creates the row output
                            def queryStringMaker():
                                queryString = ""
                                for index in selectColumnNums:
                                    queryString += f"{tupleDetails[index]} | "
                                queryResult = queryString[:-3]
                                listToReturn.append(queryResult)
                            if (operand == 0):  # Equality
                                # The type checking here handles strings and numbers separately
                                # Ex. 150 or 150.00 would not find 150.00 or 150, respectively
                                if (type(tupleDetails[whereColumnNum]) is str):
                                    if (tupleDetails[whereColumnNum] == whereRecord):
                                        queryStringMaker()
                                elif (type(tupleDetails[whereColumnNum]) is not str):
                                    if (float(tupleDetails[whereColumnNum]) == float(whereRecord)):
                                        queryStringMaker()
                            elif (operand == 1):  # Greater than
                                if (float(tupleDetails[whereColumnNum]) > float(whereRecord)):
                                    queryStringMaker()
                            elif (operand == -1):  # Less than
                                if (float(tupleDetails[whereColumnNum]) < float(whereRecord)):
                                    queryStringMaker()
                            elif (operand == -3):  # Inequality
                                if (type(tupleDetails[whereColumnNum]) is str):
                                    if (tupleDetails[whereColumnNum] != whereRecord):
                                        queryStringMaker()
                                elif (type(tupleDetails[whereColumnNum]) is not str):
                                    if (float(tupleDetails[whereColumnNum]) != float(whereRecord)):
                                        queryStringMaker()
                        querySpecificHelper()
                    count += 1
                for line in listToReturn:  # Prints table
                    print(line)
            else:
                print(f"Could not query table {tableName} because it does not exist.")
        else:
            print("Please specify which database to use.")

How to retrieve column values by column name in Python with cx_Oracle

I'm programming a script that connects to an Oracle database and gets the results into a log file. I want to get output like this:
FEC_INCLUSION = 2005-08-31 11:43:48,DEBITO_PENDIENTE = None,CAN_CUOTAS = 1.75e-05,COD_CUENTA = 67084,INT_TOTAL = None,CAN_CUOTAS_ANTERIOR = None,COD_INVERSION = 1,FEC_MODIFICACION = 10/04/2012 09:45:22,SAL_TOT_ANTERIOR = None,CUOTA_COMISION = None,FEC_ULT_CALCULO = None,MODIFICADO_POR = CTAPELA,SAL_TOTAL = 0.15,COD_TIPSALDO = 1,MONTO_COMISION = None,COD_EMPRESA = 1,SAL_INFORMATIVO = None,COD_OBJETIVO = 5,SAL_RESERVA = None,INCLUIDO_POR = PVOROPE,APORTE_PROM = 0.0,COSTO_PROM = None,CREDITO_PENDIENTE = None,SAL_PROM = 0.0,
FEC_INCLUSION = 2005-08-31 11:43:49,DEBITO_PENDIENTE = None,CAN_CUOTAS = 0.0,COD_CUENTA = 67086,INT_TOTAL = None,CAN_CUOTAS_ANTERIOR = None,COD_INVERSION = 9,FEC_MODIFICACION = 25/02/2011 04:38:52,SAL_TOT_ANTERIOR = None,CUOTA_COMISION = None,FEC_ULT_CALCULO = None,MODIFICADO_POR = OPEJAMO,SAL_TOTAL = 0.0,COD_TIPSALDO = 1,MONTO_COMISION = None,COD_EMPRESA = 1,SAL_INFORMATIVO = None,COD_OBJETIVO = 5,SAL_RESERVA = None,INCLUIDO_POR = PVOROPE,APORTE_PROM = 0.0,COSTO_PROM = None,CREDITO_PENDIENTE = None,SAL_PROM = 0.0,
I created a dictionary with the query results:
def DictFactory(description, data):
    column_names = [col[0] for col in description]
    results = []
    for row in data:
        results.append(dict(zip(column_names, row)))
    return results
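As an aside (not from the original post): cx_Oracle can build these dictionaries itself through Cursor.rowfactory, which avoids materialising the whole result list up front:

cursor.execute(sql)
columns = [col[0] for col in cursor.description]
cursor.rowfactory = lambda *args: dict(zip(columns, args))
for row in cursor:              # each row arrives as a dict keyed by column name
    print row['COD_CUENTA']     # column name taken from the sample output above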
Then I created this function, which finally saves the results into my log:
def WriteLog(log_file, header, data):
    file_exist = os.path.isfile(log_file)
    log = open(log_file, 'a')
    if not file_exist:
        print "File does not exist, writing new log file"
        open(log_file, 'w').close()
    mydata = DictFactory(header, data)
    checkpoint_name = ReadCheckpointName()
    string = ''
    for m in mydata:
        for k, v in m.items():
            string = string + k + ' = ' + str(v) + ','
            if k == checkpoint_name:
                #print "KEY FOUND"
                cur_checkpoint = v
                cur_checkpoint = str(cur_checkpoint)
        #print string
        string = string + '\n'
    print cur_checkpoint
    log.write(string + '\n')
    WriteCheckpoint(cur_checkpoint, checkpoint_file)
    log.close()
This is the main function:
def GetInfo():
    mypool = PoolToDB()
    con = mypool.acquire()
    cursor = con.cursor()
    GetLastCheckpoint()
    sql = ReadQuery()
    #print sql
    cursor.execute(sql)
    data = cursor.fetchall()
    WriteLog(log_file, cursor.description, data)
    #WriteCsvLog(log_file,cursor.description,data)
    cursor.close()
But I realized that it works if I use a query that fetches few records; however, if I try to fetch many records, my script never ends.
This is my output when I executed a query with 5000 records. As you can see it takes too long.
[jballesteros@SplunkPorvenir FO_TIPSALDOS_X_CUENTA]$ python db_execution.py
Starting connection: 5636
GetLastCheckpoint function took 0.073 ms
GetLastCheckpoint function took 0.025 ms
ReadQuery function took 0.084 ms
File does not exist, writing new log file
DictFactory function took 23.050 ms
ReadCheckpointName function took 0.079 ms
WriteCheckpoint function took 0.204 ms
WriteLog function took 45112.133 ms
GetInfo function took 46193.033 ms
I'm pretty sure you know a much better way to do what I am trying to do.
This is the complete code:
#!/usr/bin/env python
# encoding: utf-8
import re
import sys
try:
    import cx_Oracle
except:
    print "Error: Oracle module required to run this plugin."
    sys.exit(0)
import datetime
import re
import commands
import os
from optparse import OptionParser
import csv
import time

#################################
####   Database Variables    ####
#################################
Config = {
    "host"     : "",
    "user"     : "",
    "password" : "",
    "instance" : "",
    "port"     : "",
}
Query = {
    "sql"                 : "",
    "checkpoint_datetype" : "",
    "checkpoint_name"     : "",
}
dir = '/home/jballesteros/PENS2000/FO_TIPSALDOS_X_CUENTA/'
connection_dir = '/home/jballesteros/PENS2000/Connection'
checkpoint_file = dir + 'checkpoint.conf'
log_file = '/var/log/Pens2000/FO_TIPSALDOS_X_CUENTA.csv'
internal_log = '/var/log/Pens2000/internal.log'
query = dir + 'query'
sys.path.append(os.path.abspath(connection_dir))
from db_connect_pool import *

def Timing(f):
    def wrap(*args):
        time1 = time.time()
        ret = f(*args)
        time2 = time.time()
        print "%s function took %0.3f ms" % (f.func_name, (time2 - time1) * 1000.0)
        return ret
    return wrap

@Timing
def InternalLogWriter(message):
    now = datetime.datetime.now()
    log = open(internal_log, 'a')
    log.write("%s ==> %s" % (now.strftime("%Y-%m-%d %H:%M:%S"), message))
    log.close()
    return

@Timing
def GetLastCheckpoint():
    global cur_checkpoint
    conf = open(checkpoint_file, 'r')
    cur_checkpoint = conf.readline()
    cur_checkpoint = cur_checkpoint.rstrip('\n')
    cur_checkpoint = cur_checkpoint.rstrip('\r')
    conf.close()

@Timing
def ReadQuery():
    global cur_checkpoint
    GetLastCheckpoint()
    qr = open(query, 'r')
    line = qr.readline()
    line = line.rstrip('\n')
    line = line.rstrip('\r')
    Query["sql"], Query["checkpoint_datetype"], Query["checkpoint_name"] = line.split(";")
    sql = Query["sql"]
    checkpoint_datetype = Query["checkpoint_datetype"]
    checkpoint_name = Query["checkpoint_name"]
    if (checkpoint_datetype == "DATETIME"):
        sql = sql + " AND " + checkpoint_name + " >= " + "TO_DATE('%s','YYYY-MM-DD HH24:MI:SS') ORDER BY %s" % (cur_checkpoint, checkpoint_name)
    if (checkpoint_datetype == "NUMBER"):
        sql = sql + " AND " + checkpoint_name + " > " + "%s ORDER BY %s" % (cur_checkpoint, checkpoint_name)
    qr.close()
    return str(sql)

@Timing
def ReadCheckpointName():
    qr = open(query, 'r')
    line = qr.readline()
    line = line.rstrip('\n')
    line = line.rstrip('\r')
    Query["sql"], Query["checkpoint_datetype"], Query["checkpoint_name"] = line.split(";")
    checkpoint_name = Query["checkpoint_name"]
    return str(checkpoint_name)

@Timing
def LocateCheckPoint(description):
    checkpoint_name = ReadCheckpointName()
    #print checkpoint_name
    #print description
    startcounter = 0
    finalcounter = 0
    flag = 0
    for d in description:
        prog = re.compile(checkpoint_name)
        result = prog.match(d[0])
        startcounter = startcounter + 1
        if result:
            finalcounter = startcounter - 1
            counterstr = str(finalcounter)
            print "Checkpoint found in the array position number: " + counterstr
            flag = 1
    if (flag == 0):
        print "Checkpoint was not found"
    return finalcounter

@Timing
def DictFactory(description, data):
    column_names = [col[0] for col in description]
    results = []
    for row in data:
        results.append(dict(zip(column_names, row)))
    return results

@Timing
def WriteCsvLog(log_file, header, data):
    checkpoint_index = LocateCheckPoint(header)
    file_exists = os.path.isfile(log_file)
    with open(log_file, 'ab') as csv_file:
        headers = [i[0] for i in header]
        csv_writer = csv.writer(csv_file, delimiter='|')
        if not file_exists:
            print "File does not exist, writing new CSV file"
            csv_writer.writerow(headers)  # Writing headers once
        for d in data:
            csv_writer.writerow(d)
            cur_checkpoint = d[checkpoint_index]
            cur_checkpoint = str(cur_checkpoint)
        WriteCheckpoint(cur_checkpoint, checkpoint_file)
    csv_file.close()

@Timing
def WriteLog(log_file, header, data):
    file_exist = os.path.isfile(log_file)
    log = open(log_file, 'a')
    if not file_exist:
        print "File does not exist, writing new log file"
        open(log_file, 'w').close()
    mydata = DictFactory(header, data)
    checkpoint_name = ReadCheckpointName()
    string = ''
    for m in mydata:
        for k, v in m.items():
            string = string + k + ' = ' + str(v) + ','
            if k == checkpoint_name:
                #print "KEY FOUND"
                cur_checkpoint = v
                cur_checkpoint = str(cur_checkpoint)
        #print string
        string = string + '\n'
    print cur_checkpoint
    log.write(string + '\n')
    WriteCheckpoint(cur_checkpoint, checkpoint_file)
    log.close()

@Timing
def WriteCheckpoint(cur_checkpoint, conf_file):
    conf = open(conf_file, 'w')
    conf.write(cur_checkpoint)
    conf.close()

@Timing
def GetInfo():
    mypool = PoolToDB()
    con = mypool.acquire()
    cursor = con.cursor()
    GetLastCheckpoint()
    sql = ReadQuery()
    #print sql
    cursor.execute(sql)
    data = cursor.fetchall()
    WriteLog(log_file, cursor.description, data)
    #WriteCsvLog(log_file,cursor.description,data)
    cursor.close()

def __main__():
    parser = OptionParser()
    parser.add_option("-c", "--change-password", dest="pass_to_change", help="Change the password for database connection", metavar="1")
    (options, args) = parser.parse_args()
    if (options.pass_to_change):
        UpdatePassword()
    else:
        GetInfo()

__main__()
This is a query sample:
SELECT COD_EMPRESA, COD_TIPSALDO, COD_INVERSION, COD_CUENTA, COD_OBJETIVO, CAN_CUOTAS, SAL_TOTAL, INT_TOTAL, SAL_RESERVA, APORTE_PROM, SAL_PROM, COSTO_PROM, SAL_TOT_ANTERIOR, FEC_ULT_CALCULO, INCLUIDO_POR, FEC_INCLUSION, MODIFICADO_POR, TO_CHAR(FEC_MODIFICACION,'DD/MM/YYYY HH24:MI:SS') AS FEC_MODIFICACION, CUOTA_COMISION, MONTO_COMISION, SAL_INFORMATIVO, CREDITO_PENDIENTE, DEBITO_PENDIENTE, CAN_CUOTAS_ANTERIOR FROM FO.FO_TIPSALDOS_X_CUENTA WHERE ROWNUM <=100000 AND FEC_INCLUSION >= TO_DATE('2005-08-31 11:43:49','YYYY-MM-DD HH24:MI:SS') ORDER BY FEC_INCLUSION
PS: I've really been searching Google and this forum for my question, but I haven't found anything similar.
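The slow part is likely WriteLog rather than the query: fetchall() pulls every row into memory, and string = string + ... rebuilds an ever-growing string once per column of every row. A sketch of a streaming alternative (not the original author's code; it reuses the names WriteCheckpoint and checkpoint_file from above) that joins each batch and writes it immediately:

def WriteLogStreaming(log_file, header, cursor, checkpoint_name, batch_size=1000):
    column_names = [col[0] for col in header]
    cur_checkpoint = None
    log = open(log_file, 'a')
    while True:
        rows = cursor.fetchmany(batch_size)   # bounded memory per batch
        if not rows:
            break
        chunk = []
        for row in rows:
            pairs = []
            for k, v in zip(column_names, row):
                pairs.append('%s = %s' % (k, v))
                if k == checkpoint_name:
                    cur_checkpoint = str(v)
            chunk.append(','.join(pairs) + ',')
        log.write('\n'.join(chunk) + '\n')    # one write per batch
    log.close()
    if cur_checkpoint is not None:
        WriteCheckpoint(cur_checkpoint, checkpoint_file)

','.join() builds each line in one pass instead of repeatedly concatenating onto a growing string, which is the likely source of the 45-second WriteLog timing above.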

Need help to improve performance of my python code

Hello to all passionate programmers out there. I need your help with my code.
My Goal: To efficiently move data from Amazon S3 to Amazon Redshift.
Basically I am moving all CSV files on my S3 to Redshift using the code below. I parse through part of the file, build a table structure, and then use the COPY command to load the data into Redshift.
'''
Created on Feb 25, 2015

@author: Siddartha.Reddy
'''
import sys
from boto.s3 import connect_to_region
from boto.s3.connection import Location
import csv
import itertools
import psycopg2

''' ARGUMENTS TO PASS '''
AWS_KEY = sys.argv[1]
AWS_SECRET_KEY = sys.argv[2]
S3_DOWNLOAD_PATH = sys.argv[3]
REDSHIFT_SCHEMA = sys.argv[4]
TABLE_NAME = sys.argv[5]
UTILS = S3_DOWNLOAD_PATH.split('/')

class UTIL():
    global UTILS
    def bucket_name(self):
        self.BUCKET_NAME = UTILS[0]
        return self.BUCKET_NAME
    def path(self):
        self.PATH = ''
        offset = 0
        for value in UTILS:
            if offset == 0:
                offset += 1
            else:
                self.PATH = self.PATH + value + '/'
        return self.PATH[:-1]

def GETDATAINMEMORY():
    conn = connect_to_region(Location.USWest2, aws_access_key_id=AWS_KEY,
                             aws_secret_access_key=AWS_SECRET_KEY,
                             is_secure=False, host='s3-us-west-2.amazonaws.com')
    ut = UTIL()
    BUCKET_NAME = ut.bucket_name()
    PATH = ut.path()
    filelist = conn.lookup(BUCKET_NAME)
    ''' Fetch part of the data from S3 '''
    for path in filelist:
        if PATH in path.name:
            DATA = path.get_contents_as_string(headers={'Range': 'bytes=%s-%s' % (0, 100000000)})
    return DATA

def TRAVERSEDATA():
    DATA = GETDATAINMEMORY()
    CREATE_TABLE_QUERY = 'CREATE TABLE ' + REDSHIFT_SCHEMA + '.' + TABLE_NAME + '( '
    JUNKED_OUT = DATA[3:]
    PROCESSED_DATA = JUNKED_OUT.split('\n')
    CSV_DATA = csv.reader(PROCESSED_DATA, delimiter=',')
    COUNTER, STRING, NUMBER = 0, 0, 0
    COLUMN_TYPE = []
    ''' GET COLUMN NAMES AND COUNT '''
    for line in CSV_DATA:
        NUMBER_OF_COLUMNS = len(line)
        COLUMN_NAMES = line
        break
    ''' PROCESS COLUMN NAMES '''
    a = 0
    for REMOVESPACE in COLUMN_NAMES:
        TEMPHOLDER = REMOVESPACE.split(' ')
        temp1 = ''
        for x in TEMPHOLDER:
            temp1 = temp1 + x
        COLUMN_NAMES[a] = temp1
        a = a + 1
    ''' GET COLUMN DATA TYPES '''
    # print(NUMBER_OF_COLUMNS,COLUMN_NAMES,COUNTER)
    i, j, a = 0, 500, 0
    while COUNTER < NUMBER_OF_COLUMNS:
        for COLUMN in itertools.islice(CSV_DATA, i, j + 1):
            if COLUMN[COUNTER].isdigit():
                NUMBER = NUMBER + 1
            else:
                STRING = STRING + 1
        if NUMBER == 501:
            COLUMN_TYPE.append('INTEGER')
            # print('I CAME IN')
            NUMBER = 0
        else:
            COLUMN_TYPE.append('VARCHAR(2500)')
            STRING = 0
        COUNTER = COUNTER + 1
        # print(COUNTER)
    COUNTER = 0
    ''' BUILD SCHEMA '''
    while COUNTER < NUMBER_OF_COLUMNS:
        if COUNTER == 0:
            CREATE_TABLE_QUERY = CREATE_TABLE_QUERY + COLUMN_NAMES[COUNTER] + ' ' + COLUMN_TYPE[COUNTER] + ' NOT NULL,'
        else:
            CREATE_TABLE_QUERY = CREATE_TABLE_QUERY + COLUMN_NAMES[COUNTER] + ' ' + COLUMN_TYPE[COUNTER] + ' ,'
        COUNTER += 1
    CREATE_TABLE_QUERY = CREATE_TABLE_QUERY[:-2] + ')'
    return CREATE_TABLE_QUERY

def COPY_COMMAND():
    S3_PATH = 's3://' + S3_DOWNLOAD_PATH
    COPY_COMMAND = "COPY " + REDSHIFT_SCHEMA + "." + TABLE_NAME + " from '" + S3_PATH + "' credentials 'aws_access_key_id=" + AWS_KEY + ";aws_secret_access_key=" + AWS_SECRET_KEY + "' REGION 'us-west-2' csv delimiter ',' ignoreheader as 1 TRIMBLANKS maxerror as 500"
    return COPY_COMMAND

def S3TOREDSHIFT():
    conn = psycopg2.connect("dbname='xxx' port='5439' user='xxx' host='xxxxxx' password='xxxxx'")
    cursor = conn.cursor()
    cursor.execute('DROP TABLE IF EXISTS ' + REDSHIFT_SCHEMA + "." + TABLE_NAME)
    SCHEMA = TRAVERSEDATA()
    print(SCHEMA)
    cursor.execute(SCHEMA)
    COPY = COPY_COMMAND()
    print(COPY)
    cursor.execute(COPY)
    conn.commit()

S3TOREDSHIFT()
Current challenges, all around creating the table structure:
Field lengths: right now I am just hardcoding the VARCHAR fields to 2500. All my files are > 30 GB, and parsing through the whole file to calculate the length of each field takes a lot of processing time.
Determining if a column is null: I am simply hardcoding the first column to NOT NULL using the COUNTER variable (all my files have ID as the first column). I would like to know if there is a better way of doing it.
Is there any data structure I can use? I am always interested in learning new ways to improve the performance; if you have any suggestions, please feel free to comment.
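One common approach (a sketch, not from the original post) is to infer both type and a padded maximum length from a bounded sample instead of scanning the whole file; sample_rows and the pad factor are arbitrary knobs:

import csv
import itertools

def infer_schema(csv_path, sample_rows=1000, pad=1.5):
    # Guess column types and VARCHAR lengths from the first sample_rows rows.
    with open(csv_path, newline='') as f:
        reader = csv.reader(f)
        headers = next(reader)
        max_len = [1] * len(headers)
        is_int = [True] * len(headers)
        for row in itertools.islice(reader, sample_rows):
            for i, value in enumerate(row):
                max_len[i] = max(max_len[i], len(value))
                if is_int[i] and not value.lstrip('-').isdigit():
                    is_int[i] = False
        columns = []
        for i, name in enumerate(headers):
            if is_int[i]:
                columns.append('%s INTEGER' % name)
            else:
                columns.append('%s VARCHAR(%d)' % (name, int(max_len[i] * pad)))
        return columns

# e.g. 'CREATE TABLE schema.table (' + ', '.join(infer_schema('/tmp/sample.csv')) + ')'

Sampling can under-estimate lengths, so the padding (plus COPY options such as TRUNCATECOLUMNS, or the maxerror setting already used above) acts as the backstop.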

separate line output by groups

My Python script checks mysqldump files, and if there are any problems the script prints:
Dump is old for db;
Dump is not complete for db;
Dump is empty for db;
MySQL dump does not exist for db;
The script logs these records to the file line by line.
My question: is there a way to group the output in the file like this?
Dump is old for db;
Dump is old for db;
Dump is old for db;
Dump is not complete for db;
Dump is not complete for db;
Dump is not complete for db;
Dump is empty for db;
Dump is empty for db;
Dump is empty for db;
Because now my file looks like:
Dump is old for db;
Dump is empty for db;
Dump is old for db;
MySQL dump does not exist for db;
...
etc
Here is my small script :)
#!/bin/env python
import psycopg2
import sys, os
from subprocess import Popen, PIPE
from datetime import datetime
import smtplib

con = None
today = datetime.now().strftime("%Y-%m-%d")
log_dump_fail = '/tmp/mysqldump_FAIL'
log_fail = open(log_dump_fail, 'w').close()
log_fail = open(log_dump_fail, 'a')
sender = 'PUT_SENDER_NAME_HERE'
receiver = ['receiver_name']
smtp_daemon_host = 'localhost'

def db_backup_file_does_not_exist(db_backup_file):
    if not os.path.exists(db_backup_file): return True
    else: return False

def dump_health(last_dump_row, file_name, db):
    last_row = last_dump_row.rsplit(" ")
    tms = ''.join(last_row[4:5])
    status = last_row[1:3]
    if (status) and (tms != today):
        log_fail.write("\nDB is old for " + str(db) + str(file_name) + ", \nDump finished at " + str(''.join(tms)))
        log_fail.write("\n-------------------------------------------")
    elif not (status) and (tms == None):
        log_fail.write("\nDump is not complete for " + str(db) + str(file_name) + " , end of file is not correct")
        log_fail.write("\n-------------------------------------------")

suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

def humansize(nbytes):
    if nbytes == 0: return '0 B'
    i = 0
    while nbytes >= 1024 and i < len(suffixes) - 1:
        nbytes /= 1024.
        i += 1
    f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
    return '%s %s' % (f, suffixes[i])

def dump_size(dump_file, file_name, db):
    size = os.path.getsize(dump_file)
    if (size < 1024):
        human_readable = humansize(size)
        log_fail.write("\nDump is empty for " + str(db) + "\n" + "\t" + str(file_name) + ", file size is " + str(human_readable))
        log_fail.write("\n-------------------------------------------")

def report_to_noc(subject, text):
    TEXT = text
    SUBJECT = subject
    message = 'Subject: %s\n\n%s' % (SUBJECT, TEXT)
    server = smtplib.SMTP(smtp_daemon_host)
    server.sendmail(sender, receiver, message)
    server.quit()

try:
    con = psycopg2.connect(database='**', user='***', password='***', host='****')
    cur = con.cursor()
    cur.execute("""\
        select ad.servicename, (select name from servers where id = ps.server_id) as servername
        from packages as p, account_data as ad, package_servers as ps
        where p.id=ad.package_id and
              p.date_deleted IS NULL and
              p.id=ps.package_id and
              p.aktuel IS NULL and
              p.pre_def_package_id = 4 and
              p.mother_package_id !=0 and
              ps.subservice_id=5 and
              p.mother_package_id NOT IN (select id from packages where date_deleted IS NOT NULL)
        ORDER BY servername;
    """)
    while (1):
        row = cur.fetchone()
        if row == None:
            break
        db = row[0]
        server_name = str(row[1])
        if (''.join(server_name) == 'SKIP_THIS') or (''.join(server_name) == 'SKIP_THIS'):
            continue
        else:
            db_backup_file = '/storage/backup/db/mysql/' + str(db) + '/current/' + str(db) + '.mysql.gz'
            db_backup_file2 = '/storage/backup/' + str(''.join(server_name.split("DB"))) + '/mysql/' + str(db) + '/current/' + str(db) + '.mysql.gz'
            db_file_does_not_exist = False
            db_file2_does_not_exist = False
            if db_backup_file_does_not_exist(db_backup_file):
                db_file_does_not_exist = True
            if db_backup_file_does_not_exist(db_backup_file2):
                db_file2_does_not_exist = True
            if db_file_does_not_exist and db_file2_does_not_exist:
                log_fail.write("\nMySQL dump does not exist for " + str(db) + "\n" + "\t" + str(db_backup_file2) + "\n" + "\t" + str(db_backup_file))
                log_fail.write("\n-------------------------------------------")
                continue
            elif (db_file_does_not_exist) and not (db_file2_does_not_exist):
                p_zcat = Popen(["zcat", db_backup_file2], stdout=PIPE)
                p_tail = Popen(["tail", "-2"], stdin=p_zcat.stdout, stdout=PIPE)
                dump_status = str(p_tail.communicate()[0])
                dump_health(dump_status, db_backup_file2, db)
                dump_size(db_backup_file2, db_backup_file2, db)
            elif (db_file2_does_not_exist) and not (db_file_does_not_exist):
                p_zcat = Popen(["zcat", db_backup_file], stdout=PIPE)
                p_tail = Popen(["tail", "-2"], stdin=p_zcat.stdout, stdout=PIPE)
                dump_status = str(p_tail.communicate()[0])
                dump_health(dump_status, db_backup_file, db)
                dump_size(db_backup_file, db_backup_file, db)
    con.close()
except psycopg2.DatabaseError, e:
    print 'Error %s' % e
    sys.exit(1)

log_fail.close()

if os.path.getsize(log_dump_fail) > 0:
    subject = "Not all MySQL dumps completed successfully. Log file backup:" + str(log_dump_fail)
    fh = open(log_dump_fail, 'r')
    text = fh.read()
    fh.close()
    report_to_noc(subject, text)
else:
    subject = "MySQL dump completed successfully for all DBs, listed in PC"
    text = "Hello! \nI am notifying you that I checked mysqldump files this morning.\nThere is nothing to worry about. :)"
    report_to_noc(subject, text)
You can process your log file after it has been written.
One option is to read your file and sort the lines:
lines = open('log.txt').readlines()
lines.sort()
open('log_sorted.txt', 'w').write("".join(lines))  # lines already end with '\n'
This won't emit an empty line between log types.
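If the blank separator line matters, a small extension of the same idea (not in the original answer) is itertools.groupby over the sorted lines:

from itertools import groupby

lines = sorted(open('log.txt').readlines())
with open('log_sorted.txt', 'w') as out_file:
    for _, group in groupby(lines):
        out_file.writelines(group)
        out_file.write("\n")   # blank line after each run of identical messages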
Another option is to use a Counter:
from collections import Counter

lines = open('log.txt').readlines()
counter = Counter()
for line in lines:
    counter[line] += 1

out_file = open('log_sorted.txt', 'w')
for line, num in counter.iteritems():
    out_file.write(line * num + "\n")
Looks like you want to group the output of the script, rather than log the info as it comes while searching.
Easiest would be to maintain 4 lists, one each for old, empty, not complete, and does not exist. In the script, add the db names to the appropriate list instead of logging, and then dump the lists one by one into the file with appropriate prefixes ("not empty for" + dbname).
For example, remove all the log_fail.write() from the functions and replace them with list.append() and write a separate function that writes to the log file as you like:
Add lists:
db_dump_is_old_list = []
db_dump_is_empty_list = []
db_dump_is_not_complete_list = []
db_dump_does_not_exist_list = []
Modify the Functions:
def dump_health(last_dump_row, file_name, db):
    last_row = last_dump_row.rsplit(" ")
    tms = ''.join(last_row[4:5])
    status = last_row[1:3]
    if (status) and (tms != today):
        db_dump_is_old_list.append(str(db))
        #log_fail.write("\nDB is old for "+ str(db) + str(file_name) + ", \nDump finished at " + str(''.join(tms)))
        #log_fail.write("\n-------------------------------------------")
    elif not (status) and (tms == None):
        db_dump_is_not_complete_list.append(str(db))
        #log_fail.write("\nDump is not complete for "+str(db) + str(file_name) + " , end of file is not correct")
        #log_fail.write("\n-------------------------------------------")

def dump_size(dump_file, file_name, db):
    size = os.path.getsize(dump_file)
    if (size < 1024):
        human_readable = humansize(size)
        db_dump_is_empty_list.append(str(db))
        #log_fail.write("\nDump is empty for " +str(db) + "\n" +"\t" + str (file_name)+", file size is " + str(human_readable))
        #log_fail.write("\n-------------------------------------------")

# And in the main loop:
if db_file_does_not_exist and db_file2_does_not_exist:
    db_dump_does_not_exist_list.append(str(db))
    #log_fail.write("\nMySQL dump does not exist for " + str(db) + "\n" + "\t" + str(db_backup_file2) + "\n" + "\t" + str(db_backup_file))
    #log_fail.write("\n-------------------------------------------")
    continue
And add a logger function:
def dump_info_to_log_file():
    log_dump_fail = '/tmp/mysqldump_FAIL'
    log_fail = open(log_dump_fail, 'w').close()
    log_fail = open(log_dump_fail, 'a')
    for dbname in db_dump_is_old_list:
        log_fail.write("Dump is Old for " + str(dbname) + "\n")
    log_fail.write("\n\n")
    for dbname in db_dump_is_empty_list:
        log_fail.write("Dump is Empty for " + str(dbname) + "\n")
    log_fail.write("\n\n")
    for dbname in db_dump_is_not_complete_list:
        log_fail.write("Dump is Not Complete for " + str(dbname) + "\n")
    log_fail.write("\n\n")
    for dbname in db_dump_does_not_exist_list:
        log_fail.write("Dump Does Not Exist for " + str(dbname) + "\n")
    log_fail.close()
Or you could simply log as you are doing, and then read in the file, sort and write back the file.
Thank you all for the interesting ideas.
I have really tried all the options :)
To my mind:
With the Counter object, the pro is fewer lines of code.
But the con is more read/write operations. The log file is not big, but I still decided to reduce the number of reads and writes.
With the lists, the con is more lines of code :) but the pro is that the file is written only once.
So I implemented the lists.. :)
Thank you guys!!!
