Inserting into Postgres DB column with Python

import psycopg2
import json

conn = psycopg2.connect("dbname=name host=host user=user password=pass port=port")
cur = conn.cursor()
with open('big shot.json') as f:
    data = json.load(f)
for key in data["permissions"]:
    cur.execute("INSERT INTO permissions (name) VALUES (%s);", (key,))
conn.commit()
output = cur.execute("SELECT * FROM permissions")
print(output)
I'm trying to use this to create new rows in my database, but it doesn't seem to do anything. It doesn't raise any errors, but nothing is written to my database, and output, of course, prints "None" in the console.

You need to fetch the data from the cursor:
cur.execute("SELECT * FROM permissions")
data = cur.fetchall()
print(data)
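Note that cur.execute() itself always returns None in psycopg2; query results are only available through the cursor's fetch methods (fetchone(), fetchmany(), fetchall()).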

Related

How to add new values in SQL (Python) if the data is already in the database?

I have a function that connects to a database and writes data to it. The function accepts a list of dictionaries with the data:
conn = psycopg2.connect(
    database="db_database",
    user='user',
    password='password',
    host='127.0.0.1',
)
conn.autocommit = True
cursor = conn.cursor()

get_data = "SELECT * FROM customer_new WHERE login_id = %s;"  # Availability check
cursor.execute(get_data, (values_list['login id'],))
last_item = cursor.fetchone()
if last_item == None:
    '''If there are NO matches by login_id in the database, then it adds to the database'''
    sql = ("""INSERT INTO customer_new (login, telegram_id, orders,
        sum_orders, average_check, balance, group_customer, zamena, reg_date, bot_or_site, login_id)
        VALUES (%(login)s, %(telegram_id)s, %(orders)s, %(sum_orders)s, %(average_check)s, %(balance)s, %(group)s, %(zamena)s, %(reg_date)s, %(bot_or_site)s, %(login id)s);""")
    cursor.execute(sql, values_list)
else:
    '''If there are matches in the database, then get the row id and update the data in it'''
    item_id_last = last_item[0]
    sql = ("""UPDATE customer_new SET (login, telegram_id, orders,
        sum_orders, average_check, balance, group_customer, zamena, reg_date, bot_or_site, login_id)
        VALUES (%(login)s, %(telegram_id)s, %(orders)s, %(sum_orders)s, %(average_check)s, %(balance)s, %(group)s, %(zamena)s, %(reg_date)s, %(bot_or_site)s, %(login id)s)""")
    cursor.execute(sql, [values_list, item_id_last])
conn.close()
I need to write the received data to the database; if a record with the same login_id already exists, it should be updated instead.
How do I implement this correctly?
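One way to implement this correctly is to let Postgres do the insert-or-update in a single statement with INSERT ... ON CONFLICT. A minimal sketch, assuming customer_new.login_id has a UNIQUE constraint and that the dictionary key is login_id (the original 'login id' key with a space would also need renaming in values_list):
# Sketch only: requires a UNIQUE constraint on customer_new.login_id
# and a values_list dict whose keys match the placeholders below.
sql = """INSERT INTO customer_new (login, telegram_id, orders, sum_orders,
        average_check, balance, group_customer, zamena, reg_date, bot_or_site, login_id)
    VALUES (%(login)s, %(telegram_id)s, %(orders)s, %(sum_orders)s, %(average_check)s,
        %(balance)s, %(group)s, %(zamena)s, %(reg_date)s, %(bot_or_site)s, %(login_id)s)
    ON CONFLICT (login_id) DO UPDATE SET
        login = EXCLUDED.login,
        telegram_id = EXCLUDED.telegram_id,
        orders = EXCLUDED.orders,
        sum_orders = EXCLUDED.sum_orders,
        average_check = EXCLUDED.average_check,
        balance = EXCLUDED.balance,
        group_customer = EXCLUDED.group_customer,
        zamena = EXCLUDED.zamena,
        reg_date = EXCLUDED.reg_date,
        bot_or_site = EXCLUDED.bot_or_site;"""
cursor.execute(sql, values_list)
This avoids the separate SELECT/INSERT/UPDATE round trips and is race-safe.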

Getting row counts from Redshift during unload process and counting rows loaded in S3

My Python code is below, where I am unloading data from Redshift to an Amazon S3 bucket. I am trying to get row counts from Redshift and from the S3 bucket to ensure that all the data was loaded. Additionally, I would also like to get the last upload date from the S3 bucket so that I know when the last unload was performed. Kindly suggest the code with an explanation.
Thanks in advance for your time and effort!
import csv
import redshift_connector
import sys

CSV_FILE = "Tables.csv"
CSV_DELIMITER = ';'
S3_DEST_PATH = "s3://..../"
DB_HOST = "MY HOST"
DB_PORT = 1234
DB_DB = "MYDB"
DB_USER = "MY_READ"
DB_PASSWORD = "MY_PSWD"
IAM_ROLE = "arn:aws:iam::/redshift-role/unload data", "arn:aws::iam::/write in bucket"

def get_tables(path):
    tables = []
    with open(path, 'r') as file:
        csv_reader = csv.reader(file, delimiter=CSV_DELIMITER)
        header = next(csv_reader)
        if header != None:
            for row in csv_reader:
                tables.append(row)
    return tables

def unload(conn, tables, s3_path):
    cur = conn.cursor()
    for table in tables:
        print(f">{table[0]}.{table[1]}")
        try:
            query = f'''unload('select * from {table[0]}.{table[1]}') to '{s3_path}/{table[1]}/'
                iam_role '{IAM_ROLE}'
                CSV
                PARALLEL FALSE
                CLEANPATH;'''
            print("loading in progress")
            cur.execute(query)
            print("Done.")
        except Exception as e:
            print("Failed to load")
            print(str(e))
            sys.exit(1)
    cur.close()

def main():
    try:
        conn = redshift_connector.connect(
            host=DB_HOST,
            port=DB_PORT,
            database=DB_DB,
            user=DB_USER,
            password=DB_PASSWORD
        )
        tables = get_tables(CSV_FILE)
        unload(conn, tables, S3_DEST_PATH)
        conn.close()
    except Exception as e:
        print(e)
        sys.exit(1)
Update (code based on an SO user's comment):
tables = ['schema1.tablename', 'schema2.table2']
conn = redshift_connector.connect(
    host='my_host',
    port="my_port",
    database='my_db',
    user="user",
    password='password')
cur = conn.cursor()
cur.execute('select count(*) from {',' .join("'"+y+"'" for y in tables)}')
results = cur.fetchall()
print("The table {} contained".format(tables[0]), *result[0], "rows" + "\n")  # Printing row counts along with table names
cur.close()
conn.close()
2nd update:
tables = ['schema1.tablename', 'schema2.table2']
conn = redshift_connector.connect(
    host='my_host',
    port="my_port",
    database='my_db',
    user="user",
    password='password')
cur = conn.cursor()
for table in tables:
    cur.execute(f'select count(*) from {table};')
    results = cur.fetchone()
    for row in result:
        print("The table {} contained".format(tables[0]), result[0], "rows" + "\n")  # Printing row counts along with table names
The simple query to get the number of rows is
query = f"select count(*) from {table_name}"
For Redshift, all you need to do is
cur.execute(query)
row_count = cur.fetchall()
Using boto3, you can use a similar SQL query to fetch S3 row count as well, as elucidated in this answer.
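For instance, a minimal S3 Select sketch for counting the rows of one unloaded object (the bucket and key here are placeholders, and the files are assumed to be plain CSV):
import boto3

s3 = boto3.client("s3")
# Placeholder bucket/key: point these at one of the unloaded CSV objects.
resp = s3.select_object_content(
    Bucket="my-bucket",
    Key="unload/path/table1/000",
    ExpressionType="SQL",
    Expression="SELECT COUNT(*) FROM s3object",
    InputSerialization={"CSV": {"FileHeaderInfo": "NONE"}},
    OutputSerialization={"CSV": {}},
)
for event in resp["Payload"]:
    if "Records" in event:
        print(event["Records"]["Payload"].decode())  # the row count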
Edit:
Corrected your updated approach a little:
cur = conn.cursor()
for table in tables:
    cur.execute(f'select count(*) from {table};')
    result = cur.fetchone()
    count = result[0] if result else 0
    print(f"The table {table} contained {count} rows.\n")

sqlite3 view() prints an empty list?

I have a testtable() function that creates the table if necessary and inserts all the PDF file names into a column. However, when I execute my view() function, it prints an empty list. Am I missing something, or am I just going about this the wrong way?
import os, sys
import sqlite3
import csv

testdb = 'pdftestdir.db'

def testtable():
    conn = sqlite3.connect(testdb)
    cur = conn.cursor()
    cur.execute('CREATE TABLE IF NOT EXISTS test (name TEXT)')
    path = os.listdir('/root/Desktop/PDF')
    conn = sqlite3.connect(testdb)
    cur = conn.cursor()
    cur.execute('SELECT * FROM test')
    exists = cur.fetchall()
    for name in path:
        if name.endswith('.pdf'):
            if not exists:
                cur.execute('INSERT INTO test VALUES (?)', (name,))
            else:
                pass
    conn.commit()
    conn.close()

def view():
    conn = sqlite3.connect(testdb)
    cur = conn.cursor()
    cur.execute('SELECT * FROM test')
    cur.fetchall()
    rows = cur.fetchall()
    conn.close()
    print(rows)
You unnecessarily call cur.fetchall() without storing the return value in a variable; that first call already consumes every row the query returned, so the second cur.fetchall() has no rows left to return.
You can fix this by simply removing the redundant call.
Change:
cur.fetchall()
rows = cur.fetchall()
to:
rows = cur.fetchall()
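As an aside, sqlite3 cursors are iterable, so you can also loop over the results directly without fetchall():
cur.execute('SELECT * FROM test')
for row in cur:
    print(row)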

How to get id of newly created record - PostgreSQL/Flask?

I am trying to insert a new row into a PostgreSQL database using psycopg2 and Flask.
Here is the code:
con = psycopg2.connect("host=localhost dbname=crm_whatsapp user=user_name password=user_password")
cur = con.cursor()
create = cur.execute("INSERT INTO crm_user_chat_data (number) VALUES (%s) returning id",(user_number,)) //Here it returns none
con.commit()
But I am getting None instead of the id.
How can I solve this?
You need to fetch the returned row from the cursor to get the id:
cur = con.cursor()
cur.execute("INSERT INTO crm_user_chat_data (number) VALUES (%s) returning id", (user_number,))
con.commit()
insertId = cur.fetchone()[0]
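With psycopg2's default client-side cursor, the row produced by RETURNING is already buffered when execute() completes, which is why it can still be fetched after con.commit().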

Return a mapped dictionary based on multiple queries

Issue: I can't figure out how to run a query in the correct way so that it returns a mapped dictionary. The query will use counts from multiple tables.
I am using psycopg2 for a postgresql database, and I will be using the results to create a report on day to day deltas on these counts.
Given that, can someone provide an example of how to execute multiple queries and return a dictionary that I can use for comparison purposes? Thanks! I imagine a for loop is needed somewhere in here.
tables = ['table1', 'table2']

def db_query():
    query = "select count(*) from (a_table) where error_string != '';"
    conn = psycopg2.connect(database=db, user=user, password=password, host=host)
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(query, tables)
    output = cur.fetchall()
    conn.close()
    return output
I haven't used postgresql, so you might want to also check this out as a reference: How to store count values in python.
That being said, rearrange your code into something like this. Be sure to make conn global so you don't have to make more than one connection, and make sure you're also closing cur:
conn = None

def driverFunc():
    global conn
    try:
        conn = psycopg2.connect(database=db, user=user, password=password, host=host)
        tables = ['table1', 'table2']
        countDict = {}
        for thisTable in tables:
            db_query(thisTable, countDict)
    finally:
        if conn is not None:
            conn.close()

def db_query(tableName, countDict):
    # Beware of SQL injection with the following line:
    query = "select count(*) from " + tableName + " where error_string != '';"
    cur = None
    try:
        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cur.execute(query)
        countDict[tableName] = int(cur.fetchone()[0])
    finally:
        if cur is not None:
            cur.close()
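As a safer alternative to concatenating the table name into the query string, psycopg2 ships a psycopg2.sql module for composing identifiers. A small sketch of the same count query:
from psycopg2 import sql

# Builds the statement with the table name as a properly quoted identifier,
# avoiding the SQL injection risk of plain string concatenation.
query = sql.SQL("select count(*) from {} where error_string != ''").format(
    sql.Identifier(tableName))
cur.execute(query)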
