How to create a SQL DB2 data table using Python?

I would like to run a SQL DB2 query from Python that will create a data table in the public schema, but I'm stuck because of this error:
ResourceClosedError: This result object does not return rows. It has been closed automatically.
Below is the Python code I'm using; I have deleted the private information.
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
#connection string
user = "xxx"
pwd = "xxx"
host = "xxx"
port = "xxx"
db = "xxx"
conn_strng = "redshift+psycopg2://%s:%s@%s:%s/%s" % (user, pwd, host, port, db)
#establish connection
engine = create_engine(conn_strng)
with engine.connect() as conn, conn.begin():
    pd.read_sql("""
drop table if exists public.fc_SER_ACC_By_DLCX_Date;
create table public.fc_SER_ACC_By_DLCX_Date as
SELECT DLCX_Date, tool_id as DLCX_Tool,Model, avg(SQZSER) as SQZSER_Mean, avg(SQZSER_BASE) as SQZSER_BASE_Mean,
avg(PRED_ACC_SMR) as PRED_ACC_SMR_Mean, avg(PRED_ACC_CMR) as PRED_ACC_CMR_Mean, count(slider_id) as Tested_Sliders
FROM (SELECT DISTINCT a.slider_id, LEFT(a.product_id,2) as Model,
a.xti_wrn1_p23 AS SQZSER,a.SER0 AS SQZSER_BASE, a.FOM2 AS PRED_ACC_SMR, a.FOM1 AS PRED_ACC_CMR,
TRUNC(c.transaction_date_time) as DLCX_Date, c.tool_id
FROM ah.param_jade_wide a
LEFT JOIN ah.param_lap_summary b ON (a.wafer_id, a.row_number) = (b.wafer_id, b.row_number)
LEFT JOIN ah.his_job c ON c.job_number = b.job_number
WHERE c.transaction_date_time > '2020-03-01'
AND LEFT(a.product_id,2) IN ('C3')
AND b.source_system_code IN ('MFG2.SLDR.LAPRUN')
AND a.xti_wrn1_p23 between -10 and 0
AND a.SER0 between -10 and 0
AND c.operation_id IN ('510150')
AND a.retest_number = 0
AND a.class_description IN ('PROD')
AND NOT c.tool_id = 0 AND NOT c.tool_id in (''))
group by DLCX_Date, DLCX_Tool, Model
Union
SELECT DLCX_Date, tool_id as DLCX_Tool,Model, avg(SQZSER) as SQZSER_Mean, avg(SQZSER_BASE) as SQZSER_BASE_Mean,
avg(PRED_ACC_SMR) as PRED_ACC_SMR_Mean, '0' as PRED_ACC_CMR_Mean, count(slider_id) as Tested_Sliders
FROM (SELECT DISTINCT a.slider_id, LEFT(a.product_id,2) as Model,
a.XTI_WRN1_P19 AS SQZSER,a.XTI_WRN1_P18 AS SQZSER_BASE, a.XTI_RSVD0 AS PRED_ACC_SMR,
TRUNC(c.transaction_date_time) as DLCX_Date, c.tool_id
FROM ah.param_jade_wide a
LEFT JOIN ah.param_lap_summary b ON (a.wafer_id, a.row_number) = (b.wafer_id, b.row_number)
LEFT JOIN ah.his_job c ON c.job_number = b.job_number
WHERE c.transaction_date_time > '2020-03-01'
AND LEFT(a.product_id,2) IN ('L2','L3')
AND b.source_system_code IN ('MFG2.SLDR.LAPRUN')
AND c.operation_id IN ('510150')
AND a.XTI_WRN1_P19 between -10 and 0
AND a.XTI_WRN1_P18 between -10 and 0
AND a.retest_number = 0
AND a.class_description IN ('PROD')
AND NOT c.tool_id = 0 AND NOT c.tool_id in (''))
group by DLCX_Date, DLCX_Tool, Model
order by DLCX_Date;
commit;""", conn)
conn.close()
engine.dispose()
print("Table has been updated!")
Please help me fix my code; thanks in advance.

The error indicates that pandas read_sql cannot import data into a data frame, since you only have DDL actions (DROP TABLE and CREATE TABLE) and nothing that returns rows, like a SELECT.
If you are not using pandas for data analytics, simply run your queries with SQLAlchemy transactions. There is also no need to call close() explicitly when you use a context manager (with):
with engine.connect() as conn:
    with conn.begin():
        conn.execute("""DROP TABLE IF EXISTS public.fc_SER_ACC_By_DLCX_Date;""")
        conn.execute("""CREATE TABLE public.fc_SER_ACC_By_DLCX_Date AS
                        ...
                     """)
Or combined:
with engine.begin() as conn:
    conn.execute("""DROP TABLE IF EXISTS public.fc_SER_ACC_By_DLCX_Date;""")
    conn.execute("""CREATE TABLE public.fc_SER_ACC_By_DLCX_Date AS
                    ...
                 """)
And if you really do need a data frame, use the engine object in read_sql after the transactions:
df = pd.read_sql("""SELECT * FROM public.fc_SER_ACC_By_DLCX_Date;""", engine)
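As a side note, on SQLAlchemy 1.4+/2.0 conn.execute() no longer accepts a plain SQL string, so wrap the statements in text(). A minimal sketch of the combined pattern, reusing the connection string built in the question:
import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine(conn_strng)  # connection string as built in the question
with engine.begin() as conn:
    conn.execute(text("DROP TABLE IF EXISTS public.fc_SER_ACC_By_DLCX_Date;"))
    conn.execute(text("CREATE TABLE public.fc_SER_ACC_By_DLCX_Date AS SELECT ..."))  # full SELECT as above

# read the freshly created table back if a data frame is needed
df = pd.read_sql("SELECT * FROM public.fc_SER_ACC_By_DLCX_Date;", engine)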

Related

Applying a WHERE condition when using pandas read_sql_table() - SQLAlchemy

I want to use SQLAlchemy and Pandas to read a table from a PostgreSQL database into a Pandas dataframe using read_sql_table(). The SQL query to the database is similar to this:
SELECT col1, col2 FROM my_table WHERE col1 = 'value'
I tried this code to get the Pandas dataframe from the table:
from sqlalchemy import create_engine
db_uri = environ.get('SQLALCHEMY_DATABASE_URI')
engine = create_engine(db_uri, echo=True)
table_df = pd.read_sql_table(
    'my_table',
    con=engine,
    schema="public",
    columns=['col1', 'col2'])
This code works, but how can I apply a condition similar to the WHERE clause in the SQL query and filter the dataframe based on it?
I don't want to load the whole dataframe into memory first; I want to filter while querying the database.
As mentioned in a comment to the question, you can use read_sql_query() to filter your results. If you want to avoid passing a raw SQL statement to the function you can create the query using SQLAlchemy Core and pass that instead:
import sqlalchemy as sa
# …
team = sa.Table("team", sa.MetaData(), autoload_with=engine)
qry = sa.select(team.c.city, team.c.name).where(team.c.id == 1)
df = pd.read_sql_query(qry, engine)
print(df)
"""
      city    name
0  Calgary  Flames
"""
import pandas as pd
import sqlalchemy as sa
engine = sa.create_engine('oracle+cx_oracle://user:senha@db', echo=False)
team = sa.Table('oracle_table', sa.MetaData(), autoload_with=engine, schema='db')
qry = sa.select(team.c.column_a, team.c.column_b).where(
    team.c.column_b == 'OPTION')
df = pd.read_sql_query(qry, engine)
print(df)
engine.dispose()

Unable to fetch data from PostgreSQL table connected using Python

I can fetch the data using this command:
connection = psg.connect( database = "Test" , host="localhost", user="postgres", password="password")
data_1 = psql.read_sql('SELECT * FROM table_1 WHERE id IN (101 , 102)', connection)
But when I run the command below, it gives me an error. A user will enter dynamic ID values and the interface will show the data corresponding to those IDs; that's why a variable is created, which will come from the user interface.
connection = psg.connect( database = "Test" , host="localhost", user="postgres", password="password")
variable_p = (108 ) # 108 is the id column value.
data_1 = psql.read_sql('SELECT * FROM table_1 WHERE id IN (variable_p[0])', connection)
Error - Column variable_p does not exist.
You are not using your variable variable_p in your read_sql string. You can use f-strings here:
connection = psg.connect( database = "Test" , host="localhost", user="postgres", password="password")
variable_p = 108 # don't need parentheses for single value
data_1 = psql.read_sql(f'SELECT * FROM table_1 WHERE id = {variable_p}', connection)
But here you are vulnerable to SQL injection. So try parametrising your query to make it safe.
Parametrising and using WHERE IN, this might be something like (untested):
variable_p = (1, 2, 3)
data_1 = psql.read_sql('SELECT * FROM table_1 WHERE id IN %s', connection, params=(variable_p,))  # wrap the tuple so the single %s placeholder receives it
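Alternatively, here is a sketch using SQLAlchemy's expanding bind parameters, which handle an IN list of any length safely (this assumes a SQLAlchemy engine instead of a raw psycopg2 connection; the URI is a placeholder):
import pandas as pd
import sqlalchemy as sa

engine = sa.create_engine("postgresql+psycopg2://postgres:password@localhost/Test")  # placeholder URI
qry = sa.text("SELECT * FROM table_1 WHERE id IN :ids").bindparams(
    sa.bindparam("ids", expanding=True)
)
data_1 = pd.read_sql(qry, engine, params={"ids": [101, 102, 108]})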

How to upsert pandas DataFrame to PostgreSQL table?

I've scraped some data from web sources and stored it all in a pandas DataFrame. Now, in order to harness the powerful db tools afforded by SQLAlchemy, I want to convert said DataFrame into a Table() object and eventually upsert all the data into a PostgreSQL table. If this is practical, what is a workable method of accomplishing this task?
Update: You can save yourself some typing by using this method.
If you are using PostgreSQL 9.5 or later you can perform the UPSERT using a temporary table and an INSERT ... ON CONFLICT statement:
import sqlalchemy as sa
# …
with engine.begin() as conn:
    # step 0.0 - create test environment
    conn.exec_driver_sql("DROP TABLE IF EXISTS main_table")
    conn.exec_driver_sql(
        "CREATE TABLE main_table (id int primary key, txt varchar(50))"
    )
    conn.exec_driver_sql(
        "INSERT INTO main_table (id, txt) VALUES (1, 'row 1 old text')"
    )
    # step 0.1 - create DataFrame to UPSERT
    df = pd.DataFrame(
        [(2, "new row 2 text"), (1, "row 1 new text")], columns=["id", "txt"]
    )
    # step 1 - create temporary table and upload DataFrame
    conn.exec_driver_sql(
        "CREATE TEMPORARY TABLE temp_table AS SELECT * FROM main_table WHERE false"
    )
    df.to_sql("temp_table", conn, index=False, if_exists="append")
    # step 2 - merge temp_table into main_table
    conn.exec_driver_sql(
        """\
        INSERT INTO main_table (id, txt)
        SELECT id, txt FROM temp_table
        ON CONFLICT (id) DO
            UPDATE SET txt = EXCLUDED.txt
        """
    )
    # step 3 - confirm results
    result = conn.exec_driver_sql("SELECT * FROM main_table ORDER BY id").all()
    print(result)  # [(1, 'row 1 new text'), (2, 'new row 2 text')]
I have needed this so many times that I ended up creating a gist for it.
The function is below; it will create the table the first time the dataframe is persisted and will update the table if it already exists:
import pandas as pd
import sqlalchemy
import uuid
import os

def upsert_df(df: pd.DataFrame, table_name: str, engine: sqlalchemy.engine.Engine):
    """Implements the equivalent of pd.DataFrame.to_sql(..., if_exists='update')
    (which does not exist). Creates or updates the db records based on the
    dataframe records.
    Conflicts to determine update are based on the dataframe's index.
    This will set a unique keys constraint on the table equal to the index names.
    1. Create a temp table from the dataframe
    2. Insert/update from temp table into table_name
    Returns: True if successful
    """
    # If the table does not exist, we should just use to_sql to create it
    if not engine.execute(
        f"""SELECT EXISTS (
            SELECT FROM information_schema.tables
            WHERE table_schema = 'public'
            AND table_name = '{table_name}');
            """
    ).first()[0]:
        df.to_sql(table_name, engine)
        return True

    # If it already exists...
    temp_table_name = f"temp_{uuid.uuid4().hex[:6]}"
    df.to_sql(temp_table_name, engine, index=True)

    index = list(df.index.names)
    index_sql_txt = ", ".join([f'"{i}"' for i in index])
    columns = list(df.columns)
    headers = index + columns
    headers_sql_txt = ", ".join(
        [f'"{i}"' for i in headers]
    )  # index1, index2, ..., column 1, col2, ...

    # col1 = excluded.col1, col2 = excluded.col2
    update_column_stmt = ", ".join([f'"{col}" = EXCLUDED."{col}"' for col in columns])

    # For the ON CONFLICT clause, postgres requires that the columns have a unique constraint
    query_pk = f"""
    ALTER TABLE "{table_name}" DROP CONSTRAINT IF EXISTS unique_constraint_for_upsert;
    ALTER TABLE "{table_name}" ADD CONSTRAINT unique_constraint_for_upsert UNIQUE ({index_sql_txt});
    """
    engine.execute(query_pk)

    # Compose and execute upsert query
    query_upsert = f"""
    INSERT INTO "{table_name}" ({headers_sql_txt})
    SELECT {headers_sql_txt} FROM "{temp_table_name}"
    ON CONFLICT ({index_sql_txt}) DO UPDATE
    SET {update_column_stmt};
    """
    engine.execute(query_upsert)
    engine.execute(f"DROP TABLE {temp_table_name}")
    return True
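A hypothetical usage sketch (the table name and data are placeholders), with the conflict key held in the DataFrame's index:
# hypothetical example: rows keyed by an "id" index
df = pd.DataFrame({"txt": ["row 1 new text", "row 2 text"]},
                  index=pd.Index([1, 2], name="id"))
engine = sqlalchemy.create_engine("postgresql://user:password@localhost/dbname")  # placeholder URI
upsert_df(df, "my_table", engine)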
Here is my code for a bulk insert & insert-on-conflict-update query for PostgreSQL from a pandas dataframe.
Let's say id is the unique key for both the PostgreSQL table and the pandas df, and you want to insert and update based on this id.
import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://username:pass@host:port/dbname")
query = text(f"""
    INSERT INTO schema.table(name, title, id)
    VALUES {','.join([str(i) for i in list(df.to_records(index=False))])}
    ON CONFLICT (id)
    DO UPDATE SET name = excluded.name,
                  title = excluded.title
""")
engine.execute(query)
Make sure that your df columns are in the same order as your table's columns.
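For instance, you could reorder the frame explicitly before building the VALUES list (a one-line sketch, using the column order from the query above):
# make the dataframe column order match the INSERT column list (name, title, id)
df = df[["name", "title", "id"]]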
EDIT 1:
Thanks to Gord Thompson's comment, I realized that this query won't work if there is a single quote in a column value. So here is a fix for that case:
import pandas as pd
from sqlalchemy import create_engine, text

df.name = df.name.str.replace("'", "''")
df.title = df.title.str.replace("'", "''")
engine = create_engine("postgresql://username:pass@host:port/dbname")
query = text("""
    INSERT INTO author(name, title, id)
    VALUES %s
    ON CONFLICT (id)
    DO UPDATE SET name = excluded.name,
                  title = excluded.title
""" % ','.join([str(i) for i in list(df.to_records(index=False))]).replace('"', "'"))
engine.execute(query)
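Even with the escaping, splicing values into the SQL string stays fragile. A safer sketch is to let the driver bind the rows as parameters (reusing the author table from above; the URI is a placeholder):
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://username:pass@host:port/dbname")  # placeholder URI
stmt = text("""
    INSERT INTO author(name, title, id)
    VALUES (:name, :title, :id)
    ON CONFLICT (id)
    DO UPDATE SET name = excluded.name,
                  title = excluded.title
""")
with engine.begin() as conn:
    conn.execute(stmt, df.to_dict("records"))  # executemany-style, no manual quoting needed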
Consider this function if your DataFrame and SQL table already contain the same column names and types.
Advantages:
Good if you have a long dataframe to insert (batching).
Avoids writing a long SQL statement in your code.
Fast.
from sqlalchemy import Table
from sqlalchemy.engine.base import Engine as sql_engine
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.ext.automap import automap_base
import pandas as pd

def upsert_database(list_input: pd.DataFrame, engine: sql_engine, table: str, schema: str) -> None:
    if len(list_input) == 0:
        return None
    flattened_input = list_input.to_dict('records')
    with engine.connect() as conn:
        base = automap_base()
        base.prepare(engine, reflect=True, schema=schema)
        target_table = Table(table, base.metadata,
                             autoload=True, autoload_with=engine, schema=schema)
        chunks = [flattened_input[i:i + 1000] for i in range(0, len(flattened_input), 1000)]
        for chunk in chunks:
            stmt = insert(target_table).values(chunk)
            update_dict = {c.name: c for c in stmt.excluded if not c.primary_key}
            conn.execute(stmt.on_conflict_do_update(
                constraint=f'{table}_pkey',
                set_=update_dict)
            )
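A hypothetical call (table, schema, and data are placeholders), assuming the target table already exists with a default-named primary key constraint (<table>_pkey):
df = pd.DataFrame([{"id": 1, "name": "Flames"}, {"id": 2, "name": "Oilers"}])
upsert_database(df, engine, table="team", schema="public")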
If you already have a pandas dataframe, you could use df.to_sql to push the data directly through SQLAlchemy:
from sqlalchemy import create_engine
#create a connection from Postgre URI
cnxn = create_engine("postgresql+psycopg2://username:password@host:port/database")
#write dataframe to database
df.to_sql("my_table", con=cnxn, schema="myschema")

Fetching data from a PostgreSQL database using sqlalchemy.select() in Python

I am using Python and SQLAlchemy to fetch data from a table.
import sqlalchemy as db
import pandas as pd
DATABASE_URI = 'postgres+psycopg2://postgres:postgresql@localhost:5432/postgres'
engine = db.create_engine(DATABASE_URI)
connection = engine.connect()
metadata = db.MetaData()  # was missing from the original snippet
project_table = db.Table('project', metadata, autoload=True, autoload_with=engine)
Here I want to fetch records based on a list of ids which I have:
l = [557997, 558088, 623106, 558020, 623108, 557836, 557733, 622792, 623511, 623185]
query1 = db.select([project_table]).where(project_table.columns.project_id.in_(l))
# sql query = "select * from project where project_id in l"
Result = connection.execute(query1)
Rset = Result.fetchall()
df = pd.DataFrame(Rset)
print(df.head())
Here, when I print df.head(), I am getting an empty dataframe. I am not able to pass a list to the above query. Is there a way to pass a list to in_ in the above query?
The result should contain the rows in the table whose project_id equals one of the given ids.
i.e.
project_id  project_name  project_date  project_developer
557997      Test1         24-05-2011    Ajay
558088      Test2         24-06-2003    Alex
These rows will be inserted into the dataset.
The query is:
"select * from project where project_id in (557997, 558088, 623106, 558020, 623108, 557836, 557733, 622792, 623511, 623185)"
Here, as I can't give static values, I will insert the values into a list and pass this list to the query as a parameter.
This is where I am having a problem: I can't pass a list as a parameter to db.select(). How can I pass a list to db.select()?
After many trials I found out that, because of the large amount of data the query fetches and the limited RAM on my workstation, the query returned null (no results). So what I did was:
table_data = []  # collect rows in batches to limit memory use
Result = connection.execute(query1)
while True:
    rows = Result.fetchmany(10000)
    if not rows:
        break
    for row in rows:
        table_data.append(row)
df1 = pd.DataFrame(table_data)
df1.columns = columns
After this the program was working fine.
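For reference, the in_() construct itself also works when the result is read directly with pandas; a minimal sketch, assuming the same reflected project table:
import pandas as pd

ids = [557997, 558088]
query = db.select([project_table]).where(project_table.columns.project_id.in_(ids))
df = pd.read_sql(query, connection)  # column names come from the table metadata
print(df.head())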

Cannot drop table in pandas to_sql using SQLAlchemy

I'm trying to drop an existing table, do a query, and then recreate the table using the pandas to_sql function. This query works in pgAdmin, but not here. Any idea whether this is a pandas bug or whether my code is wrong?
Specific error is ValueError: Table 'a' already exists.
import pandas.io.sql as psql
from sqlalchemy import create_engine
engine = create_engine(r'postgresql://user@localhost:port/dbname')
c = engine.connect()
conn = c.connection
sql = """
drop table a;
select * from some_table limit 1;
"""
df = psql.read_sql(sql, con=conn)
print(df.head())
df.to_sql('a', engine)
conn.close()
Why are you doing it like that? There is a shorter way: the if_exists kwarg in to_sql. Try this:
import pandas.io.sql as psql
from sqlalchemy import create_engine
engine = create_engine(r'postgresql://user@localhost:port/dbname')
c = engine.connect()
conn = c.connection
sql = """
select * from some_table limit 1;
"""
df = psql.read_sql(sql, con=conn)
print(df.head())
# Notice how the line below is different: you forgot the schema argument
df.to_sql('a', con=conn, schema=schema_name, if_exists='replace')
conn.close()
According to docs:
replace: If table exists, drop it, recreate it, and insert data.
P.S. Additional tip:
This is a better way to handle the connection:
with engine.connect() as conn, conn.begin():
    sql = """select * from some_table limit 1"""
    df = psql.read_sql(sql, con=conn)
    print(df.head())
    df.to_sql('a', con=conn, schema=schema_name, if_exists='replace')
Because it ensures that your connection is always closed, even if your program exits with an error. This is important to prevent data corruption. Further, I would just use this:
import pandas as pd
...
pd.read_sql(sql, conn)
instead of the way you are doing it.
So, if I were in your place writing that code, it would look like this:
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine(r'postgresql://user@localhost:port/dbname')
with engine.connect() as conn, conn.begin():
    df = pd.read_sql('select * from some_table limit 1', con=conn)
    print(df.head())
    df.to_sql('a', con=conn, schema=schema_name, if_exists='replace')
