Create table in specific tablespace with SQLAlchemy - Python

I can create a new table in a tablespace using a raw query:
engine = sqlalchemy.engine.create_engine(ENGINE_PATH_DDS, encoding="utf-8")
connection = engine.connect()
conn = engine.raw_connection()
cur = conn.cursor()
sql = """
CREATE TABLE {schema}.{table_name}
(
ID NUMBER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
STATUS NVARCHAR2(32) NOT NULL,
DESCR NVARCHAR2(256) DEFAULT NULL
)
TABLESPACE SOME_TABLESPACE
""".format(table_name = table_name, schema = SCHEMA)
cur.execute(sql)
The created table is accessible to all users with privileges. But how can I do the same thing without raw_connection? With this approach:
metadata = MetaData(engine)
table = Table('Example', metadata,
              Column('id', Integer, primary_key=True),
              Column('status', String))
metadata.create_all()
How can I specify the tablespace for the new table?
This code also works:
df = pd.DataFrame(columns=['id', 'status', 'descr']) # pandas data frame
df.to_sql(con=connection, name=tbl, schema=SCHEMA, index=False, dtype=data_types, if_exists='replace')
but when trying to insert any data I get:
ORA-01950: no privileges on tablespace 'USERS'
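One possible workaround (a hedged, untested sketch reusing engine and SCHEMA from above): SQLAlchemy's Core Table has no portable tablespace argument, so intercept the compilation of CreateTable for the Oracle dialect and append the clause for tables that request it through their info dict. Note that ORA-01950 is a quota error: without an explicit tablespace, the table lands in the user's default tablespace (USERS here), where no quota is granted, so pinning it to SOME_TABLESPACE sidesteps that.

from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import CreateTable

@compiles(CreateTable, "oracle")
def _create_table_with_tablespace(element, compiler, **kw):
    # render the regular CREATE TABLE ( ... ) DDL first
    ddl = compiler.visit_create_table(element, **kw)
    # element.element is the Table being created; read our custom info key
    tablespace = element.element.info.get("tablespace")
    if tablespace:
        ddl = ddl.rstrip() + "\nTABLESPACE {}\n".format(tablespace)
    return ddl

metadata = MetaData()
table = Table(
    "Example",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("status", String(32)),
    schema=SCHEMA,
    info={"tablespace": "SOME_TABLESPACE"},  # consumed by the hook above
)
metadata.create_all(engine)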

Related

SQLAlchemy: CompileError "Unconsumed column names" when creating INSERT query

Environment:
SQLAlchemy + PostgreSQL (pg8000)
Situation:
I created two tables from the psql console.
CREATE TABLE testidtable(
    testid BIGSERIAL PRIMARY KEY NOT NULL
);

CREATE TABLE testidtable2(
    testid bigint PRIMARY KEY NOT NULL,
    CONSTRAINT testid_is_valid FOREIGN KEY(testid) REFERENCES testidtable (testid)
);
Here is the Python code:
import sqlalchemy
from sqlalchemy import Table, MetaData
from config import Config

conf = Config()
engine = sqlalchemy.create_engine(
    sqlalchemy.engine.URL.create(
        "postgresql+pg8000",
        database="databasename",
        username=conf.db1_user,
        password=conf.db1_pass),
    client_encoding='utf-8')

metadata = MetaData()
metadata.reflect(bind=engine)
engine.connect()

table1 = Table("testidtable", metadata, autoload_with=engine)
ins1 = table1.insert().values({})
ret1 = engine.execute(ins1)
testid = ret1.inserted_primary_key[0]

table2 = Table("testidtable2", metadata, autoload_with=engine)
ins2 = table2.insert().values({"testid": testid})
ret2 = engine.execute(ins2)
And the error occurred:
sqlalchemy.exc.CompileError: Unconsumed column names: testid
I tried to print ins2, the INSERT query, for debugging, but hit the same error.
How can I INSERT testid into testidtable2 successfully?
Thanks.
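A hedged debugging sketch (not from the original thread): "Unconsumed column names" is raised when .values() receives a key that the compiled INSERT cannot match to a column on the Table object, so a reasonable first step is to check what reflection actually produced:

# columns SQLAlchemy reflected for each table
print(table1.columns.keys())   # expected: ['testid']
print(table2.columns.keys())   # if 'testid' is missing here, the reflected
                               # table is not the one the INSERT targets
# the compiled statement shows which columns the INSERT will accept
print(str(table2.insert()))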

Primary key constraint gets removed when creating a Postgres table from a pandas DataFrame

I am trying to create a few tables in Postgres from a pandas DataFrame, but I keep getting this error.
psycopg2.errors.InvalidForeignKey: there is no unique constraint matching given keys for referenced table "titles"
After looking into this problem for hours, I finally found that when I insert the data into the parent table from a pandas DataFrame, the primary key constraint gets removed for some reason, and because of that I get this error when trying to reference it from another table.
I do not have this problem when I use pgAdmin4 to create the table and insert a few rows of data manually.
When I created the tables using pgAdmin, the primary key and foreign keys were created as expected, and I had no problem with them.
But when I insert the data from a pandas DataFrame using the psycopg2 library, the primary key is not created.
I can't understand why this is happening.
The code I am using to create the tables:
import csv
import os
from io import StringIO

import numpy as np
import pandas as pd
import psycopg2
from sqlalchemy import create_engine


# function for faster data insertion
def psql_insert_copy(table, conn, keys, data_iter):
    """
    Execute SQL statement inserting data

    Parameters
    ----------
    table : pandas.io.sql.SQLTable
    conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
    keys : list of str
        Column names
    data_iter : Iterable that iterates the values to be inserted
    """
    # gets a DBAPI connection that can provide a cursor
    dbapi_conn = conn.connection
    with dbapi_conn.cursor() as cur:
        s_buf = StringIO()
        writer = csv.writer(s_buf)
        writer.writerows(data_iter)
        s_buf.seek(0)
        columns = ", ".join('"{}"'.format(k) for k in keys)
        if table.schema:
            table_name = "{}.{}".format(table.schema, table.name)
        else:
            table_name = table.name
        sql = "COPY {} ({}) FROM STDIN WITH CSV".format(table_name, columns)
        cur.copy_expert(sql=sql, file=s_buf)


def create_titles_table():
    # connect to the database
    conn = psycopg2.connect(
        dbname="imdb",
        user="postgres",
        password=os.environ.get("DB_PASSWORD"),
        host="localhost",
    )
    # create a cursor
    c = conn.cursor()
    print()
    print("Creating titles table...")
    c.execute(
        """CREATE TABLE IF NOT EXISTS titles(
               title_id TEXT PRIMARY KEY,
               title_type TEXT,
               primary_title TEXT,
               original_title TEXT,
               is_adult INT,
               start_year REAL,
               end_year REAL,
               runtime_minutes REAL
           )
        """
    )
    # commit changes
    conn.commit()
    # read the title data
    df = load_data("title.basics.tsv")
    # replace \N with nan
    df.replace("\\N", np.nan, inplace=True)
    # rename columns
    df.rename(
        columns={
            "tconst": "title_id",
            "titleType": "title_type",
            "primaryTitle": "primary_title",
            "originalTitle": "original_title",
            "isAdult": "is_adult",
            "startYear": "start_year",
            "endYear": "end_year",
            "runtimeMinutes": "runtime_minutes",
        },
        inplace=True,
    )
    # drop the genres column
    title_df = df.drop("genres", axis=1)
    # convert the data types from str to numeric
    title_df["start_year"] = pd.to_numeric(title_df["start_year"], errors="coerce")
    title_df["end_year"] = pd.to_numeric(title_df["end_year"], errors="coerce")
    title_df["runtime_minutes"] = pd.to_numeric(
        title_df["runtime_minutes"], errors="coerce"
    )
    # create SQLAlchemy engine
    engine = create_engine(
        "postgresql://postgres:" + os.environ["DB_PASSWORD"] + "@localhost:5432/imdb"
    )
    # insert the data into titles table
    title_df.to_sql(
        "titles", engine, if_exists="replace", index=False, method=psql_insert_copy
    )
    # commit changes
    conn.commit()
    # close cursor
    c.close()
    # close the connection
    conn.close()
    print("Completed!")
    print()


def create_genres_table():
    # connect to the database
    conn = psycopg2.connect(
        dbname="imdb",
        user="postgres",
        password=os.environ.get("DB_PASSWORD"),
        host="localhost",
    )
    # create a cursor
    c = conn.cursor()
    print()
    print("Creating genres table...")
    c.execute(
        """CREATE TABLE IF NOT EXISTS genres(
               title_id TEXT NOT NULL,
               genre TEXT,
               FOREIGN KEY (title_id) REFERENCES titles(title_id)
           )
        """
    )
    # commit changes
    conn.commit()
    # read the data
    df = load_data("title.basics.tsv")
    # replace \N with nan
    df.replace("\\N", np.nan, inplace=True)
    # rename columns
    df.rename(columns={"tconst": "title_id", "genres": "genre"}, inplace=True)
    # select only relevant columns
    genres_df = df[["title_id", "genre"]].copy()
    genres_df = genres_df.assign(genre=genres_df["genre"].str.split(",")).explode(
        "genre"
    )
    # create engine
    engine = create_engine(
        "postgresql://postgres:" + os.environ["DB_PASSWORD"] + "@localhost:5432/imdb"
    )
    # insert the data into genres table
    genres_df.to_sql(
        "genres", engine, if_exists="replace", index=False, method=psql_insert_copy
    )
    # commit changes
    conn.commit()
    # close cursor
    c.close()
    # close the connection
    conn.close()
    print("Completed!")
    print()


if __name__ == "__main__":
    print()
    print("Creating IMDB Database...")
    # connect to the database
    conn = psycopg2.connect(
        dbname="imdb",
        user="postgres",
        password=os.environ.get("DB_PASSWORD"),
        host="localhost",
    )
    # create the titles table
    create_titles_table()
    # create genres table
    create_genres_table()
    # close the connection
    conn.close()
    print("Done with Everything!")
    print()
I think the problem is to_sql(if_exists="replace"). Try using to_sql(if_exists="append") - my understanding is that "replace" drops the whole table and creates a new one with no constraints.
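Applied to the code above, that means keeping the CREATE TABLE IF NOT EXISTS statements (which define the keys) and changing only the to_sql calls, e.g. (a minimal sketch):

# append rows into the table created by CREATE TABLE IF NOT EXISTS,
# leaving its PRIMARY KEY / FOREIGN KEY constraints intact
title_df.to_sql(
    "titles", engine, if_exists="append", index=False, method=psql_insert_copy
)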

How to create an SQL DB2 data table using Python?

I would like to run an SQL DB2 query from Python that will create a data table in a public schema, but I'm stuck because of this error:
ResourceClosedError: This result object does not return rows. It has been closed automatically.
Below is the Python code I'm using; I have deleted the private information.
import numpy as np
import pandas as pd
from sqlalchemy import create_engine

# connection string
user = "xxx"
pwd = "xxx"
host = "xxx"
port = "xxx"
db = "xxx"
conn_strng = "redshift+psycopg2://%s:%s@%s:%s/%s" % (user, pwd, host, port, db)

# establish connection
engine = create_engine(conn_strng)
with engine.connect() as conn, conn.begin():
    pd.read_sql("""
drop table if exists public.fc_SER_ACC_By_DLCX_Date;
create table public.fc_SER_ACC_By_DLCX_Date as
SELECT DLCX_Date, tool_id as DLCX_Tool,Model, avg(SQZSER) as SQZSER_Mean, avg(SQZSER_BASE) as SQZSER_BASE_Mean,
avg(PRED_ACC_SMR) as PRED_ACC_SMR_Mean, avg(PRED_ACC_CMR) as PRED_ACC_CMR_Mean, count(slider_id) as Tested_Sliders
FROM (SELECT DISTINCT a.slider_id, LEFT(a.product_id,2) as Model,
a.xti_wrn1_p23 AS SQZSER,a.SER0 AS SQZSER_BASE, a.FOM2 AS PRED_ACC_SMR, a.FOM1 AS PRED_ACC_CMR,
TRUNC(c.transaction_date_time) as DLCX_Date, c.tool_id
FROM ah.param_jade_wide a
LEFT JOIN ah.param_lap_summary b ON (a.wafer_id, a.row_number) = (b.wafer_id, b.row_number)
LEFT JOIN ah.his_job c ON c.job_number = b.job_number
WHERE c.transaction_date_time > '2020-03-01'
AND LEFT(a.product_id,2) IN ('C3')
AND b.source_system_code IN ('MFG2.SLDR.LAPRUN')
AND a.xti_wrn1_p23 between -10 and 0
AND a.SER0 between -10 and 0
AND c.operation_id IN ('510150')
AND a.retest_number = 0
AND a.class_description IN ('PROD')
AND NOT c.tool_id = 0 AND NOT c.tool_id in (''))
group by DLCX_Date, DLCX_Tool, Model
Union
SELECT DLCX_Date, tool_id as DLCX_Tool,Model, avg(SQZSER) as SQZSER_Mean, avg(SQZSER_BASE) as SQZSER_BASE_Mean,
avg(PRED_ACC_SMR) as PRED_ACC_SMR_Mean,'0'PRED_ACC_CMR_Mean, count(slider_id) as Tested_Sliders
FROM (SELECT DISTINCT a.slider_id, LEFT(a.product_id,2) as Model,
a.XTI_WRN1_P19 AS SQZSER,a.XTI_WRN1_P18 AS SQZSER_BASE, a.XTI_RSVD0 AS PRED_ACC_SMR,
TRUNC(c.transaction_date_time) as DLCX_Date, c.tool_id
FROM ah.param_jade_wide a
LEFT JOIN ah.param_lap_summary b ON (a.wafer_id, a.row_number) = (b.wafer_id, b.row_number)
LEFT JOIN ah.his_job c ON c.job_number = b.job_number
WHERE c.transaction_date_time > '2020-03-01'
AND LEFT(a.product_id,2) IN ('L2','L3')
AND b.source_system_code IN ('MFG2.SLDR.LAPRUN')
AND c.operation_id IN ('510150')
AND a.XTI_WRN1_P19 between -10 and 0
AND a.XTI_WRN1_P18 between -10 and 0
AND a.retest_number = 0
AND a.class_description IN ('PROD')
AND NOT c.tool_id = 0 AND NOT c.tool_id in (''))
group by DLCX_Date, DLCX_Tool, Model
order by DLCX_Date;
commit;""", conn)
conn.close()
engine.dispose()
print("Table has been updated!")
Please help in fixing my code and thanks in advance.
The error indicates that pandas read_sql cannot import data into a data frame, since you only have DDL actions (DROP TABLE and CREATE TABLE) and nothing that returns rows like a SELECT.
If you are not using pandas for data analytics, simply run your queries with SQLAlchemy transactions. There is also no need to call close() when using a context manager like with:
with engine.connect() as conn:
    with conn.begin():
        conn.execute("""DROP TABLE IF EXISTS public.fc_SER_ACC_By_DLCX_Date;""")
        conn.execute("""CREATE TABLE public.fc_SER_ACC_By_DLCX_Date AS
                        ...
                     """)
Or combined:
with engine.begin() as conn:
    conn.execute("""DROP TABLE IF EXISTS public.fc_SER_ACC_By_DLCX_Date;""")
    conn.execute("""CREATE TABLE public.fc_SER_ACC_By_DLCX_Date AS
                    ...
                 """)
And if you really do need a data frame, use the engine object in read_sql after the transactions:
df = pd.read_sql("""SELECT * FROM public.fc_SER_ACC_By_DLCX_Date;""", engine)

How to upsert pandas DataFrame to PostgreSQL table?

I've scraped some data from web sources and stored it all in a pandas DataFrame. Now, in order to harness the powerful db tools afforded by SQLAlchemy, I want to convert said DataFrame into a Table() object and eventually upsert all data into a PostgreSQL table. If this is practical, what is a workable method of accomplishing this task?
Update: You can save yourself some typing by using this method.
If you are using PostgreSQL 9.5 or later you can perform the UPSERT using a temporary table and an INSERT ... ON CONFLICT statement:
import sqlalchemy as sa

# …
with engine.begin() as conn:
    # step 0.0 - create test environment
    conn.exec_driver_sql("DROP TABLE IF EXISTS main_table")
    conn.exec_driver_sql(
        "CREATE TABLE main_table (id int primary key, txt varchar(50))"
    )
    conn.exec_driver_sql(
        "INSERT INTO main_table (id, txt) VALUES (1, 'row 1 old text')"
    )
    # step 0.1 - create DataFrame to UPSERT
    df = pd.DataFrame(
        [(2, "new row 2 text"), (1, "row 1 new text")], columns=["id", "txt"]
    )
    # step 1 - create temporary table and upload DataFrame
    conn.exec_driver_sql(
        "CREATE TEMPORARY TABLE temp_table AS SELECT * FROM main_table WHERE false"
    )
    df.to_sql("temp_table", conn, index=False, if_exists="append")
    # step 2 - merge temp_table into main_table
    conn.exec_driver_sql(
        """\
        INSERT INTO main_table (id, txt)
        SELECT id, txt FROM temp_table
        ON CONFLICT (id) DO
        UPDATE SET txt = EXCLUDED.txt
        """
    )
    # step 3 - confirm results
    result = conn.exec_driver_sql("SELECT * FROM main_table ORDER BY id").all()
    print(result)  # [(1, 'row 1 new text'), (2, 'new row 2 text')]
I have needed this so many times that I ended up creating a gist for it.
The function is below; it will create the table if this is the first time persisting the dataframe, and will update the table if it already exists:
import pandas as pd
import sqlalchemy
import uuid
import os


def upsert_df(df: pd.DataFrame, table_name: str, engine: sqlalchemy.engine.Engine):
    """Implements the equivalent of pd.DataFrame.to_sql(..., if_exists='update')
    (which does not exist). Creates or updates the db records based on the
    dataframe records.

    Conflicts to determine update are based on the dataframe's index.
    This will set a unique key constraint on the table equal to the index names.

    1. Create a temp table from the dataframe
    2. Insert/update from temp table into table_name

    Returns: True if successful
    """
    # If the table does not exist, we should just use to_sql to create it
    if not engine.execute(
        f"""SELECT EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_schema = 'public'
                AND table_name = '{table_name}');
        """
    ).first()[0]:
        df.to_sql(table_name, engine)
        return True

    # If it already exists...
    temp_table_name = f"temp_{uuid.uuid4().hex[:6]}"
    df.to_sql(temp_table_name, engine, index=True)

    index = list(df.index.names)
    index_sql_txt = ", ".join([f'"{i}"' for i in index])
    columns = list(df.columns)
    headers = index + columns
    headers_sql_txt = ", ".join(
        [f'"{i}"' for i in headers]
    )  # index1, index2, ..., column 1, col2, ...

    # col1 = excluded.col1, col2 = excluded.col2
    update_column_stmt = ", ".join([f'"{col}" = EXCLUDED."{col}"' for col in columns])

    # For the ON CONFLICT clause, postgres requires that the columns have a unique constraint
    query_pk = f"""
    ALTER TABLE "{table_name}" DROP CONSTRAINT IF EXISTS unique_constraint_for_upsert;
    ALTER TABLE "{table_name}" ADD CONSTRAINT unique_constraint_for_upsert UNIQUE ({index_sql_txt});
    """
    engine.execute(query_pk)

    # Compose and execute upsert query
    query_upsert = f"""
    INSERT INTO "{table_name}" ({headers_sql_txt})
    SELECT {headers_sql_txt} FROM "{temp_table_name}"
    ON CONFLICT ({index_sql_txt}) DO UPDATE
    SET {update_column_stmt};
    """
    engine.execute(query_upsert)
    engine.execute(f"DROP TABLE {temp_table_name}")

    return True
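A hedged usage sketch for the function above (table name and connection URL are made up for illustration):

import pandas as pd
import sqlalchemy

# the DataFrame index names become the conflict key / unique constraint
df = pd.DataFrame(
    {"status": ["new", "shipped"]},
    index=pd.Index(["order-1", "order-2"], name="order_id"),
)
engine = sqlalchemy.create_engine("postgresql://user:pass@localhost:5432/mydb")
upsert_df(df, "orders", engine)  # creates "orders" on first run, upserts afterwards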
Here is my code for a bulk INSERT and insert-on-conflict-update query for PostgreSQL from a pandas dataframe:
Let's say id is the unique key for both the PostgreSQL table and the pandas df, and you want to insert and update based on this id.
import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://username:pass@host:port/dbname")
query = text(f"""
    INSERT INTO schema.table(name, title, id)
    VALUES {','.join([str(i) for i in list(df.to_records(index=False))])}
    ON CONFLICT (id)
    DO UPDATE SET name = excluded.name,
                  title = excluded.title
""")
engine.execute(query)
Make sure that your df columns are in the same order as your table's columns.
EDIT 1:
Thanks to Gord Thompson's comment, I realized that this query won't work if there is a single quote in a column value. Here is a fix for that case:
import pandas as pd
from sqlalchemy import create_engine, text

df.name = df.name.str.replace("'", "''")
df.title = df.title.str.replace("'", "''")
engine = create_engine("postgresql://username:pass@host:port/dbname")
query = text("""
    INSERT INTO author(name, title, id)
    VALUES %s
    ON CONFLICT (id)
    DO UPDATE SET name = excluded.name,
                  title = excluded.title
""" % ','.join([str(i) for i in list(df.to_records(index=False))]).replace('"', "'"))
engine.execute(query)
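An alternative hedged sketch (my own, assuming the same author table): instead of escaping quotes and splicing values into the SQL string, pass bound parameters and let the driver handle all quoting; executing with a list of dicts runs the statement once per row:

import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://username:pass@host:port/dbname")
query = text("""
    INSERT INTO author(name, title, id)
    VALUES (:name, :title, :id)
    ON CONFLICT (id)
    DO UPDATE SET name = excluded.name,
                  title = excluded.title
""")
with engine.begin() as conn:
    # one dict per row; bound parameters make manual quote-escaping unnecessary
    conn.execute(query, df.to_dict("records"))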
Consider this function if your DataFrame and SQL table already contain the same column names and types.
Advantages:
- Good if you have a long dataframe to insert (batching)
- Avoids writing a long SQL statement in your code
- Fast
from sqlalchemy import Table
from sqlalchemy.engine.base import Engine as sql_engine
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.ext.automap import automap_base
import pandas as pd


def upsert_database(list_input: pd.DataFrame, engine: sql_engine, table: str, schema: str) -> None:
    if len(list_input) == 0:
        return None
    flattened_input = list_input.to_dict('records')
    with engine.connect() as conn:
        base = automap_base()
        base.prepare(engine, reflect=True, schema=schema)
        target_table = Table(table, base.metadata,
                             autoload=True, autoload_with=engine, schema=schema)
        chunks = [flattened_input[i:i + 1000] for i in range(0, len(flattened_input), 1000)]
        for chunk in chunks:
            stmt = insert(target_table).values(chunk)
            update_dict = {c.name: c for c in stmt.excluded if not c.primary_key}
            conn.execute(stmt.on_conflict_do_update(
                constraint=f'{table}_pkey',
                set_=update_dict)
            )
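A hedged usage example (assuming a table books in schema public whose primary-key constraint has the default Postgres name books_pkey, which the constraint= argument above relies on):

# rows whose primary key already exists are updated, the rest are inserted
upsert_database(df, engine, table="books", schema="public")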
If you already have a pandas dataframe, you could use df.to_sql to push the data directly through SQLAlchemy:
from sqlalchemy import create_engine

# create a connection from a Postgres URI
cnxn = create_engine("postgresql+psycopg2://username:password@host:port/database")

# write dataframe to database
df.to_sql("my_table", con=cnxn, schema="myschema")

Why am I getting a "relation does not exist" error for an existing table with SQLAlchemy MetaData?

I have the following code, which throws the following error:
engine = create_engine('postgresql+psycopg2:....', convert_unicode=True)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table('omni.all_order', metadata,
                         sqlalchemy.Column('o_id', sqlalchemy.Integer),
                         sqlalchemy.Column('order', sqlalchemy.String),
                         )
ins = table.insert().values(all_rows)
engine.execute(ins)
sqlalchemy.exc.ProgrammingError: (psycopg2.ProgrammingError) relation
"omni.all_order" does not exist
But the following two snippets work fine:
engine = create_engine('postgresql+psycopg2:....', convert_unicode=True)
result = engine.execute("SELECT * from omni.all_order ")
rows = result.fetchall()
print(rows)
--
engine = create_engine('postgresql+psycopg2:....', convert_unicode=True)
engine.execute("INSERT INTO omni.all_order (o_id) VALUES (1) ")
Creating another table first in the same schema (omni) throws the same error:
engine = create_engine('postgresql+psycopg2:....', convert_unicode=True)
result = engine.execute("CREATE TABLE omni.all_order_s(o_id INT, order VARCHAR(80))")
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table('omni.all_order_s', metadata,
                         sqlalchemy.Column('o_id', sqlalchemy.Integer),
                         sqlalchemy.Column('order', sqlalchemy.String),
                         )
ins = table.insert().values(all_rows)
engine.execute(ins)
sqlalchemy.exc.ProgrammingError: (psycopg2.ProgrammingError) relation
"omni.all_order_s" does not exist
but creating it outside of the schema works fine:
engine = create_engine('postgresql+psycopg2:....', convert_unicode=True)
result = engine.execute("CREATE TABLE all_order_s(o_id INT, order VARCHAR(80))")
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table('all_order_s', metadata,
                         sqlalchemy.Column('o_id', sqlalchemy.Integer),
                         sqlalchemy.Column('order', sqlalchemy.String),
                         )
ins = table.insert().values(all_rows)
engine.execute(ins)
Any ideas why this is?
Pass the table's schema using the schema= keyword argument instead of including it in the table's name:
table = sqlalchemy.Table('all_order', metadata,
                         sqlalchemy.Column('o_id', sqlalchemy.Integer),
                         sqlalchemy.Column('order', sqlalchemy.String),
                         schema='omni',
                         )
Currently the dotted name is quoted as a whole.
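To illustrate (a hedged sketch of the SQL each variant renders):

# Table('omni.all_order', ...) treats the whole name as one quoted identifier:
#   INSERT INTO "omni.all_order" (o_id, "order") VALUES (...)
# Table('all_order', ..., schema='omni') keeps schema and name separate:
#   INSERT INTO omni.all_order (o_id, "order") VALUES (...)
print(str(table.insert()))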
I had the same problem and I found the solution at this link: https://dba.stackexchange.com/questions/192897/postgres-relation-does-not-exist-error.
When you create the table name from a variable, the name is passed with quotes, so it becomes case-sensitive and must be quoted the same way whenever it is referenced again.
