With Python + SQLAlchemy + Oracle, I am trying to drop all tables and recreate them. I am using an Oracle sequence on the id column for auto-increment, but drop_all is not dropping the sequence.
engine = create_engine('oracle://user:pass@host:port/db',
                       implicit_returning=False,
                       echo=False)
Base = declarative_base(bind=engine)
if DROP_AND_CREATE:
    Base.metadata.drop_all(bind=engine)
meta_data = MetaData()
meta_data = Base.metadata
from domains import users
meta_data.create_all(engine, checkfirst=False)
Sample from the domains package:
class Users(Base):
    __tablename__ = 'users'
    id = Column(Integer, Sequence('users_id_seq'), primary_key=True)
    name = Column(String(255))
With the above, all tables are dropped, but I can see that the sequences I am using are still present in the Oracle db. If I manually delete them and run again, everything works fine.
The users_id_seq created in Oracle is not getting dropped. Please help.
Error message:
sqlalchemy.exc.DatabaseError: (cx_Oracle.DatabaseError) ORA-00955: name is already used by an existing object
[SQL: 'CREATE SEQUENCE user_queries_id_seq']
Finally, after a few hours of googling (still learning the language and framework), this code works and does exactly what I want in my application. Presumably it works because the models are imported before drop_all() is called, so the tables and their sequences are already registered in the metadata when the drop runs.
Thanks all
Base = declarative_base(bind=engine)
from domains import users
if DROP_AND_CREATE:
    Base.metadata.drop_all(bind=engine, checkfirst=True)
logger.info('Creating all registered tables.')
Base.metadata.create_all(bind=engine, checkfirst=True)
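As a side note, if a sequence was created outside the model metadata (or left behind by an earlier run), it can also be dropped explicitly. A minimal sketch using SQLAlchemy's DDL constructs, assuming the sequence name from the model above:
from sqlalchemy import Sequence
from sqlalchemy.schema import DropSequence

# Emits DROP SEQUENCE users_id_seq; this fails if the sequence does not exist.
engine.execute(DropSequence(Sequence('users_id_seq')))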
I have read in the following link:
Sqlalchemy adding multiple records and potential constraint violation
that using the SQLAlchemy Core to perform the inserts is a much faster option than the ORM's session.add() method, i.e.:
session.add()
should be replaced with:
session.execute(Entry.__table__.insert(), params=inserts)
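Here params is a list of plain dicts keyed by column name, which lets the DBAPI send all rows in a single executemany() round trip. A minimal hypothetical sketch (the Entry model and its columns are assumptions):
inserts = [{'name': 'first'}, {'name': 'second'}]
session.execute(Entry.__table__.insert(), params=inserts)
session.commit()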
In the following code I have tried to replace .add with .insert:
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, func
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Department(Base):
    __tablename__ = 'department'
    id = Column(Integer, primary_key=True)
    name = Column(String)
class Employee(Base):
    __tablename__ = 'employee'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # Use default=func.now() to set the default hiring time
    # of an Employee to be the current time when an
    # Employee record was created
    hired_on = Column(DateTime, default=func.now())
    department_id = Column(Integer, ForeignKey('department.id'))
    # Use cascade='delete,all' to propagate the deletion of a Department onto its Employees
    department = relationship(
        Department,
        backref=backref('employees',
                        uselist=True,
                        cascade='delete,all'))
from sqlalchemy import create_engine
engine = create_engine('postgres://blah:blah@blah:blah/blah')
from sqlalchemy.orm import sessionmaker
session = sessionmaker()
session.configure(bind=engine)
Base.metadata.create_all(engine)
d = Department(name="IT")
emp1 = Employee(name="John", department=d)
s = session()
s.add(d)
s.add(emp1)
s.commit()
s.delete(d) # Deleting the department also deletes all of its employees.
s.commit()
s.query(Employee).all()
# Insert Option Attempt
from sqlalchemy.dialects.postgresql import insert
d = insert(Department).values(name="IT")
d1 = d.on_conflict_do_nothing()
s.execute(d1)
emp1 = insert(Employee).values(name="John", department=d1)
emp1 = emp1.on_conflict_do_nothing()
s.execute(emp1)
The error I receive:
sqlalchemy.exc.CompileError: Unconsumed column names: department
I can't quite understand the syntax and how to do it the right way; I'm new to SQLAlchemy.
It looks like my question is similar to How to get primary key columns in pd.DataFrame.to_sql insertion method for PostgreSQL "upsert", so potentially by answering either of our questions, you could help two people at the same time ;-)
I am new to SQLAlchemy as well, but this is what I found:
Using your exact code, adding the department didn't work with s.execute(d1), so I changed it to the below and it does work:
with engine.connect() as conn:
    d = insert(Department).values(name="IT")
    d1 = d.on_conflict_do_nothing()
    conn.execute(d1)
I found in the SQLAlchemy documentation that, in the past, using a column name that doesn't really exist on the table raised only a warning. From version 0.8 it was changed to an exception.
As a result, I am not sure you can do that with insert alone. I think SQLAlchemy handles the relationship behind the scenes in some other way when using session.add(). Maybe some experts can elaborate here.
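For what it's worth, the error itself occurs because department is an ORM relationship, not a real column; a Core insert only accepts actual table columns, such as department_id. A rough sketch under that assumption (the extra SELECT is needed because on_conflict_do_nothing() returns no row when the insert is skipped, and the conflict only triggers if name has a unique constraint):
from sqlalchemy import select
from sqlalchemy.dialects.postgresql import insert

with engine.connect() as conn:
    conn.execute(insert(Department).values(name="IT").on_conflict_do_nothing())
    # Look up the department's primary key separately.
    dept_id = conn.execute(
        select([Department.id]).where(Department.name == "IT")
    ).scalar()
    conn.execute(
        insert(Employee)
        .values(name="John", department_id=dept_id)
        .on_conflict_do_nothing()
    )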
I hope that will help.
I created a very simple database with SQLAlchemy as follows:
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Person(Base):
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
engine = create_engine('sqlite:///sqlalchemy_example.db')
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Insert a Person in the person table
new_person = Person(name='new person')
session.add(new_person)
session.commit()
and then I tried to read it using pyDAL.
from pydal import DAL, Field
db = DAL('sqlite://sqlalchemy_example.db', auto_import=True)
db.tables
>> []
db.define_table('person', Field('name'))
>> OperationalError: table "person" already exists
How do I access the table using pyDAL?
thank you
First, do not set auto_import=True, as that is only relevant if pyDAL *.table migration metadata files exist for the tables, which will not be the case here.
Second, pyDAL does not know the table already exists, and because migrations are enabled by default, it attempts to create the table. To prevent this, you can simply disable migrations:
# Applies to all tables.
db = DAL('sqlite://sqlalchemy_example.db', migrate_enabled=False)
or:
# Applies to this table only.
db.define_table('person', Field('name'), migrate=False)
If you would like pyDAL to take over migrations for future changes to this table, then you should run a "fake migration", which will cause pyDAL to generate a *.table migration metadata file for this table without actually running the migration. To do this, temporarily make the following change:
db.define_table('person', Field('name'), fake_migrate=True)
After leaving the above in place for a single request, the *.table file will be generated, and you can remove the fake_migrate=True argument.
Finally, note that pyDAL expects the id field to be an auto-incrementing integer primary key field. The Person model above satisfies this, as an Integer primary key is auto-incrementing by default in SQLite.
I am using SQLAlchemy as the ORM for a Python project. I have created a few models/schemas and they are working fine. Now I need to query an existing MySQL database: no insert/update, just select statements.
How can I create a wrapper around the tables of this existing database? I have briefly gone through the SQLAlchemy docs and SO but couldn't find anything relevant. Everything suggests the execute method, where I would need to write raw SQL queries, while I want to use the SQLAlchemy query method the same way I use it with my SA models.
For example, if the existing db has a table named User, then I want to query it using the dbsession (only select operations, probably with joins).
You seem to have an impression that SQLAlchemy can only work with a database structure created by SQLAlchemy (probably using MetaData.create_all()) - this is not correct. SQLAlchemy can work perfectly with a pre-existing database, you just need to define your models to match database tables. One way to do that is to use reflection, as Ilja Everilä suggests:
from sqlalchemy import Table
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyClass(Base):
    __table__ = Table('mytable', Base.metadata,
                      autoload=True, autoload_with=some_engine)
(which, in my opinion, would be totally fine for one-off scripts but may lead to incredibly frustrating bugs in a "real" application if there's a potential that the database structure may change over time)
Another way is to simply define your models as usual, taking care to match the database tables, which is not that difficult. The benefit of this approach is that you can map only a subset of the database tables to your models, and even only a subset of a table's columns to your model's fields. Suppose you have 10 tables in the database but are only interested in the users table, from which you only need the id, name and email fields:
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String)
    email = sa.Column(sa.String)
(note how we didn't need to define some details which are only needed to emit correct DDL, such as the length of the String fields or the fact that the email field has an index)
SQLAlchemy will not emit INSERT/UPDATE queries unless you create or modify models in your code. If you want to ensure that your queries are read-only you may create a special user in the database and grant that user SELECT privileges only. Alternatively/in addition, you may also experiment with rolling back the transaction in your application code.
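A minimal sketch of the rollback idea, assuming an engine named some_engine as above (the query itself is hypothetical):
from sqlalchemy.orm import sessionmaker

Session = sessionmaker(bind=some_engine)
session = Session()
try:
    users = session.query(User).filter(User.name == 'alice').all()
finally:
    # Discard any accidental changes instead of committing them.
    session.rollback()
    session.close()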
You can access an existing table using the automap extension:
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
Base = automap_base()
Base.prepare(engine, reflect=True)
Users = Base.classes.users
session = Session(engine)
res = session.query(Users).first()
Create a Table with autoload enabled and SQLAlchemy will inspect the database for its columns. Some example code:
from sqlalchemy.sql import select
from sqlalchemy import create_engine, MetaData, Table
CONN_STR = '…'
engine = create_engine(CONN_STR, echo=True)
metadata = MetaData()
cookies = Table('cookies', metadata, autoload=True,
                autoload_with=engine)
cols = cookies.c
with engine.connect() as conn:
    query = (
        select([cols.created_at, cols.name])
        .order_by(cols.created_at)
        .limit(1)
    )
    for row in conn.execute(query):
        print(row)
Other answers don't mention what to do if you have a table with no primary key, so I thought I would address this. Assuming a table called Customers that has the columns CustomerId, CustomerName and CustomerLocation, you could do:
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine, MetaData, Column, String, Table
from sqlalchemy.orm import Session
conn_str = '...'
engine = create_engine(conn_str)
metadata = MetaData()
# You only need to define which column is the primary key;
# automap can reflect the rest of the columns.
customers = Table('Customers', metadata,
                  Column('CustomerId', String, primary_key=True),
                  autoload=True, autoload_with=engine)
# Hand the pre-populated metadata to automap so prepare() picks up the table.
Base = automap_base(metadata=metadata)
Base.prepare()
Customers = Base.classes.Customers
session = Session(engine)
customer1 = session.query(Customers).first()
print(customer1.CustomerName)
Assume we have a PostgreSQL database named accounts, and we already have a table named users.
import sqlalchemy as sa
psw = "verysecret"
db = "accounts"
# create an engine
pengine = sa.create_engine('postgresql+psycopg2://postgres:' + psw + '@localhost/' + db)
from sqlalchemy.ext.declarative import declarative_base
# define declarative base
Base = declarative_base()
# reflect current database engine to metadata
metadata = sa.MetaData(pengine)
metadata.reflect()
# build your User class on existing `users` table
class User(Base):
    __table__ = sa.Table("users", metadata)
# call the session maker factory
Session = sa.orm.sessionmaker(pengine)
session = Session()
# filter a record
session.query(User).filter(User.id==1).first()
Warning: your table should have a primary key defined; otherwise SQLAlchemy won't be able to map it.
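If the table genuinely has no primary key in the database, you can instead tell the mapper explicitly which column(s) to treat as the key. A hypothetical sketch, assuming the reflected users table has a unique id column:
class User(Base):
    __table__ = sa.Table("users", metadata)
    # Not a real constraint in the database, only a hint for the mapper.
    __mapper_args__ = {"primary_key": [__table__.c.id]}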
I want to use multiple database engines with a single SQLAlchemy database model.
The situation is as follows:
I have photo album software (Python), and the different albums are stored in different folders. In each folder is a separate SQLite database with additional information about the photos. I don't want to use a single global database because this way I can simply move, delete and copy albums at the folder level.
Opening a single album is fairly straightforward:
Creating a db session:
maker = sessionmaker(autoflush=True, autocommit=False,
                     extension=ZopeTransactionExtension())
DBSession = scoped_session(maker)
Base class and metadata for db model:
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
Defining database model (shortened):
pic_tag_table = Table('pic_tag', metadata,
                      Column('pic_id', Integer,
                             ForeignKey('pic.pic_id'),
                             primary_key=True),
                      Column('tag_id', Integer,
                             ForeignKey('tag.tag_id'),
                             primary_key=True))
class Picture(DeclarativeBase):
    __tablename__ = 'pic'
    pic_id = Column(Integer, autoincrement=True, primary_key=True)
    ...

class Tags(DeclarativeBase):
    __tablename__ = 'tag'
    tag_id = Column(Integer, autoincrement=True, primary_key=True)
    ...
    pictures = relation('Picture', secondary=pic_tag_table, backref='tags')
And finally open the connection:
engine = engine_from_config(config, '...')
DBSession.configure(bind=engine)
metadata.bind = engine
This works well for opening one album. Now I want to open multiple albums (and db connections) at the same time. Every album has the same database model, so my hope is that I can reuse it. My problem is that the model class definition is inherited from the declarative base, which is connected to the metadata and the database engine. I want to connect the classes to different metadata with different engines. Is this possible?
P.S.: I also want to query the databases via the ORM, e.g. DBSession.query(Picture).all() (or DBSession[0], ... for multiple sessions on different databases), so not one query for all pictures in all databases, but one ORM-style query against one database.
You can achieve this with multiple engines and sessions (you don't need multiple metadata):
engine1 = create_engine("sqlite:///tmp1.db")
engine2 = create_engine("sqlite:///tmp2.db")
Base.metadata.create_all(bind=engine1)
Base.metadata.create_all(bind=engine2)
session1 = Session(bind=engine1)
session2 = Session(bind=engine2)
print(session1.query(Picture).all()) # []
print(session2.query(Picture).all()) # []
session1.add(Picture())
session1.commit()
print(session1.query(Picture).all()) # [Picture]
print(session2.query(Picture).all()) # []
session2.add(Picture())
session2.commit()
print(session1.query(Picture).all()) # [Picture]
print(session2.query(Picture).all()) # [Picture]
session1.close()
session2.close()
For scoped_session, you can create multiple of those as well.
engine1 = create_engine("sqlite:///tmp1.db")
engine2 = create_engine("sqlite:///tmp2.db")
Base.metadata.create_all(bind=engine1)
Base.metadata.create_all(bind=engine2)
Session1 = scoped_session(sessionmaker(bind=engine1))
Session2 = scoped_session(sessionmaker(bind=engine2))
session1 = Session1()
session2 = Session2()
...
If you have a variable number of databases you need to have open, scoped_session might be a little cumbersome. You'll need some way to keep track of them.
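One hypothetical way to keep track of them is a small registry keyed by album folder (the album.db file name is an assumption):
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

sessions = {}

def get_album_session(album_path):
    # Lazily create one engine and session per album folder.
    if album_path not in sessions:
        engine = create_engine("sqlite:///" + os.path.join(album_path, "album.db"))
        Base.metadata.create_all(bind=engine)
        sessions[album_path] = Session(bind=engine)
    return sessions[album_path]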
I'm using SQLAlchemy and have some schemas that are account specific. The name of the schema is derived from the account ID, so I don't have the name of the schema until I hit my application service or repository layer. I'm wondering if it's possible to run a query against an entity whose schema is set dynamically at runtime?
I know I need to set __table_args__['schema'] and have tried doing that using the type() built-in, but I always get the following error:
could not assemble any primary key columns for mapped table
I'm ready to give up and just write straight SQL, but I really hate to do that. Any idea how this can be done? I'm using SA 0.99 and I do have a PK mapped.
Thanks
From SQLAlchemy 1.1, this can be done easily using the schema_translate_map execution option.
https://docs.sqlalchemy.org/en/11/changelog/migration_11.html#multi-tenancy-schema-translation-for-table-objects
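A minimal sketch, with a hypothetical schema name and table:
# Statements executed through this engine have schema-less tables
# rewritten to the account's schema at execution time.
connectable = engine.execution_options(
    schema_translate_map={None: "account_12345"})
with connectable.connect() as conn:
    result = conn.execute(my_table.select())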
One option would be to reflect the particular account-dependent tables. Here is the SQLAlchemy documentation on the matter.
Alternatively, you can create the table with a static schema attribute and update it as needed at runtime, then run the queries you need. I can't think of a non-messy way to do this, so here's the messy option:
Use a loop to update the schema property in each table definition whenever the account is switched.
Add all the tables that are account-specific to a list.
If the tables are expressed in the declarative syntax, you have to modify the DeclarativeName.__table__.schema attribute. I'm not sure if you also need to modify DeclarativeName.__table_args__['schema'], but I guess it won't hurt.
If the tables are expressed in the old-style Table syntax, you have to modify the Table.schema attribute.
If you're using text for any relationships or foreign keys, that will break, and you have to inspect each table for such hard-coded usage and change it.
Example:
user_id = Column(ForeignKey('my_schema.user.id')) needs to be written as user_id = Column(ForeignKey(User.id)). Then you can change the schema of User to my_new_schema. Otherwise, at query time SQLAlchemy will be confused because the foreign key will point to my_schema.user.id while the query points to my_new_schema.user.
I'm not sure if more complicated relationships can be expressed without the use of plain text, so I guess that's the limit of my proposed solution.
Here's an example I wrote up in the terminal:
>>> from sqlalchemy import Column, Table, Integer, String, select, ForeignKey
>>> from sqlalchemy.orm import relationship, backref
>>> from sqlalchemy.ext.declarative import declarative_base
>>> B = declarative_base()
>>>
>>> class User(B):
... __tablename__ = 'user'
... __table_args__ = {'schema': 'first_schema'}
... id = Column(Integer, primary_key=True)
... name = Column(String)
... email = Column(String)
...
>>> class Posts(B):
... __tablename__ = 'posts'
... __table_args__ = {'schema':'first_schema'}
... id = Column(Integer, primary_key=True)
... user_id = Column(ForeignKey(User.id))
... text = Column(String)
...
>>> str(select([User.id, Posts.text]).select_from(User.__table__.join(Posts)))
'SELECT first_schema."user".id, first_schema.posts.text \nFROM first_schema."user" JOIN first_schema.posts ON first_schema."user".id = first_schema.posts.user_id'
>>> account_specific = [User, Posts]
>>> for Tbl in account_specific:
... Tbl.__table__.schema = 'second_schema'
...
>>> str(select([User.id, Posts.text]).select_from(User.__table__.join(Posts)))
'SELECT second_schema."user".id, second_schema.posts.text \nFROM second_schema."user" JOIN second_schema.posts ON second_schema."user".id = second_schema.posts.user_id'
As you see the same query refers to the second_schema after I update the table's schema attribute.
Edit: although you can do what I did here, using the schema translation map as shown in the answer above is the proper way to do it.
They are set statically. Foreign keys need the same treatment, and I had an additional issue in that I have multiple schemas containing multiple tables, so I did this:
from sqlalchemy.ext.declarative import declarative_base
staging_dbase = declarative_base()
model_dbase = declarative_base()
def adjust_schemas(staging, model):
    for vv in staging_dbase.metadata.tables.values():
        vv.schema = staging
    for vv in model_dbase.metadata.tables.values():
        vv.schema = model

def all_tables():
    return staging_dbase.metadata.tables.union(model_dbase.metadata.tables)
Then in my startup code:
adjust_schemas(staging=staging_name, model=model_name)
You can mod this for a single declarative base.
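For a single declarative base, that might look like (the Base name is assumed):
def adjust_schema(schema):
    # Point every table of the one base at the given schema.
    for vv in Base.metadata.tables.values():
        vv.schema = schema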
I'm working on a project in which I have to create Postgres schemas and tables dynamically and then insert data into the proper schema. Here is something I have done; maybe it will help someone:
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app.models.user import User
engine_uri = "postgres://someusername:somepassword#localhost:5432/users"
engine = create_engine(engine_uri, pool_pre_ping=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def create_schema(schema_name: str):
    """
    Creates a new postgres schema
    - **schema_name**: name of the new schema to create
    """
    if not engine.dialect.has_schema(engine, schema_name):
        engine.execute(sqlalchemy.schema.CreateSchema(schema_name))

def create_tables(schema_name: str):
    """
    Create new tables for postgres schema
    - **schema_name**: schema in which tables are to be created
    """
    if (
        engine.dialect.has_schema(engine, schema_name) and
        not engine.dialect.has_table(engine, str(User.__table__.name))
    ):
        User.__table__.schema = schema_name
        User.__table__.create(engine)

def add_data(schema_name: str):
    """
    Add data to a particular postgres schema
    - **schema_name**: schema in which data is to be added
    """
    if engine.dialect.has_table(engine, str(User.__table__.name)):
        db = SessionLocal()
        db.connection(execution_options={
            "schema_translate_map": {None: schema_name}},
        )
        user = User()
        user.name = "Moin"
        user.salary = 10000
        db.add(user)
        db.commit()
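Usage would then look something like this (the schema name is hypothetical):
create_schema("tenant_a")
create_tables("tenant_a")
add_data("tenant_a")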