I have two different models in two different schemas, and one has a foreign key relation to the other. When I run BaseOne.metadata.create_all(engine) and then BaseTwo.metadata.create_all(engine), I get sqlalchemy.exc.NoReferencedTableError: Foreign key associated with column...
BaseOne = declarative_base(metadata=MetaData(schema="a"))
BaseTwo = declarative_base(metadata=MetaData(schema="b"))
class Parent(BaseOne):
__tablename__ = "parent"
parent_id = Column(Integer, primary_key=True)
other_col = Column(String(20))
children = relationship("Child", backref="parent")
class Child(BaseTwo):
__tablename__ = "child"
child_id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey("a.parent.parent_id"), nullable=False)
# Where I'm creating them
BaseOne.metadata.create_all(engine)
BaseTwo.metadata.create_all(engine)
I should note that I've also tried explicitly stating the schema via __table_args__. I have also connected to my Postgres instance and verified that the parent table exists with the target column.
It appears the issue was caused by using multiple MetaData objects, which were unable to see each other. Simplifying to a single declarative base and using __table_args__ to declare the schemas worked. If someone knows how to declare multiple MetaData objects and still be able to use .create_all(), feel free to post.
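For reference, here is a minimal sketch of the single-base approach, assuming the same schema names "a" and "b" and the same columns as above (the connection URL is a placeholder):
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

# One Base, and therefore one MetaData, shared by both schemas.
Base = declarative_base()

class Parent(Base):
    __tablename__ = "parent"
    __table_args__ = {"schema": "a"}
    parent_id = Column(Integer, primary_key=True)
    other_col = Column(String(20))
    children = relationship("Child", backref="parent")

class Child(Base):
    __tablename__ = "child"
    __table_args__ = {"schema": "b"}
    child_id = Column(Integer, primary_key=True)
    # Resolves because both tables are registered in the same MetaData.
    parent_id = Column(Integer, ForeignKey("a.parent.parent_id"), nullable=False)

engine = create_engine("postgresql://user:password@localhost/mydb")  # placeholder URL
Base.metadata.create_all(engine)  # a single create_all() now sees both tables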
This may be solved by using Alembic to manage table creation. Ensure that all bases are included in the target_metadata list e.g.:
# pylint: skip-file
import os
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config
from sqlalchemy import pool
import unimatrix.ext.octet.orm
import gpo.infra.orm
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = [
unimatrix.ext.octet.orm.metadata,
gpo.infra.orm.Relation.metadata
]
# Configure SQLAlchemy to use the DB_URI environment variable.
config.set_main_option("sqlalchemy.url", os.environ["DB_URI"])
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
I followed the documentation from Alembic to auto-generate migrations. My project structure looks like this:
alembic/
versions/
env.py
README
script.py.mako
data/
__init__.py
db.py
models.py
alembic.ini
app.db
I made changes to env.py, exactly following the documentation:
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from data.models import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
This is my __init__.py:
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .models import Base
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
db_url = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
engine = create_engine(db_url, echo=False)
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)
I created a User class in models.py like this:
from sqlalchemy import Column, Sequence, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, Sequence('id_seq'), primary_key=True)
first_name = Column(String, nullable=False)
last_name = Column(String, nullable=False)
def __repr__(self):
return "<%s('%s', '%s')>" \
% (self.__class__.__qualname__, self.first_name,
self.last_name)
After that, I ran the migration with:
alembic revision --autogenerate -m "Added user table"
However, I got an empty upgrade() and downgrade() in the migration file:
"""Added user table
Revision ID: 279933caec54
Revises:
Create Date: 2021-05-12 16:21:05.772568
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '279933caec54'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
But when I checked the database, the user table had been created there (I didn't create it myself). How did that happen? How did Alembic generate an empty migration and yet the table got created?
I found the answer in another question. The problem described there is not quite the same as mine (Alembic did not create an empty migration file in that case), which is why I asked the question here (it may look like a duplicate).
As that answer suggested, I commented out Base.metadata.create_all(engine) in __init__.py. It is this line of code that creates the user table (if it doesn't exist). So when Alembic checked my database, the table was already there; since no differences were found, upgrade() and downgrade() were empty.
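For clarity, this is what the data/__init__.py from above looks like with only the create_all() call commented out:
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .models import Base

basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
db_url = os.environ.get('DATABASE_URL') or \
    'sqlite:///' + os.path.join(basedir, 'app.db')
engine = create_engine(db_url, echo=False)
# Let Alembic manage table creation instead of creating tables at import time.
# Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)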
In this situation, don't import your Base from the file where it is defined; import it from somewhere you have extended the Base class.
For example, in my project the models live under a models package, and I imported the Base class in alembic/env.py from models.trade.
In this form, your Base class metadata will detect your models and your autogenerated migrations will work fine.
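In other words, the relevant part of alembic/env.py looks something like this (models.trade is just my module; point the import at wherever your models actually extend Base):
# alembic/env.py (excerpt)
# Importing Base from a module that defines models on it ensures the model
# classes are registered on Base.metadata before autogenerate inspects it.
from models.trade import Base

target_metadata = Base.metadata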
How do I configure SQLAlchemy to not create tables in Flask-SQLAlchemy?
I only use Flask-SQLAlchemy's Model features, but I don't want the table to be created.
How do I configure this?
The reason is that I want to map a SQL view; if I run db.create_all(), the view gets created as a table.
Thanks.
This is a view:
# -*- coding: utf-8 -*-
from settings.dataBase import CRUDMixin, db
class ViewExample(db.Model, CRUDMixin):
__tablename__ = "view_example"
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(50))
title = db.Column(db.String(100))
is_albums = db.Column(db.Integer)
is_attach = db.Column(db.Integer)
is_spec = db.Column(db.Integer)
sort_id = db.Column(db.Integer)
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return 'id : %s' % self.id
I found an example, but I don't know how to use it with Flask-SQLAlchemy.
Alembic/SQLAlchemy documentation:
https://alembic.sqlalchemy.org/en/latest/cookbook.html#don-t-emit-create-table-statements-for-views
This is my Flask-Migrate env.py.
I added an include_object function, but it doesn't pick up is_view.
from __future__ import with_statement
import logging
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option(
'sqlalchemy.url', current_app.config.get(
'SQLALCHEMY_DATABASE_URI').replace('%', '%%'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def include_object(object, name, type_, reflected, compare_to):
"""
Exclude views from Alembic's consideration.
"""
print(object.info)
return not object.info.get('is_view', False)
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True,
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object,# add
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
Adding __table_args__ = { "info": dict(is_view=True)} works:
class ViewExample(db.Model, CRUDMixin):
__tablename__ = "view_example"
__table_args__ = { "info": dict(is_view=True)} # add
Thanks! This question has been solved.
If you are not using Alembic, the answer given here is not complete.
What you need to do is change the metadata class that comes with db.Model, in the following way:
from sqlalchemy import MetaData

class ViewAwareMetaData(MetaData):
    def create_all(self, bind=None, tables=None, checkfirst=True):
        # Default to all known tables, then drop anything flagged as a view.
        if tables is None:
            tables = list(self.tables.values())
        tables = [table for table in tables if not table.info.get('is_view', False)]
        super().create_all(bind=bind, tables=tables, checkfirst=checkfirst)
and then use the new metadata when initializing SQLAlchemy:
db = SQLAlchemy(engine_options=engine_options, metadata=ViewAwareMetaData())
You can add __abstract__ = True to any model whose table you don't want to be created.
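As a minimal sketch of that option (the class and columns here are made up for illustration, reusing the db object from above), no table is emitted for an abstract model by create_all() or by Alembic autogenerate:
from settings.dataBase import db  # the same db object used by the models above

class ViewBase(db.Model):
    __abstract__ = True  # no table is created for this model
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))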
I am attempting to implement Flask-Session in my Python application. I read in the docs that it's recommended to use another interface like SqlAlchemySessionInterface instead of the default NullSessionInterface, which is used when nothing is provided to the SESSION_TYPE configuration key.
The flask_session/__init__.py file, under class Session, reads:
By default Flask-Session will use :class:NullSessionInterface, you
really should configurate your app to use a different SessionInterface.
After setting the SESSION_TYPE configuration key to "sqlalchemy", I get an error:
sqlalchemy.exc.ProgrammingError: (psycopg2.ProgrammingError) relation "sessions" does not exist
This indicates that Flask-Session is looking for a table named "sessions" in my database, but I cannot find anywhere in the Flask-Session documentation that says such a table should be created or what fields it should have.
Can anyone suggest a solution to this please?
I wanted to use Flask-Session, but I was also using Flask-Migrate and didn't want to call db.create_all() manually and break the migration path. Fortunately, @Flashspeedlife's suggestion of just importing the interface and instantiating it worked.
app/__init__.py:
from flask import Flask
from flask_session import SqlAlchemySessionInterface
from app.extensions import db, sess, migrate  # My extensions file

def create_app():
    app = Flask(__name__)
    with app.app_context():
        db.init_app(app)
        migrate.init_app(app, db)
        sess.init_app(app)
        SqlAlchemySessionInterface(app, db, "sessions", "sess_")
    return app
Now, flask db migrate generates an alembic script with the new sessions table.
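For reference, the generated upgrade step looks roughly like the sketch below (written by hand here from the columns of the Session model shown further down, using the standard op and sa imports that Alembic puts at the top of every revision file, so your autogenerated version may differ in detail):
def upgrade():
    op.create_table(
        'sessions',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('session_id', sa.String(length=255), nullable=True),
        sa.Column('data', sa.LargeBinary(), nullable=True),
        sa.Column('expiry', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('session_id'),
    )

def downgrade():
    op.drop_table('sessions')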
After studying the flask_session/__init__.py code, I found that class SqlAlchemySessionInterface defines a Flask-SQLAlchemy model inside its __init__:
class Session(self.db.Model)
To get that table model created, I imported SqlAlchemySessionInterface from flask_sessionstore in the file where I create my models, added the line
SqlAlchemySessionInterface(myApp, sqlAlchemyDbObject, "table_name", "prefix_")
and then ran db.create_all().
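A rough sketch of that arrangement, assuming myApp and db are the Flask app and Flask-SQLAlchemy objects created elsewhere in your project (the import path here is illustrative):
# models.py (sketch)
from flask_sessionstore import SqlAlchemySessionInterface
from myapp import myApp, db  # hypothetical module exposing the app and db objects

# Instantiating the interface defines its internal Session model on db.Model,
# so the "sessions" table is picked up by create_all().
SqlAlchemySessionInterface(myApp, db, "sessions", "sess_")

db.create_all()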
class SqlAlchemySessionInterface(SessionInterface):
"""Uses the Flask-SQLAlchemy from a flask app as a session backend.
.. versionadded:: 0.2
:param app: A Flask app instance.
:param db: A Flask-SQLAlchemy instance.
:param table: The table name you want to use.
:param key_prefix: A prefix that is added to all store keys.
:param use_signer: Whether to sign the session id cookie or not.
:param permanent: Whether to use permanent session or not.
"""
serializer = pickle
session_class = SqlAlchemySession
def __init__(self, app, db, table, key_prefix, use_signer=False,
permanent=True):
if db is None:
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy(app)
self.db = db
self.key_prefix = key_prefix
self.use_signer = use_signer
self.permanent = permanent
class Session(self.db.Model):
__tablename__ = table
id = self.db.Column(self.db.Integer, primary_key=True)
session_id = self.db.Column(self.db.String(255), unique=True)
data = self.db.Column(self.db.LargeBinary)
expiry = self.db.Column(self.db.DateTime)
def __init__(self, session_id, data, expiry):
self.session_id = session_id
self.data = data
self.expiry = expiry
def __repr__(self):
return '<Session data %s>' % self.data
# self.db.create_all()
self.sql_session_model = Session
I'm definitely using Django for my next project. The documentation for many Flask extensions isn't great at all.
EDIT
Changed "imported SqlAlchemySessionInterface from flask_session" to "imported SqlAlchemySessionInterface from flask_sessionstore".
Sorry, I wasn't clear before.
Edited:
I am using the default Pyramid application with SQLAlchemy (backend: PostgreSQL), generated using
pcreate -s alchemy
so I have all the settings related to DBSession, pyramid_tm, etc. I have this code:
class User(Base):
id = Column(Integer, primary_key=True)
connection = relationship("UserConnection", uselist=False, backref="user")
class UserConnection(Base):
userid = Column(Integer, ForeignKey('user.id'), primary_key=True)
friends = Column(JSON)
def add_connection(user, friend):
with transaction.manager:
if not user.connection:
user_conn = UserConnection(userid=user.id, friends=[])
else:
user_conn = user.connection
user_conn.friends.append(friend)
print(user_conn.friends)
session.add(user_conn)
When I run add_connection() the first time, user.connection doesn't exist yet, and a new record gets created. But on the next run (when it goes into the else branch), the record doesn't get updated; on the console I only see ROLLBACK/COMMIT and no other statements.
The print statement there shows the updated result, but the database is not updated.
You should use a transaction scoped to the request.
zope.sqlalchemy and pyramid_tm can do that for you.
You can use my code:
pyramid_sqlalchemy.py
# -*- coding: utf-8 -*-
""" Pyramid sqlalchemy lib.
Session will be available as dbsession attribute on request.
! Do not close session on your own.
"""
import sqlalchemy
from sqlalchemy.orm import sessionmaker, scoped_session
from zope.sqlalchemy import ZopeTransactionExtension
Session = scoped_session(sessionmaker(
extension=ZopeTransactionExtension()
))
def includeme(config):
""" Setup sqlalchemy session connection for pyramid app.
:param config: Pyramid configuration
"""
config.include('pyramid_tm')
# creates database engine from ini settings passed to pyramid wsgi
engine = sqlalchemy.engine_from_config(
config.get_settings(), connect_args={
'charset': 'utf8'
}
)
# scoped session gives us thread safe session
Session.configure(bind=engine)
# make database session available in every request
config.add_request_method(
callable=lambda request: Session, name='dbsession', property=True
)
Install zope.sqlalchemy and pyramid_tm using pip and call config.include('pyramid_sqlalchemy').
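With that in place, views can use request.dbsession and let pyramid_tm commit or roll back at the end of the request. A rough usage sketch, reusing the UserConnection model from the question (the route name and import path are made up for illustration):
# views.py (sketch)
from pyramid.view import view_config

from .models import UserConnection  # hypothetical module holding the question's models

@view_config(route_name='add_friend', renderer='json')
def add_friend(request):
    # request.dbsession is the scoped session registered in includeme() above;
    # pyramid_tm commits the transaction when the request finishes successfully.
    userid = int(request.matchdict['userid'])
    conn = request.dbsession.query(UserConnection).get(userid)
    if conn is None:
        conn = UserConnection(userid=userid, friends=[])
        request.dbsession.add(conn)
    # Reassign the list rather than appending in place so SQLAlchemy
    # detects the change on the JSON column.
    conn.friends = conn.friends + [request.params['friend']]
    return {'friends': conn.friends}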
I have a problem setting up the database connection. I want to set up the connection once so that it is available in all my controllers.
Right now I use something like this in a controller:
db = create_engine('mysql://root:password@localhost/python')
metadata = MetaData(db)
email_list = Table('email',metadata,autoload=True)
In development.ini I have:
sqlalchemy.url = mysql://root:password@localhost/python
sqlalchemy.pool_recycle = 3600
How do I set up __init__.py?
I hope you got Pylons working; for anyone else who may read this question later, I'll give some pointers in the right direction.
First of all, you are only creating an engine and a metadata object. While you can use the engine to create connections directly, you would almost always use a Session to manage querying and updating your database.
Pylons sets this up for you automatically by creating an engine from your configuration file and passing it to yourproject.model.__init__.py:init_model(), which binds it to a scoped_session object.
This scoped_session object is available from yourproject.model.meta and is the object you would use to query your database. For example:
record = meta.Session.query(model.MyTable).filter_by(id=42).first()
Because it is a scoped_session, it automatically creates a Session object and associates it with the current thread if one doesn't already exist. scoped_session passes all actions (.query(), .add(), .delete()) down to the real Session object and thus gives you a simple way to interact with the database without having to manage the non-thread-safe Session object explicitly.
The scoped_session Session object from yourproject.model.meta is automatically associated with a metadata object, created as either yourproject.model.meta:metadata (in Pylons 0.9.7 and below) or yourproject.model.meta:Base.metadata (in Pylons 1.0). Use this metadata object to define your tables. As you can see, in newer versions of Pylons the metadata is associated with a declarative_base() object named Base, which allows you to use SQLAlchemy's declarative style.
Using this from a controller:
from yourproject import model
from yourproject.model import Session
class MyController(..):
def resource(self):
result = Session.query(model.email_list).\
filter(model.email_list.c.id == 42).one()
return str(result)
Use real connections
If you really want to get a connection object, simply use:
from yourproject.model import Session
connection = Session.connection()
result = connection.execute("select 3+4;")
# more connection executions
Session.commit()
However, while all of this works, what you should really be doing is the following.
The approach above doesn't really use much of SQLAlchemy. The power of SQLAlchemy really shines when you start mapping your database tables to Python classes, so anyone looking into using Pylons with a database should take a serious look at what it can do. If SQLAlchemy seems intimidating at first, simply start with its declarative approach, which should be enough for almost all Pylons apps.
In your model, instead of defining Table constructs, do this:
from sqlalchemy import Column, Integer, Unicode, ForeignKey
from sqlalchemy.orm import relation
from yourproject.model.meta import Base
class User(Base):
__tablename__ = 'users'
# primary_key implies nullable=False
id = Column(Integer, primary_key=True, index=True)
# nullable defaults to True
name = Column(Unicode, nullable=False)
notes = relation("UserNote", backref="user")
query = Session.query_property()
class UserNote(Base):
__tablename__ = 'usernotes'
# primary_key implies nullable=False
id = Column(Integer, primary_key=True, index=True)
userid = Column(Integer, ForeignKey("users.id"), index=True)
# nullable defaults to True
text = Column(Unicode, nullable=False)
query = Session.query_property()
Note the query objects. These are smart objects that live on the class and associate your classes with the scoped_session(), Session. This makes it even easier to extract data from your database:
from sqlalchemy.orm import eagerload
def resource(self):
user = User.query.filter(User.id==42).options(eagerload("notes")).one()
return "\n".join([ x.text for x in user.notes ])
Pylons 1.0 uses the declarative syntax; you can read more about it here.
In model/__init__.py you can write something like this:
from your_programm.model.meta import Session, Base
from sqlalchemy import *
from sqlalchemy.types import *
def init_model(engine):
Session.configure(bind=engine)
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
name = Column(String)
...
What you want to do is modify the Globals class in your app_globals.py file to include a .engine (or whatever) attribute. Then, in your controllers, you use from pylons import app_globals and app_globals.engine to access the engine (or metadata, session, scoped_session, etc...).
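A rough sketch of that approach, assuming a Pylons 1.0-style Globals whose constructor receives the config dict and an ini file with the usual sqlalchemy.* keys:
# yourproject/lib/app_globals.py (sketch)
from sqlalchemy import engine_from_config

class Globals(object):
    """Objects in this class live for the lifetime of the application."""

    def __init__(self, config):
        # Build the engine once from the sqlalchemy.* settings in the ini file.
        self.engine = engine_from_config(config, prefix='sqlalchemy.')

# In a controller:
# from pylons import app_globals
# result = app_globals.engine.execute("select 3+4;")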