Alembic create_table using declarative_base-derived objects

I have a SQLAlchemy ORM object:

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class MyORM(Base):
    __tablename__ = 'myorm'
    id = Column(Integer, primary_key=True)
    name = Column(String(128), unique=True, nullable=False)
When using Alembic to create the table, I do the following:

def upgrade():
    op.create_table(
        'myorm',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.String(128), nullable=False),
    )
Question: Is there a way to use the MyORM class to create the table? Something like this:

def upgrade():
    op.create_table(
        'myorm',
        sa.BaseObject(MyORM)
    )

This is exactly what Alembic migrations are trying to avoid: if you tie a migration to the current state of your models, it no longer describes a consistent upgrade path.

You can use declarative in your migrations to create tables and migrate data, but not to alter tables. You will have to re-create the definitions separately from the application definitions. This can be useful if you want to do a data migration and are more familiar with ORM queries than with core queries.
Here is an example migration that creates Foo and Bar models with a many-to-many relationship using declarative, creates the tables, and inserts some data.
"""declarative
Revision ID: 169ad57156f0
Revises: 29b4c2bfce6d
Create Date: 2014-06-25 09:00:06.784170
"""
revision = '169ad57156f0'
down_revision = '29b4c2bfce6d'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
Session = sessionmaker()
Base = declarative_base()
class Foo(Base):
__tablename__ = 'foo'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False, unique=True)
class Bar(Base):
__tablename__ = 'bar'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False, unique=True)
foos = relationship(Foo, lambda: foo_bar, backref='bars')
foo_bar = sa.Table(
'foo_bar', Base.metadata,
sa.Column('foo_id', sa.Integer, sa.ForeignKey('foo.id'), primary_key=True),
sa.Column('bar_id', sa.Integer, sa.ForeignKey('bar.id'), primary_key=True)
)
def upgrade():
bind = op.get_bind()
Base.metadata.create_all(bind=bind)
session = Session(bind=bind)
session._model_changes = False # if you are using Flask-SQLAlchemy, this works around a bug
f1 = Foo(name='f1')
f2 = Foo(name='f2')
b1 = Bar(name='b1')
b2 = Bar(name='b2')
f1.bars = [b1, b2]
b2.foos.append(f2)
session.add_all([f1, f2, b1, b2])
session.commit()
def downgrade():
bind = op.get_bind()
# in this case all we need to do is drop the tables
# Base.metadata.drop_all(bind=bind)
# but we could also delete data
session = Session(bind=bind)
session._model_changes = False # if you are using Flask-SQLAlchemy, this works around a bug
b1 = session.query(Bar).filter_by(name='b1').one()
session.delete(b1)
session.commit()

Related

How to avoid IntegrityError when proxying to Dictionary-based collections using SqlAlchemy

Using SQLAlchemy 1.4.x, I've set up the following classes:

from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm import backref, relationship
from sqlalchemy import ForeignKey, Column, Integer, Unicode
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:", echo=True)
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()


class Infra(Base):
    __tablename__ = "infra"
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Unicode(200), index=True, unique=True)
    protections = association_proxy(
        "infra_protections",
        "pscore",
        creator=lambda k, v: Infra_Protection(protection=k, pscore=v),
    )


class Protection(Base):
    __tablename__ = "protection"
    id = Column(Integer, primary_key=True, autoincrement=True)
    ptype = Column(Unicode(200), index=True, unique=True)

    def __init__(self, protection):
        self.ptype = protection


class Infra_Protection(Base):
    __tablename__ = "infraprotection"
    infra_id = Column(
        Integer, ForeignKey("infra.id", ondelete="CASCADE"), primary_key=True
    )
    protection_id = Column(
        Integer, ForeignKey("protection.id", ondelete="CASCADE"), primary_key=True
    )
    prot = relationship("Protection")
    protection = association_proxy("prot", "ptype")
    infra = relationship(
        Infra,
        backref=backref(
            "infra_protections",
            collection_class=attribute_mapped_collection("protection"),
            cascade="all, delete-orphan",
        ),
    )
    pscore = Column(Integer, nullable=False, unique=False, server_default="0")
Now I'd like to add some Infra objects and associated Protections:

Base.metadata.create_all(engine)

i = Infra(name="Foo")
i.protections["test"] = 1
i.protections["test 2"] = 2
session.add(i)
session.commit()

# now, add another
j = Infra(name="Bar")
j.protections["test"] = 3
j.protections["test 2"] = 4
session.add(j)
session.commit()  # UNIQUE constraint failed: protection.ptype
It's obvious why the unique constraint is violated, but I'm wondering how I can modify my association proxy setup to avoid this in a reasonably robust way. Some sort of get_or_create on the Protection __init__?
The way to deal with this is to implement one of the UniqueObject recipes from the SQLAlchemy wiki: https://github.com/sqlalchemy/sqlalchemy/wiki/UniqueObject
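For a sense of what those recipes do, here is a minimal get-or-create sketch (a simplification, not the full recipe; the helper name is ours, and the wiki recipes additionally cache lookups per session and hook into the object creation path):

def get_or_create_protection(session, ptype):
    # reuse an existing Protection row if one exists, otherwise create it;
    # a simplified stand-in for the UniqueObject recipe
    obj = session.query(Protection).filter_by(ptype=ptype).one_or_none()
    if obj is None:
        obj = Protection(ptype)
        session.add(obj)
    return obj

The awkward part is wiring this into the association proxy, since the creator lambda has no session handle; that is precisely what the wiki recipes solve.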

Sqlalchemy association proxy and no_autoflush

I'm trying to figure out why I need to use a no_autoflush block when inserting data into an association proxy if the association proxy data has been accessed first. An example of this is below (using MySQL):
from sqlalchemy import create_engine, Integer, Column, String, ForeignKey, UniqueConstraint
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, scoped_session

Base = declarative_base()
engine = create_engine('{}://{}:{}@{}/{}'.format(...))
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)


class DomainModel(Base):
    __tablename__ = 'domains'
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False, unique=True)
    domains_to_servers = relationship("DomainServerModel", back_populates="domain")
    servers = association_proxy('domains_to_servers', 'server',
                                creator=lambda s: DomainServerModel(server=s))


class ServerModel(Base):
    __tablename__ = 'servers'
    id = Column(Integer, primary_key=True)
    name = Column(String(128), nullable=False, unique=True, index=True)
    domains_to_servers = relationship("DomainServerModel", back_populates="server")
    domains = association_proxy('domains_to_servers', 'domain',
                                creator=lambda d: DomainServerModel(domain=d))


class DomainServerModel(Base):
    __tablename__ = 'domains_to_servers'
    id = Column(Integer, primary_key=True)
    domain_id = Column(Integer, ForeignKey('domains.id'), nullable=False)
    server_id = Column(Integer, ForeignKey('servers.id'), nullable=False)
    server = relationship('ServerModel', back_populates="domains_to_servers")
    domain = relationship('DomainModel', back_populates="domains_to_servers")


def test():
    session = Session()
    with session.no_autoflush:
        s = session.query(ServerModel).filter_by(name='test.com').one()
        print(s.domains)
        d = DomainModel(name='test1.com')
        session.add(d)
        session.commit()
        s.domains.append(d)
        session.commit()


if __name__ == '__main__':
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    session = Session()
    session.add(ServerModel(name='test.com'))
    session.commit()
    test()
I'm trying to add a new domain-to-server mapping via the server/domain association proxy. If I don't access the association proxy first, i.e. if I remove the print statement in test(), then I can add the domain without needing the session.no_autoflush block. But with the print statement in there, it fails without the session.no_autoflush block, raising an IntegrityError saying that server_id cannot be null in the domains_to_servers table.

I'm trying to figure out why the no_autoflush block is needed here. I don't see any mention of it in the association_proxy docs. Is this simply the way it is? Should all inserts into an association_proxy happen in a no_autoflush block in case the proxy has been accessed prior to the insert?

Alembic downgrade doesn't seem to understand the meta data

The model.py looks like this:
import datetime

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Numeric, ForeignKey, DateTime, Boolean
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, relationship

from configs import config_base as config

Base = declarative_base()


class User(Base):
    __tablename__ = 'user'
    id = Column(String, unique=True, primary_key=True)
    name = Column(String(100), nullable=False)
    team_id = Column(String, ForeignKey('team.id'))
    # pass the callable, not utcnow(), so the default is evaluated per row
    last_modified_on = Column(DateTime, default=datetime.datetime.utcnow)
    team = relationship('Team', back_populates='members')


class Team(Base):
    __tablename__ = 'team'
    id = Column(String, unique=True, primary_key=True)
    name = Column(String, nullable=False)
    bot_access_token = Column(String(100), nullable=False)
    bot_user_id = Column(String(100), nullable=False)
    last_modified_on = Column(DateTime, default=datetime.datetime.utcnow)
    is_active = Column(Boolean, default=True)
    members = relationship('User', back_populates='team')
    is_first_time_news = Column(Boolean, default=True)


engine = create_engine(config.SQLALCHEMY_DATABASE_URI)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
I just added is_first_time_news via this Alembic migration:

revision = '6f9e2d360276'
down_revision = None
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column('team', sa.Column('is_first_time_news', sa.Boolean, default=False))


def downgrade():
    op.drop_column('team', sa.Column('is_first_time_news', sa.Boolean))
alembic upgrade head works great. But when I do alembic downgrade -1 I get a strange exception:

AttributeError: Neither 'Column' object nor 'Comparator' object has an attribute '_columns'
Are you using SQLite? SQLite does not allow you to drop a column from the schema. I had a similar problem when I tried to downgrade a local SQLite database I was testing. From the SQLite documentation:

SQLite supports a limited subset of ALTER TABLE. The ALTER TABLE command in SQLite allows the user to rename a table or to add a new column to an existing table.

https://www.sqlite.org/lang_altertable.html
Also, op.drop_column expects the column name as a string, not a Column object. Try:

def downgrade():
    op.drop_column('team', 'is_first_time_news')
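If the downgrade really does have to drop the column on SQLite, Alembic's batch mode can emulate it; a minimal sketch, assuming a plain Alembic setup:

def downgrade():
    # batch_alter_table copies the table to a new table without the
    # column, which is how drops are emulated on SQLite
    with op.batch_alter_table('team') as batch_op:
        batch_op.drop_column('is_first_time_news')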

SQLAlchemy many-to-many without foreign key

Could someone help me figure out how to write the primaryjoin/secondaryjoin for a secondary table that lacks one ForeignKey definition? I can't modify the database itself since it's used by a different application.
from sqlalchemy import schema, types, func, orm
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class A(Base):
    __tablename__ = 'atab'
    id = schema.Column(types.SmallInteger, primary_key=True)


class B(Base):
    __tablename__ = 'btab'
    id = schema.Column(types.SmallInteger, primary_key=True)
    a = orm.relationship(
        'A', secondary='abtab', backref=orm.backref('b')
    )


class AB(Base):
    __tablename__ = 'abtab'
    id = schema.Column(types.SmallInteger, primary_key=True)
    a_id = schema.Column(types.SmallInteger, schema.ForeignKey('atab.id'))
    b_id = schema.Column(types.SmallInteger)
I've tried specifying foreign on the join condition:

a = orm.relationship(
    'A', secondary='abtab', backref=orm.backref('b'),
    primaryjoin=(id == orm.foreign(AB.b_id))
)
But I received the following error:

ArgumentError: Could not locate any simple equality expressions involving locally mapped foreign key columns for primary join condition '"atab".id = "abtab"."a_id"' on relationship Category.projects. Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or are annotated in the join condition with the foreign() annotation. To allow comparison operators other than '==', the relationship can be marked as viewonly=True.
You can add foreign_keys to your relationship configuration. This is mentioned in a SQLAlchemy mailing list post:
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    logon = Column(String(10), primary_key=True)
    group_id = Column(Integer)


class Group(Base):
    __tablename__ = 'groups'
    group_id = Column(Integer, primary_key=True)
    users = relationship('User', backref='group',
                         primaryjoin='User.group_id==Group.group_id',
                         foreign_keys='User.group_id')


engine = create_engine('sqlite:///:memory:', echo=True)
Base.metadata.create_all(engine)

Session = sessionmaker(bind=engine)
session = Session()

u1 = User(logon='foo')
u2 = User(logon='bar')
g = Group()
g.users = [u1, u2]

session.add(g)
session.commit()

g = session.query(Group).first()
print([user.logon for user in g.users])
output:
['foo', 'bar']
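Applying the same idea to the original secondary-table setup would look roughly like this: annotate the FK-less column with foreign() inside explicit join conditions. A sketch based on the models above, not a tested answer for the asker's database:

class B(Base):
    __tablename__ = 'btab'
    id = schema.Column(types.SmallInteger, primary_key=True)
    a = orm.relationship(
        'A',
        secondary='abtab',
        # abtab.a_id has a real ForeignKey, so only b_id needs the
        # foreign() annotation
        primaryjoin='B.id == foreign(AB.b_id)',
        secondaryjoin='A.id == AB.a_id',
        backref=orm.backref('b'),
    )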

Populating a SQLAlchemy many-to-many relationship using ID's instead of objects

The situation:
So, I have a basic many-to-many relationship in SQLAlchemy using an association table.
For example, a person can attend many parties, and a party can have many persons as guests:
class Person(Base):
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))


class SexyParty(Base):
    __tablename__ = 'sexy_party'
    id = Column(Integer, primary_key=True)
    guests = relationship('Person', secondary='guest_association',
                          lazy='dynamic', backref='parties')


guest_association = Table(
    'guest_association', Base.metadata,
    Column('user_id', Integer(), ForeignKey('person.id')),
    Column('sexyparty_id', Integer(), ForeignKey('sexy_party.id'))
)
Normally, if I wanted to add a list of guests to a party, I would do something like this:

my_guests = [prince, olivia, brittany, me]
my_party.guests = my_guests
db.session.commit()

...where prince, olivia and brittany are all <Person> instances, and my_party is a <SexyParty> instance.
My question:

I'd like to add guests to a party using person IDs rather than instances. For example:

guest_ids = [1, 2, 3, 5]
my_party.guests = guest_ids  # <-- this fails, because guest_ids
                             #     are not <Person> instances

I could always load the instances from the database, but that would entail an unnecessary DB query just to set a simple many-to-many relationship. How would I go about setting the .guests attribute using a list of person IDs? There has to be a simple way to do this, since the association table ultimately represents the many-to-many relationship using IDs anyway...

Thanks in advance; I hope the question is clear.
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Person(Base):
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))


class SexyParty(Base):
    __tablename__ = 'sexy_party'
    id = Column(Integer, primary_key=True)
    guests = relationship('Person', secondary='guest_association',
                          lazy='dynamic', backref='parties')


guest_association = Table(
    'guest_association', Base.metadata,
    Column('user_id', Integer(), ForeignKey('person.id'), primary_key=True),
    Column('sexyparty_id', Integer(), ForeignKey('sexy_party.id'), primary_key=True)
)

e = create_engine("sqlite://", echo=True)
Base.metadata.create_all(e)

sess = Session(e)

p1 = Person(id=1, name='p1')
p2 = Person(id=2, name='p2')
p3 = Person(id=3, name='p3')
p4 = Person(id=4, name='p4')
sp1 = SexyParty(id=1)

sess.add_all([sp1, p1, p2, p3, p4])
sess.commit()

# method one. use insert()
sess.execute(guest_association.insert().values([(1, 1), (2, 1)]))

# method two. map, optional association proxy
from sqlalchemy.ext.associationproxy import association_proxy


class GuestAssociation(Base):
    __table__ = guest_association
    party = relationship("SexyParty", backref="association_recs")


SexyParty.association_ids = association_proxy(
    "association_recs", "user_id",
    creator=lambda uid: GuestAssociation(user_id=uid))

sp1.association_ids.extend([3, 4])
sess.commit()
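As a variation on method one, if the positional tuples read unclearly, the same insert can be written in executemany style with explicit column names (an equivalent sketch, not from the original answer):

sess.execute(
    guest_association.insert(),
    [{"user_id": 1, "sexyparty_id": 1},
     {"user_id": 2, "sexyparty_id": 1}],
)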
