When I add HSTORE to my model and run alembic upgrade head, it throws NameError: name 'Text' is not defined.
I used PYTHONPATH=. alembic revision --autogenerate -m "Added user_id, admin_id and role to Customer" to create the revision.
Thanks in advance.
Update:
@Daniel Roseman suggested adding sa.Text(). My question is why it was not generated automatically during the revision.
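For reference, the corrected column definition just needs the type referenced through the sa alias the migration already imports (a minimal sketch of the fixed line):

op.add_column('customers', sa.Column('user_id',
    postgresql.HSTORE(text_type=sa.Text()), nullable=True))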
Error:
op.add_column('customers', sa.Column('user_id',
    postgresql.HSTORE(text_type=Text()), nullable=True))
NameError: name 'Text' is not defined
Model:
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.postgresql import HSTORE, JSONB
from sqlalchemy.ext.mutable import MutableDict

class Customer(Base):
    __tablename__ = "customers"

    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    img = Column(String, nullable=False)
    auth_token = Column(String, nullable=True)
    notification_config = Column(JSONB, nullable=True)
    admin_id = Column(Integer, nullable=True)
    user_id = Column(MutableDict.as_mutable(HSTORE))
Generated Migration revision:
"""Added user_id, admin_id and role to Customer
Revision ID: 1ebe3d18442f
Revises: 88b4dccb5c1e
Create Date: 2017-06-21 17:03:21.181933
"""
# revision identifiers, used by Alembic.
revision = '1ebe3d18442f'
down_revision = '88b4dccb5c1e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('customers', sa.Column('admin_id', sa.Integer(), nullable=True))
op.add_column('customers', sa.Column('auth_token', sa.String(), nullable=True))
op.add_column('customers', sa.Column('user_id', postgresql.HSTORE(text_type=Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('customers', 'user_id')
op.drop_column('customers', 'auth_token')
op.drop_column('customers', 'admin_id')
# ### end Alembic commands ###
Expectation
Support Postgres multitenancy using schemas with SQLAlchemy and Alembic.
Model
class User(Base):
    __tablename__ = 'users'
    __table_args__ = ({"schema": "test"})

    id = Column(Integer, primary_key=True)
    name = Column(String(80), unique=True, nullable=False)

    def __repr__(self):
        return '<User %r>' % self.name
We have two tenants: tenant_1 and tenant_2. Each tenant will have its own schema in the same Postgres DB, plus one more schema to hold tables shared between all tenants. Each tenant schema will have exactly the same tables.
So the use case is that any model we have in models.py should be created in all tenant schemas; the users table has to be created in both tenant_1 and tenant_2.
I am trying to use Alembic to auto-generate migrations for all tenant schemas from the same models.
alembic env.py
target_metadata = app.Base.metadata
...

def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        all_my_tenant_names = ["tenant_1", "tenant_2"]
        for tenant_schema_name in all_my_tenant_names:
            conn = connection.execution_options(schema_translate_map={"test": tenant_schema_name})
            print("Migrating tenant schema %s" % tenant_schema_name)
            context.configure(
                connection=conn,
                target_metadata=target_metadata,
                include_schemas=True,
                version_table_schema=target_metadata.schema,
            )
            with context.begin_transaction():
                context.execute('SET search_path TO public')
                context.run_migrations()
The expectation is that the migration file will have statements to create the tenant_1.users and tenant_2.users tables, but it only has a statement to create the test.users table.
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=80), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
        schema='test'
    )
    op.drop_table('user', schema='tenant_2')
    op.drop_table('alembic_version', schema='tenant_2')
    op.drop_table('user', schema='tenant_1')
    op.drop_table('alembic_version', schema='tenant_1')
    # ### end Alembic commands ###
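For contrast, the output I was hoping for would look roughly like this sketch (the same create_table emitted once per tenant schema, matching the expectation above):

def upgrade() -> None:
    # desired: one users table per tenant schema
    for schema in ('tenant_1', 'tenant_2'):
        op.create_table('users',
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('name', sa.String(length=80), nullable=False),
            sa.PrimaryKeyConstraint('id'),
            sa.UniqueConstraint('name'),
            schema=schema
        )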
Can someone please help me accomplish this?
Given this model:
from typing import Optional
from sqlmodel import SQLModel, Field

class SongBase(SQLModel):
    name: str
    artist: str = Field(index=False)
    #label: Optional[str] = Field(None, index=False)
    year: Optional[int] = Field(None, index=False)

class Song(SongBase, table=True):
    id: int = Field(default=None, primary_key=True, index=False)

class SongCreate(SongBase):
    pass
I create an initial alembic revision using alembic revision --autogenerate -m "init" and then apply it using alembic upgrade head.
Now I uncomment the label field and run alembic revision --autogenerate -m "label".
My migration shows up like this:
revision = '083a8e84f047'
down_revision = 'c1b2ad7d0a39'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa
import sqlmodel  # needed for the AutoString type below


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('song', sa.Column('label', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
    op.alter_column('song', 'id',
        existing_type=sa.INTEGER(),
        nullable=True,
        autoincrement=True)
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('song', 'id',
        existing_type=sa.INTEGER(),
        nullable=False,
        autoincrement=True)
    op.drop_column('song', 'label')
    # ### end Alembic commands ###
Why is alembic trying to make changes to the id field?
We're trying to evaluate sqlmodel/alembic to see if it's feasible for a production workload, and having to hand-wrangle migrations to get rid of these primary-key manipulations seems a bit dangerous to me. Am I doing anything wrong that makes alembic want to edit my primary key field in this way?
EDIT: For disclosure, the model comes from this article/example: https://github.com/testdrivenio/fastapi-sqlmodel-alembic
Did some more research here, and looked at a few GitHub discussions in the alembic repo. What I think happens is that the id column doesn't explicitly set nullable=False, which alembic seems to require. Then during the initial migration it overlooks that fact (I don't know if that's a bug), which makes the model get out of sync from the get-go. So each migration tries to bring it back into sync.
In any case, the fix seems to be to always explicitly declare the nullable parameter for primary key fields:
class SongBase(SQLModel):
    name: str
    artist: str
    label: str = Field(index=False)
    year: Optional[int] = None

class Song(SongBase, table=True):
    id: int = Field(default=None, primary_key=True, nullable=False)

class SongCreate(SongBase):
    pass
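With nullable declared explicitly on the primary key, a fresh autogenerate run should no longer emit the spurious alter_column calls on id.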
I am new to this and I have a little test database which looks like this:
class Company(Base):
    __tablename__ = 'company'

    building_id = Column(Integer, primary_key=True, nullable=False, index=True)
    name = Column(String, nullable=False)

    buildings = relationship("Building", back_populates="company")

class Building(Base):
    __tablename__ = 'building'

    building_id = Column(Integer, primary_key=True, nullable=False, index=True)
    name = Column(String, nullable=False)
    ip_address = Column(String, nullable=True)
    company_id = Column(Integer, ForeignKey('company.company_id'), nullable=False)

    company = relationship("Company", back_populates="buildings")
As you may have noticed, I have messed up the name for the company id, naming it "building_id".
I have changed this in the model, but the table won't update; instead it fails with the error message
"(sqlite3.OperationalError) no such column: company.company_id".
How do I update it?
When you've actually launched your product, you use a tool such as Alembic to handle database migrations for you. Alembic can generate a migration script that runs the necessary ALTER statements to bring your database up to date with your model.
However, while developing, it might be easier to just delete the .sqlite file and call create_all() again to get a fresh database created according to the schema.
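For the development shortcut, a minimal sketch (assuming the declarative Base from the models above; the test.db path is illustrative):

import os
from sqlalchemy import create_engine

# remove the old SQLite file, then rebuild the schema from the current models
if os.path.exists('test.db'):  # illustrative path to your .sqlite file
    os.remove('test.db')

engine = create_engine('sqlite:///test.db')
Base.metadata.create_all(engine)  # recreates all tables with the corrected columns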
My Flask application previously had a single database (db1); now I have bound a new database (db2) to it. Both have 10 tables.
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root@localhost:3306/db1'
SQLALCHEMY_BINDS = {
    'db2': 'mysql+pymysql://root@localhost:3306/db2'
}

db = SQLAlchemy()

class table1(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)

# ...

class table10(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)

class table11(db.Model):
    __bind_key__ = 'db2'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)

# ...

class table20(db.Model):
    __bind_key__ = 'db2'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
I want to use Alembic's autogenerate function to detect changes and generate migrations for db1 and db2 separately, but db.metadata contains the metadata for all tables. How do I get the metadata for just one bind?
Thanks to @davidism's help, I can use include_symbol to make this work.
def include_symbol(tablename, schema):
    return tablename in ('table1', 'table2', ..., 'table10')  # for db1
    # return tablename not in ('table1', 'table2', ..., 'table10')  # for db2

with connectable.connect() as connection:
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        include_symbol=include_symbol
    )
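With this in place, you run autogenerate once per database, swapping which return line is commented out so env.py only sees the tables belonging to that bind.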
You can't because that feature doesn't exist yet. Currently, there is one metadata instance for all models across all binds. As long as all the models have unique names, this isn't a huge problem.
When we apply this patch and make a new release, each bind will have its own metadata. You will then be able to access it with db.get_metadata(bind='db2').
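Once that release happens, the access pattern would be this one-liner (hypothetical until the patch lands):

# metadata covering only the models with __bind_key__ = 'db2'
db2_metadata = db.get_metadata(bind='db2')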
I need to alter data during an Alembic upgrade.
I currently have a 'players' table in a first revision:
def upgrade():
    op.create_table('players',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Unicode(length=200), nullable=False),
        sa.Column('position', sa.Unicode(length=200), nullable=True),
        sa.Column('team', sa.Unicode(length=100), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
I want to introduce a 'teams' table. I've created a second revision:
def upgrade():
    op.create_table('teams',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=80), nullable=False)
    )
    op.add_column('players', sa.Column('team_id', sa.Integer(), nullable=False))
I would like the second migration to also add the following data:
Populate teams table:
INSERT INTO teams (name) SELECT DISTINCT team FROM players;
Update players.team_id based on players.team name:
UPDATE players AS p JOIN teams AS t SET p.team_id = t.id WHERE p.team = t.name;
How do I execute inserts and updates inside the upgrade script?
What you are asking for is a data migration, as opposed to the schema migration that is most prevalent in the Alembic docs.
This answer assumes you are using declarative (as opposed to class-Mapper-Table or core) to define your models. It should be relatively straightforward to adapt this to the other forms.
Note that Alembic provides some basic data functions: op.bulk_insert() and op.execute(). If the operations are fairly minimal, use those. If the migration requires relationships or other complex interactions, I prefer to use the full power of models and sessions as described below.
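For the minimal case, a sketch of op.bulk_insert() with an ad-hoc table (the rows here are illustrative; the teams table matches the question above):

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column

# lightweight, migration-local description of the target table
teams_table = table('teams',
    column('id', sa.Integer),
    column('name', sa.String)
)

def upgrade():
    op.bulk_insert(teams_table, [
        {'name': 'Sharks'},  # illustrative data
        {'name': 'Jets'},
    ])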
The following is an example migration script that sets up some declarative models that will be used to manipulate data in a session. The key points are:
Define the basic models you need, with the columns you'll need. You don't need every column, just the primary key and the ones you'll be using.
Within the upgrade function, use op.get_bind() to get the current connection, and make a session with it.
Or use bind.execute() to use SQLAlchemy's lower level to write SQL queries directly. This is useful for simple migrations.
Use the models and session as you normally would in your application.
"""create teams table
Revision ID: 169ad57156f0
Revises: 29b4c2bfce6d
Create Date: 2014-06-25 09:00:06.784170
"""
revision = '169ad57156f0'
down_revision = '29b4c2bfce6d'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Player(Base):
__tablename__ = 'players'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False)
team_name = sa.Column('team', sa.String, nullable=False)
team_id = sa.Column(sa.Integer, sa.ForeignKey('teams.id'), nullable=False)
team = orm.relationship('Team', backref='players')
class Team(Base):
__tablename__ = 'teams'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False, unique=True)
def upgrade():
bind = op.get_bind()
session = orm.Session(bind=bind)
# create the teams table and the players.team_id column
Team.__table__.create(bind)
op.add_column('players', sa.Column('team_id', sa.ForeignKey('teams.id'), nullable=False)
# create teams for each team name
teams = {name: Team(name=name) for name in session.query(Player.team).distinct()}
session.add_all(teams.values())
# set player team based on team name
for player in session.query(Player):
player.team = teams[player.team_name]
session.commit()
# don't need team name now that team relationship is set
op.drop_column('players', 'team')
def downgrade():
bind = op.get_bind()
session = orm.Session(bind=bind)
# re-add the players.team column
op.add_column('players', sa.Column('team', sa.String, nullable=False)
# set players.team based on team relationship
for player in session.query(Player):
player.team_name = player.team.name
session.commit()
op.drop_column('players', 'team_id')
op.drop_table('teams')
The migration defines separate models because the models in your code represent the current state of the database, while the migrations represent steps along the way. Your database might be in any state along that path, so the models might not sync up with the database yet. Unless you're very careful, using the real models directly will cause problems with missing columns, invalid data, etc. It's clearer to explicitly state exactly what columns and models you will use in the migration.
You can also use direct SQL (see the Alembic Operations reference), as in the following example:
from alembic import op

# revision identifiers, used by Alembic.
revision = '1ce7873ac4ced2'
down_revision = '1cea0ac4ced2'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands made by andrew ###
    op.execute('UPDATE STOCK SET IN_STOCK = -1 WHERE IN_STOCK IS NULL')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
I recommend using SQLAlchemy Core statements with an ad-hoc table, as detailed in the official documentation, because it allows agnostic SQL and Pythonic writing, and is also self-contained. SQLAlchemy Core is the best of both worlds for migration scripts.
Here is an example of the concept:
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op

account = table('account',
    column('name', String)
)

op.execute(
    account.update().\
        where(account.c.name == op.inline_literal('account 1')).\
        values({'name': op.inline_literal('account 2')})
)

# If insert is required
from sqlalchemy.sql import insert
from sqlalchemy import orm

bind = op.get_bind()
session = orm.Session(bind=bind)

data = {
    "name": "John",
}
ret = session.execute(insert(account).values(data))
# for use in other insert calls
account_id = ret.lastrowid
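Note that lastrowid comes straight from the DBAPI cursor, so its behavior is driver-specific. When the table object declares its primary key column, the result's ret.inserted_primary_key is the more portable way to retrieve the new id for a single-row insert.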