Is there any way to incorporate defer() or load_only() types of methods in SQLAlchemy 2.0-style queries, other than adding deferred() to the columns in the table definitions?
What I'm trying to get is something like:
# NOTE(review): question pseudo-code — indentation was lost in transcription,
# Select has no .defer() method, and ``acl`` on the stmt line would be an
# undefined bare name rather than a column reference. The answer below shows
# the working ``.options(defer(...))`` pattern.
class Signals(Base):
__tablename__ = 'signals'
id = sa.Column(sa.BigInteger(), primary_key=True)
value_type_id = sa.Column(sa.Integer(), sa.ForeignKey('value_types.id'))
sources_id = sa.Column(sa.BigInteger(), sa.ForeignKey('sources.id'))
acl = sa.Column(JSONB())
stmt = select(Signals).join(Signals.value_types).defer(acl)
session = Session(engine)
# get all fields except acl
result = await session.execute(stmt)
I've expanded/modified your example a bit to have a full workable example.
In short, you can use .options(defer('field_name')) to prevent loading specific fields. However, implicitly loading those fields later (as the name "defer" implies) will not work.
Documentation link: https://docs.sqlalchemy.org/en/14/orm/loading_columns.html#deferred
import asyncio
import sqlalchemy as sa
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import declarative_base, relationship, defer, sessionmaker
# Explicit MetaData object shared by the declarative base below.
meta = sa.MetaData()
Base = declarative_base(metadata=meta)


class ValueType(Base):
    """Lookup row that Signals.value_type_id points at."""

    __tablename__ = 'value_types'

    id = sa.Column(sa.Integer(), primary_key=True, autoincrement=True)
class Signals(Base):
    """Signal row carrying the ``acl`` payload we want to defer-load."""

    __tablename__ = 'signals'

    id = sa.Column(sa.Integer(), primary_key=True, autoincrement=True)
    value_type_id = sa.Column(sa.Integer(), sa.ForeignKey('value_types.id'))
    acl = sa.Column(sa.String())

    value_type = relationship(ValueType)
async def async_main():
    """Demonstrate deferring a column (``acl``) with the async ORM.

    Deferred columns cannot be lazily loaded through an AsyncSession,
    because the implicit load has no way to be awaited; refresh the
    attribute explicitly with ``session.refresh(obj, ['acl'])`` instead.
    """
    engine = create_async_engine(
        "sqlite+aiosqlite:///", echo=True,
    )
    async_session = sessionmaker(
        engine, expire_on_commit=False, class_=AsyncSession
    )

    async with engine.begin() as conn:
        await conn.run_sync(meta.drop_all)
        await conn.run_sync(meta.create_all)

    async with async_session() as session:
        async with session.begin():
            v = ValueType()
            session.add_all([
                Signals(acl='acl1', value_type=v),
                Signals(acl='acl2', value_type=v),
            ])

        # Pass the mapped attribute itself: string names for defer() are
        # deprecated in SQLAlchemy 1.4 and removed in 2.0.
        stmt = sa.select(Signals).join(Signals.value_type).options(defer(Signals.acl))
        results = await session.execute(stmt)
        for r in results.scalars():
            print(r)
            # await session.refresh(r, ['acl']) # <-- uncomment to make the following line work
            print(r.acl) # <-- error here, deferred loads will not work as the cursor is not awaited

    await engine.dispose()


if __name__ == '__main__':
    asyncio.run(async_main())
Related
I would like to query a user based on their code and mobile number.
Of note, I am using an async session when connecting to the DB.
This is my models and query:
# NOTE(review): question code as posted; indentation lost in transcription.
# Issues visible in this listing:
#   * neither model declares primary_key=True on ``id`` (mappers require one);
#   * ``filter_by(code==5)`` should be ``filter_by(code=5)`` — filter_by takes
#     keyword arguments, not expressions;
#   * ``select(user_model)`` does not match the class name ``model_user``;
#   * the ``db.execute(...)`` call is missing its closing parenthesis.
class model_user(Base):
__tablename__ = "users"
id = Column(UUID)
code = Column(Integer)
user_name = Column(String(11), unique=True)
first_name = Column(String(255), index=True)
last_name = Column(String(255), index=True)
contact = relationship("model_contact", back_populates="user")
class model_contact(Base):
__tablename__ = "contacts"
id = Column(Integer)
mobile = Column(String(320))
user_id = Column(
UUID(as_uuid=True), ForeignKey("users.id"), nullable=False, index=True
)
user = relationship("model_user", back_populates="contact")
# Here is my query:
query = await db.execute(
select(user_model)\
.filter_by(code==5)\
.options(
joinedload(model_user.contact)
)\
.filter(model_contact.mobile == mobile)
This really has nothing to do with whether or not you are using the async extension of SQLAlchemy. Queries are constructed the same way. Only the session setup and interaction is obviously different.
Side notes:
You should use PascalCase to name your classes and including the term "model" in the name is typically not good style, i.e. User and Contact.
Since you have a one-to-many relationship between your user model and your contact model (i.e. one user can have multiple sets of contact info), you should name the relationship attribute on the user model with plural, i.e. contacts.
The simplest way to do what you want that I can think of is using the Select.where method. You can then construct an SQL EXISTS subquery using the any method of the relationship. The statement would look like this:
# Successive .where() calls are AND-ed together; Relationship.any()
# renders an EXISTS subquery against the contacts table.
statement = (
    select(User)
    .where(User.code == 123)
    .where(User.contacts.any(Contact.mobile == "555"))
)
Here is a full working example using aiosqlite just to demonstrate that this works with async tools:
from asyncio import run
from sqlalchemy import Column, ForeignKey, Integer, String, select
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
Base = declarative_base()


class User(Base):
    """A user; one user may own many Contact rows."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True)
    code = Column(Integer)

    contacts = relationship("Contact", back_populates="user")
class Contact(Base):
    """Contact-info row belonging to exactly one User."""

    __tablename__ = "contacts"

    id = Column(Integer, primary_key=True)
    mobile = Column(String(320))
    user_id = Column(
        Integer(), ForeignKey("users.id"), nullable=False, index=True
    )

    user = relationship(User, back_populates="contacts")
async def main():
    """Create the schema, then run the EXISTS-based user query."""
    engine = create_async_engine("sqlite+aiosqlite://", echo=True)
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)

    async_session = sessionmaker(
        engine, expire_on_commit=False, class_=AsyncSession
    )
    async with async_session() as session:
        statement = select(User).where(
            User.code == 123,
            User.contacts.any(Contact.mobile == "555"),
        )
        await session.execute(statement)


if __name__ == "__main__":
    run(main())
Running this script produces the following SQL output for the select query:
SELECT users.id, users.code
FROM users
WHERE users.code = ? AND (EXISTS (SELECT 1
FROM contacts
WHERE users.id = contacts.user_id AND contacts.mobile = ?))
...
(123, '555')
This approach should give you the results you want.
I have set up my first Python FastAPI app but am getting stuck. I have a function that queries some results. The following function queries the first entry in the database on a specific date. Now I want the last entry on a date, or all results sorted by highest id — how do I do this?
def get_workday(db: Session, workday_date: date):
    """Return the first workday stored for *workday_date*, or None."""
    matching = db.query(DBWorkday).where(DBWorkday.date == workday_date)
    return matching.first()
full code:
from datetime import date
from fastapi import FastAPI, Depends
from pydantic import BaseModel
from typing import Optional, List
from sqlalchemy import Date, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker, Session
from sqlalchemy import Column, String, Integer
app = FastAPI()

# SqlAlchemy Setup
SQLALCHEMY_DATABASE_URL = 'sqlite:///../db/database.db'
# check_same_thread=False is required when using SQLite with FastAPI: a
# connection created on one request thread may be reused from another.
engine = create_engine(
    SQLALCHEMY_DATABASE_URL,
    echo=True,
    future=True,
    connect_args={"check_same_thread": False},
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
# Dependency
def get_db():
    """FastAPI dependency: yield a session and always close it afterwards."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
# SQLAlchemy ORM model
class DBWorkday(Base):
    """ORM mapping for one workday row."""

    __tablename__ = 'workdays'

    id = Column(Integer, primary_key=True, index=True)
    date = Column(Date)
    start_time = Column(String(4))
    end_time = Column(String(4))
    type = Column(String, nullable=True)
    location = Column(String, nullable=True)


Base.metadata.create_all(bind=engine)
# Workday Pydantic model
class Workday(BaseModel):
    """Pydantic schema mirroring DBWorkday for (de)serialisation."""

    date: date
    start_time: str
    end_time: str
    type: Optional[str] = None
    location: Optional[str] = None

    class Config:
        # Allow building this schema directly from ORM objects.
        orm_mode = True
# Methods for interacting with the database
def get_workday(db: Session, workday_date: date):
    """Return the first workday recorded on *workday_date*, or None."""
    rows = db.query(DBWorkday).where(DBWorkday.date == workday_date)
    return rows.first()
# The leading ``@`` of the decorator was mangled to ``#`` in transcription;
# it must be a decorator for FastAPI to register the route.
@app.get('/workday/{date}')
def get_workday_view(date: date, db: Session = Depends(get_db)):
    return get_workday(db, date)


def get_workday_last(db: Session, workday_date: date):
    """Answer: the *last* entry for a date is the matching row with the
    highest id — add an ``order_by`` before calling ``first()``."""
    return db.query(DBWorkday).where(DBWorkday.date == workday_date).order_by(DBWorkday.id.desc()).first()
I'm trying to implement a basic resource access using SQL Alchemy 1.4 and a PostgreSQL database.
Python code
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
Base: DeclarativeMeta = declarative_base()


class User(Base):
    """An application user."""

    __tablename__ = 'user'

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    email = Column(String(length=255), index=True, nullable=False)


class Resource(Base):
    """A protected resource that users can be granted roles on."""

    __tablename__ = "resource"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    name = Column(String(length=255), index=True, nullable=False)


class UserResourceRole(Base):
    """Association row: one user's access flags on one resource."""

    __tablename__ = "user_resource_role"

    user_id = Column(
        UUID(as_uuid=True),
        ForeignKey("user.id", ondelete="CASCADE"),
        primary_key=True,
    )
    resource_id = Column(
        UUID(as_uuid=True),
        ForeignKey("resource.id", ondelete="CASCADE"),
        primary_key=True,
    )
    can_edit = Column(Boolean, default=False, nullable=False)
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.orm import sessionmaker
from the_other_file import User, Resource, UserResourceRole
async def select_all(user: User, db_session: AsyncSession):
    """Return (Resource, UserResourceRole) pairs granted to *user*.

    ``Result.scalars()`` keeps only the first element of each row, which is
    why UserResourceRole data seemed to disappear; calling ``.all()`` on the
    raw result preserves both entities per row.
    """
    results = await db_session.execute(
        select(Resource, UserResourceRole)
        .join(
            UserResourceRole,
            Resource.id == UserResourceRole.resource_id
        )
        .where(UserResourceRole.user_id == user.id)
    )
    # List of (Resource, UserResourceRole) tuples.
    return results.all()
engine: AsyncEngine = create_async_engine(POSTGRES_URL, future=True)
async_session = sessionmaker(
bind=engine, class_=AsyncSession, expire_on_commit=False, future=True
)
# ignore the fact that it's not executed in asyncio loop
# NOTE(review): ``async_session()`` returns an AsyncSession directly — it is
# not awaitable, so ``await async_session()`` would raise at runtime.
a_db_session = await async_session()
resources = await select_all(user=a_real_user_is_here, db_session=a_db_session)
print(resources)
I can't retrieve anything from UserResourceRole in my results. It only contains data from Resource. If I swap the objects in the select call, then I can only retrieve data from UserResourceRole.
What I'm expecting
I'm expecting to have the same result of that SQL query :
SELECT *
FROM resource
INNER JOIN user_resource_role
ON resource.id = user_resource_role.resource_id
WHERE user_resource_role.user_id = :a_user_id
The query generated by SQL Alchemy is exactly the same (except the verbosity) :
SELECT resource.id, resource.name, user_resource_role.user_id, user_resource_role.resource_id, user_resource_role.can_edit
FROM resource
JOIN user_resource_role
ON resource.id = user_resource_role.resource_id
WHERE user_resource_role.user_id = :user_id_1
If you try
for entry in results:
print(entry)
it will show you a list of tuples of (Resource, UserResourceRole). Apparently the call to .scalars().all() only keeps the first value of each row.
My current solution is to turn results into a list and manually manipulate it.
Remove scalars(). So, it should be
return results.all()
This will return a list of tuples
[(resource_1, userresourcerole_1),(resource_2, userresourcerole_2),...]
I wrote an application using SQLAlchemy's object relational mapper to store and access data from an SQLite3 database.
I can call add_user to add one or multiple users and call get_users to get them
I can import data from excel and get them with get_users
I can import data from excel and add a user with add_user
BUT I can't get the users with the get_users function afterwards, because I'm getting the following error for the entry created with add_user: AttributeError: 'NoneType' object has no attribute 'id'
What am I doing wrong?
Here's a simple version of the application:
orm_test.py
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# NOTE(review): several issues in this question code, all relevant to the bug:
#   * every call builds a brand-new Base/engine/session, so each importing
#     module ends up with its own objects — call it once and share the result;
#   * 'sqlite:///:main:' is a *file* named ":main:"; an in-memory database
#     would be 'sqlite:///:memory:' — presumably a typo, but "fixing" it would
#     change how state is shared between the separate engines;
#   * create_all here runs before any model classes are registered on this
#     fresh Base, so it creates no tables at this point.
def orm_setup():
Base = declarative_base()
engine = create_engine('sqlite:///:main:', echo=True)
Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
session = Session()
return Base, engine, session
orm_test_class.py
from sqlalchemy import Column, Integer, String
from orm_test import orm_setup
# Call orm_setup() exactly once and unpack: each call builds a brand-new
# Base, engine and session, so indexing two separate calls produced two
# independent registries/engines.
Base, engine, _session = orm_setup()


class User(Base):
    __tablename__ = 'person'

    id = Column('id', Integer, primary_key=True)
    username = Column('username', String, unique=True)


Base.metadata.create_all(bind=engine)
orm_test_functions.py
from orm_test_class import User
from orm_test import orm_setup
session = orm_setup()[2]


def add_user(name):
    """Create and commit a new User with the given username."""
    user = User()
    user.username = str(name)
    session.add(user)
    session.commit()


def get_users():
    """Print id and username of every stored user, then close the session."""
    for user in session.query(User).all():
        print(user.id, user.username)
    session.close()
main.py
import fire
from orm_test_functions import add_user, get_users
# Expose the imported functions (add_user, get_users) on the CLI via python-fire.
if __name__ == '__main__':
fire.Fire()
data_import.py
import fire
import pandas as pd
from orm_test import orm_setup
# import engine from orm
engine = orm_setup()[1]


def data_import():
    """Import the User sheet from Data.xlsx into the ``person`` table.

    Uses if_exists='append' rather than 'replace': 'replace' drops the
    original table (losing the primary-key definition) and recreates it
    without one, which later breaks ORM access (user.id comes back None).
    """
    file = 'Data.xlsx'
    df_user = pd.read_excel(file, sheet_name='User')
    df_user.to_sql('person', engine, if_exists='append', index=False)


# Command line interface
if __name__ == '__main__':
    fire.Fire()
The problem is that df_to_sql drops the original table, which has a primary key defined, and replaces it with a table that does not define a primary key.
From the pandas `DataFrame.to_sql` docs:
replace: Drop the table before inserting new values.
You can get around this by setting if_exists='append' instead of if_exists='replace'.
df_user.to_sql('person', engine, if_exists='append', index=False)
If necessary you can emulate the "replace" behaviour by deleting any existing records from the table before importing the data.
This is the code I used to reproduce and resolve:
import io
import sqlalchemy as sa
from sqlalchemy import orm
import pandas as pd
Base = orm.declarative_base()


class User(Base):
    """Minimal user model with an integer primary key."""

    __tablename__ = 'person'

    id = sa.Column('id', sa.Integer, primary_key=True)
    username = sa.Column('username', sa.String, unique=True)


engine = sa.create_engine('sqlite://', echo=True, future=False)
# Drop all is redundant for an in-memory db, but harmless.
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)

sessionmaker = orm.sessionmaker(engine)
def add_user(name):
    """Persist a new User named *name*.

    ``sessionmaker.begin()`` (SQLAlchemy 1.4+) yields a session that commits
    on success and is closed in every case — the original left the session
    open if ``commit()`` raised.
    """
    with sessionmaker.begin() as session:
        user = User()
        user.username = str(name)
        session.add(user)
def get_users():
    """Print id/username for each stored user, followed by a blank line."""
    # Context manager guarantees the session is closed even if the query raises.
    with sessionmaker() as session:
        users = session.query(User).all()
        for user in users:
            print(user.id, user.username)
        print()
CSV_ROWS = """\
id,username
1,Alice
2,Bob
"""

df_user = pd.read_csv(io.StringIO(CSV_ROWS))
# 'append' keeps the ORM-created table (and its primary key) intact.
df_user.to_sql('person', engine, if_exists='append', index=False)

users = get_users()
add_user('Carol')
users = get_users()

engine.dispose()
You should also define the id column with the AUTOINCREMENT keyword; see https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#using-the-autoincrement-keyword
To prevent human errors, I'd like to check that the current SQL database schema matches the SQLAlchemy models code and there aren't migrations need to run on the application startup. Is there a way to iterate all models on SQLAlchemy and then see if the database schema is that what the model expects it to be?
This is to prevent the errors popping up later (HTTP 500 due to missing table, field, etc.)
Based on @yoloseem's hints above, here is a complete answer:
import logging
from sqlalchemy import inspect
from sqlalchemy.ext.declarative.clsregistry import _ModuleMarker
from sqlalchemy.orm import RelationshipProperty
logger = logging.getLogger(__name__)
def is_sane_database(Base, session):
    """Check whether the current database matches the models declared in model base.

    Currently we check that all tables exist with all columns. What is not checked:

    * Column types are not verified
    * Relationships are not verified at all (TODO)

    :param Base: Declarative Base for SQLAlchemy models to check
    :param session: SQLAlchemy session bound to an engine
    :return: True if all declared models have corresponding tables and columns.
    """
    engine = session.get_bind()
    iengine = inspect(engine)

    errors = False

    tables = iengine.get_table_names()

    # Go through all mapped SQLAlchemy models. ``Base.registry.mappers``
    # replaces the private ``Base._decl_class_registry`` that was removed in
    # SQLAlchemy 1.4; it only yields real mappers, so no ``_ModuleMarker``
    # filtering is needed.
    for mapper in Base.registry.mappers:
        klass = mapper.class_
        table = klass.__tablename__

        if table in tables:
            # Check all columns are found.
            # Looks like [{'default': "nextval('sanity_check_test_id_seq'::regclass)", 'autoincrement': True, 'nullable': False, 'type': INTEGER(), 'name': 'id'}]
            columns = [c["name"] for c in iengine.get_columns(table)]

            for column_prop in mapper.attrs:
                if isinstance(column_prop, RelationshipProperty):
                    # TODO: Add sanity checks for relations
                    pass
                else:
                    for column in column_prop.columns:
                        # Assume normal flat column
                        if column.key not in columns:
                            logger.error("Model %s declares column %s which does not exist in database %s", klass, column.key, engine)
                            errors = True
        else:
            logger.error("Model %s declares table %s which does not exist in database %s", klass, table, engine)
            errors = True

    return not errors
Below is the py.test testing code to exercise this:
"""Tests for checking database sanity checks functions correctly."""
from pyramid_web20.system.model.sanitycheck import is_sane_database
from sqlalchemy import engine_from_config, Column, Integer, String
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy import ForeignKey
# NOTE(review): py.test module-level setup hook. The parameter is
# conventionally named ``module`` (``self`` is misleading here), and the
# setLevel line is commented out, so this hook currently does nothing.
def setup_module(self):
# Quiet log output for the tests
import logging
from pyramid_web20.system.model.sanitycheck import logger
#logger.setLevel(logging.FATAL)
def gen_test_model():
    """Build a fresh Base plus a minimal one-column sample model."""
    Base = declarative_base()

    class SaneTestModel(Base):
        """A sample SQLAlchemy model to demonstrate db conflicts."""

        __tablename__ = "sanity_check_test"

        #: Running counter used in foreign key references
        id = Column(Integer, primary_key=True)

    return Base, SaneTestModel
def gen_relation_models():
    """Build a Base plus two models joined by an explicit relationship."""
    Base = declarative_base()

    class RelationTestModel(Base):
        __tablename__ = "sanity_check_test_2"
        id = Column(Integer, primary_key=True)

    class RelationTestModel2(Base):
        __tablename__ = "sanity_check_test_3"
        id = Column(Integer, primary_key=True)

        test_relationship_id = Column(ForeignKey("sanity_check_test_2.id"))
        test_relationship = relationship(
            RelationTestModel,
            primaryjoin=test_relationship_id == RelationTestModel.id,
        )

    return Base, RelationTestModel, RelationTestModel2
def gen_declarative():
    """Build a Base plus a model using declared_attr and a hybrid property."""
    Base = declarative_base()

    class DeclarativeTestModel(Base):
        __tablename__ = "sanity_check_test_4"
        id = Column(Integer, primary_key=True)

        # The leading ``@`` of these decorators was mangled to ``#`` in
        # transcription; without them the methods are plain functions and the
        # model gets no ``password`` column/property at all.
        @declared_attr
        def _password(self):
            return Column('password', String(256), nullable=False)

        @hybrid_property
        def password(self):
            return self._password

    return Base, DeclarativeTestModel
def test_sanity_pass(ini_settings, dbsession):
    """See database sanity check completes when tables and columns are created."""
    engine = engine_from_config(ini_settings, 'sqlalchemy.')
    # NOTE(review): conn/trans look vestigial — never used or closed here.
    conn = engine.connect()
    trans = conn.begin()

    Base, SaneTestModel = gen_test_model()
    session = sessionmaker(bind=engine)()

    try:
        Base.metadata.drop_all(engine, tables=[SaneTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    Base.metadata.create_all(engine, tables=[SaneTestModel.__table__])

    try:
        assert is_sane_database(Base, session) is True
    finally:
        Base.metadata.drop_all(engine)
def test_sanity_table_missing(ini_settings, dbsession):
    """See check fails when there is a missing table."""
    engine = engine_from_config(ini_settings, 'sqlalchemy.')
    conn = engine.connect()
    trans = conn.begin()

    Base, SaneTestModel = gen_test_model()
    session = sessionmaker(bind=engine)()

    # Ensure the table is absent, then expect the check to fail.
    try:
        Base.metadata.drop_all(engine, tables=[SaneTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass

    assert is_sane_database(Base, session) is False
def test_sanity_column_missing(ini_settings, dbsession):
    """See check fails when there is a missing column."""
    engine = engine_from_config(ini_settings, 'sqlalchemy.')
    conn = engine.connect()
    trans = conn.begin()
    session = sessionmaker(bind=engine)()

    Base, SaneTestModel = gen_test_model()
    try:
        Base.metadata.drop_all(engine, tables=[SaneTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    Base.metadata.create_all(engine, tables=[SaneTestModel.__table__])

    # Delete one of the columns
    engine.execute("ALTER TABLE sanity_check_test DROP COLUMN id")

    assert is_sane_database(Base, session) is False
def test_sanity_pass_relationship(ini_settings, dbsession):
    """Relationships must not be reported as missing columns."""
    engine = engine_from_config(ini_settings, 'sqlalchemy.')
    conn = engine.connect()
    trans = conn.begin()
    session = sessionmaker(bind=engine)()

    Base, RelationTestModel, RelationTestModel2 = gen_relation_models()
    model_tables = [RelationTestModel.__table__, RelationTestModel2.__table__]
    try:
        Base.metadata.drop_all(engine, tables=model_tables)
    except sqlalchemy.exc.NoSuchTableError:
        pass
    Base.metadata.create_all(engine, tables=model_tables)

    try:
        assert is_sane_database(Base, session) is True
    finally:
        Base.metadata.drop_all(engine)
def test_sanity_pass_declarative(ini_settings, dbsession):
    """declared_attr/hybrid properties must not trip the sanity check."""
    engine = engine_from_config(ini_settings, 'sqlalchemy.')
    conn = engine.connect()
    trans = conn.begin()
    session = sessionmaker(bind=engine)()

    Base, DeclarativeTestModel = gen_declarative()
    try:
        Base.metadata.drop_all(engine, tables=[DeclarativeTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    Base.metadata.create_all(engine, tables=[DeclarativeTestModel.__table__])

    try:
        assert is_sane_database(Base, session) is True
    finally:
        Base.metadata.drop_all(engine)
Check out Runtime Inspection API.
You can pass Engine to inspect() as well. Once you have sqlalchemy.engine.reflection.Inspector object, now you can use get_table_names(), get_columns(tbl_name), and any other methods(e.g. for primary keys, constraints, indexes, ...) to examine the "real" schema that your database has.