Create SQLAlchemy session on event - python

If I want to use the database while processing a request, I use dependency injection like this:
@app.post("/sample_test")
async def sample_test(db: Session = Depends(get_db)):
    return db.query(models.User.height).all()
But I cannot do it with events like this:
@app.on_event("startup")
async def sample_test(db: Session = Depends(get_db)):
    return db.query(models.User.height).all()
because Starlette events don't support Depends.
This is my get_db() function:
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
just like in the FastAPI manual (https://fastapi.tiangolo.com/tutorial/sql-databases/).
How can I access get_db() inside my event function, so I can work with a Session?
I've tried:
@app.on_event("startup")
async def sample_test(db: Session = Depends(get_db)):
    db = next(get_db())
    return db.query(models.User.height).all()
but it doesn't work.
I use MSSQL, if it's important.

Instead of using a dependency you can import the SessionLocal you've created (as shown in the FastAPI manual) and use it as a context manager to open and close the session:
@app.on_event("startup")
async def sample_test():
    with SessionLocal() as db:
        return db.query(models.User.height).all()
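Alternatively, if you want to keep the cleanup logic in one place, a minimal sketch (reusing the get_db generator from the question) is to wrap it with contextlib.contextmanager so its finally block still runs:

from contextlib import contextmanager

# Reuse the generator-based dependency as a plain context manager;
# its finally block (db.close()) still executes on exit.
get_db_context = contextmanager(get_db)

@app.on_event("startup")
async def sample_test():
    with get_db_context() as db:
        return db.query(models.User.height).all()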

Related

How to yield a db connection in a python sqlalchemy function similar to how it is done in FastAPI?

In FastAPI I had the following function that I used to open and close a DB session:
def get_db():
    try:
        db = SessionLocal()
        yield db
    finally:
        db.close()
And within the routes of my API I would do something like this:
@router.get("/")
async def read_all_events(user: dict = Depends(get_current_user), db: Session = Depends(get_db)):
    logger.info("API read_all_events")
    if user is None:
        raise http_user_credentials_not_valid_exception()
    return db.query(models.Events).all()
You can see that I am injecting the session into the API call.
So now I want to do something similar within a plain Python function:
def do_something():
    # get person data from database
    # play with person data
    # save new person data in database
    # get cars data from database
So I am wondering if I should use the same approach as in FastAPI (I do not know how), or if I should just be opening and closing the connection manually like this:
def do_something():
    try:
        db = SessionLocal()
        yield db
        # get person data from database
        # play with person data
        # save new person data in database
        # get cars data from database
    finally:
        db.close()
Thanks
The usage of yield here is so that Depends(get_db) returns the db session instance to the FastAPI route; as soon as the route returns a response to the user, the finally clause (db.close()) is executed. This is good because every request uses a separate db session, and db connections are closed after every route response.
If you want to use the db session normally in a function, just get the db instance using db = SessionLocal(), and proceed to use the db instance in the function.
Example:
def do_something():
    db = SessionLocal()
    event = db.query(models.Events).first()
    db.delete(event)
    db.commit()
    db.close()
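If you'd rather not risk leaking the session when a query raises mid-function, a minimal sketch (assuming SQLAlchemy 1.4+, where Session supports the context-manager protocol):

def do_something():
    # the session is closed automatically on exit,
    # even if one of the queries raises
    with SessionLocal() as db:
        event = db.query(models.Events).first()
        db.delete(event)
        db.commit()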

The garbage collector is trying to clean up connection asyncmy.connection.Connection

I'll try to be as complete as possible in this issue.
I'm using Sanic, an ASGI Python framework, and I built a Database manager on top of this.
This database manager uses a ContextVar to give access to the current db instance anywhere in the code.
Here's the code related to the database:
database.py
# -*- coding:utf-8 -*-
from sqlalchemy import exc, event
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession as SQLAlchemyAsyncSession
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.pool import Pool, QueuePool, NullPool
from sqlalchemy.exc import OperationalError
from contextvars import ContextVar
from sentry_sdk import push_scope, capture_exception
from sanic import Sanic

class EngineNotInitialisedError(Exception):
    pass
class DBSessionContext:
    def __init__(self, read_session: Session, write_session: Session, commit_on_exit: bool = True) -> None:
        self.read_session = read_session
        self.write_session = write_session
        self.commit_on_exit = commit_on_exit
        self.token = None
        self._read = None
        self._write = None

    def _disable_flush(self, *args, **kwargs):
        raise NotImplementedError('Unable to flush a read-only session.')

    async def close(self, exc_type=None, exc_value=None, traceback=None):
        if self._write:
            try:
                if exc_value and getattr(exc_value, 'status_code', 500) > 300:
                    await self._write.rollback()
                else:
                    await self._write.commit()
            except Exception:
                pass
            try:
                await self._write.close()
            except OperationalError as e:
                if e.orig.args[0] != 2013:  # Lost connection to MySQL server during query
                    raise e

        if self._read:
            try:
                await self._read.close()
            except OperationalError as e:
                if e.orig.args[0] != 2013:  # Lost connection to MySQL server during query
                    raise e

    def set_token(self, token):
        self.token = token

    @property
    def read(self) -> Session:
        if not self._read:
            self._read = self.read_session()
            self._read.flush = self._disable_flush
        return self._read

    @property
    def write(self) -> Session:
        if not self._write:
            self._write = self.write_session()
        return self._write
class AsyncSession(SQLAlchemyAsyncSession):
    async def execute(self, statement, **parameters):
        return await super().execute(statement, parameters)

    async def first(self, statement, **parameters):
        executed = await self.execute(statement, **parameters)
        return executed.first()

    async def all(self, statement, **parameters):
        executed = await self.execute(statement, **parameters)
        return executed.all()
class DBSession:
    def __init__(self):
        self.app = None
        self.read_engine = None
        self.read_session = None
        self.write_engine = None
        self.write_session = None
        self._session = None
        self.context = ContextVar("context", default=None)
        self.commit_on_exit = True
    def init_app(self, app: Sanic) -> None:
        self.app = app
        self.commit_on_exit = self.app.config.get('DATABASE_COMMIT_ON_EXIT', cast=bool, default=True)
        self.read_engine = create_async_engine(
            self.app.config.get('DATABASE_READ_URL'),
            connect_args={
                'connect_timeout': self.app.config.get('DATABASE_CONNECT_TIMEOUT', cast=int, default=3)
            },
            **{
                'echo': self.app.config.get('DATABASE_ECHO', cast=bool, default=False),
                'echo_pool': self.app.config.get('DATABASE_ECHO_POOL', cast=bool, default=False),
                # used to create a connection pool instance using the connection parameters given in the URL
                'poolclass': QueuePool,
                # if True (and poolclass is not NullPool), enables the connection pool "pre-ping"
                # feature that tests connections for liveness upon each checkout
                'pool_pre_ping': self.app.config.get('DATABASE_POOL_PRE_PING', cast=bool, default=True),
                # the number of connections to allow in connection pool "overflow"
                'max_overflow': self.app.config.get('DATABASE_MAX_OVERFLOW', cast=int, default=10),
                # the number of connections to keep open inside the connection pool
                'pool_size': self.app.config.get('DATABASE_POOL_SIZE', cast=int, default=100),
                # this setting causes the pool to recycle connections after the given number of seconds has passed
                'pool_recycle': self.app.config.get('DATABASE_POOL_RECYCLE', cast=int, default=3600),
                # number of seconds to wait before giving up on getting a connection from the pool
                'pool_timeout': self.app.config.get('DATABASE_POOL_TIMEOUT', cast=int, default=5),
            }
        )
        # see https://writeonly.wordpress.com/2009/07/16/simple-read-only-sqlalchemy-sessions/
        self.read_session = sessionmaker(
            bind=self.read_engine,
            expire_on_commit=False,
            class_=AsyncSession,
            autoflush=False,
            autocommit=False
        )

        self.write_engine = create_async_engine(
            self.app.config.get('DATABASE_WRITE_URL'),
            connect_args={
                'connect_timeout': self.app.config.get('DATABASE_CONNECT_TIMEOUT', cast=int, default=3)
            },
            **{
                'echo': self.app.config.get('DATABASE_ECHO', cast=bool, default=False),
                'echo_pool': self.app.config.get('DATABASE_ECHO_POOL', cast=bool, default=False),
                'poolclass': NullPool,  # no pooling: a fresh connection per checkout
            }
        )

        self.write_session = sessionmaker(
            bind=self.write_engine,
            expire_on_commit=False,
            class_=AsyncSession,
            autoflush=True
        )
    async def __aenter__(self):
        session_ctx = DBSessionContext(self.read_session, self.write_session, self.commit_on_exit)
        session_ctx.set_token(self.context.set(session_ctx))
        return session_ctx

    async def __aexit__(self, exc_type, exc_value, traceback):
        session_ctx = self.context.get()
        try:
            await session_ctx.close(exc_type, exc_value, traceback)
        except Exception:
            pass
        self.context.reset(session_ctx.token)

    @property
    def read(self) -> Session:
        return self.context.get().read

    @property
    def write(self) -> Session:
        return self.context.get().write
@event.listens_for(Pool, "checkout")
def check_connection(dbapi_con, con_record, con_proxy):
    '''Listener for Pool checkout events that pings every connection before using.
    Implements pessimistic disconnect handling strategy. See also:
    http://docs.sqlalchemy.org/en/rel_0_8/core/pooling.html#disconnect-handling-pessimistic'''
    cursor = dbapi_con.cursor()
    try:
        cursor.execute("SELECT 1")
    except exc.OperationalError as ex:
        if ex.args[0] in (2006,   # MySQL server has gone away
                          2013,   # Lost connection to MySQL server during query
                          2055):  # Lost connection to MySQL server at '%s', system error: %d
            raise exc.DisconnectionError()  # caught by pool, which will retry with a new connection
        else:
            raise
    cursor.close()

db = DBSession()
This configuration allows me to run something like:
from models import User
from database import db

@app.get('/user')
async def get_user(request):
    async with db:
        users = User.find_all()  # special method on the model that returns all users
    return json({'items': [{'id': x.id} for x in users]})
The __aenter__ and, above all, the __aexit__ of the DBSession class (and the underlying DBSessionContext) handle everything when the code exits the async with block, including any exceptions that occurred.
The issue I'm having, is that from time to time, I have the following error reported at Sentry:
The garbage collector is trying to clean up connection <AdaptedConnection <asyncmy.connection.Connection object at 0x7f290c50dd30>>. This feature is unsupported on asyncio dbapis that lack a "terminate" feature, since no IO can be performed at this stage to reset the connection. Please close out all connections when they are no longer used, calling close() or using a context manager to manage their lifetime.
I don't understand why this is happening. Odder still, I often get this error on a function call that doesn't use the database at all (the async with db is still present, but nothing inside it touches the database).
The body of that function is just a network call:
import requests

@app.get('/notify')
async def get_user(request):
    async with db:
        requests.post('https://service.com/notify', data={'some': 'data'})
    return text('ok')
Here are my assumptions, but I'm hoping to have a clearer view on the issue:
Assumption 1: Since the read side uses a QueuePool, maybe the __aexit__ call to close doesn't really close the connection; the connection remains open, causing the "The garbage collector is trying to clean up connection" issue later on.
Assumption 2: The connection is made at the check_connection and remains open, causing the "garbage collector" issue
Any idea why I'm having that "garbage collector" issue?
I'm using:
sanic==22.9.0
sqlalchemy[asyncio]==1.4.41
asyncmy==0.2.5
This line might be causing the problem: await session_ctx.close(exc_type, exc_value, traceback).
Try changing it to: await asyncio.shield(session_ctx.close(exc_type, exc_value, traceback)).
This safeguard was added to the SQLAlchemy code base in July; the change was implemented in /asyncio/engine.py and /asyncio/session.py. Here is the change in the code:
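(A minimal sketch of what that change amounts to in the DBSession.__aexit__ shown earlier; this is just the shielded variant of the code above, not the SQLAlchemy diff itself:)

import asyncio

class DBSession:
    # ... everything else unchanged ...

    async def __aexit__(self, exc_type, exc_value, traceback):
        session_ctx = self.context.get()
        try:
            # shield close() so a cancellation of the surrounding task
            # cannot interrupt it mid-way and leave the connection for
            # the garbage collector to reclaim
            await asyncio.shield(session_ctx.close(exc_type, exc_value, traceback))
        except Exception:
            pass
        self.context.reset(session_ctx.token)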
Additional references:
SQLAlchemy issue 8145
The change was added to version 1.4.40, released August 8, 2022.
A naive and fast solution might be to check it by wrapping it in a try/except block and handling the specific error by printing the output.
You don't manage the lifetime of the requests.post; isn't this keeping close from being called?
Although I do think __aexit__ should close the session, I don't really understand why you do this at all: async with db:. What is the purpose of the session?
Nice implementation overall.
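(Side note on the /notify handler: requests.post is a blocking call inside an async handler, so it stalls the event loop for the duration of the HTTP request. A minimal sketch of offloading it to a thread, assuming the handler shown earlier:)

import asyncio
import functools
import requests

@app.get('/notify')
async def get_user(request):
    loop = asyncio.get_running_loop()
    # run the blocking HTTP call in the default thread pool so the
    # event loop (and the db context) is not blocked while it runs
    await loop.run_in_executor(
        None,
        functools.partial(requests.post, 'https://service.com/notify', data={'some': 'data'})
    )
    return text('ok')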

How to make FastAPI's path operation function to commit on successful return

I'm developing a web API server with FastAPI and SQLAlchemy 1.4.
What I want to do:
Commit at the end of every path operation function, implicitly, if no error occurred.
When an HTTPException occurs, roll back the transaction.
In short: make FastAPI's path operation functions atomic, like a database transaction.
These code snippets are what I'm using from tiangolo's full-stack-fastapi-postgresql project (https://github.com/tiangolo/full-stack-fastapi-postgresql):
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

def get_db() -> Generator:
    try:
        db = SessionLocal()
        yield db
    finally:
        db.close()
from fastapi import FastAPI, Depends
from sqlalchemy.orm import Session

app = FastAPI()

@app.put("/users/{id}")
def update_user(
    id: int,
    new_name: str,
    new_login_id: str,
    db: Session = Depends(deps.get_db),
):
    """
    Simple User update API
    """
    user = db.query(User).filter(User.id == id).first()
    if not user:
        raise HTTPException(404, "User not found")
    user.name = new_name
    if db.query(User).filter(User.login_id == new_login_id).first():
        # if the login id already exists, I want to roll back the name update
        raise HTTPException(409, "Already used login id")
    user.login_id = new_login_id
    db.add(user)
    # db.commit()
    # I don't want to write db.commit() at the end of every path operation func.
    return
I've tried several approaches.
First, using FastAPI (Starlette) middleware: I can't find a way to pass db (the Session) to the middleware.
Second, adding db.commit() to get_db()'s finally block: db.commit() is then called even when HTTPException is raised.
So I tried to use a FastAPI error handler to call db.rollback() when HTTPException is raised, but I can't find a way to pass db (the Session) to the error handler either.
How can I achieve it?
Firstly, I think SQLAlchemy won't commit data if an exception occurred in the transaction.
Then, if you want to pass db around, you can:
def get_db(request) -> Generator:
    try:
        db = SessionLocal()
        request.state.db = db
        yield db
    finally:
        db.close()
About the exception handler: the finally block is executed before the exception handler, so you can't roll back in the exception handler.
About middleware: all HTTP path operation exceptions are caught in Starlette's ExceptionMiddleware, which is wired into the FastAPI app. So if you want to customize this behaviour you should create a middleware CustomExceptionMiddleware, and you can use the db in it:
class CustomExceptionMiddleware(ExceptionMiddleware):
    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if scope["type"] != "http":
            await self.app(scope, receive, send)
            return
        response_started = False

        async def sender(message: Message) -> None:
            nonlocal response_started
            if message["type"] == "http.response.start":
                response_started = True
            await send(message)

        try:
            await self.app(scope, receive, sender)
        except Exception as exc:
            # some operation
            request = Request(scope, receive=receive)
            # request.state.db is usable here
            # some operation
Then you must override FastAPI.build_middleware_stack() to replace ExceptionMiddleware with CustomExceptionMiddleware. Now you can execute the rollback (or any other operation) in CustomExceptionMiddleware. You should also move db.close() from get_db() into CustomExceptionMiddleware.
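A simpler alternative, as a minimal sketch rather than a definitive answer: in FastAPI 0.74+, exceptions raised in the route propagate into dependencies with yield, so the commit-on-success / rollback-on-error logic can live entirely in get_db():

def get_db() -> Generator:
    db = SessionLocal()
    try:
        yield db
        db.commit()    # runs only if the path operation returned normally
    except Exception:
        db.rollback()  # any exception, including HTTPException, rolls back
        raise
    finally:
        db.close()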

Celery and InFailedSqlTransaction

All of a sudden, in my Celery application I'm getting lots of:
(psycopg2.errors.InFailedSqlTransaction) current transaction is aborted, commands ignored until end of transaction block
The truth is that I have no idea how to properly setup Celery to work with SQLAlchemy. I have a BaseTask that all tasks inherit from and it looks like this:
from sqlalchemy.orm import scoped_session, sessionmaker

session_factory = sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=create_engine("postgresql://****")
)
Session = scoped_session(session_factory)

class BaseTask(celery.Task):
    def after_return(self, *args: tuple, **kwargs: dict) -> None:
        Session.remove()

    @property
    def session(self) -> OrmSession:
        return Session()
And all of my tasks (bound or not) use either self.session or {task_func}.session to make their queries. Should I rather use a context manager around my queries within the tasks, like:
@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations."""
    session = Session()
    try:
        yield session
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()

@app.task()
def my_task():
    with session_scope() as session:
        do_a_query(session)
Can someone please explain to me how those sessions work? And guide me towards the correct "Celery use of SQLAlchemy"?
Thank you.
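For what it's worth (a hedged note on the error itself): psycopg2 raises InFailedSqlTransaction when an earlier statement in the same transaction failed and nothing issued a rollback, after which every statement on that session is refused until the transaction ends. The session_scope shown above addresses exactly that, because its except clause rolls back immediately. A belt-and-braces sketch of the BaseTask that also rolls back before removing the scoped session:

class BaseTask(celery.Task):
    def after_return(self, *args: tuple, **kwargs: dict) -> None:
        # roll back any open (possibly failed) transaction before the
        # scoped session is removed, so a failed task cannot leave the
        # session in the "commands ignored" state
        Session.rollback()
        Session.remove()

    @property
    def session(self) -> OrmSession:
        return Session()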

dependency_overrides didn't work in FastAPI

I am trying to write tests for a web service, and I want to create a separate database for the tests when they run. These are my pytest fixtures for it:
#pytest.fixture(scope="session")
def db_engine():
engine = create_engine(SQLALCHEMY_DATABASE_URL)
if not database_exists:
create_database(engine.url)
Base.metadata.create_all(bind=engine)
yield engine
#pytest.fixture(scope="function")
def db(db_engine):
connection = db_engine.connect()
connection.begin()
db = Session(bind=connection)
yield db
db.rollback()
connection.close()
#pytest.fixture(scope="function")
def client(db):
app.dependency_overrides[get_db] = lambda: db
with TestClient(app) as c:
yield c
But app.dependency_overrides[get_db] = lambda: db didn't work: requests continue to be sent to the main database and not the test one.
One of my endpoints:
@router.get("/", response_model=List[RoomPayload])
def read(db: Session = Depends(get_db),
         user=Depends(manager)):
    q = db.query(Room).all()
    if not q:
        raise HTTPException(status_code=404, detail="Rooms not found")
    return q
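A common cause (an assumption here, since the question doesn't show the imports): the key in app.dependency_overrides must be the exact same function object the router references in Depends(get_db). If conftest.py imports get_db from a different module path than the router does, the keys are different objects and the override never matches. A minimal sketch, with hypothetical module paths:

# conftest.py (module paths are hypothetical)
import pytest
from fastapi.testclient import TestClient

# import get_db from the SAME module the router imports it from,
# and the app instance that includes that router
from app.deps import get_db
from app.main import app

@pytest.fixture(scope="function")
def client(db):
    # the key must be the identical function object used in Depends(get_db)
    app.dependency_overrides[get_db] = lambda: db
    with TestClient(app) as c:
        yield c
    app.dependency_overrides.clear()  # don't leak the override across tests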
