How to close MySQL connections using SQLAlchemy and Flask? - python

I built an API using Flask and I'm using a service (as below) to create my database connections.
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

class DatabaseService:
    def __init__(self):
        self.connection_string = "foo"

    def create_session(self):
        engine = create_engine(self.connection_string)
        Session = scoped_session(sessionmaker(bind=engine))
        return Session
In my app.py I add and remove these sessions to the Flask application context (g) as the docs suggest, so I can reference g.session whenever I need it.
def get_session():
    if 'session' not in g:
        g.session = database_service.create_session()

@app.teardown_appcontext
def shutdown_session(exception=None):
    if 'session' in g:
        g.session.remove()
    return None
This way every request has its own session, which is closed after processing. Am I right?
I don't understand why the connections are still alive on my database after the request is already done.
Whenever I run show processlist I can see multiple sleeping connections from my API.

I see no problem opening and closing sessions per request:
my_session = Session(engine)
my_session.execute(some_query)
my_session.close()
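A likely cause, judging from the service above: create_session() builds a brand-new engine (and therefore a brand-new connection pool) on every request, so each request leaves behind a pool whose checked-in connections stay open and show up as sleeping in show processlist. A minimal sketch of the usual arrangement, assuming a single module-level engine is acceptable:

from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

# One engine (and therefore one pool) per process, created at import
# time, rather than one engine per request.
engine = create_engine("mysql://user:password@host/db")  # placeholder URL
Session = scoped_session(sessionmaker(bind=engine))

def get_session():
    # scoped_session returns the same session within a scope;
    # Session.remove() at teardown closes it and checks its
    # connection back into the shared pool for reuse.
    return Session()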

Related

How To Setup a SQLAlchemy Asynchronous Scoped Session In Python Behave Synchronous Hooks?

I have also asked this question on behave GitHub discussions and SQLAlchemy GitHub discussions.
I am trying to hook up a SQLAlchemy 1.4 engine and a global scoped asynchronous session in behave before_all and before_scenario hooks, to model testing similar to that outlined in the following blog article.
The approach is to have a parent transaction and each test running in a nested transaction that gets rolled back when the test completes.
Unfortunately the before_all, before_scenario hooks are synchronous.
The application under test uses an asynchronous engine and asynchronous session created using sessionmaker:
def _create_session_factory(engine) -> sessionmaker[AsyncSession]:
    factory = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
    factory.configure(bind=engine)
    return factory
In the before_scenario test hook the following line raises an error when I try to create a scoped session.
"""THIS RAISES AN ERROR RuntimeError: no running event loop"""
context.session = context.Session(bind=context.connection, loop=loop)
The full code listing for setting up the test environment is listed below.
How do I get an asynchronous scoped session created in the synchronous before_all, before_scenario test hooks of behave?
import asyncio
import logging

from behave.api.async_step import use_or_create_async_context
from behave.log_capture import capture
from behave.runner import Context
from behave.model import Scenario
from sqlalchemy import event
from sqlalchemy.ext.asyncio import AsyncSession, async_scoped_session
from sqlalchemy.ext.asyncio.engine import async_engine_from_config
from sqlalchemy.orm.session import sessionmaker

from fastapi_graphql.server.config import Settings

logger = logging.getLogger()


@capture(level=logging.INFO)
def before_all(context: Context) -> None:
    """Setup database engine and session factory."""
    logging.info("Setting up logging for behave tests...")
    context.config.setup_logging()
    logging.info("Setting up async context...")
    use_or_create_async_context(context)
    loop = context.async_context.loop
    asyncio.set_event_loop(loop)
    logging.info("Configuring db engine...")
    settings = Settings()
    config = settings.dict()
    config["sqlalchemy_url"] = settings.db_url
    engine = async_engine_from_config(config, prefix="sqlalchemy_")
    logging.info(f"Db engine configured for connecting to: {settings.db_url}")
    logging.info("Creating a global session instance")
    factory = sessionmaker(bind=engine, class_=AsyncSession, expire_on_commit=False)
    # factory.configure(bind=engine)
    Session = async_scoped_session(factory(), scopefunc=asyncio.current_task)
    context.engine = engine
    context.connection = loop.run_until_complete(engine.connect())
    context.factory = factory
    context.Session = Session


@capture(level=logging.INFO)
def after_all(context: Context) -> None:
    """Teardown database engine gracefully."""
    loop = context.async_context.loop
    logging.info("Closing connection")
    loop.run_until_complete(context.connection.close())
    logging.info("Closing database engine...")
    loop.run_until_complete(context.engine.dispose())
    logging.info("Database engine closed")


@capture(level=logging.INFO)
def before_scenario(context: Context, scenario: Scenario) -> None:
    """Create a database session."""
    loop = context.async_context.loop
    logging.info("Starting a transaction...")
    context.transaction = loop.run_until_complete(context.connection.begin())
    logging.info("Transaction started...")
    logging.info("Creating a db session...")
    breakpoint()
    # THIS RAISES AN ERROR: RuntimeError: no running event loop
    context.session = context.Session(bind=context.connection, loop=loop)
    logging.info("Db session created")
    breakpoint()
    logging.info("Starting a nested transaction...")
    context.session.begin_nested()
    logging.info("Nested transaction started...")

    @event.listens_for(context.session, "after_transaction_end")
    def restart_savepoint(db_session, transaction):
        """Support tests with rollbacks.

        This is required for tests that call some services that issue
        rollbacks in try-except blocks.

        With this event the Session always runs all operations within
        the scope of a SAVEPOINT, which is established at the start of
        each transaction, so that tests can also rollback the
        “transaction” as well while still remaining in the scope of a
        larger “transaction” that’s never committed.
        """
        if context.transaction.nested and not context.transaction._parent.nested:
            # ensure that state is expired the way session.commit() at
            # the top level normally does
            context.session.expire_all()
            context.session.begin_nested()


@capture(level=logging.INFO)
def after_scenario(context: Context, scenario: Scenario) -> None:
    """Close the database session."""
    logging.info("Closing db session...")
    loop = asyncio.get_event_loop()
    loop.run_until_complete(context.Session.remove())
    logging.info("Db session closed")
    logging.info("Rolling back transaction...")
    loop.run_until_complete(context.transaction.rollback())
    logging.info("Rolled back transaction")

Does SQLAlchemy close sessions after commit()?

So this question is a little like Does SQLAlchemy reset the database session between SQLAlchemy Sessions from the same connection?
I have a Flask/SQLAlchemy/Postgres app, which intermittently seems to drop connections after a commit() that occurs as part of a POST request.
This causes me headaches as I rely upon a customized option (https://www.postgresql.org/docs/9.6/runtime-config-custom.html) to control row level security - in effect executing the following before each Flask request while utilising scoped sessions:
@app.before_request
def load_user():
    ...
    # Set up RLS.
    statement = f"SET app.permitted_workspace_id = '{workspace_id}'"
    db.db_session.execute(statement)
    ...
This pattern generally works fine, but occasionally seems to fail when, as far as I can tell, SQLAlchemy drops the existing connection after a commit() and checks out a new one, in which app.permitted_workspace_id is no longer set.
My workaround for this is to listen for session checkout events, and then re-set the parameter:
@event.listens_for(db_engine, 'checkout')
def receive_checkout(dbapi_connection, connection_record, connection_proxy):
    ...
    cursor = dbapi_connection.cursor()
    statement = f"SET app.permitted_workspace_id = '{g.user.workspace_id}'"
    cursor.execute(statement)
    return
So my question is really: is it unavoidable that SQLAlchemy may close sessions after commit(), meaning I lose my session parameters - even with more DB work still to do?
If so, do we think this pattern is secure or even acceptable practice? Ideally, I'd keep the session open until removed (via @app.teardown_appcontext), but since I'm struggling to achieve that, and still have the relevant info available within the Flask request, I think this is the next best way to go.
Thanks
Edit 1:
In terms of session scoping, the layout is this:
In a database module, I lay out the following:
def get_database_connection():
    ...
    db_engine = sa.create_engine(
        f'postgresql://{user}:{password}@{host}/postgres',
        echo=False,
        poolclass=sa.pool.NullPool
    )
    # Connect - RLS is controlled by db_get_user_details.
    db_connection = db_engine.connect()
    db_session = scoped_session(
        sessionmaker(
            autocommit=False,
            autoflush=False,
            expire_on_commit=False,
            bind=db_engine
        )
    )
    return db_engine, db_session, db_connection
This is then called up top from inside the main Flask application:
db_engine, db_session, db_connection = db.get_database_connection()
And session removal is controlled by a function as follows:
@app.teardown_appcontext
def remove_session(exception=None):
    db_session.remove()
So the answer here seems to be that commit() does perform a checkin with this pattern:
https://github.com/sqlalchemy/sqlalchemy/issues/4925
if Session is what you're working with then yes, the Session will release connections when any of commit(), rollback(), or close() is called.
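If the setting must survive those checkins, an alternative to the pool checkout listener (a sketch, not from the linked thread; it assumes the sessionmaker is accessible as session_factory and that a Flask request context is active) is to re-apply the value at the start of every transaction via the session-level after_begin event, using PostgreSQL's set_config() so the value travels as a bound parameter:

import sqlalchemy as sa
from sqlalchemy import event
from flask import g

@event.listens_for(session_factory, 'after_begin')
def set_rls_workspace(session, transaction, connection):
    # Fires at the start of each transaction, on whichever pooled
    # connection the session just acquired; set_config(..., true)
    # scopes the setting to the current transaction.
    connection.execute(
        sa.text("SELECT set_config('app.permitted_workspace_id', :wid, true)"),
        {"wid": str(g.user.workspace_id)},
    )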

Keeping SQLAlchemy session alive when streaming a Flask Response

I'm trying to stream large CSVs to clients from my Flask server, which uses Flask-SQLAlchemy.
When configuring the app (using the factory pattern), db.session.close() is called after each request:
@app.after_request
def close_connection(r):
    db.session.close()
    return r
This configuration has worked great up until now, as all the requests are short lived. But when streaming a response, the SQLAlchemy session is closed prematurely, throwing the following error when the generator is called:
sqlalchemy.orm.exc.DetachedInstanceError: Parent instance <Question> is not bound to a Session; lazy load operation of attribute 'offered_answers' cannot proceed
Pseudo-code:
@app.route('/export')
def export_data():
    answers = Answer.query.all()
    questions = Question.query.all()

    def generate():
        # Iterate through answers/questions and write out
        # various relationships to CSV rows.
        ...

    response = Response(stream_with_context(generate()), mimetype='text/csv')
    return response
I've tried multiple configurations of using / not using stream_with_context and global flags in close_connection to avoid automatically closing the connection, but the same error persists.
It turned out @app.after_request was closing the database session before the generator that streams the file was ever invoked.
The solution was to migrate db.session.close() to @app.teardown_request; stream_with_context must also be used when instantiating Response. A sketch of the fix follows.
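A minimal sketch of that fix, assuming the factory-pattern app and Flask-SQLAlchemy db object from above:

@app.teardown_request
def close_connection(exception=None):
    # With stream_with_context, the request context (and this teardown)
    # is kept alive until the generator finishes, so the session stays
    # usable while streaming and is closed only once the response ends.
    db.session.close()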

Cassandra raises operation timeout in uWSGI server

I have a web server written in Python, which interacts with a Cassandra database using Cassandra's Python driver.
When I start this Python server using the gunicorn HTTP server, my requests are handled without error,
but when I run the same server using the uWSGI HTTP server, after the first request, which must write some data into a Cassandra table, Cassandra raises an error:
cassandra.OperationTimedOut: errors={}, last_host=127.0.0.1
The error is raised in the session.prepare() function call in Python.
We got the same error message in our application.
We fixed it by opening the Cassandra session in the constructor and shutting it down in the destructor of the Model class. Please see the code below:
class Model():
    def __init__(self):
        self.db = cassandra.createSession()

    def __del__(self):
        self.db.shutdown()
EDITED:
I found a better solution here: uWSGI Cassandra
from cqlengine import connection
from cqlengine.connection import (
    cluster as cql_cluster, session as cql_session)

try:
    from uwsgidecorators import postfork
except ImportError:
    # We're not in a uWSGI context, no need to hook Cassandra session
    # initialization to the postfork event.
    pass
else:
    @postfork
    def cassandra_init():
        """Initialize a new Cassandra session in the context.

        Ensures that a new session is returned for every new request.
        """
        if cql_cluster is not None:
            cql_cluster.shutdown()
        if cql_session is not None:
            cql_session.shutdown()
        connection.setup()
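In newer driver versions cqlengine ships inside the driver as cassandra.cqlengine, so the equivalent hook (a sketch; the host list and keyspace are placeholders) would look like:

from cassandra.cqlengine import connection

try:
    from uwsgidecorators import postfork
except ImportError:
    pass
else:
    @postfork
    def cassandra_init():
        # uWSGI forks workers after the master imports the app, so a
        # session created before the fork shares sockets across
        # processes; shut it down and rebuild it in each worker.
        if connection.cluster is not None:
            connection.cluster.shutdown()
        if connection.session is not None:
            connection.session.shutdown()
        connection.setup(["127.0.0.1"], "my_keyspace")  # placeholders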

SQLAlchemy MySQL connections not closing on Flask API

I have an API I have written in Flask. It uses SQLAlchemy to deal with a MySQL database. I don't use Flask-SQLAlchemy, because I don't like how the module forces you into a certain pattern for declaring the model.
I'm having a problem in which my database connections are not closing. The object representing the connection is going out of scope, so I assume it is being garbage collected. I also explicitly call close() on the session. Despite this, the connections stay open long after the API call has returned its response.
sqlsession.py: Here is the wrapper I am using for the session.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool

class SqlSession:
    def __init__(self, conn=Constants.Sql):
        self.db = SqlSession.createEngine(conn)
        Session = sessionmaker(bind=self.db)
        self.session = Session()

    @staticmethod
    def createEngine(conn):
        return create_engine(
            conn.URI.format(user=conn.USER, password=conn.PASS, host=conn.HOST,
                            port=conn.PORT, database=conn.DATABASE),
            poolclass=NullPool
        )

    def close(self):
        self.session.close()
flaskroutes.py: Here is an example of the Flask app instantiating and using the wrapper object. Note that it instantiates the wrapper at the beginning, within the scope of the API call, then closes the session at the end; presumably the object is garbage collected after the response is returned.
def commands(self, deviceId):
    sqlSession = SqlSession(self.sessionType)  # <---
    commandsQueued = getCommands()
    jsonCommands = []
    for command in commandsQueued:
        jsonCommand = command.returnJsonObject()
        jsonCommands.append(jsonCommand)
        sqlSession.session.delete(command)
    sqlSession.session.commit()
    resp = jsonify({'commands': jsonCommands})
    sqlSession.close()  # <---
    resp.status_code = 200
    return resp
I would expect the connections to be cleared as soon as the HTTP response is made, but instead the connections end up in the "SLEEP" state (when viewed in the MySQL command line interface via show processlist).
I ended up using the advice from this SO post:
How to close sqlalchemy connection in MySQL
I strongly recommend reading that post to anyone having this problem. Basically, I added a dispose() call to the close method. Doing so causes the entire connection to be destroyed, while closing simply returns connections to an available pool (but leaves them open).
def close(self):
    self.session.close()
    self.db.dispose()
This whole thing was a bit confusing to me, but at least now I understand more about the connection pool.
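To make the close()/dispose() distinction concrete, a small sketch (the URL is a placeholder):

from sqlalchemy import create_engine

engine = create_engine("mysql://user:password@host/db")  # placeholder URL

conn = engine.connect()
conn.close()      # checks the connection back into the pool; the MySQL
                  # thread stays alive and shows as SLEEP in show processlist

engine.dispose()  # closes every connection held in the pool; the SLEEP
                  # entries disappear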
