Object already attached to session - Python

I'm trying to write unit tests for a module within a Flask app that uses its own database connection.
The module opens its connection thus:
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Session = sessionmaker(bind=engine)
session = Session()
and I then use session throughout the module.
My unit test has a fixture on conftest.py to create a new session:
@pytest.fixture(scope='module')
def test_session(app):
    """
    Creates a new database session for a test. Note you must use this fixture
    if your test connects to the db.
    Here we support not only commit calls but also rollback calls in tests.
    """
    connection = db.engine.connect()
    transaction = connection.begin()

    options = dict(bind=connection, binds={})
    db_session = db.create_scoped_session(options=options)

    db_session.begin_nested()

    # session is actually a scoped_session
    # for the `after_transaction_end` event, we need a session instance to
    # listen for, hence the `session()` call
    @sqlalchemy.event.listens_for(db_session(), 'after_transaction_end')
    def restart_savepoint(sess, trans):
        if trans.nested and not trans._parent.nested:
            db_session.expire_all()
            db_session.begin_nested()

    db.session = db_session

    yield db_session

    db_session.remove()
    transaction.rollback()
    connection.close()
and in my test I do this:
def test_schedule_orders_by_last_update(test_session, create_test_user):
    vendor = test_session.query(Vendors).filter(Vendors.name == 'Melie Bianco').first()
    amazon = AmazonRequests(vendor)
    amazon.schedule_orders_by_last_update()

    result = test_session.query(AmazonReportRequests).filter(AmazonReportRequests.vendor == vendor).all()
    assert len(result) == 1
    assert result[0].vendor.name == vendor.name
My problem is that when I run the test it always ends with the following error:
self = <sqlalchemy.orm.session.Session object at 0x1104fab50>, state = <sqlalchemy.orm.state.InstanceState object at 0x110863f10>, obj = <AmazonReportRequests None>

    def _before_attach(self, state, obj):
        if state.session_id == self.hash_key:
            return False

        if state.session_id and state.session_id in _sessions:
            raise sa_exc.InvalidRequestError(
                "Object '%s' is already attached to session '%s' "
                "(this is '%s')" % (state_str(state),
>                                   state.session_id, self.hash_key))
E       InvalidRequestError: Object '<AmazonReportRequests at 0x110863e90>' is already attached to session '2' (this is '1')
Shouldn't a query just retrieve the row from the database and ignore the other session?
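For context, the error suggests the module under test is still using the session it built at import time (session '2'), while the test queries through the fixture's session (session '1'), so the same object ends up claimed by two sessions. A minimal sketch of one workaround, assuming the module is importable as myapp.amazon (a hypothetical name) and exposes its module-level session attribute:

import myapp.amazon as amazon_module  # hypothetical module path

def test_schedule_orders_by_last_update(test_session, create_test_user, monkeypatch):
    # Point the module-level session at the test session so both sides
    # share one identity map instead of two competing sessions.
    monkeypatch.setattr(amazon_module, 'session', test_session)
    ...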

Related

How to yield a db connection in a python sqlalchemy function similar to how it is done in FastAPI?

In FastAPI I had the following function that I used to open and close a DB session:
def get_db():
    try:
        db = SessionLocal()
        yield db
    finally:
        db.close()
And within the routes of my API I would do something like this:
@router.get("/")
async def read_all_events(user: dict = Depends(get_current_user), db: Session = Depends(get_db)):
    logger.info("API read_all_events")
    if user is None:
        raise http_user_credentials_not_valid_exception()
    return db.query(models.Events).all()
You can see that I am injecting the session into the API call.
So now I want to do something similar within a plain Python function:
def do_something():
    # get person data from database
    # play with person data
    # save new person data in database
    # get cars data from database
So I am wondering if I should use the same approach as in FastAPI (I do not know how), or if I should just be opening and closing the connection manually, like this:
def do_something():
    try:
        db = SessionLocal()
        yield db
        # get person data from database
        # play with person data
        # save new person data in database
        # get cars data from database
    finally:
        db.close()
Thanks
The usage of yield in this case is so that Depends(get_db) returns the db session instance to the FastAPI route, and as soon as the route returns its response to the user, the finally clause (db.close()) is executed. This is good because every request uses a separate db session, and db connections are closed after every route response.
If you want to use the db session normally in a function, just get the db instance using db = SessionLocal(), and proceed to use the db instance in the function.
Example:
def do_something():
    db = SessionLocal()
    event = db.query(models.Events).first()
    db.delete(event)
    db.commit()
    db.close()
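If you prefer the close to be guaranteed even when the function raises, one option (an addition here, not part of the original answer) is to wrap the same yield-based generator with contextlib.contextmanager and use it in a with block:

from contextlib import contextmanager

@contextmanager
def get_db_ctx():
    # Same body as get_db, but usable via `with` in plain functions
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

def do_something():
    with get_db_ctx() as db:
        event = db.query(models.Events).first()
        db.delete(event)
        db.commit()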

dependency_overrides didn't work in FastAPI

I am trying to write tests for a web service, and I want the tests to run against a separate database. Here is my pytest fixture for setting that up:
#pytest.fixture(scope="session")
def db_engine():
engine = create_engine(SQLALCHEMY_DATABASE_URL)
if not database_exists:
create_database(engine.url)
Base.metadata.create_all(bind=engine)
yield engine
#pytest.fixture(scope="function")
def db(db_engine):
connection = db_engine.connect()
connection.begin()
db = Session(bind=connection)
yield db
db.rollback()
connection.close()
#pytest.fixture(scope="function")
def client(db):
app.dependency_overrides[get_db] = lambda: db
with TestClient(app) as c:
yield c
But app.dependency_overrides[get_db] = lambda: db didn't work, and requests continue to be sent to the main database instead of the test one.
One of my endpoints:
#router.get("/", response_model=List[RoomPayload])
def read(db: Session = Depends(get_db),
user=Depends(manager)):
q = db.query(Room).all()
if not q:
raise HTTPException(status_code=404, detail=f"Rooms not found")
return q
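A hedged guess, since the imports aren't shown: dependency_overrides is keyed by function object identity, so the get_db used as the key must be the very same object the routes reference in Depends(get_db). If the test imports or redefines a different get_db (for example, from a different module path), the override silently never matches. A sketch, where app.dependencies is an assumed module name:

from app.dependencies import get_db  # must be the same object used in Depends(get_db)

@pytest.fixture(scope="function")
def client(db):
    app.dependency_overrides[get_db] = lambda: db
    with TestClient(app) as c:
        yield c
    app.dependency_overrides.clear()  # don't leak the override into other tests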

FastAPI endpoint test - database session not in sync

I have an issue when testing my API: the DB session in the tests isn't up to date after performing a POST request.
For example:
#pytest.fixture(scope="module")
def db(engine, tables, users) -> Generator:
"""Returns an sqlalchemy session, and after the test tears down everything properly."""
try:
db = TestingSessionLocal()
yield db
finally:
db.close()
def test_test(client: TestClient, db: Session):
    item_id = 1
    params = {
        "item_id": item_id,
    }
    r = client.post("/endpoint", json=params)

    db2: Session = TestingSessionLocal()  # TEMP SOLUTION
    # item_from_db = service.table.get(db, item_id)  # TODO: make it work!!
    item_from_db = service.event.get(db2, item_id)
    db2.close()
I can see the new item in my DB at run time, but I can't get it with the fixture.
Only when I create a new connection to the DB do I manage to get the new item I added.
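A hedged suggestion: the module-scoped db fixture opened its transaction before the POST, so it may be reading a stale snapshot and serving cached identity-map state. Ending that transaction and expiring the cache usually lets the existing session see the newly committed row, without a second connection:

def test_test(client: TestClient, db: Session):
    r = client.post("/endpoint", json={"item_id": 1})
    db.rollback()     # end the stale transaction opened before the POST
    db.expire_all()   # drop cached state so the next query hits the DB
    item_from_db = service.event.get(db, 1)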

Pytest with SQLAlchemy db connection terminates in subsequent test case

I am using a SQLAlchemy engine connection as a pytest fixture as a way to test functions that require database interaction. When I run the test cases individually, via pytest tests/filename.py::test_get_token_data or pytest tests/filename.py::test_create, the tests pass; however, when running the entire test file, pytest tests/filename.py, I get the following error:
E sqlalchemy.exc.OperationalError: (psycopg2.errors.AdminShutdown) terminating connection due to administrator command
E server closed the connection unexpectedly
E This probably means the server terminated abnormally
E before or while processing the request.
(Background on this error at: http://sqlalche.me/e/13/e3q8)
To my understanding, the database is supposed to be cleared after each test (which I've confirmed); however, I do not understand the error. My code is below.
conftest.py
@pytest.fixture
def db_connection(test_config, admin_db_connection):
    config = test_config()
    engine = sqlalchemy.create_engine(config.DB_URL)
    connection = engine.connect()
    yield connection
    connection.close()

    # clear database
    from psycopg2.extensions import AsIs  # handle SQL quoting
    with admin_db_connection.cursor() as curs:
        curs.execute("drop database %s with (force);", (AsIs(config.DB_NAME),))
        curs.execute(
            "create database %s template vtag_template;", (AsIs(config.DB_NAME),)
        )
filename.py
import sqlalchemy as sa

@pytest.fixture
def db_injection(db_connection):
    with db_connection.begin():
        some_value = db_connection.execute(
            sa.sql.text(
                """insert into general.some_table as t (some_id, name, description, is_active)
                values (:some_id, :name, :description, :is_active)
                returning t.some_id;"""
            ),
            some_id='1234',
            description="TEST",
            is_active=True,
            name="TEST",
        )
        some_id_ = some_value.first()[0]

@pytest.fixture
def some_function(db_connection):
    with db_connection.begin():
        some_table = db_connection.execute(
            sa.sql.text(
                """
                SELECT e.name
                FROM general.some_table e
                WHERE e.id = 1234
                """
            )
        )
        return some_table.first()
def test_get_token_data(client, headers_with_token, db_injection, some_function):
    token = some_function.name
    # API is using the same db connection
    response = client.get(f"/api/v2/{token}", headers=headers_with_token)
    assert response.status_code == 200

def test_create(client, img, b_64, headers_with_token, db_injection):
    items = Items(
        user_id=1234,
        template_id=67,
        b_64=b_64,
    )
    data = {
        "json": json.dumps(asdict(items)),
        "file": ("some_name", img, "multipart/form-data"),
    }
    response = client.post(
        "/api/v2/create",
        data=data,
        follow_redirects=True,
        content_type="multipart/form-data",
        headers=headers_with_token,
    )
    assert response.status_code == 200
The issue was due to the application using an unbound Session. Since most of our application uses SQLAlchemy's Engine API, we decided to refactor to raw SQL via with engine.begin(). An alternative solution could have been binding the engine to the session.
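A minimal sketch of that alternative, assuming the same engine the tests create; once the session factory is bound, every Session() draws its connection from that engine instead of whatever implicit binding was in place:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine(DB_URL)       # DB_URL stands in for the app's config value
Session = sessionmaker(bind=engine)  # bound factory: Session() uses this engine

# or, to rebind an existing factory after the fact:
# Session.configure(bind=engine)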

SQL (sqlalchemy) commit hangs but no concurrent processes accessing same session

I have a program whose main thread creates a child thread. The child thread has a while loop, in which it first creates an entry in my db and then queries that entry and writes updates back.
I am using sqlalchemy as such:
Base = declarative_base()
engine = create_engine(connection_params)
try:
Base.metadata.reflect(engine)
except OperationalError as ex:
engine.dispose()
raise ex
inspector = reflection.Inspector.from_engine(engine)
Session = sessionmaker(bind=engine)
I also have a class inheriting from Base, which implements the following methods:
@classmethod
def create(cls, **kwargs):
    with closing(Session()) as session:
        inst = cls(**kwargs)
        session.add(inst)
        session.commit()
        ret_val = inst.id
    return ret_val
@classmethod
def update(cls, entry_id, **kwargs):
    with closing(Session()) as session:
        entry = session.query(cls).filter_by(id=entry_id).first()
        # validate and update entry code...
        for k, v in kwargs.iteritems():
            setattr(entry, k, v)
        session.commit()
Several users may be running the program at the same time, but each is using a unique entry_id (the entry_id is always unique, since each entry is created via its own inst = cls(**kwargs)). As I said, there is only one thread per user performing these operations, so a session's transaction is always flushed.
The problem:
Somehow session.commit() started hanging (never returning from the call), blocked on an RLock inside the operation. How is it possible that a lock was not released? How can I prevent this?
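A hedged note, not from the original post: a commit that never returns is often waiting on a database-side lock held by another open transaction, or on a connection-pool checkout. Two defensive measures, assuming the same Session factory as above, are a pool timeout so checkout fails fast and a scope helper that guarantees rollback and close even when a thread dies mid-operation:

from contextlib import contextmanager

engine = create_engine(
    connection_params,
    pool_timeout=30,     # fail fast instead of blocking forever on checkout
    pool_pre_ping=True,  # detect dead connections before handing them out
)
Session = sessionmaker(bind=engine)

@contextmanager
def session_scope():
    # Guarantees rollback on error and close in all cases, so no
    # transaction (and no lock) is left open by a failed thread.
    session = Session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()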
