I am trying out SQLAlchemy with pytest, and I am running into the following issue:
@pytest.fixture(scope='function')
def my_session(my_db, request):
    from my.models import Session, Base

    Base.metadata.bind = my_db
    Base.metadata.create_all()

    def teardown():
        Base.metadata.drop_all()
        Base.metadata.create_all()

    request.addfinalizer(teardown)

    Session.configure(bind=my_db)
    return Session()
But for some reason, the data that was stored in the database during previous tests is still there. I was expecting it to vanish after the .drop_all() call :(
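For reference, here is a sketch of the same fixture with the engine passed explicitly to create_all/drop_all. The logic is unchanged; it just makes explicit which engine the DDL runs against (my_db, Session, and Base are the objects from the question):

from my.models import Session, Base


@pytest.fixture(scope='function')
def my_session(my_db, request):
    # Bind the DDL explicitly to the engine under test.
    Base.metadata.create_all(bind=my_db)

    def teardown():
        Base.metadata.drop_all(bind=my_db)
        Base.metadata.create_all(bind=my_db)

    request.addfinalizer(teardown)

    Session.configure(bind=my_db)
    return Session()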
Related
I am working on testing my Flask app's models. I'm using MySQL 5.7, SQLAlchemy, and pytest.
Within my models, I have a CRUD mixin that I use to manage creating, updating, and deleting. Whenever I try to access the object in the mixin before returning it to the test function, SQLAlchemy hangs at db.drop_all in my teardown. When I look at PROCESSLIST in MySQL, it shows one sleeping query and one query waiting for a table metadata lock.
I can fix this by calling db.session.commit in the create method of the mixin before returning the object. However, if I call it at test teardown (or in the main test function), it doesn't work. I'd prefer not to add an extra commit just to make my tests work, as it doesn't feel correct. Does anyone know why this is happening, or have any suggested fixes?
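As a diagnostic aid (not a fix), a query against information_schema.innodb_trx can show which transaction is still open and holding the metadata lock while drop_all hangs. This is only a sketch, assuming MySQL 5.7 with InnoDB and that TestConfig holds the test database URI:

from sqlalchemy import create_engine, text

# Run this from a separate shell/session while the teardown is hanging.
# TestConfig.SQLALCHEMY_DATABASE_URI is assumed to point at the MySQL test database.
diag_engine = create_engine(TestConfig.SQLALCHEMY_DATABASE_URI)
with diag_engine.connect() as conn:
    rows = conn.execute(text(
        "SELECT trx_mysql_thread_id, trx_state, trx_started, trx_query "
        "FROM information_schema.innodb_trx"))
    for row in rows:
        print(row)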
models.py
class CRUDMixin(object):
    @classmethod
    def create(cls, **kwargs):
        instance = cls(**kwargs)
        saved_instance = instance.save()
        # do stuff with saved_instance (i.e. add to full text search engine)
        # db.drop_all in teardown works if I add db.session.commit() here
        return saved_instance

    def save(self, commit=True):
        db.session.add(self)
        if commit:
            try:
                db.session.commit()
            except Exception:
                db.session.rollback()
                raise
        return self


class User(CRUDMixin, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
conftest.py
@pytest.fixture(scope='session')
def app():
    app = create_app(TestConfig)
    ctx = app.app_context()
    ctx.push()
    yield app
    ctx.pop()


@pytest.fixture(scope='session')
def test_db(app):
    db.drop_all()
    db.create_all()
    # add test db information
    yield db
    db.session.remove()
    db.drop_all()  # test hangs within drop_all


@pytest.fixture(scope='function')
def db_session(test_db):
    connection = db.engine.connect()
    transaction = connection.begin()
    options = dict(bind=connection, binds={})
    session = db.create_scoped_session(options)
    db.session = session
    yield db
    db.session.remove()  # tables won't drop if I put db.session.commit before the remove call
    transaction.rollback()
    connection.close()  # even though the connection closes, mysql still shows the process
test_models.py
class TestUser(object):
    def test_add_new(self, db_session):
        u = User.create(name='test_name')
        assert u.name == 'test_name'
        # if I put db.session.commit() here, tables won't drop
I want to write some py.test code to test two simple SQLAlchemy ORM classes that were created based on this tutorial. The problem is: how do I point py.test at a test database and roll back all changes when the tests are done? Is it possible to mock the database and run tests without actually connecting to the database?
Here is the code for my classes:
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker, relationship

eng = create_engine('mssql+pymssql://user:pass@host/my_database')

Base = declarative_base(eng)
Session = sessionmaker(eng)
intern_session = Session()


class Author(Base):
    __tablename__ = "Authors"

    AuthorId = Column(Integer, primary_key=True)
    Name = Column(String)
    Books = relationship("Book")

    def add_book(self, title):
        b = Book(Title=title, AuthorId=self.AuthorId)
        intern_session.add(b)
        intern_session.commit()


class Book(Base):
    __tablename__ = "Books"

    BookId = Column(Integer, primary_key=True)
    Title = Column(String)
    AuthorId = Column(Integer, ForeignKey("Authors.AuthorId"))
    Author = relationship("Author")
I usually do it this way:
I do not instantiate the engine and session with the model declarations; instead, I only declare a Base with no bind:
Base = declarative_base()
and I only create a session when needed with
engine = create_engine('<the db url>')
db_session = sessionmaker(bind=engine)
You can do the same by not using the intern_session in your add_book method, but rather a session parameter:
def add_book(self, session, title):
    b = Book(Title=title, AuthorId=self.AuthorId)
    session.add(b)
    session.commit()
It makes your code more testable since you can now pass the session of your choice when you call the method.
And you are no longer stuck with a session bound to a hardcoded database URL.
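For instance, a test could build a throwaway in-memory SQLite session and pass it in. This is only a sketch: it assumes Base, Author, and Book are importable from your models module and that Base is declared without a bind, as suggested above.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from myapp.models import Base, Author, Book  # hypothetical module path


def test_add_book():
    # Throwaway in-memory database just for this test.
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    author = Author(Name='Jane Doe')
    session.add(author)
    session.commit()

    # Pass the session of our choice to the tested method.
    author.add_book(session, 'A Test Title')

    assert session.query(Book).filter_by(Title='A Test Title').count() == 1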
I add a custom --dburl option to pytest using its pytest_addoption hook.
Simply add this to your top-level conftest.py:
def pytest_addoption(parser):
    parser.addoption('--dburl',
                     action='store',
                     default='<if needed, whatever you want>',
                     help='url of the database to use for tests')
Now you can run pytest --dburl <url of the test database>
Then I can retrieve the dburl option from the request fixture
From a custom fixture:
@pytest.fixture()
def db_url(request):
    return request.config.getoption("--dburl")

# ...
Inside a test:
def test_something(request):
    db_url = request.config.getoption("--dburl")
    # ...
At this point you are able to:
- get the test db_url in any test or fixture
- use it to create an engine
- create a session bound to the engine
- pass the session to a tested method
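Put together, a single test that does all of this by hand might look roughly like the sketch below (names are illustrative; Author comes from the question's models and db_url is the fixture shown above):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from myapp.models import Author  # hypothetical module path


def test_add_book_by_hand(db_url):
    engine = create_engine(db_url)           # build an engine from the test db url
    session = sessionmaker(bind=engine)()    # create a session bound to that engine
    try:
        author = Author(Name='Throwaway Author')
        session.add(author)
        session.commit()
        author.add_book(session, 'A Title')  # pass the session to the tested method
    finally:
        session.close()
        engine.dispose()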
It is quite a mess to do this in every test, so you can make good use of pytest fixtures to ease the process.
Below are some fixtures I use:
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker


@pytest.fixture(scope='session')
def db_engine(request):
    """Yields a SQLAlchemy engine which is disposed of after the test session."""
    db_url = request.config.getoption("--dburl")
    engine_ = create_engine(db_url, echo=True)

    yield engine_

    engine_.dispose()


@pytest.fixture(scope='session')
def db_session_factory(db_engine):
    """Returns a SQLAlchemy scoped session factory."""
    return scoped_session(sessionmaker(bind=db_engine))


@pytest.fixture(scope='function')
def db_session(db_session_factory):
    """Yields a SQLAlchemy session which is rolled back after the test."""
    session_ = db_session_factory()

    yield session_

    session_.rollback()
    session_.close()
Using the db_session fixture, you get a fresh and clean db_session for each single test.
When the test ends, the db_session is rolled back, keeping the database clean.
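A test then just takes the db_session fixture as an argument, e.g. as in this sketch (it reuses the Author model from the question; note that the final rollback only undoes work the test has not committed itself):

def test_author_roundtrip(db_session):
    # Anything added here is rolled back by the db_session fixture afterwards,
    # as long as the test itself does not commit.
    author = Author(Name='Fixture Author')
    db_session.add(author)
    db_session.flush()  # assigns AuthorId without committing

    assert db_session.query(Author).filter_by(Name='Fixture Author').count() == 1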
I'm setting up unit testing for a Flask project using SQLAlchemy as the ORM. For my tests, I need to set up a new test database every time I run a single unit test. Somehow, I cannot seem to run consecutive tests that query the database, even though they succeed if I run them in isolation.
I use the flask-testing package, and follow their documentation here.
Here is a working example to illustrate the problem:
app.py:
from flask import Flask


def create_app():
    app = Flask(__name__)
    return app


if __name__ == '__main__':
    app = create_app()
    app.run(port=8080)
database.py:
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
models.py:
from database import db


class TestModel(db.Model):
    """Model for testing."""

    __tablename__ = 'test_models'

    id = db.Column(db.Integer,
                   primary_key=True
                   )
test/__init__.py:
from flask_testing import TestCase

from app import create_app
from database import db


class BaseTestCase(TestCase):

    def create_app(self):
        app = create_app()
        app.config.update({
            'SQLALCHEMY_DATABASE_URI': 'sqlite:///:memory:',
            'SQLALCHEMY_TRACK_MODIFICATIONS': False,
            'TESTING': True
        })
        db.init_app(app)
        return app

    def setUp(self):
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
test/test_app.py:
from models import TestModel
from test import BaseTestCase
from database import db

test_model = TestModel()


class TestApp(BaseTestCase):
    """WebpageEnricherController integration test stubs"""

    def _add_to_db(self, record):
        db.session.add(record)
        db.session.commit()
        self.assertTrue(record in db.session)

    def test_first(self):
        """
        This test runs perfectly fine
        """
        self._add_to_db(test_model)
        result = db.session.query(TestModel).first()
        self.assertIsNotNone(result, 'Nothing in the database')

    def test_second(self):
        """
        This test runs fine in isolation, but fails if run consecutively
        after the first test
        """
        self._add_to_db(test_model)
        result = db.session.query(TestModel).first()
        self.assertIsNotNone(result, 'Nothing in the database')


if __name__ == '__main__':
    import unittest
    unittest.main()
So, I can run TestApp.test_first and TestApp.test_second fine if run in isolation. If I run them consecutively, the first test passes, but the second test fails with:
=================================== FAILURES ===================================
_____________________________ TestApp.test_second ______________________________

self = <test.test_app.TestApp testMethod=test_second>

    def test_second(self):
        """
        This test runs fine in isolation, but fails if run consecutively
        after the first test
        """
        self._add_to_db(test_model)
        result = db.session.query(TestModel).first()
>       self.assertIsNotNone(result, 'Nothing in the database')
E       AssertionError: unexpectedly None : Nothing in the database
Something is going wrong in the database setup and teardown, but I cannot figure out what. How do I set this up correctly?
The answer is that you are leaking state between one test and the next by reusing a single TestModel instance defined once in the module scope (test_model = TestModel()).
The state of that instance at the commencement of the first test is transient:
an instance that’s not in a session, and is not saved to the database;
i.e. it has no database identity. The only relationship such an object
has to the ORM is that its class has a mapper() associated with it.
The state of the object at commencement of the second test is detached:
Detached - an instance which corresponds, or previously corresponded,
to a record in the database, but is not currently in any session. The
detached object will contain a database identity marker, however
because it is not associated with a session, it is unknown whether or
not this database identity actually exists in a target database.
Detached objects are safe to use normally, except that they have no
ability to load unloaded attributes or attributes that were previously
marked as “expired”.
This kind of interdependence between tests is almost always a bad idea. You could use make_transient() on the object at the end of every test:
from sqlalchemy.orm import make_transient


class BaseTestCase(TestCase):
    ...

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        make_transient(test_model)
Or you should construct a new TestModel instance for each test:
class BaseTestCase(TestCase):
    ...

    def setUp(self):
        db.create_all()
        self.test_model = TestModel()


class TestApp(BaseTestCase):
    ...

    def test_xxxxx(self):
        self._add_to_db(self.test_model)
I think the latter is the better choice as there is no danger of any other leaky state getting carried between tests.
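Putting the second option together, the test module would then look something like this sketch (each test gets a fresh instance from setUp, so neither test depends on the other):

from models import TestModel
from test import BaseTestCase
from database import db


class TestApp(BaseTestCase):

    def _add_to_db(self, record):
        db.session.add(record)
        db.session.commit()
        self.assertTrue(record in db.session)

    def test_first(self):
        self._add_to_db(self.test_model)  # created fresh in BaseTestCase.setUp
        self.assertIsNotNone(db.session.query(TestModel).first())

    def test_second(self):
        self._add_to_db(self.test_model)  # a different instance than in test_first
        self.assertIsNotNone(db.session.query(TestModel).first())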
When running tests in pytest, the database is modified. What is the best way to undo changes to the database?
DBSession rollback
For those tests where I can access the backend directly, I currently use a pytest fixture to start a new DBSession for every test function and roll back the session at the end of it:
@pytest.fixture(scope='session')
def db(app, request):
    """Session-wide test database."""
    def teardown():
        pass

    _db = SQLAlchemy(app)
    return _db


@pytest.fixture(scope='function')
def db_session(db, request):
    """Creates a new database session for a test."""
    engine = create_engine(
        TestConfig.SQLALCHEMY_DATABASE_URI,
        connect_args={"options": "-c timezone=utc"})

    DbSession = sessionmaker(bind=engine)
    session = DbSession()

    connection = engine.connect()
    transaction = connection.begin()

    options = dict(bind=connection, binds={})
    session = db.create_scoped_session(options=options)

    db.session = session

    yield session

    transaction.rollback()
    connection.close()
    session.remove()
In the test code, I simply use the fixture
def test_create_project(db_session):
    project = _create_test_project(db_session)
    assert project.project_id > 0
Flask / HTTP Testing
But for testing the API via Flask/HTTP, I cannot use db_session. Even when I create a fixture to explicitly drop the test database and restore it from production, it will not work, because the test contains no direct database code:
@pytest.fixture(scope='function')
def db_session_refresh(db, request):
    """Refresh the test database from production after running the test."""
    engine = create_engine(
        TestConfig.SQLALCHEMY_DATABASE_URI,
        connect_args={"options": "-c timezone=utc"})

    DbSession = sessionmaker(bind=engine)
    session = DbSession()

    connection = engine.connect()
    transaction = connection.begin()

    options = dict(bind=connection, binds={})
    session = db.create_scoped_session(options=options)

    db.session = session

    yield session

    transaction.rollback()
    connection.close()
    session.remove()

    refresh_test_db_sql = """
        SELECT pg_terminate_backend(pid)
        FROM pg_stat_activity
        WHERE datname = 'appdb_test';

        DROP DATABASE appdb_test;
        CREATE DATABASE appdb_test TEMPLATE appdb;
    """
    engine.execute(refresh_test_db_sql)
Even if this works, it is inefficient to refresh the database for every function.
What is the proper/better way to run test that modifies the database?
As commented earlier, the creation and destruction of the DB should be taken out of your unit tests and moved into a wrapper.
But to answer your question: I have the impression that you're using PostgreSQL. Try removing the database name from your connection string so that you connect to the server directly, and add it to the search_path.
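For the wrapper part, a session-scoped fixture that creates and drops the whole test database from a maintenance connection could look roughly like the sketch below. The connection URL and credentials are placeholders; the appdb_test/appdb names and the pg_terminate_backend step come from the question.

import pytest
from sqlalchemy import create_engine, text


@pytest.fixture(scope='session')
def test_database():
    # Maintenance connection to the server itself (no test db name in the URL).
    # AUTOCOMMIT is needed because CREATE/DROP DATABASE cannot run inside a transaction.
    admin_engine = create_engine(
        'postgresql://user:password@localhost/postgres',  # placeholder credentials
        isolation_level='AUTOCOMMIT')

    with admin_engine.connect() as conn:
        conn.execute(text(
            "SELECT pg_terminate_backend(pid) FROM pg_stat_activity "
            "WHERE datname = 'appdb_test'"))
        conn.execute(text("DROP DATABASE IF EXISTS appdb_test"))
        conn.execute(text("CREATE DATABASE appdb_test TEMPLATE appdb"))

    yield 'postgresql://user:password@localhost/appdb_test'

    with admin_engine.connect() as conn:
        conn.execute(text(
            "SELECT pg_terminate_backend(pid) FROM pg_stat_activity "
            "WHERE datname = 'appdb_test'"))
        conn.execute(text("DROP DATABASE IF EXISTS appdb_test"))
    admin_engine.dispose()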
I am trying to convert a unittest test case to py.test. I am using this unittest example:
class TestCase(unittest.TestCase):

    def setUp(self):
        app.config['TESTING'] = True
        app.config['CSRF_ENABLED'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'test.db')
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
I am not sure what its py.test version should be.
I searched high and low for a well-explained solution to use SQLAlchemy without Flask-SQLAlchemy and run tests with pytest, so here's how I have achieved this:
Set up your engine and Session objects as per the docs. (I have opted for sessionmaker, as I want to check in my app whether the session is still available in Flask's request thread pool; see: https://dev.to/nestedsoftware/flask-and-sqlalchemy-without-the-flask-sqlalchemy-extension-3cf8.)
Import your Base object from wherever you've created it in your app; calling Base.metadata.create_all(engine) will create all the tables in the database defined by the engine.
Now we want to yield a session back to your unit tests. The idea is to set up before the yield and tear down after it. In your tests you can then create tables and populate them with some rows of data, etc.
Then we must close the session; this is important!
Finally, by calling Base.metadata.drop_all(bind=engine) we drop all the tables in the database (we can specify particular tables to drop if required; the default is tables=None, i.e. drop them all).
engine = create_engine(create_db_connection_str(config), echo=True)
Session = scoped_session(sessionmaker(bind=engine))


@pytest.fixture(scope="function")  # or scope="module" (to tear down at a module level)
def db_session():
    Base.metadata.create_all(engine)
    session = Session()
    yield session
    session.close()
    Base.metadata.drop_all(bind=engine)
Now we can pass the function scoped fixture to each unit test:
class TestNotebookManager:
    """
    Using book1.mon for this test suite
    """
    book_name = "book1"

    def test_load(self, client: FlaskClient, db_session) -> None:
        notebook = Notebook(name=self.book_name)
        db_session.add(notebook)
        db_session.commit()

        rv = client.get(f"/api/v1/manager/load?name={self.book_name}")
        assert "200" in rv.status
First off, py.test should just run the existing unittest test case. However, the native thing to do in py.test is to use a fixture for the setup and teardown:
import pytest


@pytest.fixture
def some_db(request):
    app.config['TESTING'] = True
    app.config['CSRF_ENABLED'] = False
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'test.db')
    db.create_all()

    def fin():
        db.session.remove()
        db.drop_all()

    request.addfinalizer(fin)


def test_foo(some_db):
    pass
Note that I have no idea about SQLAlchemy and whether there are better ways of handling its setup and teardown. All this example demonstrates is how to turn the setup/teardown methods into a fixture.
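For what it's worth, the same fixture can also be written in pytest's yield style, which is an alternative to addfinalizer. This is just a sketch using the same app/db/basedir objects assumed above:

import pytest


@pytest.fixture
def some_db():
    # Setup: configure the app for testing and create the schema.
    app.config['TESTING'] = True
    app.config['CSRF_ENABLED'] = False
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'test.db')
    db.create_all()

    yield db  # the test runs here

    # Teardown: drop everything again.
    db.session.remove()
    db.drop_all()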