Django data leak between 2 separate tests - Python

Across my whole test base, I experience weird behaviour with two tests. They are supposed to be completely isolated, yet I can find data from the first test in the second one. Here are the tests:
file1 (services.tests)
class ServiceTestCase(TestCase):
    @patch('categories.models.ArticlesByCategory.objects.has_dish_type')
    def test_build_dishtype_conflicts(self, mock_has_dish_type):
        # WARN: create interference in tests
        restaurant = RestaurantFactory()
        dt_1 = DishTypeFactory(restaurant=restaurant)
        cat_1 = CategoryFactory(restaurant=restaurant)
        art_1 = ArticleFactory(name='fooA1', restaurant=restaurant)
        art_2 = ArticleFactory(name='fooA2', restaurant=restaurant)
        abc_1 = ArticlesByCategory.objects.create(category=cat_1, article=art_1, is_permanent=True,
                                                  dish_type=dt_1)
        abc_2 = ArticlesByCategory.objects.create(category=cat_1, article=art_2, is_permanent=True,
                                                  dish_type=dt_1)
        mock_has_dish_type.return_value = [abc_1, abc_2]
        abcs_to_check = ArticlesByCategory.objects.filter(pk__in=[abc_1.pk, abc_2.pk])
        conflicts = ServiceFactory()._build_dishtype_conflicts(abcs_to_check)
        self.assertDictEqual(conflicts, {dt_1.pk: 2})
file2 (products.tests)
class ArticleQuerySetTestCase(TestCase):
    def test_queryset_usable_for_category(self):
        restaurant = RestaurantFactory()
        category_1 = CategoryFactory(name='fooB1', restaurant=restaurant)
        category_2 = CategoryFactory(name='fooB2', restaurant=restaurant)
        article_1 = ArticleFactory(restaurant=restaurant)
        article_2 = ArticleFactory(restaurant=restaurant)
        ArticlesByCategory.objects.create(article=article_1, category=category_1, is_permanent=True)
        queryset_1 = Article.objects.usable_for_category(category_1)
        # This line is used for debug
        for art in Article.objects.all():
            print(art.name)
When running test_build_dishtype_conflicts THEN test_queryset_usable_for_category in the same command, here are the results of the print in the second test:
fooA1
fooA2
fooB1
fooB2
I suspect I did something wrong but can't find what.

OK, I found the problem in the Django documentation:
If your tests rely on database access such as creating or querying models, be sure to create your test classes as subclasses of django.test.TestCase rather than unittest.TestCase.
Using unittest.TestCase avoids the cost of running each test in a transaction and flushing the database, but if your tests interact with the database their behavior will vary based on the order that the test runner executes them. This can lead to unit tests that pass when run in isolation but fail when run in a suite.
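For illustration, a minimal sketch of the fix, assuming the leaking class had been importing TestCase from unittest: switch both test classes to django.test.TestCase, which wraps each test in a transaction and rolls it back afterwards, so no rows survive between tests.

# Sketch of the fix: import TestCase from django.test, not unittest.
# django.test.TestCase runs each test inside a transaction and rolls it
# back, so data created in one test never leaks into another.
from django.test import TestCase

class ServiceTestCase(TestCase):          # services.tests
    ...

class ArticleQuerySetTestCase(TestCase):  # products.tests
    ...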

Related

Airflow API: testing tasks with parameters

I can't figure out how to use pytest to test a dag task waiting for xcom_arg.
I created the following DAG using the new Airflow API syntax:
@dag(...)
def transfer_files():
    @task()
    def retrieve_existing_files():
        existing = []
        for elem in os.listdir("./backup"):
            existing.append(elem)
        return existing

    @task()
    def get_new_file_to_sync(existing: list[str]):
        new_files = []
        for elem in os.listdir("./prod"):
            if elem not in existing:
                new_files.append(elem)
        return new_files

    r = retrieve_existing_files()
    get_new_file_to_sync(r)
Now I want to perform unit testing on the get_new_file_to_sync task. I wrote the following test:
def test_get_new_elan_list():
    mocked_existing = ["a.out", "b.out"]
    dag_bag = DagBag(include_examples=False)
    dag = dag_bag.get_dag("transfer_files")
    task = dag.get_task("get_new_file_to_sync")
    result = task.execute({}, mocked_existing)
    print(result)
The test fails because task.execute expects 2 parameters but 3 were given.
My issue is that I have no clue how to test tasks that wait for arguments using a mocked custom argument.
Thanks for your insights
I managed to find a way to unit test Airflow tasks declared using the new Airflow API.
Here is a test case for the task get_new_file_to_sync contained in the DAG transfer_files declared in the question:
def test_get_new_file_to_sync():
    mocked_existing = ["a.out", "b.out"]
    # Ask Airflow to load the DAGs in its home folder
    dag_bag = DagBag(include_examples=False)
    # Retrieve the DAG to test
    dag = dag_bag.get_dag("transfer_files")
    # Retrieve the task to test
    task = dag.get_task("get_new_file_to_sync")
    # Extract the function to test from the task
    function_to_unit_test = task.python_callable
    # Call the function normally
    results = function_to_unit_test(mocked_existing)
    assert len(results) == 10
This bypasses all the Airflow mechanics triggered before the code you wrote for your task is actually called, letting you focus on testing that code alone.
For testing such a task, I believe you'll need to use mocking from pytest.
Let's take this user-defined operator as an example:
class MovielensPopularityOperator(BaseOperator):
    def __init__(self, conn_id, start_date, end_date, min_ratings=4, top_n=5, **kwargs):
        super().__init__(**kwargs)
        self._conn_id = conn_id
        self._start_date = start_date
        self._end_date = end_date
        self._min_ratings = min_ratings
        self._top_n = top_n

    def execute(self, context):
        with MovielensHook(self._conn_id) as hook:
            ratings = hook.get_ratings(start_date=self._start_date, end_date=self._end_date)
            rating_sums = defaultdict(Counter)
            for rating in ratings:
                rating_sums[rating["movieId"]].update(count=1, rating=rating["rating"])
            averages = {
                movie_id: (rating_counter["rating"] / rating_counter["count"], rating_counter["count"])
                for movie_id, rating_counter in rating_sums.items()
                if rating_counter["count"] >= self._min_ratings
            }
            return sorted(averages.items(), key=lambda x: x[1], reverse=True)[: self._top_n]
And a test written just like the one you did:
def test_movielenspopularityoperator():
    task = MovielensPopularityOperator(
        task_id="test_id",
        start_date="2015-01-01",
        end_date="2015-01-03",
        top_n=5,
    )
    result = task.execute(context={})
    assert len(result) == 5
Running this test fails with:

=============================== FAILURES ===============================
___________________ test_movielenspopularityoperator ___________________

mocker = <pytest_mock.plugin.MockFixture object at 0x10fb2ea90>

    def test_movielenspopularityoperator(mocker: MockFixture):
>       task = MovielensPopularityOperator(
            task_id="test_id", start_date="2015-01-01", end_date="2015-01-03", top_n=5
        )
E       TypeError: __init__() missing 1 required positional argument: 'conn_id'

tests/dags/chapter9/custom/test_operators.py:30: TypeError
========================== 1 failed in 0.10s ==========================
The test failed because we’re missing the required argument conn_id, which points to the connection ID in the metastore. But how do you provide this in a test? Tests should be isolated from each other; they should not be able to influence the results of other tests, so a database shared between tests is not an ideal situation. In this case, mocking comes to the rescue.
Mocking is “faking” certain operations or objects. For example, the call to a database that is expected to exist in a production setting but not while testing could be faked, or mocked, by telling Python to return a certain value instead of making the actual call to the (nonexistent during testing) database. This allows you to develop and run tests without requiring a connection to external systems. It requires insight into the internals of whatever it is you’re testing, and thus sometimes requires you to dive into third-party code.
After installing pytest-mock in your environment:
pip install pytest-mock
Here is the test rewritten to use mocking:
def test_movielenspopularityoperator(mocker):
    mocker.patch.object(
        MovielensHook,
        "get_connection",
        return_value=Connection(conn_id="test", login="airflow", password="airflow"),
    )
    task = MovielensPopularityOperator(
        task_id="test_id",
        conn_id="test",
        start_date="2015-01-01",
        end_date="2015-01-03",
        top_n=5,
    )
    result = task.execute(context=None)
    assert len(result) == 5
Now, hopefully this will give you an idea about how to write your tests for Airflow Tasks.
For more about mocking and unit tests, you can check here and here.

Where are rows created with pytest-postgresql

I inherited a FastAPI project that uses PostgreSQL and pytest-postgresql to provide pytest fixtures for testing against PostgreSQL. Out of curiosity I placed some breakpoint() statements in several places after row model creation and commits, but before any cleanup. With breakpoint() holding the process, I then looked at the database server to see if I could find the data that was entered with pytest-postgresql. I could find nothing. Where would this data be?
In my conftest.py file, I have the following for pytest-postgresql setup.
from pytest_postgresql import factories
...
postgresql_proc = factories.postgresql_proc(
    host="localhost",
    user="REDACTED",
    port="5432",
    password="REDACTED",
)

pg_fixture = factories.postgresql("postgresql_proc", db_name="REDACTED")

@pytest.fixture(scope="function")
def db_session(pg_fixture):
    """
    A session object to a non persistent db.
    Will clean up the database after each test run, in its cleanup stage
    """
    sqlalchemy_uri = (
        f"postgresql://{pg_fixture.info.user}:{pg_fixture.info.password}@"
        f"{pg_fixture.info.host}:{pg_fixture.info.port}"
        f"/{pg_fixture.info.dbname}"
    )
    engine = get_engine(sqlalchemy_uri)
    models.base.Base.metadata.create_all(engine)  # CREATES VARIOUS MODELS
    Session = sessionmaker(bind=engine)
    yield Session()
    models.base.Base.metadata.drop_all(engine)
The tests work, so I assume it's set up correctly. And by work I mean they pass when they should and fail when they should not. But for the life of me I cannot understand where pytest-postgresql is putting the row data inserted during model creation in test setup.
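As a quick way to see where the rows actually live, you can print the fixture's connection parameters while a test is paused; pytest-postgresql manages its own test database on the configured server and drops it at fixture teardown, so it is easy to look in the wrong database, or to look after it is already gone. A minimal sketch, reusing the db_session and pg_fixture fixtures from the conftest.py above (the test name is hypothetical):

def test_where_are_my_rows(db_session, pg_fixture):
    # pg_fixture.info holds the live connection parameters of the
    # database that pytest-postgresql created for this test.
    info = pg_fixture.info
    print(f"psql -h {info.host} -p {info.port} -U {info.user} {info.dbname}")
    # While paused here, connect from another terminal with the command
    # printed above. Note that rows added through db_session only become
    # visible to other connections once the session commits.
    breakpoint()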

How should I test a method of a mocked object

I have a question about how to mock a nested method and test what it was called with. I'm having a hard time getting my head around: https://docs.python.org/3/library/unittest.mock-examples.html#mocking-chained-calls.
I'd like to test that the "put" method from the fabric library is called by the deploy_file method in this class, and maybe what values are given to it. This is the module that gathers some information from AWS and provides a method to take action on the data.
import json
import os

from aws.secrets_manager import get_secret
from fabric import Connection

class Deploy:
    def __init__(self):
        self.secrets = None
        self.set_secrets()

    def set_secrets(self):
        secrets = get_secret()
        self.secrets = json.loads(secrets)

    def deploy_file(self, source_file):
        with Connection(host=os.environ.get('SSH_USERNAME'), user=os.environ.get("SSH_USERNAME")) as conn:
            destination_path = self.secrets["app_path"] + '/' + os.path.basename(source_file)
            conn.put(source_file, destination_path)
"get_secret" is a method in another module that uses the boto3 library to get the info from AWS.
These are the tests I'm working on:
from unittest.mock import patch

from fabric import Connection

from jobs.deploy import Deploy

def test_set_secrets_dict_from_expected_json_string():
    with patch('jobs.deploy.get_secret') as m_get_secret:
        m_get_secret.return_value = '{"app_path": "/var/www/html"}'
        deployment = Deploy()
        assert deployment.secrets['app_path'] == "/var/www/html"

def test_copy_app_file_calls_fabric_put():
    with patch('jobs.deploy.get_secret') as m_get_secret:
        m_get_secret.return_value = '{"app_path": "/var/www/html"}'
        deployment = Deploy()
        with patch('jobs.deploy.Connection', spec=Connection) as m_conn:
            local_file_path = "/tmp/foo"
            deployment.deploy_file(local_file_path)
            m_conn.put.assert_called_once()
where the second test results in "AssertionError: Expected 'put' to have been called once. Called 0 times."
The first test mocks the "get_secret" function just fine to check that the constructor for "Deploy" sets "Deploy.secrets" from the fake AWS data.
In the second test, get_secret is mocked just as before, and I mock "Connection" from the fabric library. If I don't mock Connection, I get an error related to the "host" parameter when the Connection object is created.
I think that when "conn.put" is called, it's creating a whole new Mock object, and I'm not testing that object when the test runs. I'm just not sure how to define the test to actually check the call to put.
I'm also a novice at understanding what to test (and how), what not to test, and how to use mocks. I'm fully bought in on the idea, though. It's been very helpful for finding bugs and regressions as I work on projects.
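As a starting point, a minimal sketch of how the chained call could be asserted, following the mocking-chained-calls pattern from the linked docs: patching jobs.deploy.Connection replaces the class itself, so Connection(...) returns m_conn.return_value, and the object bound by "as conn" is whatever __enter__ returns on that instance; that inner mock is the one whose put call should be checked. Names follow the question's code:

def test_copy_app_file_calls_fabric_put():
    with patch('jobs.deploy.get_secret') as m_get_secret:
        m_get_secret.return_value = '{"app_path": "/var/www/html"}'
        deployment = Deploy()
        with patch('jobs.deploy.Connection') as m_conn:
            deployment.deploy_file("/tmp/foo")
            # "with Connection(...) as conn:" binds conn to
            # m_conn.return_value.__enter__.return_value
            conn = m_conn.return_value.__enter__.return_value
            conn.put.assert_called_once_with("/tmp/foo", "/var/www/html/foo")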

Django TestCase: recreate database in self.subTest(...)

I need to test a function with different parameters, and the most proper way for this seems to be using the with self.subTest(...) context manager.
However, the function writes something to the db, and it ends up in an inconsistent state. I can delete the things I write, but it would be cleaner if I could recreate the whole db completely. Is there a way to do that?
Not sure how to recreate the database in self.subTest() but I have another technique I am currently using which might be of interest to you. You can use fixtures to create a "snapshot" of your database which will basically be copied in a second database used only for testing purposes. I currently use this method to test code on a big project I'm working on at work.
I'll post some example code to give you an idea of what this will look like in practice, but you might have to do some extra research to tailor the code to your needs (I've added links to guide you).
The process is rather straightforward. You create a copy of your database with only the data needed by using fixtures, which will be stored in a .yaml file and accessed only by your test unit.
Here is what the process would look like:
List the items you want to copy to your test database to populate it using fixtures. This creates a db with only the needed data instead of blindly copying the entire db. It will be stored in a .yaml file.
generate.py
import sys

import django
from django.core.management import call_command

django.setup()

stdout = sys.stdout
conf = [
    {
        'file': 'myfile.yaml',
        'models': [
            dict(model='your.model', pks='your, primary, keys'),
            dict(model='your.model', pks='your, primary, keys'),
        ]
    }
]

for fixture in conf:
    print('Processing: %s' % fixture['file'])
    with open(fixture['file'], 'w') as f:
        # FixtureAnonymiser is a helper from the author's own project
        # that anonymises the dumped data before writing it to the file.
        sys.stdout = FixtureAnonymiser(f)
        for model in fixture['models']:
            call_command('dumpdata', model.pop('model'), format='yaml', indent=4, **model)
            sys.stdout.flush()
    sys.stdout = stdout
In your test unit, import your generated .yaml file as a fixture and your test will automatically use the data from the fixture to carry out the tests, keeping your main database untouched.
test_class.py
from django.test import TestCase

class classTest(TestCase):
    fixtures = ('myfile.yaml',)

    def setUp(self):
        """setup tests cases"""
        # create the object you want to test here, which will use data from the fixtures

    def test_function(self):
        self.assertEqual(True, True)
        # write your test here
You can read up more here:
Django
YAML
If you have any questions because things are unclear just ask, I'd be happy to help you out.
Maybe my solution will help someone.
I used transactions to roll back to the database state that I had at the start of the test.
I use Eric Cousineau's decorator function for parametrizing tests.
More about database transactions is on the Django documentation page.
import functools

from django.contrib.auth import get_user_model
from django.db import transaction
from django.test import TransactionTestCase

User = get_user_model()

def sub_test(param_list):
    """Decorates a test case to run it as a set of subtests."""
    def decorator(f):
        @functools.wraps(f)
        def wrapped(self):
            for param in param_list:
                with self.subTest(**param):
                    f(self, **param)
        return wrapped
    return decorator

class MyTestCase(TransactionTestCase):

    @sub_test([
        dict(email="new@user.com", password='12345678'),
        dict(email="new@user.com", password='password'),
    ])
    def test_passwords(self, email, password):
        # open a transaction
        with transaction.atomic():
            # Create a new savepoint. Returns the savepoint ID (sid).
            sid = transaction.savepoint()
            # create the user and check that there is only one with this email in the DB
            user = User.objects.create(email=email, password=password)
            self.assertEqual(User.objects.filter(email=user.email).count(), 1)
            # Roll back the transaction to savepoint sid.
            transaction.savepoint_rollback(sid)

How to add Members folder in plone.app.testing?

How can I add a Members folder for my functional tests in plone.app.testing so that it is findable as on a real site?
I have set the member area creation flag in my product installation step, which I'm testing.
membership.memberareaCreationFlag = 1
I need to get this test working:
class TestMemberFolder(unittest.TestCase):
    layer = MY_FUNCTIONAL_TESTING

    def setUp(self):
        portal = self.portal = self.layer['portal']

    def test_members_folder(self):
        membership = getToolByName(self.portal, 'portal_membership')
        membership.addMember("basicuser", "secret", ["Member"], [])
        transaction.commit()
        login(self.portal, "basicuser")
        # This works just fine, because it was set by my product
        self.assertEquals(membership.memberareaCreationFlag, 1,
                          "memberareaCreationFlag must be 1 when it is enabled")
        members_folder = membership.getMembersFolder()
        # But this fails
        self.assertIsNotNone(members_folder)
        # Also we should have the user folder here
        self.assertTrue(members_folder.hasObject('basicuser'))
I specifically need Member folder functionality. Just a folder owned by the test user does not cut it.
Also I tried creating new user with acl_users.userFolderAddUser, but that does not help neighter.
The memberareaCreationFlag works just fine in live Plone site.
I finally figured it out.
First, membership.memberareaCreationFlag = 1 alone is not enough to enable member folders.
It must be enabled with the SecurityControlPanelAdapter in plone.app.controlpanel.security:
from plone.app.controlpanel.security import ISecuritySchema

# Fetch the adapter
security_adapter = ISecuritySchema(portal)
security_adapter.set_enable_user_folders(True)
Also, the functional testing fixture does not create the Members folder automatically, but it is possible to install it manually in your fixture class:
class YourPloneFixture(PloneSandboxLayer):
    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        # Required by Products.CMFPlone:plone-content
        z2.installProduct(app, 'Products.PythonScripts')

    def setUpPloneSite(self, portal):
        # Installs all the Plone stuff. Workflows etc.
        self.applyProfile(portal, 'Products.CMFPlone:plone')
        # Install portal content. Including the Members folder!
        self.applyProfile(portal, 'Products.CMFPlone:plone-content')
Finally, Members folders are created upon user login, but the login helper function in plone.app.testing seems to be too low level for this. Logging in with zope.testbrowser seems to do the trick:
browser = Browser(self.layer['app'])
browser.open(self.portal.absolute_url() + '/login_form')
browser.getControl(name='__ac_name').value = TEST_USER_NAME
browser.getControl(name='__ac_password').value = TEST_USER_PASSWORD
browser.getControl(name='submit').click()
Phew.
self.assert_ isn't a testing method; use something like self.assertTrue or self.assertIsNotNone.
To add member folders, just turn on member folder creation and add a new user.
Something like:
def setUpPloneSite(self, portal):
    # Install into Plone site using portal_setup
    quickInstallProduct(portal, 'Products.DataGridField')
    quickInstallProduct(portal, 'Products.ATVocabularyManager')
    quickInstallProduct(portal, 'Products.MasterSelectWidget')
    if HAVE_LP:
        quickInstallProduct(portal, 'Products.LinguaPlone')
    applyProfile(portal, 'vs.org:default')
    portal.acl_users.userFolderAddUser('god', 'dummy', ['Manager'], [])
    setRoles(portal, 'god', ['Manager'])
    login(portal, 'god')
works perfectly for us.
