pytest parameterized test with custom id function - python

I have a parametrized test which takes a str and a dict as arguments, so the test names look pretty weird if I let pytest generate the ids.
I want to generate custom ids using a function, but it doesn't seem to work as intended.
def id_func(param):
    if isinstance(param, str):
        return param

@pytest.mark.parametrize(argnames=('date', 'category_value'),
                         argvalues=[("2017.01", {"bills": "0,10", "shopping": "100,90", "Summe": "101,00"}),
                                    ("2017.02", {"bills": "20,00", "shopping": "10,00", "Summe": "30,00"})],
                         ids=id_func)
def test_demo(date, category_value):
    pass
I was expecting it to return something like this:
test_file.py::test_demo[2017.01] PASSED
test_file.py::test_demo[2017.02] PASSED
but it's returning this.
test_file.py::test_demo[2017.01-category_value0] PASSED
test_file.py::test_demo[2017.02-category_value1] PASSED
Could someone tell me what's wrong with this, or is there any way to achieve this?
Update:
I realized what the issue is: id_func is called once for each parameter, and if it doesn't return a str for a parameter, the default id generation is used for that parameter. I have a fix, but it's also ugly.
def id_func(param):
    if isinstance(param, str):
        return param
    return " "
Now it returns something like this,
test_file.py::test_demo[2017.01- ] PASSED
test_file.py::test_demo[2017.02- ] PASSED
The problem is that even if I return an empty string (i.e. return ""), it uses the default representation. Could someone let me know why?

One way is to move your argvalues to another variable and write your test like this:
import pytest

my_args = [
    ("2017.01", {"bills": "0,10", "shopping": "100,90", "Summe": "101,00"}),
    ("2017.02", {"bills": "20,00", "shopping": "10,00", "Summe": "30,00"})
]

@pytest.mark.parametrize(
    argnames=('date', 'category_value'), argvalues=my_args,
    ids=[i[0] for i in my_args]
)
def test_demo(date, category_value):
    pass
Test execution:
$ pytest -v tests.py
================= test session starts =================
platform linux2 -- Python 2.7.12, pytest-3.2.1, py-1.4.34, pluggy-0.4.0 -- /home/kris/.virtualenvs/2/bin/python2
cachedir: .cache
rootdir: /home/kris/projects/tmp, inifile:
collected 2 items
tests.py::test_demo[2017.01] PASSED
tests.py::test_demo[2017.02] PASSED
============== 2 passed in 0.00 seconds ===============
I think it's not possible with a function (id_func in your case), because if it doesn't generate a label for an object, the default pytest representation is used.
Check the pytest site for details.
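If you're on pytest 3.1 or newer, another option that gives exactly the ids you asked for is to attach an id to each parameter set with pytest.param, skipping the id function entirely. A minimal sketch based on your data:
import pytest

@pytest.mark.parametrize(
    ('date', 'category_value'),
    [
        pytest.param("2017.01", {"bills": "0,10", "shopping": "100,90", "Summe": "101,00"}, id="2017.01"),
        pytest.param("2017.02", {"bills": "20,00", "shopping": "10,00", "Summe": "30,00"}, id="2017.02"),
    ],
)
def test_demo(date, category_value):
    pass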

Usually, when I want to be specific about the test case being executed in the params, I use named tuples as a workaround for the id function being executed once per param; that way I get a cleaner test description.
import pytest
from collections import namedtuple

TCase = namedtuple("TCase", "x,y,expected,description")

test_cases = [
    TCase(10, 10, 20, "10 + 10 should be 20"),
    TCase(1, 1, 2, "1 + 1 should be 2"),
]

def idfn(tc: TCase):
    return tc.description

@pytest.mark.parametrize("tc", test_cases, ids=idfn)
def test_sum(tc):
    assert tc.x + tc.y == tc.expected
Output:
example.py::test_sum[10 + 10 should be 20] PASSED
example.py::test_sum[1 + 1 should be 2] PASSED
So I would write your example as:
from collections import namedtuple

import pytest

TCase = namedtuple("TCase", "date,data")

my_args = [
    TCase("2017.01", {"bills": "0,10", "shopping": "100,90", "Summe": "101,00"}),
    TCase("2017.02", {"bills": "20,00", "shopping": "10,00", "Summe": "30,00"}),
]

@pytest.mark.parametrize("tc", my_args, ids=lambda tc: tc.date)
def test_demo(tc):
    # Do something in here with tc.date and tc.data
    pass
Output:
migration.py::test_demo[2017.01] PASSED
migration.py::test_demo[2017.02] PASSED
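If you prefer typed attribute access, the same pattern works with a dataclass (Python 3.7+); this is just a sketch with illustrative names:
from dataclasses import dataclass

import pytest

@dataclass
class Case:
    date: str
    data: dict

cases = [
    Case("2017.01", {"bills": "0,10", "shopping": "100,90", "Summe": "101,00"}),
    Case("2017.02", {"bills": "20,00", "shopping": "10,00", "Summe": "30,00"}),
]

@pytest.mark.parametrize("tc", cases, ids=lambda tc: tc.date)
def test_demo(tc):
    pass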

Alternatively, you can use a list comprehension to generate your ids, as follows:
import pytest

values = [
    ("2017.01", {"bills": "0,10", "shopping": "100,90", "Summe": "101,00"}),
    ("2017.02", {"bills": "20,00", "shopping": "10,00", "Summe": "30,00"})
]
value_ids = [e[0] for e in values]

@pytest.mark.parametrize('date,category_value', values, ids=value_ids)
def test_demo(date, category_value):
    pass
Assuming these tests are in test_file.py at the root of your directory, if you run pytest test_file.py --co -q, you will get the following output:
test_file.py::test_demo[2017.01]
test_file.py::test_demo[2017.02]


Pytest missing 1 required positional argument with fixture

I'm using VS Code as my IDE.
I have written a very simple usage of a pytest fixture, but it isn't working, even though the basic fixture examples found in the pytest documentation work well:
@pytest.fixture
def declare_hexidict():
    hd = hexidict()
    rvc = ReferenceValueCluster()
    rv = ReferenceValue(init=3)
    hd_var = (hd, rvc, rv)
    return hd_var

def setitem_getitem(declare_hexidict):
    print('start')
    # hd = hexidict()
    # rvc = ReferenceValueCluster()
    # rv = ReferenceValue(init=3)
    hd, rvc, rv = declare_hexidict
    print('datastruct defined')
    hd[rvc("key1").reflink] = rv[0].reflink
    hd[rvc["key1"]] == rv[0]
    assert rvc["key1"] in hd.keys(), "key :{} is not int this hexidict".format(
        rvc("key1")
    )
    assert hd[rvc["key1"]] == rv[0], "key :{} return {} instead of {}".format(
        rvc["key1"], hd[rvc["key1"]], rv[0]
    )
    # set a non-value item (here we set a list)
    hd[rvc("key2").reflink] = [rv[1].reflink]
    hd[rvc["key2"]]
    assert type(hd[rvc["key2"]]) == list
    # check that the item in the list is indeed the one coming from rv
    assert hd[rvc["key2"]][0] in rv
In the test summary info I get:
ERROR test/process/hexidict/test_hd_basic_function.py - TypeError: setitem_getitem() missing 1 required positional argument: 'declare_hexidict'
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
pytest does not recognize setitem_getitem as a test, so you should rename it to test_setitem_getitem and try again:
def test_setitem_getitem(declare_hexidict):
The problem is that your test is not detected by Pytest's test discovery.
Depending on how you execute your tests (whether you provide the full path to your test file, a path with subdirectories and multiple test files, or want to execute all tests matching a specific mark in the entire project), you will want to make sure all test modules, classes and functions are discovered properly. By default, test files need to match test_*.py or *_test.py, classes need to match Test*, and functions need to match test*.
https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
Test discovery can also be configured to match your needs in pytest.ini.
Example pytest.ini:
[pytest]
python_files = *_pytest.py
python_functions = mytest_*
python_classes = *Tests

How to add custom sections to terminal report in pytest

In pytest, when a test case is failed, you have in the report the following categories:
Failure details
Captured stdout call
Captured stderr call
Captured log call
I would like to add some additional custom sections (I have a server that runs in parallel and I would like to display the information logged by this server in a dedicated section).
How could I do that (if ever possible)?
Thanks
NOTE:
I have found the following in the source code, but I don't know whether that is the right approach:
nodes.py
class Item(Node):
    ...
    def add_report_section(self, when, key, content):
        """
        Adds a new report section, similar to what's done internally
        to add stdout and stderr captured output::
        ...
        """
reports.py
class BaseReport:
    ...
    @property
    def caplog(self):
        """Return captured log lines, if log capturing is enabled

        .. versionadded:: 3.5
        """
        return "\n".join(
            content for (prefix, content) in self.get_sections("Captured log")
        )
To add custom sections to terminal output, you need to append to report.sections list. This can be done in pytest_report_teststatus hookimpl directly, or in other hooks indirectly (via a hookwrapper); the actual implementation heavily depends on your particular use case. Example:
# conftest.py
import os
import random
import pytest

def pytest_report_teststatus(report, config):
    messages = (
        'Egg and bacon',
        'Egg, sausage and bacon',
        'Egg and Spam',
        'Egg, bacon and Spam'
    )
    if report.when == 'teardown':
        line = f'{report.nodeid} says:\t"{random.choice(messages)}"'
        report.sections.append(('My custom section', line))

def pytest_terminal_summary(terminalreporter, exitstatus, config):
    reports = terminalreporter.getreports('')
    content = os.linesep.join(text for report in reports for secname, text in report.sections)
    if content:
        terminalreporter.ensure_newline()
        terminalreporter.section('My custom section', sep='-', blue=True, bold=True)
        terminalreporter.line(content)
Example tests:
def test_spam():
    assert True

def test_eggs():
    assert True

def test_bacon():
    assert False
When running the tests, you should see My custom section header at the bottom colored blue and containing a message for every test:
collected 3 items
test_spam.py::test_spam PASSED
test_spam.py::test_eggs PASSED
test_spam.py::test_bacon FAILED
============================================= FAILURES =============================================
____________________________________________ test_bacon ____________________________________________
def test_bacon():
> assert False
E assert False
test_spam.py:9: AssertionError
---------------------------------------- My custom section -----------------------------------------
test_spam.py::test_spam says: "Egg, bacon and Spam"
test_spam.py::test_eggs says: "Egg and Spam"
test_spam.py::test_bacon says: "Egg, sausage and bacon"
================================ 1 failed, 2 passed in 0.07 seconds ================================
The other answer shows how to add a custom section to the terminal report summary, but it's not the best way to add a custom section per test.
For this goal, you can (and should) use the higher-level API add_report_section of an Item node (docs). A minimalist example is shown below; modify it to suit your needs. You can pass state from the test instance through an item node if necessary (see the sketch at the end of this answer).
In test_something.py, here is one passing test and two failing:
def test_good():
    assert 2 + 2 == 4

def test_bad():
    assert 2 + 2 == 5

def test_ugly():
    errorerror
In conftest.py, set up a hook wrapper:
import pytest

content = iter(["first", "second", "third"])

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    outcome = yield
    item.add_report_section("call", "custom", next(content))
The report will now display custom sections per-test:
$ pytest
============================== test session starts ===============================
platform linux -- Python 3.9.0, pytest-6.2.2, py-1.10.0, pluggy-0.13.1
rootdir: /tmp/example
collected 3 items
test_something.py .FF [100%]
==================================== FAILURES ====================================
____________________________________ test_bad ____________________________________
def test_bad():
> assert 2 + 2 == 5
E assert (2 + 2) == 5
test_something.py:5: AssertionError
------------------------------ Captured custom call ------------------------------
second
___________________________________ test_ugly ____________________________________
def test_ugly():
> errorerror
E NameError: name 'errorerror' is not defined
test_something.py:8: NameError
------------------------------ Captured custom call ------------------------------
third
============================ short test summary info =============================
FAILED test_something.py::test_bad - assert (2 + 2) == 5
FAILED test_something.py::test_ugly - NameError: name 'errorerror' is not defined
========================== 2 failed, 1 passed in 0.02s ===========================
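As for passing state from the test instance through the item node: a minimal sketch (the fixture name and attribute are made up for illustration) is to have a fixture attach a list to request.node before the test runs, let the test fill it, and have the hook wrapper turn it into a section after the call phase:
# conftest.py
import pytest

@pytest.fixture
def section_lines(request):
    lines = []
    # attach the list to the Item so the hook wrapper below can read it
    request.node._custom_section_lines = lines
    return lines

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    yield
    lines = getattr(item, "_custom_section_lines", None)
    if lines:
        item.add_report_section("call", "custom", "\n".join(lines))
A test then just appends to the fixture, e.g. section_lines.append("something worth reporting").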

Optional job parameter in AWS Glue?

How can I implement an optional parameter to an AWS Glue Job?
I have created a job that currently has a string parameter (an ISO 8601 date string) as an input that is used in the ETL job. I would like to make this parameter optional, so that the job uses a default value if it is not provided (e.g. using datetime.now and datetime.isoformat in my case). I have tried using getResolvedOptions:
import sys
from awsglue.utils import getResolvedOptions
args = getResolvedOptions(sys.argv, ['ISO_8601_STRING'])
However, when I am not passing an --ISO_8601_STRING job parameter I see the following error:
awsglue.utils.GlueArgumentError: argument --ISO_8601_STRING is required
matsev's and Yuriy's solutions are fine if you have only one optional field.
I wrote a wrapper function for Python that is more generic and handles different corner cases (mandatory fields and/or optional fields with values).
import sys
from awsglue.utils import getResolvedOptions

def get_glue_args(mandatory_fields, default_optional_args):
    """
    This is a wrapper of the Glue function getResolvedOptions to take care of the following cases:
    * Handling optional arguments and/or mandatory arguments
    * Optional arguments with default values
    NOTE:
    * DO NOT USE '-' while defining args, as getResolvedOptions will replace them with '_'
    * All fields are returned as strings by getResolvedOptions
    Arguments:
        mandatory_fields {list} -- list of mandatory fields for the job
        default_optional_args {dict} -- dict of optional fields with their default values
    Returns:
        dict -- given args, with default values for the optional args that were not provided
    """
    # The Glue args are available in sys.argv with an extra '--'
    given_optional_fields_key = list(set([i[2:] for i in sys.argv]).intersection([i for i in default_optional_args]))
    args = getResolvedOptions(sys.argv,
                              mandatory_fields + given_optional_fields_key)
    # Overwrite the default values if optional args are provided
    default_optional_args.update(args)
    return default_optional_args
Usage:
# Define mandatory/optional args
mandatory_fields = ['my_mandatory_field_1', 'my_mandatory_field_2']
default_optional_args = {'optional_field_1': 'myvalue1', 'optional_field_2': 'myvalue2'}

# Retrieve args
args = get_glue_args(mandatory_fields, default_optional_args)

# Access elements as a dict with args['key']
Porting Yuriy's answer to Python solved my problem:
if '--{}'.format('ISO_8601_STRING') in sys.argv:
    args = getResolvedOptions(sys.argv, ['ISO_8601_STRING'])
else:
    args = {'ISO_8601_STRING': datetime.datetime.now().isoformat()}
There is a workaround to have optional parameters. The idea is to examine arguments before resolving them (Scala):
val argName = "ISO_8601_STRING"
var argValue: String = null
if (sysArgs.contains(s"--$argName"))
  argValue = GlueArgParser.getResolvedOptions(sysArgs, Array(argName))(argName)
I don't see a way to have optional parameters, but you can specify default parameters on the job itself, and then if you don't pass that parameter when you run the job, your job will receive the default value (note that the default value can't be blank).
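For completeness, here is a sketch (not taken from the answer above) of setting such a job-level default programmatically with boto3 when creating the job; the job name, role, and script location are placeholders:
import boto3

glue = boto3.client('glue')
glue.create_job(
    Name='my-etl-job',                # placeholder job name
    Role='MyGlueServiceRole',         # placeholder IAM role
    Command={'Name': 'glueetl', 'ScriptLocation': 's3://my-bucket/scripts/my_etl.py'},
    # Keys must include the leading '--'; the script reads them via getResolvedOptions.
    DefaultArguments={'--ISO_8601_STRING': '1970-01-01T00:00:00'},
)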
Wrapping matsev's answer in a function:
def get_glue_env_var(key, default="none"):
    if f'--{key}' in sys.argv:
        return getResolvedOptions(sys.argv, [key])[key]
    else:
        return default
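For example, applied to the question's parameter, falling back to the current timestamp:
import datetime

iso_8601_string = get_glue_env_var('ISO_8601_STRING', datetime.datetime.now().isoformat())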
It's possible to create a Step Function that starts the same Glue job with different parameters. The state machine starts with a Choice state and passes a different set of inputs depending on which are present.
stepFunctions:
  stateMachines:
    taskMachine:
      role:
        Fn::GetAtt: [ TaskExecutor, Arn ]
      name: ${self:service}-${opt:stage}
      definition:
        StartAt: DefaultOrNot
        States:
          DefaultOrNot:
            Type: Choice
            Choices:
              - Variable: "$.optional_input"
                IsPresent: false
                Next: DefaultTask
              - Variable: "$.optional_input"
                IsPresent: true
                Next: OptionalTask
          OptionalTask:
            Type: Task
            Resource: "arn:aws:states:::glue:startJobRun.sync"
            Parameters:
              JobName: ${self:service}-${opt:stage}
              Arguments:
                '--log_group.$': "$.specs.log_group"
                '--log_stream.$': "$.specs.log_stream"
                '--optional_input.$': "$.optional_input"
            Catch:
              - ErrorEquals: [ 'States.TaskFailed' ]
                ResultPath: "$.errorInfo"
                Next: TaskFailed
            Next: ExitExecution
          DefaultTask:
            Type: Task
            Resource: "arn:aws:states:::glue:startJobRun.sync"
            Parameters:
              JobName: ${self:service}-${opt:stage}
              Arguments:
                '--log_group.$': "$.specs.log_group"
                '--log_stream.$': "$.specs.log_stream"
            Catch:
              - ErrorEquals: [ 'States.TaskFailed' ]
                ResultPath: "$.errorInfo"
                Next: TaskFailed
            Next: ExitExecution
          TaskFailed:
            Type: Fail
            Error: "Failure"
          ExitExecution:
            Type: Pass
            End: True
If you're using the console interface, you must provide your parameter names starting with "--", like "--TABLE_NAME" rather than "TABLE_NAME"; you can then use them as in the following (Python) code:
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'TABLE_NAME'])
table_name = args['TABLE_NAME']

assert pytest command has been run

I have a Django app route that will run a pytest.main() command if some conditions are met:
def run_single_test(request, single_test_name):
    # get dict of test names, test paths
    test_dict = get_single_test_names()
    # check to see if test is in the dict
    if single_test_name in test_dict:
        for test_name, test_path in test_dict.items():
            # if testname is valid run associated test
            if test_name == single_test_name:
                os.chdir('/lib/tests/')
                run_test = pytest.main(['-v', '--json-report', test_path])
    else:
        return 'The requested test could not be found.'
I would like to include a unit test that validates run_test has been executed.
What is the best approach to doing this? Mock and unittest are new to me.
I tried messing around with stdout:
def test_run_single_test_flow_control(self):
    mock_get = patch('test_automation_app.views.get_single_test_names')
    mock_get = mock_get.start()
    mock_get.return_value = {'test_search': 'folder/test_file.py::TestClass::test'}
    results = run_single_test('this-request', 'test_search')
    output = sys.stdout
    self.assertEqual(output, '-v --json-report folder/test_file.py::TestClass::test')
but this returns:
<_pytest.capture.EncodedFile object at XXXXXXXXXXXXXX>
Here are two example tests that verify that pytest.main is invoked when a valid test name is passed and not invoked otherwise. I also added some different invocations of mock_pytest_main.assert_called as an example; they all do pretty much the same thing, with extra checks for the args that were passed in the call. Hope this helps you write more complex tests!
from unittest.mock import patch
from test_automation_app.views import run_single_test


def test_pytest_invoked_when_test_name_valid():
    with patch('pytest.main') as mock_pytest_main, patch('test_automation_app.views.get_single_test_names') as mock_get:
        mock_get.return_value = {'test_search': 'folder/test_file.py::TestClass::test'}
        results = run_single_test('this-request', 'test_search')
        mock_pytest_main.assert_called()
        mock_pytest_main.assert_called_with(['-v', '--json-report', 'folder/test_file.py::TestClass::test'])
        mock_pytest_main.assert_called_once()
        mock_pytest_main.assert_called_once_with(['-v', '--json-report', 'folder/test_file.py::TestClass::test'])


def test_pytest_not_invoked_when_test_name_invalid():
    with patch('pytest.main') as mock_pytest_main, patch('test_automation_app.views.get_single_test_names') as mock_get:
        mock_get.return_value = {'test_search': 'folder/test_file.py::TestClass::test'}
        results = run_single_test('this-request', 'test_non_existent')
        mock_pytest_main.assert_not_called()
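If you happen to use the pytest-mock plugin (an assumption, it is not required by the tests above), the same check can be written with its mocker fixture instead of the context managers:
from test_automation_app.views import run_single_test


def test_pytest_invoked_when_test_name_valid(mocker):
    mock_pytest_main = mocker.patch('pytest.main')
    mocker.patch(
        'test_automation_app.views.get_single_test_names',
        return_value={'test_search': 'folder/test_file.py::TestClass::test'},
    )
    run_single_test('this-request', 'test_search')
    mock_pytest_main.assert_called_once_with(
        ['-v', '--json-report', 'folder/test_file.py::TestClass::test'])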

Python3 argparse

I have been struggling with this for a few days now and still don't have a good solution. Instead of providing code this time, which with this problem has lately been leading to unhelpful tangents, let me just give you an idea of exactly what I am trying to accomplish; perhaps this will streamline the solution.
All I am trying to do is run a Python program while passing in a few variables to control what the program does. Allow me to give a specific example.
Example Syntax Structure
program_name function_to_run variable_1 variable_2 variable_n
Generic Syntax Example
parrot add "Mr Fluffy" "Red" "15oz"
Another Example
datamine search "Chris"
So to expand on these examples: the first program, "parrot", has an add function. When the program is run and the add function is used from the command line, the program expects three variables (name, color, weight). In the second example, the program named "datamine" has a function named "search" that expects a single string (the search term). The idea is that a program (datamine, for example) will have several functions that could be used; perhaps "add", "search", and "delete" are all examples, and each will have different expected variables. Running datamine help would list each function and its required and/or optional components.
Using argparse, I have not been able to figure out a working implementation of this yet. From past experience, I think the solution will involve using custom actions. Can anyone please help with some example code? I am using Python 3, by the way.
Thanks for the help!
Use subparsers. The docs give a good example of how to use set_defaults to specify the function that should be called for each subparser:
One particularly effective way of handling sub-commands is to combine the use of the add_subparsers() method with calls to set_defaults() so that each subparser knows which Python function it should execute.
In your examples, parrot and datamine would be separate parsers in separate modules, and add and search would be subparsers under them respectively. For example, the datamine module would look something like this:
#!/usr/bin/env python
# datamine
import argparse


def add(a, b):
    print(a + b)


def search(query, search_all=True):
    run_my_search_app(query, search_all=search_all)


if __name__ == '__main__':
    # create the top-level parser
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    # create the parser for the "add" command
    parser_add = subparsers.add_parser('add')
    parser_add.add_argument('-a', type=int, default=1)
    parser_add.add_argument('-b', type=int, default=2)
    parser_add.set_defaults(func=add)
    # create the parser for the "search" command
    parser_search = subparsers.add_parser('search')
    parser_search.add_argument('query')
    parser_search.add_argument('--search-all', action='store_true')
    parser_search.set_defaults(func=search)
    args = parser.parse_args()
    args = vars(args)
    func = args.pop("func")
    func(**args)
If this file is executable in your shell as datamine, you can do:
datamine add -a 11 -b 5
datamine search foo --search-all
Without optional flags you don't need anything fancy - just look at sys.argv directly:
import sys

def my_add(*args):
    print(','.join(args))

def my_search(*args):
    print(args)

fn_map = {"add": my_add, "search": my_search}

if sys.argv[1:]:
    fn = fn_map[sys.argv[1]]
    rest = sys.argv[2:]
    fn(*rest)
sample runs
1951:~/mypy$ python stack43990444.py
1951:~/mypy$ python stack43990444.py add "Mr Fluffy" "Red" "15oz"
Mr Fluffy,Red,15oz
1951:~/mypy$ python stack43990444.py search "Chris"
('Chris',)
A fully functional extrapolation of your parrot example using subparsers. The data set (created by this code) and usage examples are at the bottom. Beware: the example set does not consist strictly of parrots.
#!/usr/bin/env python3
import argparse
import json


def add_parrot(name, weight, kind, **kwargs):
    print("Adding {} of type {} and size {}".format(name, kind, weight))
    with open('parrots.json', 'r') as parrotdb:
        parrots = json.load(parrotdb)
    parrots.append({'name': name, 'weight': weight, 'type': kind})
    with open('parrots.json', 'w') as parrotdb:
        json.dump(parrots, parrotdb)


def delete_parrot(name, **kwargs):
    print("Uh oh! What happened to {}?".format(name))
    with open('parrots.json', 'r') as parrotdb:
        parrots = json.load(parrotdb)
    parrots[:] = [p for p in parrots if p.get('name') != name]
    with open('parrots.json', 'w') as parrotdb:
        json.dump(parrots, parrotdb)


def show_parrots(name=None, weight=0, kind=None, **kwargs):
    with open('parrots.json', 'r') as parrotdb:
        parrots = json.load(parrotdb)
    for p in parrots:
        if (name or weight or kind):
            if name in p['name'] or weight == p['weight'] or kind == p['type']:
                print("{}\t{}\t{}".format(
                    p['name'], p['weight'], p['type']))
        else:
            print("{}\t{}\t{}".format(p['name'], p['weight'], p['type']))


parser = argparse.ArgumentParser(description="Manage Parrots")
subparsers = parser.add_subparsers()

add_parser = subparsers.add_parser('insert', aliases=['add', 'a'])
add_parser.add_argument('name')
add_parser.add_argument('weight', type=int)
add_parser.add_argument('kind')
add_parser.set_defaults(func=add_parrot)

del_parser = subparsers.add_parser("delete", aliases=['del', 'd'])
del_parser.add_argument('name')
del_parser.set_defaults(func=delete_parrot)

ls_parser = subparsers.add_parser('list', aliases=['show', 'ls'])
ls_parser.add_argument('--name')
ls_parser.add_argument('--size', type=int)
ls_parser.add_argument('--type', dest='kind')
ls_parser.set_defaults(func=show_parrots)

args = parser.parse_args()
args.func(**vars(args))
Dataset and usage examples:
➜ ~ cat parrots.json
[{"name": "tweety", "weight": 4, "type": "yellow"}, {"name": "donald", "weight": 18, "type": "white"}, {"name": "daffy", "weight": 12, "type": "black"}]
➜ ~ ./parrot.py ls
tweety 4 yellow
donald 18 white
daffy 12 black
➜ ~ ./parrot.py ls --name tweety
tweety 4 yellow
➜ ~ ./parrot.py delete tweety
Uh oh! What happened to tweety?
➜ ~ ./parrot.py ls --name tweety
➜ ~
