Pytest missing 1 required positional argument with fixture - python

I'm using VS Code as my IDE.
I've written a very simple use of a pytest fixture, but it doesn't work, even though the basic fixture examples from the pytest documentation work fine:
@pytest.fixture
def declare_hexidict():
    hd = hexidict()
    rvc = ReferenceValueCluster()
    rv = ReferenceValue(init=3)
    hd_var = (hd, rvc, rv)
    return hd_var

def setitem_getitem(declare_hexidict):
    print('start')
    # hd = hexidict()
    # rvc = ReferenceValueCluster()
    # rv = ReferenceValue(init=3)
    hd, rvc, rv = declare_hexidict
    print('datastruct defined')
    hd[rvc("key1").reflink] = rv[0].reflink
    hd[rvc["key1"]] == rv[0]
    assert rvc["key1"] in hd.keys(), "key :{} is not in this hexidict".format(
        rvc("key1")
    )
    assert hd[rvc["key1"]] == rv[0], "key :{} return {} instead of {}".format(
        rvc["key1"], hd[rvc["key1"]], rv[0]
    )
    # set a non-value item (here we set a list)
    hd[rvc("key2").reflink] = [rv[1].reflink]
    hd[rvc["key2"]]
    assert type(hd[rvc["key2"]]) == list
    # check that the item in the list is indeed the one coming from rv
    assert hd[rvc["key2"]][0] in rv
I get this in the test summary info:
ERROR test/process/hexidict/test_hd_basic_function.py - TypeError: setitem_getitem() missing 1 required positional argument: 'declare_hexidict'
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

pytest does not recognize setitem_getitem as a test, so you should rename it to test_setitem_getitem and try again:
def test_setitem_getitem(declare_hexidict):

The problem is that your test is not detected by Pytest's test discovery.
Depending on how you execute your tests (whether you provide a full path to a single test file, a path with subdirectories containing multiple test files, or want to execute all tests matching a specific mark across the entire project), you will want to make sure all test modules, classes and functions are discovered properly. By default, test files need to match test_*.py or *_test.py, classes Test*, and functions test*.
https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery
Test discovery can also be configured to match your needs in pytest.ini.
Example pytest.ini:
[pytest]
python_files = *_pytest.py
python_functions = mytest_*
python_classes = *Tests
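
For illustration, here is a minimal, self-contained sketch of a module that the default discovery rules will pick up; the fixture name and the tuple values are placeholders, not the question's actual types:

# test_example.py -- matches the default test_*.py file pattern
import pytest

@pytest.fixture
def declare_values():
    # placeholder fixture returning a tuple, mirroring the question's shape
    return (1, 2, 3)

def test_setitem_getitem(declare_values):
    # the test_ prefix is what makes pytest collect this function
    a, b, c = declare_values
    assert (a, b, c) == (1, 2, 3)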


How to execute custom Splittable DoFn in parallel

I am trying to develop a custom I/O connector for Apache Beam, written in Python. According to the official guideline, Splittable DoFn (SDF) is the framework of choice in my case.
I tried to run the pseudocode in the SDF programming guide; however, I failed to execute the pipeline in parallel. Below is a working example.
Dummy data
myfile = open('test_beam.txt', 'w')
for i in range(0, 1000):
    myfile.write("%s\n" % i)
myfile.close()
Pipeline
Make sure to replace DUMMY_FILE with the absolute path of test_beam.txt.
import argparse
import logging
import os

import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from time import sleep
import random
from apache_beam.io.restriction_trackers import OffsetRange

DUMMY_FILE = absolute_path_to_dummy_data_file


class FileToWordsRestrictionProvider(beam.transforms.core.RestrictionProvider):
    def initial_restriction(self, file_name):
        return OffsetRange(0, os.stat(file_name).st_size)

    def create_tracker(self, restriction):
        return beam.io.restriction_trackers.OffsetRestrictionTracker(
            offset_range=self.initial_restriction(file_name=DUMMY_FILE))

    def restriction_size(self, element, restriction):
        return restriction.size()


class FileToWordsFn(beam.DoFn):
    def process(
            self,
            file_name,
            # Alternatively, we can let FileToWordsFn itself inherit from
            # RestrictionProvider, implement the required methods and let
            # tracker=beam.DoFn.RestrictionParam() which will use self as
            # the provider.
            tracker=beam.DoFn.RestrictionParam(FileToWordsRestrictionProvider())):
        with open(file_name) as file_handle:
            file_handle.seek(tracker.current_restriction().start)
            while tracker.try_claim(file_handle.tell()):
                yield read_next_record(file_handle=file_handle)


def read_next_record(file_handle):
    line_number = file_handle.readline()
    logging.info(line_number)
    sleep(random.randint(1, 5))
    logging.info(f'iam done {line_number}')


def run(args, pipeline_args, file_name):
    pipeline_options = PipelineOptions(pipeline_args)
    with beam.Pipeline(options=pipeline_options) as p:
        execute_pipeline(args, p, file_name)


def execute_pipeline(args, p, file_name):
    _ = (
        p |
        'Create' >> beam.Create([file_name]) |
        'Read File' >> beam.ParDo(FileToWordsFn(file_name=file_name))
    )


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    """Build and run the pipeline."""
    parser = argparse.ArgumentParser()
    # to be added later
    args, pipeline_args = parser.parse_known_args()
    file_name = DUMMY_FILE
    run(args, pipeline_args, file_name)
The SDF is taken from the first example here; however, I had to fix a few things (e.g., define restriction_size and fix a minor misplacement of ()). Furthermore, I introduced a random sleep in read_next_record to check whether the pipeline is executed in parallel (which it apparently is not).
There is probably a mistake in the way I constructed the pipeline? I would expect to use my SDF as the very first step in the pipeline, but doing so results in AttributeError: 'PBegin' object has no attribute 'windowing'. To circumvent this issue, I followed this post and created a PCollection containing the input file_name.
What is the correct way to execute an SDF within a pipeline in parallel?
Beam DoFns (including SplittableDoFns) operate on an input PCollection. For SplittableDoFn, the input is usually a PCollection of source configs (for example, input files). When executing a SplittableDoFn the Beam runner is able to parallelize the execution of even a single input element by isolating parts of the input read using the RestrictionTracker. So for a file, this would mean that you might have workers running in parallel that read data from the same file but at different offsets.
So your implementation seems correct and should already facilitate parallel execution for a Beam runner.
Apache Beam's Splittable DoFns let you provide a custom configuration for runner-initiated splits. In my case I had to process a big file whose content had no separators and sat on a single line, and Dataflow didn't scale. I used beam.transforms.core.RestrictionProvider with the split function, where I specified the number of parts in which to read the file; with this configuration, when I ran the job, Dataflow used several workers and the processing time dropped a lot.
class FileToLinesRestrictionProvider(beam.transforms.core.RestrictionProvider):
    def initial_restriction(self, file_name):
        return OffsetRange(0, size_file)  # 6996999736 #43493
        # return OffsetRange(0, os.stat(file_name).st_size)

    def create_tracker(self, restriction):
        # return beam.io.restriction_trackers.OffsetRestrictionTracker(
        #     offset_range=self.initial_restriction(file_name=rutaFile_Test))
        return beam.io.restriction_trackers.OffsetRestrictionTracker(restriction)

    def split(self, file_name, restriction):
        # Configuration to read the file in parts
        bundle_ranges = calcular_segmentos_lectura(tamFila, tam_segmentos, size_file)
        for start, stop in bundle_ranges:
            yield OffsetRange(start, stop)

    def restriction_size(self, element, restriction):
        # print(restriction.size())
        return restriction.size()


class FileToLinesFn(beam.DoFn):
    def process(
            self,
            file_name,
            # Alternatively, we can let FileToLinesFn itself inherit from
            # RestrictionProvider, implement the required methods and let
            # tracker=beam.DoFn.RestrictionParam() which will use self as
            # the provider.
            tracker=beam.DoFn.RestrictionParam(FileToLinesRestrictionProvider())):
        with FileSystems.open(file_name) as file_handle:
            file_handle.seek(tracker.current_restriction().start)
            print(tracker.current_restriction())
            while tracker.try_claim(file_handle.tell()):
                # print(file_handle.tell())
                yield file_handle.read(tamFila)


def calcular_segmentos_lectura(
        size_line,
        tam_segmentos,
        tam_file):
    """Based on the file size and line size, divides the file into parts
    according to the input parameters.
    Returns an array with the character offsets to process at each step.
    """
    num_lineas = int(tam_file / size_line)
    valor_segmento = int(num_lineas / tam_segmentos)
    valor_segmento = valor_segmento * size_line
    print(valor_segmento)
    segmentos_ranges = []
    valorAnterior = 0
    for i in range(tam_segmentos):
        start = valorAnterior
        stop_position = (valorAnterior + (valor_segmento))
        valorAnterior = stop_position
        if (i + 1) == tam_segmentos:
            stop_position = tam_file
        segmentos_ranges.append((start, stop_position))
    return segmentos_ranges
This example helped me a lot: url

Mocked values mismatch with MagicMock

I am working on a Python project where we read parquet files from Azure Data Lake and perform the required operations. We have defined a common parquet file reader function which we use to read such files. I am using MagicMock to mock objects and write test cases. When I ran the test cases, some tests were failing because they held mocked values from other test cases. I didn't understand this behavior completely. Below is my code.
utils/common.py
from typing import Dict, Tuple

import pyarrow.parquet as pq

def parquet_reader(parquet_path: str) -> Tuple[Dict, int]:
    table = pq.read_table(parquet_path)
    return table.to_pydict(), table.num_rows
-------Test01-------------
PARQUET_DATA = {
    "se_vl": ["530"],
    "l_depart": ["028"],
    "r_code": ["f5"],
    "r_dir": ["W"],
}
NUM_RECORDS = 1
TEST_CAF_DATA = (PARQUET_DATA, NUM_RECORDS)

def test_read_from_source():
    common.parquet_reader = MagicMock(return_value=TEST_CAF_DATA)
    obj = next(read_from_source('path to file'))
    assert obj.se_vl == "530"
-------Test02-------------
from utils import common
SAMPLE_DATA_PATH = "src/schedule/sample_data"
def test_parquet_reader():
    rows, _ = common.parquet_reader(SAMPLE_DATA_PATH)
    assert rows["key_id"][0] == 345689865
    assert rows["com"][0] == "UP"
When I run all the tests (240 in total), the variable rows in Test02 holds the data from Test01 (PARQUET_DATA).
I fixed the issue using patch, but I'm still confused: why does this behaviour occur with MagicMock?
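
For what it's worth, the leak comes from the assignment itself rather than from MagicMock: common.parquet_reader = MagicMock(...) rebinds the module attribute for the rest of the interpreter session, and nothing restores the real function afterwards, so any later test that calls common.parquet_reader sees the leftover mock. patch, by contrast, records the original attribute and puts it back when the patch is undone, which is why switching to patch fixed it. A minimal sketch of the difference (module path reused from the question, test names illustrative):

from unittest.mock import MagicMock, patch

from utils import common

def test_leaky():
    # Plain assignment: the module attribute stays replaced after this
    # test finishes, so later tests silently see the mock.
    common.parquet_reader = MagicMock(return_value=({"se_vl": ["530"]}, 1))

def test_isolated():
    # patch() swaps the attribute in and restores the original on exit,
    # so the mock cannot leak into other tests.
    with patch("utils.common.parquet_reader",
               return_value=({"se_vl": ["530"]}, 1)) as mock_reader:
        rows, num = common.parquet_reader("some/path")
        assert num == 1
        mock_reader.assert_called_once_with("some/path")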

How to add custom sections to terminal report in pytest

In pytest, when a test case is failed, you have in the report the following categories:
Failure details
Captured stdout call
Captured stderr call
Captured log call
I would like to add some additional custom sections (I have a server that runs in parallel, and I would like to display the information logged by this server in a dedicated section).
How could I do that (if ever possible)?
Thanks
NOTE:
I have found the following in the pytest source code, but I don't know whether that is the right approach:
nodes.py
class Item(Node):
    ...
    def add_report_section(self, when, key, content):
        """
        Adds a new report section, similar to what's done internally
        to add stdout and stderr captured output::
        ...
        """
reports.py
class BaseReport:
    ...
    @property
    def caplog(self):
        """Return captured log lines, if log capturing is enabled

        .. versionadded:: 3.5
        """
        return "\n".join(
            content for (prefix, content) in self.get_sections("Captured log")
        )
To add custom sections to terminal output, you need to append to the report.sections list. This can be done in a pytest_report_teststatus hookimpl directly, or in other hooks indirectly (via a hookwrapper); the actual implementation heavily depends on your particular use case. Example:
# conftest.py
import os
import random

import pytest

def pytest_report_teststatus(report, config):
    messages = (
        'Egg and bacon',
        'Egg, sausage and bacon',
        'Egg and Spam',
        'Egg, bacon and Spam'
    )
    if report.when == 'teardown':
        line = f'{report.nodeid} says:\t"{random.choice(messages)}"'
        report.sections.append(('My custom section', line))

def pytest_terminal_summary(terminalreporter, exitstatus, config):
    reports = terminalreporter.getreports('')
    content = os.linesep.join(
        text for report in reports for secname, text in report.sections)
    if content:
        terminalreporter.ensure_newline()
        terminalreporter.section('My custom section', sep='-', blue=True, bold=True)
        terminalreporter.line(content)
Example tests:
def test_spam():
    assert True

def test_eggs():
    assert True

def test_bacon():
    assert False
When running the tests, you should see the My custom section header at the bottom, colored blue and containing a message for every test:
collected 3 items
test_spam.py::test_spam PASSED
test_spam.py::test_eggs PASSED
test_spam.py::test_bacon FAILED
============================================= FAILURES =============================================
____________________________________________ test_bacon ____________________________________________
    def test_bacon():
>       assert False
E       assert False
test_spam.py:9: AssertionError
---------------------------------------- My custom section -----------------------------------------
test_spam.py::test_spam says: "Egg, bacon and Spam"
test_spam.py::test_eggs says: "Egg and Spam"
test_spam.py::test_bacon says: "Egg, sausage and bacon"
================================ 1 failed, 2 passed in 0.07 seconds ================================
The other answer shows how to add a custom section to the terminal report summary, but it's not the best way for adding a custom section per test.
For this goal, you can (and should) use the higher-level API add_report_section of an Item node (docs). A minimalist example is shown below, modify it to suit your needs. You can pass state from the test instance through an item node, if necessary.
In test_something.py, here is one passing test and two failing tests:
def test_good():
    assert 2 + 2 == 4

def test_bad():
    assert 2 + 2 == 5

def test_ugly():
    errorerror
In conftest.py, set up a hook wrapper:
import pytest

content = iter(["first", "second", "third"])

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    outcome = yield
    item.add_report_section("call", "custom", next(content))
The report will now display custom sections per-test:
$ pytest
============================== test session starts ===============================
platform linux -- Python 3.9.0, pytest-6.2.2, py-1.10.0, pluggy-0.13.1
rootdir: /tmp/example
collected 3 items
test_something.py .FF [100%]
==================================== FAILURES ====================================
____________________________________ test_bad ____________________________________
    def test_bad():
>       assert 2 + 2 == 5
E       assert (2 + 2) == 5
test_something.py:5: AssertionError
------------------------------ Captured custom call ------------------------------
second
___________________________________ test_ugly ____________________________________
    def test_ugly():
>       errorerror
E       NameError: name 'errorerror' is not defined
test_something.py:8: NameError
------------------------------ Captured custom call ------------------------------
third
============================ short test summary info =============================
FAILED test_something.py::test_bad - assert (2 + 2) == 5
FAILED test_something.py::test_ugly - NameError: name 'errorerror' is not defined
========================== 2 failed, 1 passed in 0.02s ===========================
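
If the section content needs to vary per test rather than come from a fixed iterator, one possibility (a sketch, not an officially documented pattern; the _custom_section attribute name is made up) is to attach the text to the test's item node via the built-in request fixture and read it back in the hook wrapper:

# conftest.py
import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    outcome = yield
    text = getattr(item, "_custom_section", None)  # hypothetical attribute
    if text is not None:
        item.add_report_section("call", "custom", text)

# test_something.py
def test_bad(request):
    # request.node is this test's Item; stash the text on it for the hook
    request.node._custom_section = "server log excerpt for this test"
    assert 2 + 2 == 5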

assert pytest command has been run

I have a Django app route that will run a pytest.main() command if some conditions are met:
def run_single_test(request, single_test_name):
    # get dict of test names, test paths
    test_dict = get_single_test_names()
    # check to see if test is in the dict
    if single_test_name in test_dict:
        for test_name, test_path in test_dict.items():
            # if testname is valid run associated test
            if test_name == single_test_name:
                os.chdir('/lib/tests/')
                run_test = pytest.main(['-v', '--json-report', test_path])
    else:
        return 'The requested test could not be found.'
I would like to include a unit test that validates run_test has been executed.
What is the best approach to doing this? Mock and unittest are new to me.
I tried messing around with stdout:
def test_run_single_test_flow_control(self):
    mock_get = patch('test_automation_app.views.get_single_test_names')
    mock_get = mock_get.start()
    mock_get.return_value = {'test_search': 'folder/test_file.py::TestClass::test'}
    results = run_single_test('this-request', 'test_search')
    output = sys.stdout
    self.assertEqual(output, '-v --json-report folder/test_file.py::TestClass::test')
but this returns:
<_pytest.capture.EncodedFile object at XXXXXXXXXXXXXX>
Here are two example tests that verify that pytest.main is invoked when a valid test name is passed and not invoked otherwise. I also added some different invocations of mock_pytest_main.assert_called as an example; they all do pretty much the same, with extra check for args that were passed on function call. Hope this helps you to write more complex tests!
from unittest.mock import patch

from test_automation_app.views import run_single_test

def test_pytest_invoked_when_test_name_valid():
    with patch('pytest.main') as mock_pytest_main, patch('test_automation_app.views.get_single_test_names') as mock_get:
        mock_get.return_value = {'test_search': 'folder/test_file.py::TestClass::test'}
        results = run_single_test('this-request', 'test_search')
        mock_pytest_main.assert_called()
        mock_pytest_main.assert_called_with(['-v', '--json-report', 'folder/test_file.py::TestClass::test'])
        mock_pytest_main.assert_called_once()
        mock_pytest_main.assert_called_once_with(['-v', '--json-report', 'folder/test_file.py::TestClass::test'])

def test_pytest_not_invoked_when_test_name_invalid():
    with patch('pytest.main') as mock_pytest_main, patch('test_automation_app.views.get_single_test_names') as mock_get:
        mock_get.return_value = {'test_search': 'folder/test_file.py::TestClass::test'}
        results = run_single_test('this-request', 'test_non_existent')
        mock_pytest_main.assert_not_called()

Debugging OOo UNO-Python

I'm trying to read and parse a CSV file in LibreOffice Calc. I need to show text in order to debug my logic, and the first thing I found was this. Annoyingly, it duplicates functionality that's built into OOo Basic. The first implementation tries to use a non-existent function; the second one works if I invoke it directly (using TestMessageBox from the Tools menu), but when I include it from my pythonpath directory I get an error:
com.sun.star.uno.RuntimeException
Error during invoking function main in module file:///C:/path/to/test.py
(<class 'AttributeError'>: 'module' object has no attribute 'MessageBox'
  C:\path\to\test.py:34 in function main() [msgbox.MessageBox(parentwin, message, 'Title')]
  C:\Program Files (x86)\LibreOffice 5\program\pythonscript.py:870 in function invoke() [ret = self.func( *args )]
)
Why is there no attribute MessageBox?
I'm invoking it like this:
import msgbox

def main():
    doc = XSCRIPTCONTEXT.getDocument()
    parentwin = doc.CurrentController.Frame.ContainerWindow
    message = "Message"
    msgbox.MessageBox(parentwin, message, 'Title')
    return
And here's pythonpath/msgbox.py:
import uno
from com.sun.star.awt.MessageBoxButtons import BUTTONS_OK, BUTTONS_OK_CANCEL, BUTTONS_YES_NO, BUTTONS_YES_NO_CANCEL, BUTTONS_RETRY_CANCEL, BUTTONS_ABORT_IGNORE_RETRY
from com.sun.star.awt.MessageBoxButtons import DEFAULT_BUTTON_OK, DEFAULT_BUTTON_CANCEL, DEFAULT_BUTTON_RETRY, DEFAULT_BUTTON_YES, DEFAULT_BUTTON_NO, DEFAULT_BUTTON_IGNORE
from com.sun.star.awt.MessageBoxType import MESSAGEBOX, INFOBOX, WARNINGBOX, ERRORBOX, QUERYBOX

def TestMessageBox():
    doc = XSCRIPTCONTEXT.getDocument()
    parentwin = doc.CurrentController.Frame.ContainerWindow
    s = "This a message"
    t = "Title of the box"
    res = MessageBox(parentwin, s, t, QUERYBOX, BUTTONS_YES_NO_CANCEL + DEFAULT_BUTTON_NO)
    s = res
    MessageBox(parentwin, s, t, "infobox")

# Show a message box with the UNO based toolkit
def MessageBox(ParentWin, MsgText, MsgTitle, MsgType=MESSAGEBOX, MsgButtons=BUTTONS_OK):
    ctx = uno.getComponentContext()
    sm = ctx.ServiceManager
    sv = sm.createInstanceWithContext("com.sun.star.awt.Toolkit", ctx)
    myBox = sv.createMessageBox(ParentWin, MsgType, MsgButtons, MsgTitle, MsgText)
    return myBox.execute()

g_exportedScripts = TestMessageBox,
The package name msgbox is already used in UNO. See msgbox.MsgBox. Choose a different name for your module instead, such as mymsgbox.py. Even better, move it to a package (subdirectory) inside pythonpath, such as mystuff.msgbox.MessageBox.
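A minimal sketch of the fix, assuming you simply rename pythonpath/msgbox.py to pythonpath/mymsgbox.py and keep the MessageBox function from the question unchanged:

import mymsgbox  # renamed copy of the msgbox.py module above

def main():
    doc = XSCRIPTCONTEXT.getDocument()
    parentwin = doc.CurrentController.Frame.ContainerWindow
    # same call as before, just through the non-colliding module name
    mymsgbox.MessageBox(parentwin, "Message", 'Title')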
As a matter of fact, I tried msgbox.MsgBox just now and it seemed like it could be useful:
import msgbox

def main():
    message = "Message"
    myBox = msgbox.MsgBox(XSCRIPTCONTEXT.getComponentContext())
    myBox.addButton("oK")
    myBox.renderFromButtonSize()
    myBox.numberOflines = 2
    myBox.show(message, 0, "Title")
