Issue Mocking Multiple Items on Path Object - python

I have code that looks like the following:
#patch.object(Path, "__init__", return_value=None)
#patch.object(Path, "exists", return_value=True)
#patch.object(Path, "read_text", return_value="FAKEFIELD: 'FAKEVALUE'")
def test_load_param_from_file_exists(self, *mock_path):
expected_dict = YAML.safe_load("FAKEFIELD: 'FAKEVALUE'")
return_dict = load_parameters("input", None)
self.assertTrue(return_dict["FAKEFIELD"] == expected_dict["FAKEFIELD"])
and deep in the code of load_parameters, the code looks like this:
file_path = Path(parameters_file_path)
if file_path.exists():
    file_contents = file_path.read_text()
    return YAML.safe_load(file_contents)
Right now, I have to break it up into two tests, because I cannot seem to get a single mock object that allows me to switch between "file exists" and "file doesn't". Ideally, I'd be able to do a single test like this:
#patch.object(Path, "__init__", return_value=None)
#patch.object(Path, "exists", return_value=False)
#patch.object(Path, "read_text", return_value="FAKEFIELD: 'FAKEVALUE'")
def test_load_param_from_file(self, mock_path, *mock_path_other):
with self.assertRaises(ValueError):
load_parameters("input", False)
mock_path.read_text.return_value = "FAKEFIELD: 'FAKEVALUE'"
expected_dict = YAML.safe_load("FAKEFIELD: 'FAKEVALUE'")
return_dict = load_parameters("input", None)
self.assertTrue(return_dict["FAKEFIELD"] == expected_dict["FAKEFIELD"])
To be clear, the above doesn't work because each of those patched objects gets instantiated differently, and when the Path object in the load_parameters method gets called, exists is mocked correctly, but read_text returns no value.
What am I doing wrong? Is there a way to patch multiple methods on a single object or class?

I think you are making this more complicated than it needs to be:
def test_load_param_from_file_exists(self):
    # Adjust the name as necessary
    mock_path = Mock()
    mock_path.exists.return_value = True
    mock_path.read_text.return_value = '{"FAKEFIELD": "FAKEVALUE"}'
    with patch("Path", return_value=mock_path):
        return_dict = load_parameters("input", None)
    self.assertTrue(return_dict["FAKEFIELD"] == 'FAKEVALUE')
Configure a Mock to behave like you want file_path to behave, then patch Path to return that object when it is called.
(I removed the code involving the environment variable, since it wasn't obvious the value matters when you patch Path.)
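For the single-test version you originally wanted, the same trick works: keep one Mock and flip its exists return value between the two branches. Here is a minimal sketch, assuming load_parameters lives in a hypothetical module named mymodule (adjust the patch target to wherever Path is actually looked up) and raises ValueError when the file is missing:
import unittest
from unittest.mock import Mock, patch

from mymodule import load_parameters  # hypothetical module

class TestLoadParameters(unittest.TestCase):
    def test_load_param_from_file(self):
        mock_path = Mock()
        mock_path.read_text.return_value = "FAKEFIELD: 'FAKEVALUE'"
        with patch("mymodule.Path", return_value=mock_path):
            # Branch 1: pretend the file does not exist.
            mock_path.exists.return_value = False
            with self.assertRaises(ValueError):
                load_parameters("input", False)
            # Branch 2: flip the same mock to "file exists" and load normally.
            mock_path.exists.return_value = True
            return_dict = load_parameters("input", None)
        self.assertEqual(return_dict["FAKEFIELD"], "FAKEVALUE")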

Related

Pytest Fixtures - Parameterisation - Call Fixture Once

I have a fixture that returns the endpoint for a given endpoint name, passed in as a string set in the test. I messed up by calling the endpoint in every parameterised test, and now I can't figure out how to get the same functionality without calling the endpoint each time.
Basically, I just need to call the endpoint once and then share that data between all my tests in that file, ideally without anything like creating a class and calling it in the test. I have about 12 files, each with similar tests, and I want to reduce the boilerplate. Ideally it could be done at the fixture/parametrisation level, with no globals.
Here's what I have so far:
@pytest.mark.parametrize('field', [('beskrivelse'), ('systemId')])
def test_intgra_001_elevforhold_req_fields(return_endpoint, field):
    ep_to_get = 'get_elevforhold'
    ep_returned = return_endpoint(ep_to_get)
    apiv2 = Apiv2()
    apiv2.entity_check(ep_returned, field, ep_to_get, False)

@pytest.fixture()
def return_endpoint():
    def endpoint_initialisation(ep_name):
        apiv2 = Apiv2()
        ep_data = apiv2.get_ep_name(ep_name)
        response = apiv2.get_endpoint_local(ep_data, 200)
        content = json.loads(response.content)
        apiv2.content_filt(content)
        apiv2_data = content['data']
        return apiv2_data
    return endpoint_initialisation
Create return_endpoint as a session-scoped fixture and store the data in a dictionary after it is fetched. The fixture doesn't return the initialisation function, but a function that accesses the dictionary.
@pytest.mark.parametrize('field', [('beskrivelse'), ('systemId')])
def test_intgra_001_elevforhold_req_fields(return_endpoint, field):
    ep_to_get = 'get_elevforhold'
    ep_returned = return_endpoint(ep_to_get)
    apiv2 = Apiv2()
    apiv2.entity_check(ep_returned, field, ep_to_get, False)

@pytest.fixture(scope='session')
def return_endpoint():
    def endpoint_initialisation(ep_name):
        apiv2 = Apiv2()
        ep_data = apiv2.get_ep_name(ep_name)
        response = apiv2.get_endpoint_local(ep_data, 200)
        content = json.loads(response.content)
        apiv2.content_filt(content)
        apiv2_data = content['data']
        return apiv2_data

    ep_data = dict()

    def access(ep_name):
        try:
            return ep_data[ep_name]  # or use copy.deepcopy
        except KeyError:
            ep_data[ep_name] = endpoint_initialisation(ep_name)
            return ep_data[ep_name]  # or use copy.deepcopy
    return access
There are some caveats here. If the object returned by endpoint_initialisation() is mutable, then you potentially create unwanted dependencies between your tests. You can avoid this by returning a (deep) copy of the object. You can use the copy module for that.
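If you do want copies, here is a sketch of the access closure above rewritten as a drop-in replacement that returns deep copies, so no test can mutate the cached data seen by another test:
import copy

def access(ep_name):
    try:
        # Return a copy so callers can't mutate the cached object.
        return copy.deepcopy(ep_data[ep_name])
    except KeyError:
        ep_data[ep_name] = endpoint_initialisation(ep_name)
        return copy.deepcopy(ep_data[ep_name])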

Static classes being initialised on import: how does Python 2 initialise static classes on import?

I am trying to introduce Python 3 support for the package mime, and the code is doing something I have never seen before.
There is a class Types() that is used in the package as a static class.
class Types(with_metaclass(ItemMeta, object)):  # I changed this for 2-3 compatibility
    type_variants = defaultdict(list)
    extension_index = defaultdict(list)
    # __metaclass__ = ItemMeta  # unnecessary now

    def __init__(self, data_version=None):
        self.data_version = data_version
The type_variants defaultdict is what is getting filled in Python 2 but not in 3.
It very much seems to be getting filled by this class, which is in a different file called mime_types.py:
class MIMETypes(object):
    _types = Types(VERSION)

    def __repr__(self):
        return '<MIMETypes version:%s>' % VERSION

    @classmethod
    def load_from_file(cls, type_file):
        data = open(type_file).read()
        data = data.split('\n')
        mime_types = Types()
        for index, line in enumerate(data):
            item = line.strip()
            if not item:
                continue
            try:
                ret = TEXT_FORMAT_RE.match(item).groups()
            except Exception as e:
                __parsing_error(type_file, index, line, e)
            (unregistered, obsolete, platform, mediatype, subtype, extensions,
             encoding, urls, docs, comment) = ret
            if mediatype is None:
                if comment is None:
                    __parsing_error(type_file, index, line, RuntimeError)
                continue
            extensions = extensions and extensions.split(',') or []
            urls = urls and urls.split(',') or []
            mime_type = Type('%s/%s' % (mediatype, subtype))
            mime_type.extensions = extensions
            ...
            mime_type.url = urls
            mime_types.add(mime_type)  # instance of Type() is being filled?
        return mime_types
The function startup() is run whenever mime_types.py is imported, and it does this:
def startup():
    global STARTUP
    if STARTUP:
        type_files = glob(join(DIR, 'types', '*'))
        type_files.sort()
        for type_file in type_files:
            MIMETypes.load_from_file(type_file)  # class method is filling Types?
        STARTUP = False
This all seems pretty weird to me. The MIMETypes class first creates an instance of Types() on its first line, _types = Types(VERSION). It then seems to do nothing with this instance and only uses the mime_types instance created in the load_from_file() class method, mime_types = Types().
This sort of thing vaguely reminds me of JavaScript class construction. How is the instance mime_types filling Types.type_variants so that when it is imported like this:
from mime import Type, Types
the class's type_variants defaultdict can be used? And why isn't this working in Python 3?
EDIT:
Adding extra code to show how type_variants is filled
(In "Types" Class)
#classmethod
def add_type_variant(cls, mime_type):
cls.type_veriants[mime_type.simplified].append(mime_type)
#classmethod
def add(cls, *types):
for mime_type in types:
if isinstance(mime_type, Types):
cls.add(*mime_type.defined_types())
else:
mts = cls.type_veriants.get(mime_type.simplified)
if mts and mime_type in mts:
Warning('Type %s already registered as a variant of %s.',
mime_type, mime_type.simplified)
cls.add_type_variant(mime_type)
cls.index_extensions(mime_type)
You can see that MIMETypes uses the add() classmethod.
Without posting more of your code, it's hard to say. I will say that I was able to get that package ported to Python 3 with only a few changes (print statement -> function, basestring -> str, adding a dot before same-package imports, and a really ugly hack to compensate for their love of cmp):
def cmp(x, y):
    if isinstance(x, Type): return x.__cmp__(y)
    if isinstance(y, Type): return y.__cmp__(x) * -1
    return 0 if x == y else (1 if x > y else -1)
Note, I'm not even sure this is correct.
Then
import mime
print(mime.Types.type_veriants)  # sic
printed out a 1590-entry defaultdict.
Regarding your question about MIMETypes._types not being used, I agree, it's not.
Regarding your question about how the dictionary is being populated, it's quite simple, and you've identified most of it.
import mime
Imports the package's __init__.py which contains the line:
from .mime_types import MIMETypes, VERSION
And mime_types.py includes the lines:
def startup():
    global STARTUP
    if STARTUP:
        type_files = glob(join(DIR, 'types', '*'))
        type_files.sort()
        for type_file in type_files:
            MIMETypes.load_from_file(type_file)
        STARTUP = False

startup()
And MIMETypes.load_from_file() has the lines:
mime_types = Types()
# ...
for ... in ...:
    mime_types.add(mime_type)
And Types.add() has the line:
cls.add_type_variant(mime_type)
And that classmethod contains:
cls.type_veriants[mime_type.simplified].append(mime_type)
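The underlying mechanism is just classmethods mutating class-level attributes: every call, whether made through an instance or through the class, appends to the one defaultdict shared by the whole class. A minimal, self-contained illustration of the same pattern (Registry here is a made-up stand-in for Types):
from collections import defaultdict

class Registry(object):
    entries = defaultdict(list)  # class-level state, shared by all instances

    @classmethod
    def add(cls, key, value):
        cls.entries[key].append(value)  # mutates the class attribute

r = Registry()        # throwaway instance, like mime_types = Types()
r.add('a', 1)         # resolves to the classmethod; fills Registry.entries
Registry.add('a', 2)  # same dict, whether called via instance or class

print(Registry.entries)  # defaultdict(<class 'list'>, {'a': [1, 2]})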

set return_value of function

I have a class:
class AccountBusiness:
    def save(self, account) -> Account:
        if not account.account_number_is_valid():
            return False
        return True
and a test as:
@mock.patch.object(AccountBusiness, 'save')
def test_can_save_valid_account(self, mock_save):
    mock_account = mock.create_autospec(Account)
    mock_account.account_number_is_valid.return_value = False
    account_business = AccountBusiness()
    result = account_business.save(mock_account)
    self.assertEqual(result.return_value, True)
but it shows an exception like:
AssertionError: <MagicMock name='save()()' id='48830448'> != True
I want to set the return value of account.account_number_is_valid() to False and run the test.
You are patching the very instance method you are trying to test. Since you want to test the logic inside the save method, mocking it out means none of that logic runs. So the output you are actually getting here:
AssertionError: <MagicMock name='save()()' id='48830448'> != True
should be the first hint that something is not right: your save method is coming back as a MagicMock. You don't want this. What you actually want to do is mock only the Account class, and go accordingly from there. So, your patching here:
@mock.patch.object(AccountBusiness, 'save')
should actually only be:
@mock.patch('path.to.AccountBusiness.Account', return_value=Mock(), autospec=True)
The path.to.AccountBusiness.Account is the location of the Account class with respect to the AccountBusiness class.
So, with that patching, then the return_value of calling Account will now be your mock object that you can use for your account_number_is_valid. So, the code will actually look like this:
class MyTest(unittest.TestCase):
    def setUp(self):
        self.account_business = AccountBusiness()

    @mock.patch('path.to.AccountBusiness.Account', return_value=Mock(), autospec=True)
    def test_can_save_valid_account(self, mock_account):
        mock_account_obj = mock_account.return_value
        mock_account_obj.account_number_is_valid.return_value = False
        self.assertFalse(self.account_business.save(mock_account_obj))
Also, pay close attention to the assertion at the end: it was changed to make use of the available assertFalse. And look over your own logic, since returning False from account_number_is_valid will make your save method return False as well.
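As an aside, since save() receives the account as a parameter, you don't strictly need to patch anything here at all. A sketch of that simpler variant, assuming Account is importable in the test module:
def test_save_rejects_invalid_account(self):
    # No patching: build a spec'd mock and pass it straight in.
    mock_account = mock.create_autospec(Account)
    mock_account.account_number_is_valid.return_value = False
    self.assertFalse(self.account_business.save(mock_account))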

Getting Python's nosetests results in a tearDown() method

I want to be able to get the result of a particular test method and output it inside the teardown method, while using the nose test runner.
There is a very good example here.
But unfortunately, running nosetests example.py does not work, since nose doesn't seem to like the fact that the run method in the superclass is being overridden:
AttributeError: 'ResultProxy' object has no attribute 'wasSuccessful'
Caveat: the following doesn't actually access the test during the tearDown, but it does access each result.
You might want to write a nose plugin (see the API documentation here). The method that you are probably interested in is afterTest(), which is run... after the test. :) Though, depending on your exact application, handleError()/handleFailure() or finalize() might actually be more useful.
Here is an example plugin that accesses the result of a test immediately after it is executed.
from nose.plugins import Plugin
import logging

log = logging.getLogger('nose.plugins.testnamer')

class ReportResults(Plugin):
    def __init__(self, *args, **kwargs):
        super(ReportResults, self).__init__(*args, **kwargs)
        self.passes = 0
        self.failures = 0

    def afterTest(self, test):
        if test.passed:
            self.passes += 1
        else:
            self.failures += 1

    def finalize(self, result):
        print "%d successes, %d failures" % (self.passes, self.failures)
This trivial example merely reports the number of passes and failures (like the link you included), but I'm sure you can extend it to do something more interesting (here's another fun idea). To use this, make sure that it is installed in Nose (or load it into a custom runner), and then activate it with --with-reportresults.
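For a quick experiment without installing the plugin through setuptools entry points, nose can also be handed plugin instances directly via nose.main. A sketch, with reportresults as a hypothetical module holding the class above:
import nose
from reportresults import ReportResults  # hypothetical module with the plugin

if __name__ == '__main__':
    # Pass the plugin instance in and enable it on the command line.
    nose.main(addplugins=[ReportResults()],
              argv=['nosetests', '--with-reportresults'])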
If you are OK with adding some boilerplate code to the tests, something like the following might work.
In MyTest1, tearDown is called at the end of each test, and the value of self.result has been set to a tuple containing the method name and a dictionary (but you could set that to whatever you like). The inspect module is used to get the method name, so tearDown knows which test just ran.
In MyTest2, all the results are saved in a dictionary (results), which you can do with what you like in the tearDownClass method.
import inspect
import unittest

class MyTest1(unittest.TestCase):
    result = None

    def tearDown(self):
        print "tearDown:", self.result

    def test_aaa(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.result = (name, None)
        x = 1 + 1
        self.assertEqual(x, 2)
        self.result = (name, dict(x=x))

    def test_bbb(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.result = (name, None)
        # Intentional fail.
        x = -1
        self.assertEqual(x, 0)
        self.result = (name, dict(x=x))

class MyTest2(unittest.TestCase):
    results = {}

    @classmethod
    def tearDownClass(cls):
        print "tearDownClass:", cls.results

    def test_aaa(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.results[name] = None
        x = 1 + 1
        self.assertEqual(x, 2)
        self.results[name] = dict(x=x)

    def test_bbb(self):
        frame = inspect.currentframe()
        name = inspect.getframeinfo(frame).function
        del frame
        self.results[name] = None
        x = -1
        self.assertEqual(x, 0)
        self.results[name] = dict(x=x)

if __name__ == '__main__':
    unittest.main()

Python / YAML: How to initialize additional objects not just from the YAML file, within loadConfig?

I have what I think is a small misconception with loading some YAML objects. I defined the class below.
What I want to do is load some objects with the overridden loadConfig function for YAMLObjects. Some of these come from my .yaml file, but others should be built out of objects loaded from the YAML file.
For instance, in the class below, I load a member object named "keep" which is a string naming some items to keep in the region. But I want to also parse this into a list and have the list stored as a member object too. And I don't want the user to have to give both the string and list version of this parameter in the YAML.
My current workaround has been to override the __getattr__ function inside Region and make it create the defaults if it looks for them and doesn't find them. But this is clunky and more complicated than needed just for initializing objects.
What convention am I misunderstanding here? Why doesn't the loadConfig method create additional things not found in the YAML?
import yaml, pdb

class Region(yaml.YAMLObject):
    yaml_tag = u'!Region'

    def __init__(self, name, keep, drop):
        self.name = name
        self.keep = keep
        self.drop = drop
        self.keep_list = self.keep.split("+")
        self.drop_list = self.drop.split("+")
        self.pattern = "+".join(self.keep_list) + "-" + "-".join(self.drop_list)
    ###

    def loadConfig(self, yamlConfig):
        yml = yaml.load_all(file(yamlConfig))
        for data in yml:
            # These get created fine
            self.name = data["name"]
            self.keep = data["keep"]
            self.drop = data["drop"]
            # These do not get created.
            self.keep_list = self.keep.split("+")
            self.drop_list = self.drop.split("+")
            self.pattern = "+".join(self.keep_list) + "-" + "-".join(self.drop_list)
    ###
### End Region

if __name__ == "__main__":
    my_yaml = "/home/path/to/test.yaml"
    region_iterator = yaml.load_all(file(my_yaml))
    # Set a debug breakpoint to play with region_iterator and
    # confirm the extra stuff isn't created.
    pdb.set_trace()
And here is test.yaml so you can run all of this and see what I mean:
Regions:
  # Note: the string conventions below are for an
  # existing system. This is a shortened, representative
  # example.
  Market1:
    !Region
    name: USAndGB
    keep: US+GB
    drop: !!null
  Market2:
    !Region
    name: CanadaAndAustralia
    keep: CA+AU
    drop: !!null
And here, for example, is what it looks like for me when I run this in an IPython shell and explore the loaded object:
In [57]: %run "/home/espears/testWorkspace/testRegions.py"
--Return--
> /home/espears/testWorkspace/testRegions.py(38)<module>()->None
-> pdb.set_trace()
(Pdb) region_iterator
<generator object load_all at 0x1139d820>
(Pdb) tmp = region_iterator.next()
(Pdb) tmp
{'Regions': {'Market2': <__main__.Region object at 0x1f858550>, 'Market1': <__main__.Region object at 0x11a91e50>}}
(Pdb) us = tmp['Regions']['Market1']
(Pdb) us
<__main__.Region object at 0x11a91e50>
(Pdb) us.name
'USAndGB'
(Pdb) us.keep
'US+GB'
(Pdb) us.keep_list
*** AttributeError: 'Region' object has no attribute 'keep_list'
A pattern I have found useful when using YAML with classes that are basically storage is to have the loader call the constructor, so that objects are created in the same way as when you make them normally. If I understand what you are attempting to do correctly, this kind of structure might be useful:
import inspect
import yaml
import numpy as np  # needed for the ndarray check below
from collections import OrderedDict

class Serializable(yaml.YAMLObject):
    __metaclass__ = yaml.YAMLObjectMetaclass

    @property
    def _dict(self):
        dump_dict = OrderedDict()
        for var in inspect.getargspec(self.__init__).args[1:]:
            if getattr(self, var, None) is not None:
                item = getattr(self, var)
                if isinstance(item, np.ndarray) and item.ndim == 1:
                    item = list(item)
                dump_dict[var] = item
        return dump_dict

    @classmethod
    def to_yaml(cls, dumper, data):
        return ordered_dump(dumper, '!{0}'.format(data.__class__.__name__),
                            data._dict)

    @classmethod
    def from_yaml(cls, loader, node):
        fields = loader.construct_mapping(node, deep=True)
        return cls(**fields)

def ordered_dump(dumper, tag, data):
    value = []
    node = yaml.nodes.MappingNode(tag, value)
    for key, item in data.iteritems():
        node_key = dumper.represent_data(key)
        node_value = dumper.represent_data(item)
        value.append((node_key, node_value))
    return node
You would then want to have your Region class inherit from Serializable, and remove the loadConfig stuff. The code I posted inspects the constructor to see what data to save to the yaml file, and then when loading a yaml file calls the constructor with that same set of data. That way you just have to get the logic right in your constructor and the yaml loading should get it for free.
That code was ripped from one of my projects, apologies in advance if it doesn't quite work. It is also slightly more complicated than it needs to be because I wanted to control the order of output by using OrderedDict. You could replace my ordered_dump function with a call to dumper.represent_dict.
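For instance, Region might then reduce to something like this sketch (untested, building on the Serializable base above). The derived attributes are computed in the constructor, so they exist whether the object is built directly or loaded from YAML, since from_yaml calls the constructor with the loaded mapping:
class Region(Serializable):
    yaml_tag = u'!Region'

    def __init__(self, name, keep, drop):
        self.name = name
        self.keep = keep
        self.drop = drop
        # Derived members built here are present after YAML loading too,
        # because from_yaml calls this constructor with the loaded fields.
        self.keep_list = self.keep.split("+")
        # drop is !!null in the sample file, so guard against None.
        self.drop_list = self.drop.split("+") if self.drop else []
        self.pattern = "+".join(self.keep_list) + "-" + "-".join(self.drop_list)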