In the following example:
import subprocess
import mock

class MyArgs():
    cmd = ''
    cmd_args = ''
    cmd_path = ''

    def __init__(self):
        pass

    def set_args(self, c, a, p):
        self.cmd = c
        self.cmd_args = a
        self.cmd_path = p

    def get_command(self):
        return ([self.cmd, self.cmd_args, self.cmd_path])

class Example():
    args = MyArgs()

    def __init__(self):
        pass

    def run_ls_command(self):
        print 'run_ls_command command:' + str(self.get_command())
        p = subprocess.Popen(self.get_command(), stdout=subprocess.PIPE)
        out, err = p.communicate()
        print out  # to verify the mock is working, should output 'output' if the mock is called
        return err

    def set_args(self, c, a, p):
        # this would be more complicated logic in
        # future and likely not just one method, this is a MWE
        self.args.set_args(c, a, p)

    def get_command(self):
        return self.args.get_command()

@mock.patch.object(subprocess, 'Popen', autospec=True)
def test_subprocess_popen(mock_popen):
    mock_popen.return_value.returncode = 0
    mock_popen.return_value.communicate.return_value = ("output", "Error")
    e = Example()
    e.set_args('ls', '-al', '/bin/foobar')
    e.run_ls_command()
    # todo: validate arguments called by the popen command for the test

test_subprocess_popen()
The longer-term goal is to be able to validate more complicated subprocess.Popen commands, which will be constructed by more manipulations of the Example object (though the concept will be the same as in this example).
What I would like to do is somehow inspect the arguments passed to the p = subprocess.Popen(self.get_command(), stdout=subprocess.PIPE) call.
However, I am not sure how to get at those arguments. I know my mock is being called because the output matches what the mock returns.
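For reference, once the @mock.patch.object decorator in the example is active, the mock already records every call, so the todo in the test could be filled in roughly like this (a minimal sketch using the standard mock API; the expected argument list is simply what set_args built in this example):

@mock.patch.object(subprocess, 'Popen', autospec=True)
def test_subprocess_popen(mock_popen):
    mock_popen.return_value.returncode = 0
    mock_popen.return_value.communicate.return_value = ("output", "Error")
    e = Example()
    e.set_args('ls', '-al', '/bin/foobar')
    e.run_ls_command()
    # assert on the exact call that run_ls_command made
    mock_popen.assert_called_once_with(['ls', '-al', '/bin/foobar'], stdout=subprocess.PIPE)
    # or inspect the recorded arguments directly
    args, kwargs = mock_popen.call_args
    assert args[0] == ['ls', '-al', '/bin/foobar']
    assert kwargs['stdout'] == subprocess.PIPE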
I am using the Python unittest module to test a file that takes a command-line argument. The argument is a file name, which is then passed into a function like so:
file_name = str(sys.argv[1])
file = open(file_name)
result = main_loop(file)
print(result)
My test is set up like so:
import unittest
import main_file

class testMainFile(unittest.TestCase):
    def test_main_loop(self):
        file = open('file_name.json')
        result = main_file.main_loop(file)
        self.assertEqual(result, 'Expected Result')

if __name__ == '__main__':
    unittest.main()
When I run the test I get an "IndexError: list index out of range".
I tried passing the argument when running the test but to no avail. How do I run my test without error?
I think you have a couple of options here. First, go to the documentation and check out patch, because I think you can get away with:
from unittest.mock import patch

@patch('sys.argv', ['mock.py', 'test-value'])
def test_main_loop(self):
    ...
Options for fun:
One would be to simply override sys.argv right next to your call:
  def test_main_loop(self):
      file = open('file_name.json')
+     original_argv = sys.argv
+     sys.argv = ['mock argv', 'my-test-value']
      result = main_file.main_loop(file)
+     sys.argv = original_argv
      self.assertEqual(result, 'Expected Result')
Second would be to create a simple wrapper for your function
def set_sys_argv(func: Callable):
    sys.argv = ['mock.py', 'my_test_value']
    def wrapper(*args, **kwargs):
        func()
    return wrapper
and use it with your test function:
@set_sys_argv
def test_main_loop(self):
    ...
We can improve it slightly and make it more generic by making a decorator that accepts the values to mock:
def set_sys_argv(*argv):
    sys.argv = argv
    def _decorator(func: Callable):
        def wrapper(*args, **kwargs):
            func()
        return wrapper
    return _decorator
and use it similarly to patch:
@set_sys_argv('mock.py', 'test-value')
def test_main_loop(self):
    ...
Third would be to create a context manager, likewise:
class ReplaceSysArgv(list):
    def __enter__(self):
        self._argv = sys.argv
        sys.argv = ['mock', 'my-test-value']
        return self

    def __exit__(self, *args):
        sys.argv = self._argv
and use it with your code:
def test_main_loop(self):
    file = open('file_name.json')
    with ReplaceSysArgv():
        result = main_file.main_loop(file)
    self.assertEqual(result, 'Expected Result')
You have to push the arguments onto sys.argv before retrieving them (if your code is pulling from command-line arguments; it's unclear to me where in your test you're using the command-line arguments, but I digress).
So something like this first:
import sys
sys.argv = ['mock_filename.py', 'json_file.json']
# ... continue with the rest of the program / test.
I have an executable that has many options, and I wish to wrap it in classes. For example, the executable has options that can copy rows of a file, rename parts of a file, compress a file, count specific rows of a file, etc. Each option has parameters. I am uncertain whether I am going about it the wrong way, but I want to use OOP.
import subprocess

class ExecProcess:
    def __init__(self, myfile, tempfile, outfile):
        self.myfile = myfile
        self.tempfile = tempfile
        self.outfile = outfile
        self.compressed_file = self.outfile + 'zip'

    def copy(self, myfile):
        temp_copy = subprocess.call(['executable', '-c', self.myfile, '-out', self.tempfile])
        return temp_copy  # should this be return self.tempfile which is the output?

    def rename(self, myfile, tempfile, outfile):  # need to include all the variables I declared in init?
        output = subprocess.call(['executable', '-i', self.myfile, '-r', self.tempfile, '-out', self.compressed_file])
        return output  # return self.outfile?
Given the above, how do I call the methods within the class and then write tests for them, e.g. using pytest? It's not intuitive to me how to test subprocess calls using pytest.
The result of each call is a different file, with each subsequent command taking the file from an earlier command as input. Please note that myfile is the only original file I have; tempfile and outfile are just variable names I assigned to the results.
Any help will be appreciated.
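One common approach (not the only one) is to patch subprocess.call so the test only verifies the command line the method builds, never running the executable. A minimal pytest sketch, assuming the class above lives in a hypothetical module named exec_process.py:

from unittest import mock

import exec_process  # hypothetical module containing the ExecProcess class above

def test_copy_builds_expected_command():
    proc = exec_process.ExecProcess('in.txt', 'tmp.txt', 'out.txt')
    # patch the subprocess.call reference that ExecProcess actually uses
    with mock.patch('exec_process.subprocess.call', return_value=0) as m_call:
        result = proc.copy('in.txt')
    m_call.assert_called_once_with(['executable', '-c', 'in.txt', '-out', 'tmp.txt'])
    assert result == 0  # copy() returns the call's return code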
import subprocess

class Switch():
    def __init__(self, ip):
        self.ip = ip
        self.subproc = subprocess

    def ping(self):
        success_ping = 0
        for i in range(3):
            status = self.subproc.call(['ping', '-c', '1', '-W', '0.05', self.ip], stdout=self.subproc.DEVNULL)
            if status == 0:
                success_ping += 1
        if success_ping > 0:
            return True
        else:
            return False

    def get_ip(self):
        return ('Switch IP: ' + self.ip)

sw = Switch('192.168.1.1')
if sw.ping():
    print('Switch', sw.get_ip(), 'UP')
else:
    print('Switch', sw.get_ip(), 'DOWN')
Maybe this will be helpful for you.
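Because subprocess is stored on the instance as self.subproc, a test can swap that attribute for a mock so ping() never touches the network. A small sketch of that idea, assuming the Switch class above is importable:

from unittest import mock

def test_ping_up():
    sw = Switch('192.168.1.1')
    sw.subproc = mock.MagicMock()
    sw.subproc.call.return_value = 0   # pretend every ping succeeds
    assert sw.ping() is True

def test_ping_down():
    sw = Switch('192.168.1.1')
    sw.subproc = mock.MagicMock()
    sw.subproc.call.return_value = 1   # pretend every ping fails
    assert sw.ping() is False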
I ran a quick test to see if something would work...
>>> from unittest.mock import MagicMock
>>> x = MagicMock()
>>> x.func.return_value = (0, 0)
>>> y, z = x.func()
It seems to work like I expected, so then I try to patch something in my tests like this...
def setUp(self):
    """Setting up the command parameters"""
    self.command = up.Command()
    self.command.stdout = MagicMock()
    self.command.directory = '{}/../'.format(settings.BASE_DIR)
    self.command.filename = 'test_csv.csv'
@patch('module.Popen')
@patch('module.popen')
def test_download(self, m_popen, m_Popen):
    """Testing that download calls process.communicate"""
    m_Popen.communicate.return_value = (0, 0)
    self.command.download()
    m_popen.assert_called()
    m_Popen.communicate.assert_called()
In command.download, the code looks like this...
command = 'wget --directory-prefix=%s \
    https://www.phoenix.gov/OpenDataFiles/Crime%%20Stats.csv' \
    % self.directory
process = Popen(command.split(), stdin=PIPE, stdout=PIPE)
print(process.communicate())
stdout, stderr = process.communicate()
My first guess would be that I was patching the wrong namespace, but when I print communicate() I see this:
<MagicMock name='mock().communicate()' id='4438712160'>
which means that it is getting mocked, but it is just not registering my new return value for communicate. I don't know where to go from here.
You call communicate on process, which is the return value of Popen. So you need another level in that patch call:
m_Popen.return_value.communicate.return_value = (0, 0)
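With that change (and the same extra level on the assertion, since communicate lives on the object returned by Popen), the test from the question would look roughly like this, keeping the question's placeholder 'module' paths:

@patch('module.Popen')
@patch('module.popen')
def test_download(self, m_popen, m_Popen):
    """Testing that download calls process.communicate"""
    m_Popen.return_value.communicate.return_value = (0, 0)
    self.command.download()
    m_popen.assert_called()
    m_Popen.return_value.communicate.assert_called()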
I was wondering if it would be possible to create some sort of static set in a Python Process subclass to keep track of the types of processes that are currently running asynchronously.
class showError(Process):
    # Define some form of shared set that is shared by all Processes
    displayed_errors = set()

    def __init__(self, file_name, error_type):
        super(showError, self).__init__()
        self.error_type = error_type

    def run(self):
        if error_type not in set:
            displayed_errors.add(error_type)
            message = 'Please try again. ' + str(self.error_type)
            winsound.MessageBeep(-1)
            result = win32api.MessageBox(0, message, 'Error', 0x00001000)
            if result == 0:
                displayed_errors.discard(error_type)
That way, when I create/start multiple showError processes with the same error_type, subsequent error windows will not be created. So how can we define this shared set?
You can use a multiprocessing.Manager().dict() (there's no shared set proxy available, but you can use a dict in the same way) and share that between all your subprocesses.
import multiprocessing as mp

if __name__ == "__main__":
    m = mp.Manager()
    displayed_errors = m.dict()
    subp = showError("some filename", "some error type", displayed_errors)
Then change showError.__init__ to accept the shared dict:
def __init__(self, file_name, error_type, displayed_errors):
    super(showError, self).__init__()
    self.error_type = error_type
    self.displayed_errors = displayed_errors
Then this:
displayed_errors.add(error_type)
Becomes:
self.displayed_errors[error_type] = 1
And this:
displayed_errors.discard(error_type)
Becomes:
try:
    del self.displayed_errors[error_type]
except KeyError:
    pass
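Putting the pieces together, here is a condensed sketch of the whole pattern; the Windows message box is replaced by a print purely for illustration, since the dialog itself is not part of the sharing mechanism:

import multiprocessing as mp
from multiprocessing import Process

class showError(Process):
    def __init__(self, file_name, error_type, displayed_errors):
        super(showError, self).__init__()
        self.error_type = error_type
        self.displayed_errors = displayed_errors

    def run(self):
        if self.error_type not in self.displayed_errors:
            self.displayed_errors[self.error_type] = 1
            # in the real code win32api.MessageBox blocks here, keeping the
            # entry in place until the user dismisses the dialog
            print('Please try again. ' + str(self.error_type))
            try:
                del self.displayed_errors[self.error_type]
            except KeyError:
                pass

if __name__ == "__main__":
    m = mp.Manager()
    displayed_errors = m.dict()
    procs = [showError("some filename", "IOError", displayed_errors) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()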
When subclassing code.InteractiveInterpreter I can't seem to get the write() method to run as I would expect per the documentation.
import code

class PythonInterpreter(code.InteractiveInterpreter):
    def __init__(self, localVars):
        self.runResult = ''
        print 'init called'
        code.InteractiveInterpreter.__init__(self, localVars)

    def write(self, data):
        print 'write called'
        self.runResult = data

test = 'Hello'
interpreter = PythonInterpreter({'test': test})
interpreter.runcode('print test')
print 'Result:' + interpreter.runResult
Expected output:
init called
write called
Result: Hello
Actual output:
init called
Hello <- shouldn't print
Result:
Any thoughts?
The write method is not used by the code passed to runcode at all. You would have to redirect stdout for this to work, e.g. something like:
import code
import sys

class PythonInterpreter(code.InteractiveInterpreter):
    def __init__(self, localVars):
        self.runResult = ''
        print 'init called'
        code.InteractiveInterpreter.__init__(self, localVars)

    def write(self, data):
        # since sys.stdout is probably redirected,
        # we can't use print
        sys.__stdout__.write('write called\n')
        self.runResult = data

    def runcode(self, cd):
        # redirecting stdout to our write method before running the code cd
        sys.stdout = self
        code.InteractiveInterpreter.runcode(self, cd)
        # redirecting back to normal stdout
        sys.stdout = sys.__stdout__