Conditional with statement in Python

Is there a way to begin a block of code with a with statement, but conditionally?
Something like:
if needs_with():
    with get_stuff() as gs:
        # do nearly the same large block of stuff,
        # involving gs or not, depending on needs_with()
To clarify, one scenario would have the block encased in the with statement, while the other possibility would be the same block, but not encased (i.e., as if it weren't indented).
Initial experiments of course give indentation errors.

Python 3.3 and above
Python 3.3 introduced contextlib.ExitStack for just this kind of situation. It gives you a "stack", to which you add context managers as necessary. In your case, you would do this:
from contextlib import ExitStack

with ExitStack() as stack:
    if needs_with():
        gs = stack.enter_context(get_stuff())
    # do nearly the same large block of stuff,
    # involving gs or not, depending on needs_with()
Anything entered on the stack is automatically exited at the end of the with statement as usual. (If nothing is entered, that's not a problem.) In this example, whatever is returned by get_stuff() is exited automatically.
If you have to use an earlier version of Python, you might be able to use the contextlib2 module, although it is not part of the standard library. It backports this and other features to earlier versions of Python. You could even do a conditional import, if you like this approach.
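For instance, a minimal sketch of such a conditional import (assuming contextlib2 is installed on the older interpreter):
try:
    from contextlib import ExitStack  # standard library on Python 3.3+
except ImportError:
    from contextlib2 import ExitStack  # third-party backport elsewhere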
Python 3.7 and above
Python 3.7 further introduced contextlib.nullcontext (a couple years after this answer was originally posted, and since that time mentioned in several other answers). In the comments, @Kache points out the most elegant usage of this option:
from contextlib import nullcontext

with get_stuff() if needs_with() else nullcontext() as gs:
    # do nearly the same large block of stuff,
    # involving gs or not, depending on needs_with()
Note that if needs_with() is False, then gs will be None inside the context block. If you want gs to be something_else in that case, you just replace nullcontext() with nullcontext(something_else).
This approach is obviously not as flexible as ExitStack, because this is just a binary choice, whereas ExitStack allows you to add as many exiting things as you want, with complicated logic and so on. But this certainly answers the OP's simple requirements.
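To illustrate that flexibility, a hedged sketch of ExitStack managing several independently conditional resources (needs_logging and open_log are hypothetical; needs_with and get_stuff are from the question):
from contextlib import ExitStack

with ExitStack() as stack:
    gs = stack.enter_context(get_stuff()) if needs_with() else None
    log = stack.enter_context(open_log()) if needs_logging() else None  # hypothetical
    # everything entered is exited together, in reverse order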

If you want to avoid duplicating code and are using a version of Python prior to 3.7 (when contextlib.nullcontext was introduced) or even 3.3 (when contextlib.ExitStack was introduced), you could do something like:
class dummy_context_mgr:
    def __enter__(self):
        return None
    def __exit__(self, exc_type, exc_value, traceback):
        return False
or:
import contextlib

@contextlib.contextmanager
def dummy_context_mgr():
    yield None
and then use it as:
with get_stuff() if needs_with() else dummy_context_mgr() as gs:
    # do stuff involving gs or not
You alternatively could make get_stuff() return different things based on needs_with().
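For example, a minimal sketch of that approach (open_real_resource is a hypothetical stand-in for whatever get_stuff() normally returns):
def get_stuff():
    if needs_with():
        return open_real_resource()  # hypothetical real context manager
    return dummy_context_mgr()  # the no-op manager defined above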
(See Mike's answer or Daniel's answer for what you can do in later versions.)

As of Python 3.7 you can use contextlib.nullcontext:
from contextlib import nullcontext

if needs_with():
    cm = get_stuff()
else:
    cm = nullcontext()

with cm as gs:
    # Do stuff
contextlib.nullcontext is pretty much just a no-op context manager. You can pass it an argument that it will yield, if you depend on something existing after the as:
>>> with nullcontext(5) as value:
...     print(value)
...
5
Otherwise it'll just return None:
>>> with nullcontext() as value:
...     print(value)
...
None
It's super neat; check out the docs for it here: https://docs.python.org/3/library/contextlib.html#contextlib.nullcontext

A third-party option to achieve exactly this:
https://pypi.python.org/pypi/conditional
from conditional import conditional

with conditional(needs_with(), get_stuff()):
    # do stuff

import contextlib

my_context = None  # your context manager
my_condition = False  # your condition

# Option 1 (Recommended)
with my_context if my_condition else contextlib.nullcontext():
    print('hello 1')

# Option 2
with my_context if my_condition else contextlib.ExitStack():
    print('hello 2')

You can use contextlib.nested to put 0 or more context managers into a single with statement.
>>> import contextlib
>>> managers = []
>>> test_me = True
>>> if test_me:
...     managers.append(open('x.txt', 'w'))
...
>>> with contextlib.nested(*managers):
...     pass
...
>>> # see if it closed
... managers[0].write('hello')
Traceback (most recent call last):
  File "<stdin>", line 2, in <module>
ValueError: I/O operation on closed file
This solution has its quirks, and I just noticed that as of 2.7 it's been deprecated. I wrote my own context manager to handle juggling multiple context managers. It's worked for me so far, but I haven't really considered edge conditions.
class ContextGroup(object):
    """A group of context managers that all exit when the group exits."""

    def __init__(self):
        """Create a context group."""
        self._exits = []

    def add(self, ctx_obj, name=None):
        """Open a context manager on ctx_obj and add it to this group. If
        name is given, the entered value will be available as self.name;
        name will still reference the entered object after this context
        closes.
        """
        if name and hasattr(self, name):
            raise AttributeError("ContextGroup already has context %s" % name)
        self._exits.append(ctx_obj.__exit__)
        var = ctx_obj.__enter__()
        if name:
            self.__dict__[name] = var

    def exit_early(self, name):
        """Call __exit__ on the named context manager and remove it from the group."""
        ctx_obj = getattr(self, name)
        delattr(self, name)
        # Assumes __enter__ returned the context manager itself (true for files).
        self._exits.remove(ctx_obj.__exit__)
        ctx_obj.__exit__(None, None, None)

    def __enter__(self):
        return self

    def __exit__(self, _type, value, tb):
        inner_exceptions = []
        for _exit in self._exits:
            try:
                _exit(_type, value, tb)
            except Exception as e:
                inner_exceptions.append(e)
        if inner_exceptions:
            raise RuntimeError("Errors while exiting context: %s"
                               % ','.join(str(e) for e in inner_exceptions))

    def __setattr__(self, name, val):
        if hasattr(val, '__exit__'):
            self.add(val, name)
        else:
            self.__dict__[name] = val

It was hard to find @farsil's nifty Python 3.3 one-liner, so here it is in its own answer:
with ExitStack() if not needs_with() else get_stuff() as gs:
    # do stuff
Note that ExitStack should come first, otherwise get_stuff() will be evaluated.

So I made this code. It is invoked like so:
with c_with(needs_with(), lambda: get_stuff()) as gs:
    # get_stuff() is NOT called unless needs_with() returns True
    # do nearly the same large block of stuff,
    # involving gs or not, depending on needs_with()
Properties:
it does not call get_stuff() unless the condition is true
if the condition is false, it provides a dummy context manager (which could probably be replaced with contextlib.nullcontext for Python >= 3.7)
Optionally you can send in an alternative context manager for when the condition is false:
with c_with(needs_with(), lambda: get_stuff(), lambda: dont_get_stuff()) as gs:
Hope this will help someone!
Here is the code:
def call_if_lambda(f):
    """
    Call f if f is a lambda function.
    From https://stackoverflow.com/a/3655857/997253
    """
    LMBD = lambda: 0
    islambda = isinstance(f, type(LMBD)) and f.__name__ == LMBD.__name__
    return f() if islambda else f
class _DummyClass(object):
    """
    A class that doesn't do anything when methods are called, items are
    set and get, etc. I suspect this does not cover _all_ cases, but many.
    """
    def _returnself(self, *args, **kwargs):
        return self
    __getattr__ = __enter__ = __exit__ = __call__ = __getitem__ = _returnself

    def __str__(self):
        return ""
    __repr__ = __str__

    def __setitem__(*args, **kwargs):
        pass

    def __setattr__(*args, **kwargs):
        pass
class c_with(object):
    """
    Wrap another context manager and enter it only if condition is true.

    Parameters
    ----------
    condition: bool
        Condition for entering contextmanager or possibly else_contextmanager.
    contextmanager: contextmanager, lambda or None
        Context manager to enter if condition is true. A lambda function
        can be given, which will not be called unless entering the context manager.
    else_contextmanager: contextmanager, lambda or None
        Context manager to enter if condition is false. A lambda function
        can be given, which will not be called unless entering the context manager.
        If None is given, a dummy context manager is used.
    """
    def __init__(self, condition, contextmanager, else_contextmanager=None):
        self.condition = condition
        self.contextmanager = contextmanager
        self.else_contextmanager = _DummyClass() if else_contextmanager is None else else_contextmanager

    def __enter__(self):
        if self.condition:
            self.contextmanager = call_if_lambda(self.contextmanager)
            return self.contextmanager.__enter__()
        elif self.else_contextmanager is not None:
            self.else_contextmanager = call_if_lambda(self.else_contextmanager)
            return self.else_contextmanager.__enter__()

    def __exit__(self, *args):
        if self.condition:
            return self.contextmanager.__exit__(*args)
        elif self.else_contextmanager is not None:
            self.else_contextmanager.__exit__(*args)
#### EXAMPLE BELOW ####

from contextlib import contextmanager

def needs_with():
    return False

@contextmanager
def get_stuff():
    yield {"hello": "world"}

with c_with(needs_with(), lambda: get_stuff()) as gs:
    # get_stuff() is NOT called unless needs_with() returns True.
    # do nearly the same large block of stuff,
    # involving gs or not, depending on needs_with()
    print("Hello", gs['hello'])

I have found that @Anentropic's answer is incomplete.
from conditional import conditional

a = 1  # can be None

if a is not None:
    b = 1

class WithNone:
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        pass

def foo(x):
    print(x)
    return WithNone()

with conditional(a is not None, foo(b) if a is not None else None):
    print(123)
The complete conditional usage required 3 condition checks instead of 1, because:
if a is None, then b is never defined, so evaluating foo(b) unconditionally raises NameError: name 'b' is not defined
the function foo still must return an enterable object, otherwise you get: AttributeError: 'NoneType' object has no attribute '__enter__'

Python decorator with multiprocessing fails

I would like to use a decorator on a function that I will subsequently pass to a multiprocessing pool. However, the code fails with "PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed". I don't quite see why it fails here. I feel certain that it's something simple, but I can't find it. Below is a minimal "working" example. I thought that using functools.wraps would be enough to let this work.
If I comment out the function decoration, it works without an issue. What is it about multiprocessing that I'm misunderstanding here? Is there any way to make this work?
Edit: After adding both a callable class decorator and a function decorator, it turns out that the function decorator works as expected. The callable class decorator continues to fail. What is it about the callable class version that keeps it from being pickled?
import random
import multiprocessing
import functools

class my_decorator_class(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass
    def __call__(self, elements):
        f = []
        for element in elements:
            f.append(self.target([element])[0])
        return f

def my_decorator_function(target):
    @functools.wraps(target)
    def inner(elements):
        f = []
        for element in elements:
            f.append(target([element])[0])
        return f
    return inner

@my_decorator_function
def my_func(elements):
    f = []
    for element in elements:
        f.append(sum(element))
    return f

if __name__ == '__main__':
    elements = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([e],)) for e in elements]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
The problem is that pickle needs to have some way to reassemble everything that you pickle. See here for a list of what can be pickled:
http://docs.python.org/library/pickle.html#what-can-be-pickled-and-unpickled
When pickling my_func, the following components need to be pickled:
An instance of my_decorator_class, called my_func.
This is fine. Pickle will store the name of the class and pickle its __dict__ contents. When unpickling, it uses the name to find the class, then creates an instance and fills in the __dict__ contents. However, the __dict__ contents present a problem...
The instance of the original my_func that's stored in my_func.target.
This isn't so good. It's a function at the top level, and normally these can be pickled. Pickle will store the name of the function. The problem, however, is that the name "my_func" is no longer bound to the undecorated function; it's bound to the decorated function. This means that pickle won't be able to look up the undecorated function to recreate the object. Sadly, pickle doesn't have any way to know that the object it's trying to pickle can always be found under the name __main__.my_func.
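A minimal sketch of that failure mode (the names are illustrative, and the exact error text varies between Python versions):
import pickle

class deco(object):
    def __init__(self, target):
        self.target = target
    def __call__(self, *args, **kwargs):
        return self.target(*args, **kwargs)

@deco
def f(x):
    return x + 1

# Pickling the deco instance tries to pickle self.target by name, but the
# module-level name 'f' is now bound to the deco instance, not the inner
# function, so this raises PicklingError.
pickle.dumps(f)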
You can change it like this and it will work:
import random
import multiprocessing
import functools

class my_decorator(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass
    def __call__(self, candidates, args):
        f = []
        for candidate in candidates:
            f.append(self.target([candidate], args)[0])
        return f

def old_my_func(candidates, args):
    f = []
    for c in candidates:
        f.append(sum(c))
    return f

my_func = my_decorator(old_my_func)

if __name__ == '__main__':
    candidates = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([c], {})) for c in candidates]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
You have observed that the decorator function works when the class does not. I believe this is because functools.wraps modifies the decorated function so that it has the name and other properties of the function it wraps. As far as the pickle module can tell, it is indistinguishable from a normal top-level function, so it pickles it by storing its name. Upon unpickling, the name is bound to the decorated function so everything works out.
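A quick hedged sketch to convince yourself of this (names are illustrative; the behavior assumes the decorated function is defined at module top level):
import pickle
import functools

def my_decorator_function(target):
    @functools.wraps(target)  # copies target's __name__/__qualname__ onto inner
    def inner(*args, **kwargs):
        return target(*args, **kwargs)
    return inner

@my_decorator_function
def g(x):
    return x

# pickle looks up the copied name, finds this very object, and succeeds
assert pickle.loads(pickle.dumps(g))(3) == 3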
I also had some problems using decorators in multiprocessing. I'm not sure if it's the same problem as yours. My code looked like this:
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

@decorate_func
def actual_func(x):
    return x ** 2

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(actual_func, (2,))
print result.get()
and when I run the code I get this:
Traceback (most recent call last):
  File "test.py", line 15, in <module>
    print result.get()
  File "somedirectory_too_lengthy_to_put_here/lib/python2.7/multiprocessing/pool.py", line 572, in get
    raise self._value
cPickle.PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
I fixed it by defining a new function that wraps the function in the decorator, instead of using the decorator syntax:
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

def actual_func(x):
    return x ** 2

def wrapped_func(*args, **kwargs):
    return decorate_func(actual_func)(*args, **kwargs)

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(wrapped_func, (2,))
print result.get()
The code ran perfectly and I got:
I'm decorating
4
I'm not very experienced at Python, but this solution solved the problem for me.
If you want the decorators badly enough (like me), you can also use exec() on the function's source string to circumvent the pickling problem mentioned above.
I wanted to be able to pass all the arguments to an original function and then use them successively. The following is my code for it.
At first, I made a make_functext() function to convert the target function object to a string. For that, I used the getsource() function from the inspect module (see the inspect documentation, and note that it can't retrieve source code from compiled code etc.). Here it is:
from inspect import getsource

def make_functext(func):
    ft = '\n'.join(getsource(func).split('\n')[1:])  # removing the decorator, of course
    ft = ft.replace(func.__name__, 'func')  # making function callable with 'func'
    ft = ft.replace('#§ ', '').replace('#§', '')  # for using commented code starting with '#§'
    ft = ft.strip()  # in case the function code was indented
    return ft
It is used in the following _worker() function that will be the target of the processes:
def _worker(functext, args):
    scope = {}  # this is needed to keep executed definitions
    exec(functext, scope)
    scope['func'](args)  # using func from scope
And finally, here's my decorator:
from multiprocessing import Process

def parallel(num_processes, **kwargs):
    def parallel_decorator(func, num_processes=num_processes):
        functext = make_functext(func)
        print('This is the parallelized function:\n', functext)
        def function_wrapper(funcargs, num_processes=num_processes):
            workers = []
            print('Launching processes...')
            for k in range(num_processes):
                p = Process(target=_worker, args=(functext, funcargs[k]))  # use args here
                p.start()
                workers.append(p)
        return function_wrapper
    return parallel_decorator
The code can finally be used by defining a function like this:
@parallel(4)
def hello(args):
    #§ from time import sleep  # use '#§' to avoid unnecessary (re)imports in main program
    name, seconds = tuple(args)  # unpack args-list here
    sleep(seconds)
    print('Hi', name)
... which can now be called like this:
hello([['Marty', 0.5],
       ['Catherine', 0.9],
       ['Tyler', 0.7],
       ['Pavel', 0.3]])
... which outputs:
This is the parallelized function:
 def func(args):
    from time import sleep
    name, seconds = tuple(args)
    sleep(seconds)
    print('Hi', name)
Launching processes...
Hi Pavel
Hi Marty
Hi Tyler
Hi Catherine
Thanks for reading, this is my very first post. If you find any mistakes or bad practices, feel free to leave a comment. I know that these string conversions are quite dirty, though...
If you use this code for your decorator:
import multiprocessing
from types import MethodType

DEFAULT_POOL = []

def run_parallel(_func=None, *, name: str = None, context_pool: list = DEFAULT_POOL):
    class RunParallel:
        def __init__(self, func):
            self.func = func
        def __call__(self, *args, **kwargs):
            process = multiprocessing.Process(target=self.func, name=name, args=args, kwargs=kwargs)
            context_pool.append(process)
            process.start()
        def __get__(self, instance, owner):
            return self if instance is None else MethodType(self, instance)
    if _func is None:
        return RunParallel
    else:
        return RunParallel(_func)

def wait_context(context_pool: list = DEFAULT_POOL, kill_others_if_one_fails: bool = False):
    finished = []
    for process in context_pool:
        process.join()
        finished.append(process)
        if kill_others_if_one_fails and process.exitcode != 0:
            break
    if kill_others_if_one_fails:
        # kill unfinished processes
        for process in context_pool:
            if process not in finished:
                process.kill()
        # wait for every process to be dead
        for process in context_pool:
            process.join()
Then you can use it like this, in these 4 examples:
@run_parallel
def m1(a, b="b"):
    print(f"m1 -- {a=} {b=}")

@run_parallel(name="mym2", context_pool=DEFAULT_POOL)
def m2(d, cc="cc"):
    print(f"m2 -- {d} {cc=}")
    a = 1/0

class M:
    @run_parallel
    def c3(self, k, n="n"):
        print(f"c3 -- {k=} {n=}")

    @run_parallel(name="Mc4", context_pool=DEFAULT_POOL)
    def c4(self, x, y="y"):
        print(f"c4 -- {x=} {y=}")

if __name__ == "__main__":
    m1(11)
    m2(22)
    M().c3(33)
    M().c4(44)
    wait_context(kill_others_if_one_fails=True)
The output will be:
m1 -- a=11 b='b'
m2 -- 22 cc='cc'
c3 -- k=33 n='n'
(followed by the exception raised in method m2)

conditional python with

I have five or six resources that have nice 'with' handlers, and normally I'd do this:
with res1, res2, res3, res4, res5, res6:
    do1
    do2
However, sometimes one or more of these resources should not be activated, which leads to very ugly repetitive code:
with res1, res3, res4, res6:  # these always acquired
    if res2_enabled:
        with res2:
            if res5_enabled:
                with res5:
                    do1
                    do2
            else:
                do1
                do2
    elif res5_enabled:
        with res5:
            ...
There must be clean easy ways to do this surely?
You could create a wrapper object that supports the with statement, and do the checking in there. Something like:
with wrapper(res1), wrapper(res2), wrapper(res3):
    ...
or a wrapper that handles all of them:
with wrapper(res1, res2, res3):
    ...
The definition for your wrapper would be:
class wrapper(object):
    def __init__(self, *objs):
        ...
    def __enter__(self):
        ...  # initialize objs here, honoring your enabled-flags
    def __exit__(self, exc_type, exc_value, traceback):
        ...  # release objects here
If I understand you correctly you can do this:
from contextlib import contextmanager, nested

def enabled_resources(*resources):
    return nested(*(res for res, enabled in resources if enabled))

# just for testing
@contextmanager
def test(n):
    print n, "entered"
    yield

resources = [(test(n), n % 2) for n in range(10)]
# you want
# resources = [(res1, res1_enabled), ... ]

with enabled_resources(*resources):
    # do1, do2
    pass
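contextlib.nested was removed in Python 3, so for completeness, a hedged sketch of the same idea on Python 3.3+ using contextlib.ExitStack (resources as above):
from contextlib import ExitStack

with ExitStack() as stack:
    for res, enabled in resources:
        if enabled:
            stack.enter_context(res)
    # do1, do2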
Original Poster here; here is my approach refined so far:
I can add (or monkey-patch) the bool operator __nonzero__ onto the with objects, returning whether they are enabled. Then, when objects are mutually exclusive, I can have:
with res1 or res2 or res3 or res4:
    ...
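For illustration, a hedged sketch of such a resource class (Python 2 spelling, since __nonzero__ is the Python 2 hook; Python 3 renames it __bool__):
class Resource(object):
    def __init__(self, enabled=True):
        self.enabled = enabled
    def __nonzero__(self):  # __bool__ in Python 3
        return self.enabled
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        return False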
When a resource is togglable, I can create an empty withable that is a no-op; wither seems a nice name for it:
class wither:
    @classmethod
    def __enter__(cls): pass
    @classmethod
    def __exit__(cls, *args): pass

...

with res1 or wither, res2 or wither:
    ...
I can also use this while keeping the toggling out of the withable objects:
with res1 if res1enabled else wither, res2 if res2enabled else wither:
    ...
Finally, for the resources I have most control over, I can integrate the enabled check into the class itself, so that they are a no-op when used while disabled:
with res1, res2, res3:
    ...
The with statement is absolutely adorable; it just seems a bit unentrenched yet. It will be interesting to see what finesse others come up with in this regard...

How to test with Python's unittest that a warning has been thrown?

I have the following function in Python, and I want to test with unittest that if the function gets 0 as an argument, it throws a warning. I already tried assertRaises, but since I don't raise the warning, that doesn't work.
def isZero(i):
    if i != 0:
        print "OK"
    else:
        warning = Warning("the input is 0!")
        print warning
    return i
Starting with Python 3.2, you can simply use the assertWarns() method:
with self.assertWarns(Warning):
    do_something()
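In context, a minimal self-contained sketch (do_something here is a hypothetical function that issues the warning):
import unittest
import warnings

def do_something():
    warnings.warn("deprecated", DeprecationWarning)

class TestWarns(unittest.TestCase):
    def test_warns(self):
        with self.assertWarns(DeprecationWarning):
            do_something()

if __name__ == '__main__':
    unittest.main()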
You can use the catch_warnings context manager. Essentially this allows you to mock the warnings handler, so that you can verify details of the warning. See the official docs for a fuller explanation and sample test code.
import warnings

def fxn():
    warnings.warn("deprecated", DeprecationWarning)

with warnings.catch_warnings(record=True) as w:
    # Cause all warnings to always be triggered.
    warnings.simplefilter("always")
    # Trigger a warning.
    fxn()
    # Verify some things
    assert len(w) == 1
    assert issubclass(w[-1].category, DeprecationWarning)
    assert "deprecated" in str(w[-1].message)
You can write your own assertWarns function to encapsulate the catch_warnings context. I've just implemented it the following way, with a mixin:
import warnings

class WarningTestMixin(object):
    'A test which checks if the specified warning was raised'

    def assertWarns(self, warning, callable, *args, **kwds):
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            result = callable(*args, **kwds)
            self.assertTrue(any(item.category == warning for item in warning_list))
A usage example:
class SomeTest(WarningTestMixin, TestCase):
    'Your testcase'

    def test_something(self):
        self.assertWarns(
            UserWarning,
            your_function_which_issues_a_warning,
            5, 10, 'john',  # args
            foo='bar'       # kwargs
        )
The test will pass if at least one of the warnings issued by your_function is of type UserWarning.
@ire_and_curses' answer is quite useful and, I think, canonical. Here is another way to do the same thing. This one requires Michael Foord's excellent Mock library.
import unittest, warnings
from mock import patch_object

def isZero(i):
    if i != 0:
        print "OK"
    else:
        warnings.warn("the input is 0!")
    return i

class Foo(unittest.TestCase):
    @patch_object(warnings, 'warn')
    def test_is_zero_raises_warning(self, mock_warn):
        isZero(0)
        self.assertTrue(mock_warn.called)

if __name__ == '__main__':
    unittest.main()
The nifty patch_object lets you mock out the warn method.
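Note that patch_object was removed from later versions of Mock; on modern Python the same test can be written with unittest.mock.patch.object (a hedged sketch, reusing isZero from above):
import unittest
import warnings
from unittest.mock import patch

class Foo(unittest.TestCase):
    @patch.object(warnings, 'warn')
    def test_is_zero_raises_warning(self, mock_warn):
        isZero(0)
        self.assertTrue(mock_warn.called)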
One problem with the warnings.catch_warnings approach is that warnings produced in different tests can interact in strange ways through global state kept in __warningregistry__ attributes.
To address this, we should clear the __warningregistry__ attribute of every module before every test that checks warnings.
import sys
import unittest
import warnings

class MyTest(unittest.TestCase):

    def setUp(self):
        # The __warningregistry__'s need to be in a pristine state for tests
        # to work properly.
        for v in list(sys.modules.values()):
            if getattr(v, '__warningregistry__', None):
                v.__warningregistry__ = {}

    def test_something(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", MySpecialWarning)
            ...
            self.assertEqual(len(w), 1)
            self.assertIsInstance(w[0].message, MySpecialWarning)
This is how Python 3's assertWarns() method is implemented.
Building off the answer from @ire_and_curses,
class AssertWarns(warnings.catch_warnings):
    """A Python 2 compatible version of `unittest.TestCase.assertWarns`."""

    def __init__(self, test_case, warning_type):
        self.test_case = test_case
        self.warning_type = warning_type
        self.log = None
        super(AssertWarns, self).__init__(record=True, module=None)

    def __enter__(self):
        self.log = super(AssertWarns, self).__enter__()
        return self.log

    def __exit__(self, *exc_info):
        super(AssertWarns, self).__exit__(*exc_info)
        self.test_case.assertEqual(type(self.log[0]), self.warning_type)
This can be called similarly to unittest.TestCase.assertWarns:
with AssertWarns(self, warnings.WarningMessage):
    warnings.warn('test warning!')
where self is a unittest.TestCase.
Per Melebius' answer, you can use self.assertWarns().
Additionally, if you want to check the warning message as well, you can use self.assertWarnsRegex() for greater specificity:
import warnings
from unittest import TestCase

class MyCustomWarning(Warning):
    ...

def is_zero(i: int) -> int:
    if i != 0:
        print("OK")
    else:
        warnings.warn("the input is 0!", MyCustomWarning)
    return i

class TestIsZero(TestCase):
    def test_when_then_input_is_zero(self):
        regex = "the input is 0"
        with self.assertWarnsRegex(MyCustomWarning, expected_regex=regex):
            _ = is_zero(0)
This test will fail if the regex is not found in the warning message.

Passing 'None' as function parameter (where parameter is a function)

I am writing a small app that has to perform some 'sanity checks' before entering execution (e.g. of a sanity check: test whether a certain path is readable / writable / exists).
The code:
import logging
import os
import shutil
import sys

from paths import PATH

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('sf.core.sanity')

def sanity_access(path, mode):
    ret = os.access(path, mode)
    logfunc = log.debug if ret else log.warning
    loginfo = (os.access.__name__, path, mode, ret)
    logfunc('%s(\'%s\', %s)==%s' % loginfo)
    return ret

def sanity_check(bool_func, true_func, false_func):
    ret = bool_func()
    (logfunc, execfunc) = (log.debug, true_func) if ret else \
                          (log.warning, false_func)
    logfunc('exec: %s', execfunc.__name__)
    execfunc()

def sanity_checks():
    sanity_check(lambda: sanity_access(PATH['userhome'], os.F_OK),
                 lambda: None, sys.exit)
My question is related to the sanity_check function.
This function takes 3 parameters (bool_func, true_func, false_func). If bool_func (the test function, returning a boolean value) returns True, true_func gets executed; otherwise false_func gets executed.
1) lambda: None is a little lame, because, for example, if sanity_access returns True, lambda: None gets executed, and the output printed will be:
DEBUG:sf.core.sanity:access('/home/nomemory', 0)==True
DEBUG:sf.core.sanity:exec: <lambda>
So it won't be very clear in the logs what function got executed; the log will only contain <lambda>. Is there a default function that does nothing and can be passed as a parameter? Is there a way to return the name of the first function that is being executed inside a lambda?
Or a way not to log that "exec" if 'nothing' is sent as a parameter?
What's the None / do-nothing equivalent for functions?
sanity_check(lambda: sanity_access(PATH['userhome'], os.F_OK),
             <do nothing, but show something more useful than <lambda>>, sys.exit)
Additional question: why does lambda: pass not work, when lambda: None does?
What's with all the lambdas that serve no purpose? Well, maybe optional arguments will help you a bit:
def sanity_check(test, name='undefined', ontrue=None, onfalse=None):
    if test:
        log.debug(name)
        if ontrue is not None:
            ontrue()
    else:
        log.warn(name)
        if onfalse is not None:
            onfalse()

def sanity_checks():
    sanity_check(sanity_access(PATH['userhome'], os.F_OK), 'test home',
                 onfalse=sys.exit)
But you are really overcomplicating things.
update
I would normally delete this post because THC4k saw through all the complexity and rewrote your function correctly. However in a different context, the K combinator trick might come in handy, so I'll leave it up.
There is no builtin that does what you want, AFAIK. I believe that you want the K combinator (the link came up on another question), which can be encoded as
def K_combinator(x, name):
    def f():
        return x
    f.__name__ = name
    return f

none_function = K_combinator(None, 'none_function')

print none_function()
of course if this is just a one-off then you could just do
def none_function():
    return None
But then you don't get to say "K combinator". Another advantage of the 'K_combinator' approach is that you can pass it to functions, for example,
foo(call_back1, K_combinator(None, 'name_for_logging'))
As for your second question: only expressions are allowed in a lambda. pass is a statement, hence lambda: pass fails.
You can slightly simplify your call to sanity check by removing the lambda around the first argument.
def sanity_check(b, true_func, false_func):
    if b:
        logfunc = log.debug
        execfunc = true_func
    else:
        logfunc = log.warning
        execfunc = false_func
    logfunc('exec: %s', execfunc.__name__)
    execfunc()

def sanity_checks():
    sanity_check(sanity_access(PATH['userhome'], os.F_OK),
                 K_combinator(None, 'none_func'), sys.exit)
This is more readable (largely from expanding the ternary operator into an if). The bool_func wasn't doing anything, because sanity_check wasn't adding any arguments to the call, so you might as well call the function directly instead of wrapping it in a lambda.
You might want to rethink this.
class SanityCheck(object):
    def __call__(self):
        if self.check():
            logger.debug(...)
            self.ok()
        else:
            logger.warning(...)
            self.not_ok()
    def check(self):
        return True
    def ok(self):
        pass
    def not_ok(self):
        sys.exit(1)

class PathSanityCheck(SanityCheck):
    path = "/path/to/resource"
    def check(self):
        return os.access(self.path, os.F_OK)

class AnotherPathSanityCheck(SanityCheck):
    path = "/another/path"

def startup():
    checks = (PathSanityCheck(), AnotherPathSanityCheck())
    for c in checks:
        c()
Callable objects can simplify your life.
>>> import dis
>>> f = lambda: None
>>> dis.dis(f)
  1           0 LOAD_CONST               0 (None)
              3 RETURN_VALUE
>>> g = lambda: Pass
>>> dis.dis(g)
  1           0 LOAD_GLOBAL              0 (Pass)
              3 RETURN_VALUE
>>> g = lambda: pass
  File "<stdin>", line 1
    g = lambda: pass
               ^
SyntaxError: invalid syntax
Actually, what you want is a function which does nothing, but has a __name__ which is useful to the log. The lambda function is doing exactly what you want, but execfunc.__name__ is giving "<lambda>". Try one of these:
def nothing_func():
    return

def ThisAppearsInTheLog():
    return
You can also put your own attributes on functions:
def log_nothing():
    return

log_nothing.log_info = "nothing interesting"
Then change execfunc.__name__ to getattr(execfunc, 'log_info', '')
