I am building my own decorator function, but I can't seem to update the func.cache_length attribute of the function.
The code below simply uses an OrderedDict to store the items from all the dataframes loaded in pandas, with a maximum of 5 dataframes stored in the cache.
I also want the user to be able to find out how many items the function currently has cached via cache_length, but every time I check it I get 0.
from functools import wraps
from collections import OrderedDict

def cache(func, max_length=5):
    func.cache_dict = OrderedDict()
    func.cache_length = 0
    @wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs['df_name'] in func.cache_dict:
            return func.cache_dict[kwargs['df_name']]
        elif len(func.cache_dict) < max_length:
            print('Running function...')
            df = func(*args, **kwargs)
            func.cache_dict[kwargs['df_name']] = df
            func.cache_length += 1
            return df
        else:
            func.cache_dict.popitem(last=True)
            df = func(*args, **kwargs)
            func.cache_dict[kwargs['df_name']] = df
            return df
    func.cache_reset = lambda: func.cache_dict.clear()
    return wrapper
import pandas as pd

@cache
def data_reader(*, df_name: str, file: str):
    df = pd.read_csv(file)
    return df
This is the output vs. what I expected (cache_length should be 1):
data_reader(df_name='test_dataframe', file="parsed_data.csv")
>>
Running function...
....
>>
data_reader.cache_length
>>
0
The reason you see 0 is that @wraps(func) copies func.__dict__ into the wrapper at decoration time, so data_reader (the wrapper) gets its own cache_length bound to 0; the later func.cache_length += 1 rebinds the attribute on the original function only, and the wrapper's stale copy is what you read back (cache_dict still appears to work because both names refer to the same mutable OrderedDict). Based on what you described, here is a more general implementation (details below):
from collections import OrderedDict
from functools import wraps

def cache(function=None, *, max_length=5):
    def decorator(func):
        cache_dict = OrderedDict()
        @wraps(func)
        def wrapper(*args, **kwargs):
            call_repr = f"args={args}, kwargs={kwargs}"
            try:
                return cache_dict[call_repr]
            except KeyError:
                pass
            if len(cache_dict) >= max_length:
                cache_dict.popitem(last=False)
            print(f"Running function {func.__name__}...")
            cache_dict[call_repr] = output = func(*args, **kwargs)
            return output
        wrapper.cache = cache_dict
        return wrapper
    return decorator if function is None else decorator(function)
@cache(max_length=3)
def add(x, y):
    return x + y

def main():
    print(f"{add(1, 1)=}")
    print(f"{add(2, 1)=}")
    print(f"{add(1, 1)=}")
    print(f"{add(3, 1)=}")
    print(f"{add(4, 1)=}")
    print(f"{add(1, 1)=}")
    print(f"{add.cache=}")
    add.cache.clear()
    print(f"{len(add.cache)=}")
    print(f"{add.cache=}")

if __name__ == "__main__":
    main()
Output:
Running function add...
add(1, 1)=2
Running function add...
add(2, 1)=3
add(1, 1)=2
Running function add...
add(3, 1)=4
Running function add...
add(4, 1)=5
Running function add...
add(1, 1)=2
add.cache=OrderedDict([('args=(3, 1), kwargs={}', 4), ('args=(4, 1), kwargs={}', 5), ('args=(1, 1), kwargs={}', 2)])
len(add.cache)=0
add.cache=OrderedDict()
Notice the cache was used for the second add(1, 1) call, but not the third.
Details
Uses the pattern allowing the decorator to be used with or without parentheses
The resulting wrapper function has a cache attribute that allows direct access to the underlying OrderedDict
Caching is based on the string representation of all function arguments (positional and keyword)
Caveats
Not completely general by any stretch
Works as expected only with argument types that have a deterministic __repr__ without side effects (which is what one would expect, to be fair)
Cannot differentiate between arguments with identical string representations (see the sketch after these caveats)
Clean type annotations may be a bit more involved
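To make the repr caveats concrete, here is a minimal sketch (the Point class is purely illustrative) showing two distinct arguments with identical string representations colliding in the cache of the decorator defined above:

class Point:
    def __init__(self, x):
        self.x = x
    def __repr__(self):
        return "Point"  # deliberately non-unique repr

@cache(max_length=3)
def describe(p):
    return f"x is {p.x}"

print(describe(Point(1)))  # Running function describe... -> 'x is 1'
print(describe(Point(2)))  # silent cache hit on the same call_repr -> 'x is 1'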
Hope this helps.
In Clojure I can do something like this:
(-> path
    clojure.java.io/resource
    slurp
    read-string)
instead of doing this:
(read-string (slurp (clojure.java.io/resource path)))
This is called threading in Clojure terminology and helps get rid of a lot of parentheses.
In Python, if I try to use functional constructs like map, any, or filter, I have to nest them inside one another. Is there a construct in Python with which I can do something similar to threading (or piping) in Clojure?
I'm not looking for a fully featured version since there are no macros in Python, I just want to do away with a lot of parentheses when I'm doing functional programming in Python.
Edit: I ended up using toolz, which supports piping.
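For reference, a minimal sketch of toolz usage (assuming the toolz package is installed); pipe threads a value through a sequence of single-argument functions, much like -> for unary calls:

from toolz import pipe

def double(x):
    return 2 * x

def square(x):
    return x ** 2

print(pipe(5, double, square))  # square(double(5)) == 100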
Here is a simple implementation of @deceze's idea (although, as @Carcigenicate points out, it is at best a partial solution):
import functools

def apply(x, f):
    return f(x)

def thread(*args):
    return functools.reduce(apply, args)
For example:
def f(x): return 2*x + 1
def g(x): return x**2

thread(5, f, g)  # evaluates to g(f(5)) == 121
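Applied to the question's map/filter use case, each step can be partially applied with a lambda (a sketch; the names are illustrative):

evens_squared_sum = thread(
    range(10),
    lambda xs: filter(lambda x: x % 2 == 0, xs),
    lambda xs: map(lambda x: x * x, xs),
    sum,
)  # sum of 0, 4, 16, 36, 64 == 120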
I wanted to take this to the extreme and do it all dynamically.
Basically, the Chain class below lets you chain functions together, similar to Clojure's -> and ->> macros. It supports threading into both the first and the last argument.
Functions are resolved in this order:
Object method
Locally defined variable
Built-in variable
The code:
import sys

class Chain(object):
    def __init__(self, value, index=0):
        self.value = value
        self.index = index

    def __getattr__(self, item):
        append_arg = True
        try:
            # 1. a method on the wrapped value itself
            prop = getattr(self.value, item)
            append_arg = False
        except AttributeError:
            try:
                # 2. a name in the caller's local scope (a plain locals() call
                #    here would only see __getattr__'s own locals)
                prop = sys._getframe(1).f_locals[item]
            except KeyError:
                # 3. a built-in (note: __builtins__ is a module in __main__
                #    but a dict in imported modules)
                prop = getattr(__builtins__, item)
        if callable(prop):
            def fn(*args, **kwargs):
                orig = list(args)
                if append_arg:
                    if self.index == -1:
                        orig.append(self.value)
                    else:
                        orig.insert(self.index, self.value)
                return Chain(prop(*orig, **kwargs), index=self.index)
            return fn
        else:
            return Chain(prop, index=self.index)
Thread each result as the first arg:
file = Chain(__file__).open('r').readlines().value
Thread each result as the last arg:
result = Chain(range(0, 100), index=-1).map(lambda x: x * x).reduce(lambda x, y: x + y).value
I often find myself using a pattern like this:
num_repeats = 123
interval = 12

for _ in xrange(num_repeats):
    result = ...
    if result meets condition:  # pseudocode
        break
    time.sleep(interval)
else:
    raise Failed despite multiple attempts  # pseudocode
Basically, it repeats code until the correct result is returned, or the counter expires.
Although this works, it looks too verbose to me. Is it possible to "parametrize" this loop to a reusable function or context manager, like for example
with repeat(num_repeats, interval):
    code
Or maybe there's something in the standard library that would do the trick?
You can use a generator which sleeps before returning repeated results. The advantage is that your caller is still a genuine for loop, with all the break, continue, else semantics still intact.
def trickle_range(num_repeats, interval):
    yield 0
    for k in xrange(1, num_repeats):
        time.sleep(interval)
        yield k

for k in trickle_range(num_repeats, interval):
    ... do stuff, iterate or break as you like ...
You definitely won't be able to use the with statement, as Python only supplies hooks before and after the code has run, but not one for invoking it; i.e., you can't hide a loop within a with statement.
A nice approach is to use a lambda function:
def repeat(repeats, interval, func):
    for i in xrange(repeats):
        if func(i):
            break
        time.sleep(interval)
Which you can then use quite easily:
repeat(123, 12, lambda i: condition(i))
Or something similar
One approach would be to decorate the functions you want to repeat:
def repeats_until(num_repeats, interval, condition):
    def deco(f):
        def func(*args, **kwargs):
            for _ in xrange(num_repeats):
                result = f(*args, **kwargs)
                if condition(result):
                    return result
                time.sleep(interval)
        return func
    return deco
And then use it like:
@repeats_until(3, 5, lambda s: s == "hello")
def take_input():
    return raw_input("Say hello: ")
Example (although I can't show the wait!)
>>> take_input()
Say hello: foo
Say hello: bar
Say hello: baz
>>> take_input()
Say hello: hello
'hello'
Alternatively, to keep the condition with the called function, something like:
def repeats(num_repeats, interval):
    def deco(f):
        def func(*args, **kwargs):
            for _ in xrange(num_repeats):
                result = f(*args, **kwargs)
                if result is not None:  # or e.g. False if None is a valid return
                    return result
                time.sleep(interval)
        return func
    return deco

@repeats(3, 5)
def take_input(condition):
    s = raw_input("Say hello: ")
    if condition(s):
        return s

ui = take_input(lambda s: s == "hello")
This relies on the decorated function returning a value (in this case the implicit None) that tells the decorator it isn't finished yet.
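If None can be a legitimate result, one variation (a sketch, not part of the original answer) is to use a dedicated sentinel object as the "not done yet" marker:

import time

_NOT_DONE = object()  # unique sentinel; never equal to any real result

def repeats_with_sentinel(num_repeats, interval):
    def deco(f):
        def func(*args, **kwargs):
            for _ in xrange(num_repeats):
                result = f(*args, **kwargs)
                if result is not _NOT_DONE:  # identity check instead of None
                    return result
                time.sleep(interval)
        return func
    return deco

The decorated function then returns _NOT_DONE to request another attempt; anything else (including None) is treated as the final result.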
I wrote get_function_arg_data(func), shown below, to get information about the arguments of the function func:
def get_function_arg_data(func):
    import inspect
    func_data = inspect.getargspec(func)
    args_name = func_data.args          # func argument list
    args_default = func_data.defaults   # func argument default value list
    return args_name, args_default
def showduration(user_function):
    '''show time duration decorator'''
    import time
    def wrapped_f(*args, **kwargs):
        t1 = time.clock()
        result = user_function(*args, **kwargs)
        print "%s()_Time: %0.5f" % (user_function.__name__, time.clock()-t1)
        return result
    return wrapped_f

def foo(para1, para2=5, para3=7):
    for i in range(1000):
        s = para1 + para2 + para3
    return s

@showduration
def bar(para1, para2, para3):
    for i in range(1000):
        s = para1 + para2 + para3
    return s

print get_function_arg_data(foo)
bar(1, 2, 3)
print get_function_arg_data(bar)
>>>
(['para1', 'para2', 'para3'], (5, 7))
bar()_Time: 0.00012
([], None)
>>>
get_function_arg_data() works for foo but not for bar, because bar is decorated with @showduration. My question is: how can I penetrate the decorator to get the underlying function's information (argument list and default values)?
Thanks for your tips.
I don't think there is, or at least I don't know of, any general way to "penetrate" a decorated function and get at the underlying function's information, because Python's concept of function decoration is so general -- in fact, generally speaking, there's nothing that requires or guarantees that the original function will be called at all (although that's usually the case).
Therefore, a more practical question would be: How could I write my own decorators which would allow me to inspect the underlying function's argument information?
One easy way, previously suggested, would be to use Michele Simionato's decorator module (and write decorators compatible with it).
A less robust, but extremely simple way of doing this would be to do what is shown below based on the code in your question:
def get_function_arg_data(func):
    import inspect
    func = getattr(func, '_original_f', func)  # use saved original if decorated
    func_data = inspect.getargspec(func)
    args_name = func_data.args          # func argument list
    args_default = func_data.defaults   # func argument default value list
    return args_name, args_default

def showduration(user_function):
    '''show time duration decorator'''
    import time
    def wrapped_f(*args, **kwargs):
        t1 = time.clock()
        result = user_function(*args, **kwargs)
        print "%s()_Time: %0.5f" % (user_function.__name__, time.clock()-t1)
        return result
    wrapped_f._original_f = user_function  # save original function
    return wrapped_f
def foo(para1, para2=5, para3=7):
    for i in range(1000):
        s = para1 + para2 + para3
    return s

@showduration
def bar(para1, para2, para3):
    for i in range(1000):
        s = para1 + para2 + para3
    return s

print 'get_function_arg_data(foo):', get_function_arg_data(foo)
print 'get_function_arg_data(bar):', get_function_arg_data(bar)
All the modification involves is saving the original function in an attribute named _original_f which is added to the wrapped function returned by the decorator. The get_function_arg_data() function then simply checks for this attribute and returns information based on its value rather than on the decorated function passed to it.
While this approach doesn't work with just any decorated function, only ones which have had the special attribute added to them, it is compatible with both Python 2 & 3.
Output produced by the code shown:
get_function_arg_data(foo): (['para1', 'para2', 'para3'], (5, 7))
get_function_arg_data(bar): (['para1', 'para2', 'para3'], None)
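Worth noting, as a Python 3 sketch that is not part of the original answer: since Python 3.2, functools.wraps records the original function on the wrapper as __wrapped__, and inspect.signature follows that attribute automatically, so the standard library already offers this kind of penetration (the timing code is omitted here for brevity):

import functools
import inspect

def showduration(user_function):
    @functools.wraps(user_function)  # also sets wrapped_f.__wrapped__
    def wrapped_f(*args, **kwargs):
        return user_function(*args, **kwargs)
    return wrapped_f

@showduration
def bar(para1, para2=5, para3=7):
    return para1 + para2 + para3

print(inspect.signature(bar))  # (para1, para2=5, para3=7)
print(bar.__wrapped__)         # the original, undecorated bar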
Assuming you've installed Michele Simionato's decorator module, you can make your showduration decorator work with it by making some minor modifications to it and to the nested wrapped_f() function defined in it, so the latter fits the signature that the module's decorator.decorator() function expects:
import decorator

def showduration(user_function):
    '''show time duration decorator'''
    import time
    def wrapped_f(user_function, *args, **kwargs):
        t1 = time.clock()
        result = user_function(*args, **kwargs)
        print "%s()_Time: %0.5f" % (user_function.__name__, time.clock()-t1)
        return result
    return decorator.decorator(wrapped_f, user_function)
However, the module really shines because it will let you reduce boilerplate stuff like the above down to just:
import decorator

@decorator.decorator
def showduration(user_function, *args, **kwargs):
    import time
    t1 = time.clock()
    result = user_function(*args, **kwargs)
    print "%s()_Time: %0.5f" % (user_function.__name__, time.clock()-t1)
    return result
With either set of the above changes, your sample code would output:
(['para1', 'para2', 'para3'], (5, 7))
bar()_Time: 0.00026
(['para1', 'para2', 'para3'], None)
In some circumstances, I want to print debug-style output like this:
# module test.py
def f():
    a = 5
    b = 8
    debug(a, b)  # line 18
I want the debug function to print the following:
debug info at test.py: 18
function f
a = 5
b = 8
I am thinking it should be possible by using the inspect module to locate the stack frame, then finding the appropriate line, looking up the source code in that line, and getting the names of the arguments from there. The function name can be obtained by moving one stack frame up. (The values of the arguments are easy to obtain: they are passed directly to the function debug.)
Am I on the right track? Is there any recipe I can refer to?
You could do something along the following lines:
import inspect

def debug(**kwargs):
    st = inspect.stack()[1]
    print '%s:%d %s()' % (st[1], st[2], st[3])
    for k, v in kwargs.items():
        print '%s = %s' % (k, v)

def f():
    a = 5
    b = 8
    debug(a=a, b=b)  # line 12

f()
This prints out:
test.py:12 f()
a = 5
b = 8
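For completeness, the positional idea from the question (recovering argument names from the source line of the call) can be sketched as below; this is not from the original answer and is deliberately naive, e.g. it assumes the call fits on one source line and that no ')' appears inside the arguments:

import inspect

def debug(*values):
    # stack entry: (frame, filename, lineno, function, code_context, index)
    st = inspect.stack()[1]
    call_line = st[4][st[5]]  # source line of the call (code_context may be None)
    # take the text between 'debug(' and the last ')'
    arg_text = call_line.split('debug(', 1)[1].rsplit(')', 1)[0]
    names = [n.strip() for n in arg_text.split(',')]
    print 'debug info at %s: %d' % (st[1], st[2])
    print 'function %s' % st[3]
    for name, value in zip(names, values):
        print '%s = %s' % (name, value)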
You're generally doing it right, though it would be easier to use AOP for these kinds of tasks. Basically, instead of calling debug every time with every variable, you could just decorate the code with aspects which do certain things upon certain events, like printing the passed variables and the function name upon entering it.
Please refer to this site and an old SO post for more info.
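A minimal sketch of that aspect-style approach (the decorator name log_call is illustrative, not an established API):

import functools

def log_call(func):
    '''Aspect: report the function name and arguments on every call.'''
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print '%s called with args=%s kwargs=%s' % (func.__name__, args, kwargs)
        return func(*args, **kwargs)
    return wrapper

@log_call
def f(a, b):
    return a + b

f(5, 8)  # prints: f called with args=(5, 8) kwargs={}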
Yeah, you are on the right track. You may want to look at inspect.getargspec, which returns a named tuple of the args, varargs, keywords, and defaults passed to the function.
import inspect

def f():
    a = 5
    b = 8
    debug(a, b)

def debug(a, b):
    print inspect.getargspec(debug)

f()
This is really tricky. Let me try to give a more complete answer, reusing this code and the hint about getargspec in Senthil's answer, which got me started. Btw, getargspec is deprecated since Python 3.0 and getfullargspec should be used instead.
This works for me on a Python 3.1.2 both with explicitly calling the debug function and with using a decorator:
# from: https://stackoverflow.com/a/4493322/923794
def getfunc(func=None, uplevel=0):
    """Return a tuple of information about a function

    Goes up in the call stack to uplevel+1 and returns information
    about the function found.

    The tuple contains:
    name of function, function object, its frame object,
    filename and line number"""
    from inspect import currentframe, getouterframes, getframeinfo
    #for (level, frame) in enumerate(getouterframes(currentframe())):
    #    print(str(level) + ' frame: ' + str(frame))
    caller = getouterframes(currentframe())[1+uplevel]
    # caller is a tuple of:
    #   frame object, filename, line number, function name,
    #   a list of lines of context, and index within the context
    func_name = caller[3]
    frame = caller[0]
    if func:
        func_name = func.__name__
    else:
        func = frame.f_locals.get(func_name, frame.f_globals.get(func_name))
    return (func_name, func, frame, caller[1], caller[2])
def debug_prt_func_args(f=None):
    """Print function name and arguments with their values"""
    from inspect import getargvalues, getfullargspec
    (func_name, func, frame, file, line) = getfunc(func=f, uplevel=1)
    argspec = getfullargspec(func)
    #print(argspec)
    argvals = getargvalues(frame)
    print("debug info at " + file + ': ' + str(line))
    print(func_name + ':' + str(argvals))  # reformat to pretty print arg values here
    return func_name

def df_dbg_prt_func_args(f):
    """Decorator: df_dbg_prt_func_args - prints function name and arguments"""
    def wrapped(*args, **kwargs):
        debug_prt_func_args(f)
        return f(*args, **kwargs)
    return wrapped
Usage:
@df_dbg_prt_func_args
def leaf_decor(*args, **kwargs):
    """Leaf level, simple function"""
    print("in leaf")

def leaf_explicit(*args, **kwargs):
    """Leaf level, simple function"""
    debug_prt_func_args()
    print("in leaf")

def complex():
    """A complex function"""
    print("start complex")
    leaf_decor(3, 4)
    print("middle complex")
    leaf_explicit(12, 45)
    print("end complex")

complex()
and prints:
start complex
debug info at debug.py: 54
leaf_decor:ArgInfo(args=[], varargs='args', keywords='kwargs', locals={'args': (3, 4), 'f': <function leaf_decor at 0x2aaaac048d98>, 'kwargs': {}})
in leaf
middle complex
debug info at debug.py: 67
leaf_explicit:ArgInfo(args=[], varargs='args', keywords='kwargs', locals={'args': (12, 45), 'kwargs': {}})
in leaf
end complex
The decorator cheats a bit: since wrapped receives the same arguments as the function itself, it doesn't matter that we find and report the ArgSpec of wrapped in getfunc and debug_prt_func_args. This code could be beautified a bit, but it works fine for the simple debug test cases I used.
Another trick you can do: If you uncomment the for-loop in getfunc you can see that inspect can give you the "context" which really is the line of source code where a function got called. This code is obviously not showing the content of any variable given to your function, but sometimes it already helps to know the variable name used one level above your called function.
As you can see, with the decorator you don't have to change the code inside the function.
Probably you'll want to pretty print the args. I've left the raw print (and also a commented out print statement) in the function so it's easier to play around with.
I would like to use a decorator on a function that I will subsequently pass to a multiprocessing pool. However, the code fails with "PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed". I don't quite see why it fails here. I feel certain that it's something simple, but I can't find it. Below is a minimal "working" example. I thought that using functools.wraps would be enough to let this work.
If I comment out the function decoration, it works without an issue. What is it about multiprocessing that I'm misunderstanding here? Is there any way to make this work?
Edit: After adding both a callable class decorator and a function decorator, it turns out that the function decorator works as expected. The callable class decorator continues to fail. What is it about the callable class version that keeps it from being pickled?
import random
import multiprocessing
import functools

class my_decorator_class(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass
    def __call__(self, elements):
        f = []
        for element in elements:
            f.append(self.target([element])[0])
        return f

def my_decorator_function(target):
    @functools.wraps(target)
    def inner(elements):
        f = []
        for element in elements:
            f.append(target([element])[0])
        return f
    return inner

@my_decorator_function
def my_func(elements):
    f = []
    for element in elements:
        f.append(sum(element))
    return f

if __name__ == '__main__':
    elements = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([e],)) for e in elements]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
The problem is that pickle needs to have some way to reassemble everything that you pickle. See here for a list of what can be pickled:
http://docs.python.org/library/pickle.html#what-can-be-pickled-and-unpickled
When pickling my_func, the following components need to be pickled:
1. An instance of my_decorator_class, called my_func.
This is fine. Pickle will store the name of the class and pickle its __dict__ contents. When unpickling, it uses the name to find the class, then creates an instance and fills in the __dict__ contents. However, the __dict__ contents present a problem...
2. The original my_func function, which is stored in my_func.target.
This isn't so good. It's a function at the top level, and normally these can be pickled. Pickle will store the name of the function. The problem, however, is that the name "my_func" is no longer bound to the undecorated function; it's bound to the decorated function. This means that pickle won't be able to look up the undecorated function to recreate the object. Sadly, pickle doesn't have any way to know that the object it's trying to pickle can always be found under the name __main__.my_func.
You can change it like this and it will work:
import random
import multiprocessing
import functools

class my_decorator(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass
    def __call__(self, candidates, args):
        f = []
        for candidate in candidates:
            f.append(self.target([candidate], args)[0])
        return f

def old_my_func(candidates, args):
    f = []
    for c in candidates:
        f.append(sum(c))
    return f

my_func = my_decorator(old_my_func)

if __name__ == '__main__':
    candidates = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([c], {})) for c in candidates]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
You have observed that the decorator function works when the class does not. I believe this is because functools.wraps modifies the decorated function so that it has the name and other properties of the function it wraps. As far as the pickle module can tell, it is indistinguishable from a normal top-level function, so it pickles it by storing its name. Upon unpickling, the name is bound to the decorated function so everything works out.
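A quick way to see this in isolation (a sketch, not from the original answer; it relies only on pickle and functools):

import functools
import pickle

def deco(f):
    @functools.wraps(f)  # the wrapper now reports f's name and module
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

@deco
def g(x):
    return x + 1

# pickle stores just the name 'g'; on unpickling, that name resolves to the
# decorated function, which is the very object that was pickled.
print(pickle.loads(pickle.dumps(g))(41))  # 42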
I also had some problems using decorators with multiprocessing. I'm not sure if it's the same problem as yours:
My code looked like this:
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

@decorate_func
def actual_func(x):
    return x ** 2

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(actual_func, (2,))
print result.get()
and when I run the code I get this:
Traceback (most recent call last):
  File "test.py", line 15, in <module>
    print result.get()
  File "somedirectory_too_lengthy_to_put_here/lib/python2.7/multiprocessing/pool.py", line 572, in get
    raise self._value
cPickle.PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
I fixed it by defining a new function to wrap the function in the decorator function, instead of using the decorator syntax:
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

def actual_func(x):
    return x ** 2

def wrapped_func(*args, **kwargs):
    return decorate_func(actual_func)(*args, **kwargs)

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(wrapped_func, (2,))
print result.get()
The code ran perfectly and I got:
I'm decorating
4
I'm not very experienced at Python, but this solution solved my problem for me
If you want the decorators badly enough (like me), you can also use exec() on the function's source string to circumvent the pickling problem mentioned above.
I wanted to be able to pass all the arguments to an original function and then use them successively. The following is my code for it.
At first, I made a make_functext() function to convert the target function object to a string. For that, I used the getsource() function from the inspect module (see documentation here and note that it can't retrieve source code from compiled code etc.). Here it is:
from inspect import getsource

def make_functext(func):
    ft = '\n'.join(getsource(func).split('\n')[1:])  # removing the decorator, of course
    ft = ft.replace(func.__name__, 'func')           # making function callable with 'func'
    ft = ft.replace('#§ ', '').replace('#§', '')     # for using commented code starting with '#§'
    ft = ft.strip()                                  # in case the function code was indented
    return ft
It is used in the following _worker() function that will be the target of the processes:
def _worker(functext, args):
    scope = {}                # this is needed to keep executed definitions
    exec(functext, scope)
    scope['func'](args)       # using func from scope
And finally, here's my decorator:
from multiprocessing import Process

def parallel(num_processes, **kwargs):
    def parallel_decorator(func, num_processes=num_processes):
        functext = make_functext(func)
        print('This is the parallelized function:\n', functext)
        def function_wrapper(funcargs, num_processes=num_processes):
            workers = []
            print('Launching processes...')
            for k in range(num_processes):
                p = Process(target=_worker, args=(functext, funcargs[k]))  # use args here
                p.start()
                workers.append(p)
        return function_wrapper
    return parallel_decorator
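One thing to watch (an observation, not from the original post): function_wrapper starts the workers but never joins them, so the call returns before the processes finish (the interpreter still waits for the non-daemon children at exit). If the call should block until all workers are done, a small variation of the inner wrapper joins them before returning:

def function_wrapper(funcargs, num_processes=num_processes):
    workers = []
    print('Launching processes...')
    for k in range(num_processes):
        p = Process(target=_worker, args=(functext, funcargs[k]))
        p.start()
        workers.append(p)
    for p in workers:  # block until every worker has finished
        p.join()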
The code can finally be used by defining a function like this:
@parallel(4)
def hello(args):
    #§ from time import sleep  # use '#§' to avoid unnecessary (re)imports in main program
    name, seconds = tuple(args)  # unpack args-list here
    sleep(seconds)
    print('Hi', name)
... which can now be called like this:
hello([['Marty', 0.5],
       ['Catherine', 0.9],
       ['Tyler', 0.7],
       ['Pavel', 0.3]])
... which outputs:
This is the parallelized function:
 def func(args):
    from time import sleep
    name, seconds = tuple(args)
    sleep(seconds)
    print('Hi', name)
Launching processes...
Hi Pavel
Hi Marty
Hi Tyler
Hi Catherine
Thanks for reading, this is my very first post. If you find any mistakes or bad practices, feel free to leave a comment. I know that these string conversions are quite dirty, though...
If you use this code for your decorator:
import multiprocessing
from types import MethodType

DEFAULT_POOL = []

def run_parallel(_func=None, *, name: str = None, context_pool: list = DEFAULT_POOL):
    class RunParallel:
        def __init__(self, func):
            self.func = func
        def __call__(self, *args, **kwargs):
            process = multiprocessing.Process(target=self.func, name=name, args=args, kwargs=kwargs)
            context_pool.append(process)
            process.start()
        def __get__(self, instance, owner):
            # descriptor protocol: lets the callable class decorate instance
            # methods by binding itself to the instance, like a function would
            return self if instance is None else MethodType(self, instance)
    if _func is None:
        return RunParallel
    else:
        return RunParallel(_func)

def wait_context(context_pool: list = DEFAULT_POOL, kill_others_if_one_fails: bool = False):
    finished = []
    for process in context_pool:
        process.join()
        finished.append(process)
        if kill_others_if_one_fails and process.exitcode != 0:
            break
    if kill_others_if_one_fails:
        # kill unfinished processes
        for process in context_pool:
            if process not in finished:
                process.kill()
        # wait for every process to be dead
        for process in context_pool:
            process.join()
Then you can use it like this, in these 4 examples:
@run_parallel
def m1(a, b="b"):
    print(f"m1 -- {a=} {b=}")

@run_parallel(name="mym2", context_pool=DEFAULT_POOL)
def m2(d, cc="cc"):
    print(f"m2 -- {d} {cc=}")
    a = 1/0

class M:
    @run_parallel
    def c3(self, k, n="n"):
        print(f"c3 -- {k=} {n=}")

    @run_parallel(name="Mc4", context_pool=DEFAULT_POOL)
    def c4(self, x, y="y"):
        print(f"c4 -- {x=} {y=}")

if __name__ == "__main__":
    m1(11)
    m2(22)
    M().c3(33)
    M().c4(44)
    wait_context(kill_others_if_one_fails=True)
The output will be:
m1 -- a=11 b='b'
m2 -- 22 cc='cc'
c3 -- k=33 n='n'
(followed by the exception raised in method m2)