Python Multiprocess Error

I had a problem with multiprocessing, and below is the code I used to perform a test:
import multiprocessing

class Test():
    def __init__(self):
        pass

    def add(self, x, y):
        self.x = x
        self.y = y
        return self.x + self.y

class Worker(Test):
    def do_me(self, x, y):
        return self.add(x, y)

if __name__ == '__main__':
    job = Worker()

    # single process
    # x = 1
    # y = 9
    # result = job.do_me(x, y)
    # print "%2d +%2d = %2d " % (x, y, result)

    # multiprocess
    x = [1, 2, 3]
    y = [9, 7, 5]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.map(job.do_me, ((x[i], y[i]),)) for i in range(len(x))]
    print results
The single-process version works fine. However, the multiprocess version does not. The error message is:
cPickle.PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
I searched here, but I did not quite understand the mechanism. Could someone give me more help? Cheers.

As the error suggests, you can't pickle instance methods. The problem is this line:
pool.map(job.do_me, ((x[i], y[i]),)) for i in range(len(x))
The mechanism behind this is that when the map function sends the function (its first argument) to all of the workers, it has to serialize it somehow, and it does so using a mechanism called pickling. (There are other serialization mechanisms; this is just the most common one in Python.)
When it tries to pickle an instance method (specifically the method do_me of instances of type Worker) and send it to the pool (so the workers know which method they're supposed to execute), it fails, because instance methods can't be pickled.
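You can see pickle's name-based lookup in isolation with a small sketch (hypothetical names; note that modern Python 3 can pickle bound methods, so this particular error is specific to Python 2):

import pickle

def top_level(x):
    return x + 1  # a module-level function: picklable, stored by its qualified name

pickle.dumps(top_level)  # works: the pickle just records "__main__.top_level"

try:
    pickle.dumps(lambda x: x + 1)  # no importable name to record -> fails
except (pickle.PicklingError, AttributeError) as exc:
    print(exc)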
You can fix this by moving the method to the module level (removing the Worker class):
def do_me(test_instance):
    return test_instance.add()
Now we don't have access to self, since we're using the test_instance that's passed in explicitly, so this method isn't bound to the Test class anymore; or, in other words, it isn't an instance method anymore. Now make sure you refactor everything to work as you plan.
The Test class should have something along these lines to keep the argument construction for pool.map simple:
class Test():
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def add(self):
        return self.x + self.y
And then call it like this:
results = pool.map(do_me, [Test(x[i], y[i]) for i in range(len(x))])
Full code:
import multiprocessing

class Test():
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def add(self):
        return self.x + self.y

def do_me(test_instance):
    return test_instance.add()

if __name__ == '__main__':
    x = [1, 2, 3]
    y = [9, 7, 5]
    pool = multiprocessing.Pool(processes=4)
    results = pool.map(do_me, [Test(x[i], y[i]) for i in range(len(x))])
    print results
Some notes:
pool.map already returns a list
__init__ is the standard place to initialize the object data (x, y in your case).
The function passed to pool.map is applied to each item of the iterable, so it should be a single-argument function (you can pass a tuple, but then you have to unpack it yourself), as the sketch below shows.
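A minimal sketch of that tuple note (assuming Python 3, where Pool is also a context manager and Pool.starmap exists):

from multiprocessing import Pool

def add(x, y):
    return x + y

def add_packed(pair):
    x, y = pair        # unpack the tuple inside the single-argument wrapper
    return add(x, y)

if __name__ == '__main__':
    pairs = [(1, 9), (2, 7), (3, 5)]
    with Pool(processes=4) as pool:
        print(pool.map(add_packed, pairs))  # [10, 9, 8]
        print(pool.starmap(add, pairs))     # starmap (Python 3.3+) unpacks for you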

Related

Call class in function during multiprocessing

I am trying to convert an existing, large codebase to multiprocessing. I have simplified the question.
Class A is a big external class which I cannot change. I would like to run it on different cores, so I use multiprocessing.Pool. But I get the error AttributeError: 'int' object has no attribute 'fun' on the line return input_class.fun(x).
How can I fix the problem?
from multiprocessing import Pool
from functools import partial

class A(object):
    def __init__(self, value):
        self.value = value

    def fun(self, x):
        return self.value**x

def B(x, input_class):
    return input_class.fun(x)

if __name__ == '__main__':
    l = range(10)
    p = Pool(4)
    input_class = A(3)
    input_function = partial(B, input_class)
    op = p.map(input_function, l)
    print(op)
    p.close()
    p.join()
Your usage of partial doesn't do what you want:
input_function = partial(B,input_class)
This is equivalent to input_function = lambda arg: B(input_class, arg). In other words, input_class is bound to the first parameter of B (x), not the second one, so calls to input_function feed the integers from l into the second parameter, and you end up evaluating <integer>.fun(<input_class>).
Change this to:
input_function = partial(B, input_class=input_class)
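A quick single-process sketch (hypothetical values) of the difference between binding positionally and by keyword:

from functools import partial

def B(x, input_class):
    return (x, input_class)

f_pos = partial(B, 'bound')             # binds 'bound' to x, the first parameter
f_kw = partial(B, input_class='bound')  # binds 'bound' to input_class by name

print(f_pos(42))  # ('bound', 42): the mapped value 42 lands in input_class
print(f_kw(42))   # (42, 'bound'): the mapped value 42 lands in x, as intended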

How to use two helper functions in main script from another script

TypeError: _slow_trap_ramp() takes 1 positional argument but 2 were given
def demag_chip(self):
    coil_probe_constant = float(514.5)
    field_sweep = [50 * i * (-1)**(i + 1) for i in range(20, 0, -1)]  #print as list
    for j in field_sweep:
        ramp = self._slow_trap_ramp(j)

def _set_trap_ramp(self):
    set_trap_ramp = InstrumentsClass.KeysightB2962A.set_trap_ramp
    return set_trap_ramp

def _slow_trap_ramp(self):
    slow_trap_ramp = ExperimentsSubClasses.FraunhoferAveraging.slow_trap_ramp
    return slow_trap_ramp
The error is straightforward.
ramp = self._slow_trap_ramp(j)
You are calling this method with an argument j, but the method doesn't take an argument (other than self, which is used to pass the object).
Re-define your method to accept an argument if you want to pass it one:
def _slow_trap_ramp(self, j):
It looks like your code extract contains methods of some class, whose full definition is not shown, and you are calling one method from another method (self._slow_trap_ramp(j)). When you call a method, Python automatically passes self before any other arguments. So you need to change def _slow_trap_ramp(self) to def _slow_trap_ramp(self, j).
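The two call forms below are equivalent, which makes the extra argument visible (a minimal sketch with a hypothetical Demo class, not from the question):

class Demo:
    def _slow_trap_ramp(self, j):
        return j * 2

d = Demo()
print(d._slow_trap_ramp(5))        # Python passes d as self automatically
print(Demo._slow_trap_ramp(d, 5))  # the same call, spelled out explicitly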
Update in response to comment
To really help, we would need to see more of the class you are writing, and also some info on the other objects you are calling. But I am going to go out on a limb and guess that your code looks something like this:
InstrumentsClass.py
class KeysightB2962A:
    def __init__(self):
        ...

    def set_trap_ramp(self):
        ...
ExperimentsSubClasses.py
class FraunhoferAveraging:
    def __init__(self):
        ...

    def slow_trap_ramp(self, j):
        ...
Current version of main.py
import InstrumentsClass, ExperimentsSubClasses

class MyClass:
    def __init__(self):
        ...

    def demag_chip(self):
        coil_probe_constant = float(514.5)
        field_sweep = [50 * i * (-1)**(i + 1) for i in range(20, 0, -1)]  #print as list
        for j in field_sweep:
            ramp = self._slow_trap_ramp(j)

    def _set_trap_ramp(self):
        set_trap_ramp = InstrumentsClass.KeysightB2962A.set_trap_ramp
        return set_trap_ramp

    def _slow_trap_ramp(self):
        slow_trap_ramp = ExperimentsSubClasses.FraunhoferAveraging.slow_trap_ramp
        return slow_trap_ramp

if __name__ == "__main__":
    my_obj = MyClass()
    my_obj.demag_chip()
If this is the case, then these are the main problems:
1. Python passes self and j to MyClass._slow_trap_ramp, but you've only defined it to accept self (noted above),
2. you are using class methods from KeysightB2962A and FraunhoferAveraging directly, instead of instantiating the classes and using the instances' methods, and
3. you are returning references to the methods instead of calling them.
You can fix all of these by changing the code to look like this (see embedded comments):
New version of main.py
import InstrumentsClass, ExperimentsSubClasses

class MyClass:
    def __init__(self):
        # create instances of the relevant classes (note parentheses at end)
        self.keysight = InstrumentsClass.KeysightB2962A()
        self.fraun_averaging = ExperimentsSubClasses.FraunhoferAveraging()

    def demag_chip(self):
        coil_probe_constant = float(514.5)
        field_sweep = [50 * i * (-1)**(i + 1) for i in range(20, 0, -1)]  #print as list
        for j in field_sweep:
            ramp = self._slow_trap_ramp(j)

    def _set_trap_ramp(self):
        # call instance method (note parentheses at end)
        return self.keysight.set_trap_ramp()

    def _slow_trap_ramp(self, j):  # accept both self and j
        # call instance method (note parentheses at end)
        return self.fraun_averaging.slow_trap_ramp(j)

if __name__ == "__main__":
    my_obj = MyClass()
    my_obj.demag_chip()

Multiprocessing pool: How to call an arbitrary list of methods on a list of class objects

A cleaned up version of the code, including the solution to the problem (thanks @JohanL!), can be found as a Gist on GitHub.
The following code snippet (CPython 3.[4,5,6]) illustrates my intention (as well as my problem):
from functools import partial
import multiprocessing
from pprint import pprint as pp

NUM_CORES = multiprocessing.cpu_count()

class some_class:
    some_dict = {'some_key': None, 'some_other_key': None}

    def some_routine(self):
        self.some_dict.update({'some_key': 'some_value'})

    def some_other_routine(self):
        self.some_dict.update({'some_other_key': 77})

def run_routines_on_objects_in_parallel_and_return(in_object_list, routine_list):
    func_handle = partial(__run_routines_on_object_and_return__, routine_list)
    with multiprocessing.Pool(processes=NUM_CORES) as p:
        out_object_list = list(p.imap_unordered(
            func_handle,
            (in_object for in_object in in_object_list)
        ))
    return out_object_list

def __run_routines_on_object_and_return__(routine_list, in_object):
    for routine_name in routine_list:
        getattr(in_object, routine_name)()
    return in_object

object_list = [some_class() for item in range(20)]
pp([item.some_dict for item in object_list])

new_object_list = run_routines_on_objects_in_parallel_and_return(
    object_list,
    ['some_routine', 'some_other_routine']
)
pp([item.some_dict for item in new_object_list])

verification_object_list = [
    __run_routines_on_object_and_return__(
        ['some_routine', 'some_other_routine'],
        item
    ) for item in object_list
]
pp([item.some_dict for item in verification_object_list])
I am working with a list of objects of type some_class. some_class has a property, a dictionary, named some_dict and a few methods, which can modify the dict (some_routine and some_other_routine). Sometimes, I want to call a sequence of methods on all the objects in the list. Because this is computationally intensive, I intend to distribute the objects over multiple CPU cores (using multiprocessing.Pool and imap_unordered - the list order does not matter).
The routine __run_routines_on_object_and_return__ takes care of calling the list of methods on one individual object. From what I can tell, this is working just fine. I am using functools.partial for simplifying the structure of the code a bit - the multiprocessing pool therefore has to handle the list of objects as an input parameter only.
The problem is ... it does not work. The objects contained in the list returned by imap_unordered are identical to the objects I fed into it. The dictionaries within the objects look just like before. I have used similar mechanisms for working on lists of dictionaries directly without a glitch, so I somehow suspect that there is something wrong with modifying an object property which happens to be a dictionary.
In my example, verification_object_list contains the correct result (though it is generated in a single process/thread). new_object_list is identical to object_list, which should not be the case.
What am I doing wrong?
EDIT
I found the following question, which has an actually working and applicable answer. I modified it a bit following my idea of calling a list of methods on every object and it works:
import random
from multiprocessing import Pool, Manager

class Tester(object):
    def __init__(self, num=0.0, name='none'):
        self.num = num
        self.name = name

    def modify_me(self):
        self.num += random.normalvariate(mu=0, sigma=1)
        self.name = 'pla' + str(int(self.num * 100))

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.num, self.name)

def init(L):
    global tests
    tests = L

def modify(i_t_nn):
    i, t, nn = i_t_nn
    for method_name in nn:
        getattr(t, method_name)()
    tests[i] = t  # copy back
    return i

def main():
    num_processes = num = 10  #note: num_processes and num may differ
    manager = Manager()
    tests = manager.list([Tester(num=i) for i in range(num)])
    print(tests[:2])

    args = ((i, t, ['modify_me']) for i, t in enumerate(tests))
    pool = Pool(processes=num_processes, initializer=init, initargs=(tests,))
    for i in pool.imap_unordered(modify, args):
        print("done %d" % i)
    pool.close()
    pool.join()
    print(tests[:2])

if __name__ == '__main__':
    main()
Now, I went a bit further and introduced my original some_class into the game, which contains the described dictionary property some_dict. It does NOT work:
import random
from multiprocessing import Pool, Manager
from pprint import pformat as pf

class some_class:
    some_dict = {'some_key': None, 'some_other_key': None}

    def some_routine(self):
        self.some_dict.update({'some_key': 'some_value'})

    def some_other_routine(self):
        self.some_dict.update({'some_other_key': 77})

    def __repr__(self):
        return pf(self.some_dict)

def init(L):
    global tests
    tests = L

def modify(i_t_nn):
    i, t, nn = i_t_nn
    for method_name in nn:
        getattr(t, method_name)()
    tests[i] = t  # copy back
    return i

def main():
    num_processes = num = 10  #note: num_processes and num may differ
    manager = Manager()
    tests = manager.list([some_class() for i in range(num)])
    print(tests[:2])

    args = ((i, t, ['some_routine', 'some_other_routine']) for i, t in enumerate(tests))
    pool = Pool(processes=num_processes, initializer=init, initargs=(tests,))
    for i in pool.imap_unordered(modify, args):
        print("done %d" % i)
    pool.close()
    pool.join()
    print(tests[:2])

if __name__ == '__main__':
    main()
The diff between working and not working is really small, but I still do not get it:
diff --git a/test.py b/test.py
index b12eb56..0aa6def 100644
--- a/test.py
+++ b/test.py
@@ -1,15 +1,15 @@
 import random
 from multiprocessing import Pool, Manager
+from pprint import pformat as pf

-class Tester(object):
-    def __init__(self, num=0.0, name='none'):
-        self.num = num
-        self.name = name
-    def modify_me(self):
-        self.num += random.normalvariate(mu=0, sigma=1)
-        self.name = 'pla' + str(int(self.num * 100))
+class some_class:
+    some_dict = {'some_key': None, 'some_other_key': None}
+    def some_routine(self):
+        self.some_dict.update({'some_key': 'some_value'})
+    def some_other_routine(self):
+        self.some_dict.update({'some_other_key': 77})
     def __repr__(self):
-        return '%s(%r, %r)' % (self.__class__.__name__, self.num, self.name)
+        return pf(self.some_dict)

 def init(L):
     global tests
@@ -25,10 +25,10 @@ def modify(i_t_nn):

 def main():
     num_processes = num = 10  #note: num_processes and num may differ
     manager = Manager()
-    tests = manager.list([Tester(num=i) for i in range(num)])
+    tests = manager.list([some_class() for i in range(num)])
     print(tests[:2])
-    args = ((i, t, ['modify_me']) for i, t in enumerate(tests))
+    args = ((i, t, ['some_routine', 'some_other_routine']) for i, t in enumerate(tests))
What is happening here?
Your problem is due to two things; namely that you are using a class variable and that you are running your code in different processes.
Since different processes do not share memory, all objects and parameters must be pickled and sent from the original process to the process that executes it. When the parameter is an object, its class is not sent with it. Instead the receiving process uses its own blueprint (i.e. class).
In your current code, you pass the object as a parameter, update it, and return it. However, the updates are not made to the object but to the class itself, since some_dict is a class variable; and this update is not sent back to your main process, so you are left with the un-updated class.
What you want to do is make some_dict part of your object rather than of your class. This is easily done in an __init__() method. So modify some_class as:
class some_class:
    def __init__(self):
        self.some_dict = {'some_key': None, 'some_other_key': None}

    def some_routine(self):
        self.some_dict.update({'some_key': 'some_value'})

    def some_other_routine(self):
        self.some_dict.update({'some_other_key': 77})
This will make your program work as you intend. You almost always want to set up your object data in __init__(), rather than in class variables, since a class variable is shared between all instances (and can be updated by all of them). That is not normally what you want when you encapsulate data and state in an object.
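The difference is easy to demonstrate in a single process (a minimal sketch, independent of multiprocessing):

class Shared:
    data = {}              # class variable: one dict shared by all instances

class PerInstance:
    def __init__(self):
        self.data = {}     # instance variable: a fresh dict per instance

a, b = Shared(), Shared()
a.data['k'] = 1
print(b.data)              # {'k': 1} -- b sees a's update

c, d = PerInstance(), PerInstance()
c.data['k'] = 1
print(d.data)              # {} -- d is unaffected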
EDIT: It seems I was mistaken about whether the class is sent along with the pickled object. After further inspection, I think the class itself, with its class variables, is also pickled, since if the class variable is updated before sending the object to the new process, the updated value is available. However, it is still the case that updates done in the new process are not relayed back to the original class.
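For what it's worth, pickle itself stores only a named reference to the class, not a copy of it; class attributes are resolved on whatever class object exists where the unpickling happens. A minimal single-process sketch:

import pickle

class C:
    shared = {'k': None}   # class variable, not part of any instance's __dict__

obj = C()
blob = pickle.dumps(obj)   # the pickle holds a reference to C plus obj's (empty) state

C.shared['k'] = 'updated'  # mutate the class variable after pickling
clone = pickle.loads(blob)
print(clone.shared)        # {'k': 'updated'} -- resolved on the class at load time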

Printing an object python class

I wrote the following program:
def split_and_add(invoer):
    rij = invoer.split('=')
    rows = []
    for line in rij:
        rows.append(process_row(line))
    return rows

def process_row(line):
    temp_coordinate_row = CoordinatRow()
    rij = line.split()
    for coordinate in rij:
        coor = process_coordinate(coordinate)
        temp_coordinate_row.add_coordinaterow(coor)
    return temp_coordinate_row

def process_coordinate(coordinate):
    cords = coordinate.split(',')
    return Coordinate(int(cords[0]), int(cords[1]))

bestand = file_input()
rows = split_and_add(bestand)
for row in range(0, len(rows) - 1):
    rij = rows[row].weave(rows[row + 1])
    print rij
With this class:
class CoordinatRow(object):
    def __init__(self):
        self.coordinaterow = []

    def add_coordinaterow(self, coordinate):
        self.coordinaterow.append(coordinate)

    def weave(self, other):
        lijst = []
        for i in range(len(self.coordinaterow)):
            lijst.append(self.coordinaterow[i])
            try:
                lijst.append(other.coordinaterow[i])
            except IndexError:
                pass
        self.coordinaterow = lijst
        return self.coordinaterow
However, there is an error in:

for row in range(0, len(rows) - 1):
    rij = rows[row].weave(rows[row + 1])
    print rij
The outcome of the print statement is as follows:
[<Coordinates.Coordinate object at 0x021F5630>, <Coordinates.Coordinate object at 0x021F56D0>]
It seems as if the program doesn't access the actual objects when printing them. What am I doing wrong here?
This isn't an error. This is exactly what it means for Python to "access the actual object and print it". This is what the default string representation for a class looks like.
If you want to customize the string representation of your class, you do that by defining a __repr__ method. The typical way to do it is to write a method that returns something that looks like a constructor call for your class.
Since you haven't shown us the definition of Coordinate, I'll make some assumptions here:
class Coordinate(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

    # your other existing methods

    def __repr__(self):
        return '{}({}, {})'.format(type(self).__name__, self.x, self.y)
If you don't define this yourself, you end up inheriting __repr__ from object, which looks something like:
return '<{} object at {:#010x}>'.format(type(self).__qualname__, id(self))
Sometimes you also want a more human-readable version of your objects. In that case, you also want to define a __str__ method:
def __str__(self):
    return '<{}, {}>'.format(self.x, self.y)
Now:
>>> c = Coordinate(1, 2)
>>> c
Coordinate(1, 2)
>>> print(c)
<1, 2>
But notice that the __str__ of a list calls __repr__ on all of its members:
>>> cs = [c]
>>> print(cs)
[Coordinate(1, 2)]

Python decorator with multiprocessing fails

I would like to use a decorator on a function that I will subsequently pass to a multiprocessing pool. However, the code fails with "PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed". I don't quite see why it fails here. I feel certain that it's something simple, but I can't find it. Below is a minimal "working" example. I thought that using the functools helpers would be enough to make this work.
If I comment out the function decoration, it works without an issue. What is it about multiprocessing that I'm misunderstanding here? Is there any way to make this work?
Edit: After adding both a callable class decorator and a function decorator, it turns out that the function decorator works as expected. The callable class decorator continues to fail. What is it about the callable class version that keeps it from being pickled?
import random
import multiprocessing
import functools

class my_decorator_class(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass

    def __call__(self, elements):
        f = []
        for element in elements:
            f.append(self.target([element])[0])
        return f

def my_decorator_function(target):
    @functools.wraps(target)
    def inner(elements):
        f = []
        for element in elements:
            f.append(target([element])[0])
        return f
    return inner

@my_decorator_function
def my_func(elements):
    f = []
    for element in elements:
        f.append(sum(element))
    return f

if __name__ == '__main__':
    elements = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([e],)) for e in elements]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
The problem is that pickle needs to have some way to reassemble everything that you pickle. See here for a list of what can be pickled:
http://docs.python.org/library/pickle.html#what-can-be-pickled-and-unpickled
When pickling my_func, the following components need to be pickled:

1. An instance of my_decorator_class, called my_func.
This is fine. Pickle will store the name of the class and pickle its __dict__ contents. When unpickling, it uses the name to find the class, then creates an instance and fills in the __dict__ contents. However, the __dict__ contents present a problem...

2. The original, undecorated my_func that's stored in my_func.target.
This isn't so good. It's a function at the top level, and normally these can be pickled: pickle simply stores the name of the function. The problem, however, is that the name "my_func" is no longer bound to the undecorated function; it's bound to the decorated one. This means that pickle won't be able to look up the undecorated function to recreate the object. Sadly, pickle doesn't have any way to know that the object it's trying to pickle can always be found under the name __main__.my_func.
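A minimal sketch of that name-rebinding problem, reduced to plain pickle (hypothetical names, Python 3 syntax):

import pickle

def deco(f):
    def inner(*args, **kwargs):
        return f(*args, **kwargs)
    return inner

def g():
    return 1

g = deco(g)  # same effect as @deco: the name g is rebound to inner

try:
    pickle.dumps(g)  # inner is a nested function with no importable name
except (pickle.PicklingError, AttributeError) as exc:
    print(exc)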
You can change it like this and it will work:
import random
import multiprocessing
import functools

class my_decorator(object):
    def __init__(self, target):
        self.target = target
        try:
            functools.update_wrapper(self, target)
        except:
            pass

    def __call__(self, candidates, args):
        f = []
        for candidate in candidates:
            f.append(self.target([candidate], args)[0])
        return f

def old_my_func(candidates, args):
    f = []
    for c in candidates:
        f.append(sum(c))
    return f

my_func = my_decorator(old_my_func)

if __name__ == '__main__':
    candidates = [[random.randint(0, 9) for _ in range(5)] for _ in range(10)]
    pool = multiprocessing.Pool(processes=4)
    results = [pool.apply_async(my_func, ([c], {})) for c in candidates]
    pool.close()
    f = [r.get()[0] for r in results]
    print(f)
You have observed that the decorator function works while the callable class does not. I believe this is because functools.wraps modifies the decorated function so that it has the name and other properties of the function it wraps. As far as the pickle module can tell, it is indistinguishable from a normal top-level function, so it pickles it by storing its name. Upon unpickling, the name is bound to the decorated function, so everything works out.
I also had some problems using decorators with multiprocessing. I'm not sure if it's the same problem as yours:
My code looked like this:
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

@decorate_func
def actual_func(x):
    return x ** 2

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(actual_func, (2,))
print result.get()
and when I run the code I get this:
Traceback (most recent call last):
  File "test.py", line 15, in <module>
    print result.get()
  File "somedirectory_too_lengthy_to_put_here/lib/python2.7/multiprocessing/pool.py", line 572, in get
    raise self._value
cPickle.PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
I fixed it by defining a new function that wraps the function in the decorator, instead of using the decorator syntax:
from multiprocessing import Pool

def decorate_func(f):
    def _decorate_func(*args, **kwargs):
        print "I'm decorating"
        return f(*args, **kwargs)
    return _decorate_func

def actual_func(x):
    return x ** 2

def wrapped_func(*args, **kwargs):
    return decorate_func(actual_func)(*args, **kwargs)

my_swimming_pool = Pool()
result = my_swimming_pool.apply_async(wrapped_func, (2,))
print result.get()
The code ran perfectly and I got:
I'm decorating
4
I'm not very experienced at Python, but this solution solved my problem for me
If you want the decorators badly enough (like me), you can also use exec() on the function's source string to circumvent the pickling problem.
I wanted to be able to pass all the arguments to an original function and then use them successively. The following is my code for it.
At first, I made a make_functext() function to convert the target function object to a string. For that, I used the getsource() function from the inspect module (see the documentation here, and note that it can't retrieve source code from compiled code etc.). Here it is:
from inspect import getsource

def make_functext(func):
    ft = '\n'.join(getsource(func).split('\n')[1:])  # Removing the decorator, of course
    ft = ft.replace(func.__name__, 'func')           # Making function callable with 'func'
    ft = ft.replace('#§ ', '').replace('#§', '')     # For using commented code starting with '#§'
    ft = ft.strip()                                  # In case the function code was indented
    return ft
It is used in the following _worker() function that will be the target of the processes:
def _worker(functext, args):
    scope = {}           # This is needed to keep executed definitions
    exec(functext, scope)
    scope['func'](args)  # Using func from scope
And finally, here's my decorator:
from multiprocessing import Process

def parallel(num_processes, **kwargs):
    def parallel_decorator(func, num_processes=num_processes):
        functext = make_functext(func)
        print('This is the parallelized function:\n', functext)
        def function_wrapper(funcargs, num_processes=num_processes):
            workers = []
            print('Launching processes...')
            for k in range(num_processes):
                p = Process(target=_worker, args=(functext, funcargs[k]))  # use args here
                p.start()
                workers.append(p)
        return function_wrapper
    return parallel_decorator
The code can finally be used by defining a function like this:
@parallel(4)
def hello(args):
    #§ from time import sleep  # use '#§' to avoid unnecessary (re)imports in main program
    name, seconds = tuple(args)  # unpack args-list here
    sleep(seconds)
    print('Hi', name)
... which can now be called like this:
hello([['Marty', 0.5],
       ['Catherine', 0.9],
       ['Tyler', 0.7],
       ['Pavel', 0.3]])
... which outputs:
This is the parallelized function:
def func(args):
    from time import sleep
    name, seconds = tuple(args)
    sleep(seconds)
    print('Hi', name)
Launching processes...
Hi Pavel
Hi Marty
Hi Tyler
Hi Catherine
Thanks for reading, this is my very first post. If you find any mistakes or bad practices, feel free to leave a comment. I know that these string conversions are quite dirty, though...
If you use this code for your decorator:
import multiprocessing
from types import MethodType

DEFAULT_POOL = []

def run_parallel(_func=None, *, name: str = None, context_pool: list = DEFAULT_POOL):
    class RunParallel:
        def __init__(self, func):
            self.func = func

        def __call__(self, *args, **kwargs):
            process = multiprocessing.Process(target=self.func, name=name, args=args, kwargs=kwargs)
            context_pool.append(process)
            process.start()

        def __get__(self, instance, owner):
            return self if instance is None else MethodType(self, instance)

    if _func is None:
        return RunParallel
    else:
        return RunParallel(_func)

def wait_context(context_pool: list = DEFAULT_POOL, kill_others_if_one_fails: bool = False):
    finished = []
    for process in context_pool:
        process.join()
        finished.append(process)
        if kill_others_if_one_fails and process.exitcode != 0:
            break

    if kill_others_if_one_fails:
        # kill unfinished processes
        for process in context_pool:
            if process not in finished:
                process.kill()
        # wait for every process to be dead
        for process in context_pool:
            process.join()
Then you can use it like this, in these 4 examples:
@run_parallel
def m1(a, b="b"):
    print(f"m1 -- {a=} {b=}")

@run_parallel(name="mym2", context_pool=DEFAULT_POOL)
def m2(d, cc="cc"):
    print(f"m2 -- {d} {cc=}")
    a = 1/0

class M:
    @run_parallel
    def c3(self, k, n="n"):
        print(f"c3 -- {k=} {n=}")

    @run_parallel(name="Mc4", context_pool=DEFAULT_POOL)
    def c4(self, x, y="y"):
        print(f"c4 -- {x=} {y=}")

if __name__ == "__main__":
    m1(11)
    m2(22)
    M().c3(33)
    M().c4(44)
    wait_context(kill_others_if_one_fails=True)
The output will be:
m1 -- a=11 b='b'
m2 -- 22 cc='cc'
c3 -- k=33 n='n'
(followed by the exception raised in method m2)
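A side note on the __get__ method in RunParallel: it implements the descriptor protocol, which is what lets the callable class also decorate instance methods (without it, self would never be bound when the wrapper is accessed through an instance). A minimal sketch isolating just that trick, with hypothetical names:

from types import MethodType

class CallableDeco:
    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

    def __get__(self, instance, owner):
        # bind the wrapper to the instance, like a plain function would be
        return self if instance is None else MethodType(self, instance)

class M:
    @CallableDeco
    def method(self, x):
        return x * 2

print(M().method(21))  # 42 -- self was bound via __get__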
