I am very new to Python, so I am possibly asking a simple question.
I am writing multiprocessing code in Python:
from multiprocessing import Process
from multiprocessing import Queue

class myClass(object):
    def __init__(self):
        self.__i = 0
        self.__name = 'rob'
        return

    def target_func(self, name, q):
        self.__name = name
        print 'Hello', self.__name
        self.__i += 1
        print self.__i
        q.put([self.__i, self.__name])
        return

    def name(self):
        return self.__name

    def i(self):
        return self.__i
if __name__ == '__main__':
    mc = myClass()
    q = Queue()
    p = Process(target=mc.target_func, args=('bob', q))
    p.start()
    ret = q.get()
    p.join()
    p2 = Process(target=mc.target_func, args=('tom', q))
    p2.start()
    ret = q.get()
    p2.join()
I expect the output to be
Hello bob
1
Hello tom
2
But the actual output is
Hello bob
1
Hello tom
1 <------------------ Why isn't it 2?
May I know what I am doing wrong?
Many thanks.
target_func is called in a separate process. mc is copied into each subprocess; it is not shared between the processes.
If you use Thread instead, you will get the expected(?) result. For safety you should use a lock; I omitted it in the following code.
from threading import Thread
from Queue import Queue
....
if __name__ == '__main__':
    mc = myClass()
    q = Queue()
    p = Thread(target=mc.target_func, args=('bob', q))
    p.start()
    ret = q.get()
    p.join()
    p2 = Thread(target=mc.target_func, args=('tom', q))
    p2.start()
    ret = q.get()
    p2.join()
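For completeness, here is a sketch of the lock mentioned above (my own addition, not part of the original answer), guarding the shared state with a threading.Lock:

from threading import Thread, Lock
from Queue import Queue

lock = Lock()  # guards the shared counter and name

class myClass(object):
    def __init__(self):
        self.__i = 0
        self.__name = 'rob'

    def target_func(self, name, q):
        with lock:  # only one thread at a time touches the shared state
            self.__name = name
            print 'Hello', self.__name
            self.__i += 1
            print self.__i
            q.put([self.__i, self.__name])

if __name__ == '__main__':
    mc = myClass()
    q = Queue()
    threads = [Thread(target=mc.target_func, args=(n, q)) for n in ('bob', 'tom')]
    for t in threads:
        t.start()
    for t in threads:
        t.join()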
Processes don't share memory, unlike threads. The name __i in the second process refers to a different variable, whose initial value was copied from the original process when you launched the subprocess.
You can use the Value or Array data types to transfer information from one process to another, or you can use a Queue to push data from the subprocess back to the original. All of these classes are included in the multiprocessing module:
http://docs.python.org/2/library/multiprocessing.html#multiprocessing.Queue
http://docs.python.org/2/library/multiprocessing.html#multiprocessing.Value
http://docs.python.org/2/library/multiprocessing.html#multiprocessing.Array
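For illustration, here is a minimal sketch (my own addition, in the question's Python 2 style) of how the original example could print 1 and then 2: keep the counter in a shared multiprocessing.Value instead of an instance attribute:

from multiprocessing import Process, Value

def target_func(name, counter):
    # counter lives in shared memory, so both processes see the same value
    with counter.get_lock():
        counter.value += 1
        print 'Hello', name
        print counter.value

if __name__ == '__main__':
    counter = Value('i', 0)  # shared integer, initial value 0
    p = Process(target=target_func, args=('bob', counter))
    p.start()
    p.join()
    p2 = Process(target=target_func, args=('tom', counter))
    p2.start()
    p2.join()

This prints "Hello bob", 1, "Hello tom", 2, because both processes increment the same shared integer.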
The value of the variable is still the same since each process you create gets a full copy of the memory space of the parent process, including a copy of the mc class instance that you created earlier. Hence, when you modify the instance variable of mc from within each process, it does not affect the variable in your main process. Here's a more concise example of this behavior:
from multiprocessing import Process

class A(object):
    def __init__(self):
        self.var = 1
        print "Initialized class: ", self

    def test(self):
        print self
        print "Variable value:", self.var
        self.var += 1

if __name__ == '__main__':
    a = A()
    p1 = Process(target=a.test)
    # Creates a copy of the current memory space and will print "Variable value: 1"
    p1.start()
    p2 = Process(target=a.test)
    # Will still print "Variable value: 1"
    p2.start()
Related
I am trying to run the following snippet, which appends data to the lists 'tests1' and 'tests2'. But when I print 'tests1' and 'tests2', the displayed lists are empty. Is anything incorrect here?
tests1 = []
tests2 = []

def func1():
    for i in range(25, 26):
        tests1.append(test_loader.get_tests(test_prefix=new_paths[i],
                                            tags=params.get('tags', None),
                                            exclude=params.get('exclude', False)))

def func2():
    for i in range(26, 27):
        tests2.append(test_loader.get_tests(test_prefix=new_paths[i],
                                            tags=params.get('tags', None),
                                            exclude=params.get('exclude', False)))

p1 = mp.Process(target=func1)
p2 = mp.Process(target=func2)
p1.start()
p2.start()
p1.join()
p2.join()
print tests1
print tests2
The worker processes don't actually share the same object. It gets copied (pickled).
You can send values between processes using a multiprocessing.Queue (or by various other means). See my simple example (in which I've made your tests into integers for simplicity).
from multiprocessing import Process, Queue

def add_tests1(queue):
    for i in range(10):
        queue.put(i)
    queue.put(None)

def add_tests2(queue):
    for i in range(100, 110):
        queue.put(i)
    queue.put(None)

def run_tests(queue):
    while True:
        test = queue.get()
        if test is None:
            break
        print test

if __name__ == '__main__':
    queue1 = Queue()
    queue2 = Queue()
    add_1 = Process(target=add_tests1, args=(queue1,))
    add_2 = Process(target=add_tests2, args=(queue2,))
    run_1 = Process(target=run_tests, args=(queue1,))
    run_2 = Process(target=run_tests, args=(queue2,))
    add_1.start(); add_2.start(); run_1.start(); run_2.start()
    add_1.join(); add_2.join(); run_1.join(); run_2.join()
Note that the parent program can also access the queues.
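For instance, here is a sketch (my own variation, reusing add_tests1 from the example above) in which the parent consumes queue1 directly instead of spawning run_1:

if __name__ == '__main__':
    queue1 = Queue()
    add_1 = Process(target=add_tests1, args=(queue1,))
    add_1.start()
    # drain the queue in the parent until the None sentinel arrives
    while True:
        test = queue1.get()
        if test is None:
            break
        print test
    add_1.join()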
The end goal is to execute a method in the background, but not in parallel: when multiple objects call this method, each should wait for its turn to proceed. To achieve running in the background, I have to run the method in a subprocess (not a thread), and I need to start it using spawn (not fork). To prevent parallel executions, the obvious solution is a global lock shared between processes.
When processes are forked, which is the default on Unix, this is easy to achieve, as both of the following snippets show.
We can share it as a class variable:
import multiprocessing as mp
from time import sleep

class OneAtATime:
    l = mp.Lock()

    def f(self):
        with self.l:
            sleep(1)
            print("Hello")

if __name__ == "__main__":
    a = OneAtATime()
    b = OneAtATime()
    p1 = mp.Process(target=a.f)
    p2 = mp.Process(target=b.f)
    p1.start()
    p2.start()
Or we can pass it to the method:
import multiprocessing as mp
from time import sleep

class OneAtATime:
    def f(self, l):
        with l:
            sleep(1)
            print("Hello")

if __name__ == "__main__":
    a = OneAtATime()
    b = OneAtATime()
    m = mp.Manager()
    l = mp.Lock()
    p1 = mp.Process(target=a.f, args=(l,))
    p2 = mp.Process(target=b.f, args=(l,))
    p1.start()
    p2.start()
Both of these snippets have the appropriate behaviour of printing "Hello" at one-second intervals.
However, when the start method is changed to 'spawn', they break.
The first one (1) prints both "Hello"s at the same time. This is because the internal state of a class is not pickled, so the two processes do not share the same lock.
The second one (2) fails with FileNotFoundError at runtime. I think it has to do with the fact that locks cannot be pickled: see Python sharing a lock between processes.
In that answer, two fixes are suggested (side note: I cannot use a pool because I want to randomly create an arbitrary number of processes).
I haven't found a way to adapt the second fix, but I tried to implement the first one:
import multiprocessing as mp
from time import sleep

if __name__ == "__main__":
    mp.set_start_method('spawn')

class OneAtATime:
    def f(self, l):
        with l:
            sleep(1)
            print("Hello")

if __name__ == "__main__":
    a = OneAtATime()
    b = OneAtATime()
    m = mp.Manager()
    l = m.Lock()
    p1 = mp.Process(target=a.f, args=(l,))
    p2 = mp.Process(target=b.f, args=(l,))
    p1.start()
    p2.start()
This fails with AttributeError and FileNotFoundError (3). In fact it also fails (BrokenPipeError) when the fork method is used (4).
What is the proper way of sharing a lock between spawned processes?
A quick explanation of the four failures I numbered would be nice, too.
I'm running Python 3.6 under Arch Linux.
Congratulations, you got yourself 90% of the way there. The last step is actually not very hard to do.
Yes, your final code block fails with an AttributeError, but what specifically is the error? "Can't get attribute 'OneAtATime' on <module '__main__' ...>". This is very similar to a problem you've already encountered: it's not pickling the class OneAtATime.
I made the following change and it worked as you'd like:
file oaat.py:
from time import sleep

class OneAtATime:
    def f(self, l):
        with l:
            sleep(1)
            print("Hello")
interactive shell:
import multiprocessing as mp
from oaat import OneAtATime

if __name__ == "__main__":
    mp.set_start_method('spawn')
    a = OneAtATime()
    b = OneAtATime()
    m = mp.Manager()
    l = m.Lock()
    p1 = mp.Process(target=a.f, args=(l,))
    p2 = mp.Process(target=b.f, args=(l,))
    p1.start()
    p2.start()
You may notice I didn't really do anything - just split your code into two separate files. Try it out; you'll see it works fine. (At least, it did for me, using Python 3.5 on Ubuntu.) With spawn, the child process re-imports your code, so OneAtATime has to live in an importable module rather than only in your main script or interactive session.
The last code snippet works, provided the script does not exit prematurely; joining the processes is enough (otherwise the main process can exit and take down the Manager process that holds the lock):
import multiprocessing as mp
from time import sleep

class OneAtATime:
    def f(self, l):
        with l:
            sleep(1)
            print("Hello")

if __name__ == "__main__":
    mp.set_start_method('spawn')
    a = OneAtATime()
    b = OneAtATime()
    m = mp.Manager()
    l = m.Lock()
    p1 = mp.Process(target=a.f, args=(l,))
    p2 = mp.Process(target=b.f, args=(l,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
More info on the error it was causing here: https://stackoverflow.com/a/25456494/8194503
I have 3 processes running in one script. Process 1 passes data to Process 2, and then Process 2 passes data to Process 3. When I put data into queue2, I get the error "Global name 'queue2' is not defined", and I am stuck on it...
if __name__ == '__main__':
    queue1 = mp.Queue()
    queue2 = mp.Queue()
    p1 = mp.Process(target=f2, args=(queue1,))
    p1.start()
    p2 = mp.Process(target=f3, args=(queue2,))
    p2.start()
    f1()

def f1():
    # do something to get x
    queue1.put(x)

def f2(q):
    a = q.get()
    # do something to a, to produce b
    queue2.put(b)  # error happens here: Global name "queue2" is not defined

def f3(q):
    c = q.get()
    # keep processing c...
Just as you passed queue1 to f2, you also need to pass queue2.
You can declare the queues as global:
def f2(q):
    global queue2
    a = q.get()
    queue2.put(b)
This works:
import multiprocessing as mp

queue1 = mp.Queue()
queue2 = mp.Queue()

def f1(q):
    x = 5
    # do something to get x
    q.put(x)

def f2(in_queue, out_queue):
    a = in_queue.get()
    b = a + 2
    # do something to a, to produce b
    out_queue.put(b)

def f3(q):
    c = q.get()
    print c

f1(queue1)
p1 = mp.Process(target=f2, args=(queue1, queue2))
p1.start()
p2 = mp.Process(target=f3, args=(queue2,))
p2.start()
Your code doesn't produce the error you report; it produces "f2 is not defined", since when you spawn the process p1, f2 is not a defined name yet. The rule when you fork is that, at creation time, your processes must be able to see the variables they use, i.e. those names must be in the current scope.
To put it clearly: at process-spawning time, the child inherits the current namespace from the parent process.
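As a minimal illustration of that rule (my own sketch, with hypothetical names), a function must already be defined when the Process is created:

import multiprocessing as mp

def defined_first(q):
    q.put('visible to the child')

if __name__ == '__main__':
    queue = mp.Queue()
    # works: defined_first already exists in the namespace the child inherits
    p = mp.Process(target=defined_first, args=(queue,))
    p.start()
    print queue.get()
    p.join()
    # moving the def below this block would raise NameError at process creation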
I want to use Python's multiprocessing module in a class, which itself uses subprocesses so as not to block the main call.
The minimal example looks like this:
import multiprocessing as mp

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.Pool = mp.Pool(processes=2)
        self.alive = True
        self.p = mp.Process(target=self.sub, args=())

    def worker():
        print 'Alive'

    def sub(self):
        print self.alive
        for i in range(2):
            print i
            self.Pool.apply_async(self.worker, args=())
        print 'done'
        self.Pool.close()
        # self.Pool.join()
I commented the last line out, as it raises an AssertionError ("can only join a child process").
When I do:
m = mpo()
m.p.start()
The output is
True
0
1
done
My main question is: why is the print statement in the worker never reached?
Update:
The updated code looks like this.
import multiprocessing as mp

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.alive = True
        self.p = mp.Process(target=self.sub, args=())
        self.result = []

    def worker(self):
        self.result.append(1)
        print 'Alive'

    def sub(self):
        print self.alive
        Pool = mp.Pool(processes=2)
        for i in range(2):
            print i
            Pool.apply_async(self.worker, args=())
        print 'done'
        Pool.close()
        Pool.join()
The pool now doesn't have to be inherited, as it is created in the subprocess. Instead of the print statement, the result is appended to the calling object, and the pool is properly joined. Nevertheless, no result shows up.
So I think this may correspond to a simple example of what you are looking for. (As for why 'Alive' never printed: in Python 2 a bound method such as self.worker cannot be pickled, so each apply_async call fails silently; the exception only surfaces if you call .get() on the AsyncResult.)
import multiprocessing as mp

def worker(arg):
    #print 'Alive'+str(arg)
    return "Alive and finished {0}".format(arg)

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.alive = True
        self.pool = mp.Pool(processes=2)

    def sub(self, arguments):
        self.results = self.pool.map_async(worker, arguments)
        return self.results

if __name__ == "__main__":
    s = mpo()
    s.sub(range(10))
    print s.results.get()
Additionally, you can call
self.results.ready()
to find out whether the processes have finished their work. You do not have to put this inside another process, because the map_async call does not block the rest of your program.
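For example, a tiny sketch (my own, reusing s from above) that polls while the pool keeps working in the background:

results = s.sub(range(10))
while not results.ready():
    pass  # the main program is free to do other work here instead of blocking
print results.get()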
EDIT:
Concerning your comment, I do not really see the value of putting the calculation in a separate process, because the function already runs in separate processes (in the pool). You only add complexity by nesting it in another subprocess, but it is possible:
import multiprocessing as mp

def worker(arg):
    #print 'Alive'+str(arg)
    return "Alive and finished {0}".format(arg)

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.alive = True
        self.pool = mp.Pool(processes=2)

    def sub(self, arguments):
        self.results = self.pool.map_async(worker, arguments)
        return self.results

def run_calculation(q):
    s = mpo()
    results = s.sub(range(10))
    q.put(results.get())

queue = mp.Queue()
proc = mp.Process(target=run_calculation, args=(queue,))
proc.start()
proc.join()
queue.get()
I've got a use case for multiprocessing where I want to write a dispatch function that receives a Python function and an input queue, and then dispatches that function over the input through multiprocessing.
I'm not sure if args is the appropriate way to pass the function in, since the function isn't strictly going to be shared.
import multiprocessing

def dispatch(queue, function):
    while True:
        if queue.empty():
            return
        current_project = queue.get()
        function(current_project)

def letter_rip(projects_filename, function):
    project_file = open(projects_filename, 'r')
    projects = project_file.readlines()
    project_file.close()
    q = multiprocessing.Queue()
    for project in projects:
        q.put(project)
    for i in xrange(0, 128):
        p = multiprocessing.Process(target=dispatch, args=(q, ???function???))
        p.start()
Functions (when used as functions and not as objects) are immutable objects, so no problems should arise from their plain usage:
from multiprocessing import Process, Value

def callback(x):
    return x * 2

def handler(x, fn, a):
    a.value = fn(x.value)

if __name__ == '__main__':
    x = Value('d', 2)
    a = Value('d', 0)
    p1 = Process(target=handler, args=(x, callback, a))
    p2 = Process(target=handler, args=(x, callback, a))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print a.value
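Applied to the question's dispatch (a sketch; some_function is a hypothetical stand-in for whatever you want to run per project), you can simply pass the function itself in args:

def some_function(project):  # hypothetical per-project worker
    print project

p = multiprocessing.Process(target=dispatch, args=(q, some_function))
p.start()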