Why do the main and child processes not exit after an exception in main?

#coding=utf8
from multiprocessing import Process
from time import sleep
import os

def foo():
    print("foo")
    for i in range(11000):
        sleep(1)
        print("{}: {}".format(os.getpid(), i))

if __name__ == "__main__":
    p = Process(target=foo)
    # p.daemon = True
    p.start()
    print("parent: {}".format(os.getpid()))
    sleep(20)
    raise Exception("invalid")
An exception is raised in the main process, but the child and main processes keep running. Why?

When the MainProcess is shutting down, non-daemonic child processes are simply joined. This happens in _exit_function(), which is registered with atexit.register(_exit_function). You can look it up in multiprocessing/util.py if you are curious.
You can also insert multiprocessing.log_to_stderr(logging.DEBUG) before you start your process to see the log messages:
parent: 9670
foo
[INFO/Process-1] child process calling self.run()
9675: 0
9675: 1
...
9675: 18
Traceback (most recent call last):
  File "/home/...", line 26, in <module>
    raise Exception("invalid")
Exception: invalid
[INFO/MainProcess] process shutting down
[DEBUG/MainProcess] running all "atexit" finalizers with priority >= 0
[INFO/MainProcess] calling join() for process Process-1
9675: 19
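If you actually want the child torn down when the main process raises, the commented-out daemon flag in your question is enough: the _exit_function mentioned above terminates daemonic children at shutdown instead of joining them. A minimal sketch, which is just your code with that line uncommented:

from multiprocessing import Process
from time import sleep
import os

def foo():
    print("foo")
    for i in range(11000):
        sleep(1)
        print("{}: {}".format(os.getpid(), i))

if __name__ == "__main__":
    p = Process(target=foo)
    p.daemon = True   # daemonic children are terminated at shutdown, not joined
    p.start()
    print("parent: {}".format(os.getpid()))
    sleep(20)
    raise Exception("invalid")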

Related

How to close a multiprocessing queue from a parent (consumer) process

When using a multiprocessing queue to communicate between processes, many articles recommend sending a terminate message to the queue.
However, if a child process is the producer, it may fail unexpectedly, leaving the consumer with no notification that no more messages are coming.
The parent process, however, can be notified when a child dies.
It seems it should be possible for it to notify a worker thread in this process to quit and not expect more messages. But how?
multiprocessing.Queue.close()
...doesn't notify consumers (Really? Wait, what?!)
def onProcessQuit(): # Notify worker that we are done.
    messageQ.put("TERMINATE")
... doesn't let me wait for pending work to complete.
def onProcessQuit(): # Notify worker that we are done.
    messageQ.put("TERMINATE")
    # messageQ.close()
    messageQ.join_thread() # Wait for worker to complete
... fails because the queue is not yet closed.
def onProcessQuit(): # Notify worker that we are done.
    messageQ.put("TERMINATE")
    messageQ.close()
    messageQ.join_thread() # Wait for worker to complete
... seems like it should work, but fails in the worker with a TypeError exception:
    msg = messageQ.get()
  File "/usr/lib/python3.7/multiprocessing/queues.py", line 94, in get
    res = self._recv_bytes()
  File "/usr/lib/python3.7/multiprocessing/connection.py", line 216, in recv_bytes
    buf = self._recv_bytes(maxlength)
  File "/usr/lib/python3.7/multiprocessing/connection.py", line 411, in _recv_bytes
    return self._recv(size)
  File "/usr/lib/python3.7/multiprocessing/connection.py", line 379, in _recv
    chunk = read(handle, remaining)
TypeError: an integer is required (got type NoneType)
while not quit:
    try:
        msg = messageQ.get(block=True, timeout=0.5)
    except Empty:
        continue
... is terrible in that it forces an unnecessary trade-off between shutdown latency and CPU load.
Full example
import multiprocessing
import threading

def producer(messageQ):
    messageQ.put("1")
    messageQ.put("2")
    messageQ.put("3")

if __name__ == '__main__':
    messageQ = multiprocessing.Queue()

    def worker():
        try:
            while True:
                msg = messageQ.get()
                print(msg)
                if msg == "TERMINATE": return
                # messageQ.task_done()
        finally:
            print("Worker quit")
            # messageQ.close()        # End thread
            # messageQ.join_thread()

    thr = threading.Thread(target=worker,
                           daemon=False)  # The work queue is precious.
    thr.start()

    def onProcessQuit():              # Notify worker that we are done.
        messageQ.put("TERMINATE")     # Notify worker we are done
        messageQ.close()              # No more messages
        messageQ.join_thread()        # Wait for worker to complete

    def runProcess():
        proc = multiprocessing.Process(target=producer, args=(messageQ,))
        proc.start()
        proc.join()
        print("runProcess quitting ...")
        onProcessQuit()
        print("runProcess quitting .. OK")

    runProcess()
If you are concerned about the producer process not completing normally, then I am not sure what your question is: your code as posted should work, except for a few corrections: (1) it is missing an import statement, (2) there is no call to runProcess, and (3) your worker thread is incorrectly a daemon thread (as such it may end up terminating before it has had a chance to process all the messages on the queue).
As a personal preference (and not a correction), I would also use None as the special sentinel message instead of TERMINATE, and remove some extraneous queue calls that you don't really need (I don't see explicitly closing the queue accomplishing anything that is necessary).
These are the changes:
def producer(messageQ):
    messageQ.put("1")
    messageQ.put("2")
    messageQ.put("3")

if __name__ == '__main__':
    import multiprocessing
    import threading

    SENTINEL = None

    def worker():
        try:
            while True:
                msg = messageQ.get()
                if msg is SENTINEL:
                    return  # No need to print the sentinel
                print(msg)
        finally:
            print("Worker quit")

    def onProcessQuit():          # Notify worker that we are done.
        messageQ.put(SENTINEL)    # Notify worker we are done

    def runProcess():
        proc = multiprocessing.Process(target=producer, args=(messageQ,))
        proc.start()
        proc.join()
        print("runProcess quitting ...")
        onProcessQuit()
        print("runProcess quitting .. OK")
        thr.join()

    messageQ = multiprocessing.Queue()
    thr = threading.Thread(target=worker)  # The work queue is precious.
    thr.start()
    runProcess()
Prints:
1
2
3
runProcess quitting ...
runProcess quitting .. OK
Worker quit
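If the part you really care about is "wait for pending work to complete" (the commented-out task_done() call hints at that), multiprocessing.JoinableQueue offers task_done()/join() for exactly this. A rough sketch of that alternative -- my assumption about the intent, not part of the corrections above:

import multiprocessing
import threading

def producer(messageQ):
    for msg in ("1", "2", "3"):
        messageQ.put(msg)

if __name__ == '__main__':
    messageQ = multiprocessing.JoinableQueue()

    def worker():
        while True:
            msg = messageQ.get()
            if msg is None:           # sentinel: no more messages
                messageQ.task_done()
                return
            print(msg)
            messageQ.task_done()      # mark this item as processed

    thr = threading.Thread(target=worker)
    thr.start()

    proc = multiprocessing.Process(target=producer, args=(messageQ,))
    proc.start()
    proc.join()

    messageQ.put(None)                # tell the worker we are done
    messageQ.join()                   # blocks until every put item has been task_done()'d
    thr.join()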

multiprocessing.Queue fails intermittently. Bug in Python?

Python's multiprocessing.Queue fails intermittently, and I don't know why. Is this a bug in Python or my script?
Minimal failing script
import multiprocessing
import time
import logging
import multiprocessing.util
multiprocessing.util.log_to_stderr(level=logging.DEBUG)

queue = multiprocessing.Queue(maxsize=10)

def worker(queue):
    queue.put('abcdefghijklmnop')

    # "Indicate that no more data will be put on this queue by the
    # current process." --Documentation
    # time.sleep(0.01)
    queue.close()

proc = multiprocessing.Process(target=worker, args=(queue,))
proc.start()

# "Indicate that no more data will be put on this queue by the current
# process." --Documentation
# time.sleep(0.01)
queue.close()

proc.join()
I am testing this in CPython 3.6.6 in Debian. It also fails with docker python:3.7.0-alpine.
docker run --rm -v "${PWD}/test.py:/test.py" \
python:3-alpine python3 /test.py
The above script sometimes fails with a BrokenPipeError.
Traceback (most recent call last):
  File "/usr/lib/python3.6/multiprocessing/queues.py", line 240, in _feed
    send_bytes(obj)
  File "/usr/lib/python3.6/multiprocessing/connection.py", line 200, in send_bytes
    self._send_bytes(m[offset:offset + size])
  File "/usr/lib/python3.6/multiprocessing/connection.py", line 404, in _send_bytes
    self._send(header + buf)
  File "/usr/lib/python3.6/multiprocessing/connection.py", line 368, in _send
    n = write(self._handle, buf)
BrokenPipeError: [Errno 32] Broken pipe
Test harness
Because this is intermittent, I wrote a shell script to call it many times and count the failures.
#!/bin/sh
total=10
successes=0
for i in `seq ${total}`
do
    if ! docker run --rm -v "${PWD}/test.py:/test.py" python:3-alpine \
            python3 test.py 2>&1 \
        | grep --silent BrokenPipeError
    then
        successes=$(expr ${successes} + 1)
    fi
done
python3 -c "print(${successes} / ${total})"
This usually shows some fraction, maybe 0.2 indicating intermittent failures.
Timing adjustments
If I insert time.sleep(0.01) before either queue.close(), it works consistently. I noticed in the source code that writing happens in its own thread. I think that if this writer thread is still trying to write data when the other processes close the queue, that causes the error.
Debug logs
By uncommenting the first few lines, I can trace the execution for failures and successes.
Failure:
[DEBUG/MainProcess] created semlock with handle 140480257941504
[DEBUG/MainProcess] created semlock with handle 140480257937408
[DEBUG/MainProcess] created semlock with handle 140480257933312
[DEBUG/MainProcess] Queue._after_fork()
[DEBUG/Process-1] Queue._after_fork()
[INFO/Process-1] child process calling self.run()
[DEBUG/Process-1] Queue._start_thread()
[DEBUG/Process-1] doing self._thread.start()
[DEBUG/Process-1] starting thread to feed data to pipe
[DEBUG/Process-1] ... done self._thread.start()
[DEBUG/Process-1] telling queue thread to quit
[INFO/Process-1] process shutting down
[DEBUG/Process-1] running all "atexit" finalizers with priority >= 0
[DEBUG/Process-1] running the remaining "atexit" finalizers
[DEBUG/Process-1] joining queue thread
Traceback (most recent call last):
  File "/usr/lib/python3.7/multiprocessing/queues.py", line 242, in _feed
    send_bytes(obj)
  File "/usr/lib/python3.7/multiprocessing/connection.py", line 200, in send_bytes
    self._send_bytes(m[offset:offset + size])
  File "/usr/lib/python3.7/multiprocessing/connection.py", line 404, in _send_bytes
    self._send(header + buf)
  File "/usr/lib/python3.7/multiprocessing/connection.py", line 368, in _send
    n = write(self._handle, buf)
BrokenPipeError: [Errno 32] Broken pipe
[DEBUG/Process-1] feeder thread got sentinel -- exiting
[DEBUG/Process-1] ... queue thread joined
[INFO/Process-1] process exiting with exitcode 0
[INFO/MainProcess] process shutting down
[DEBUG/MainProcess] running all "atexit" finalizers with priority >= 0
[DEBUG/MainProcess] running the remaining "atexit" finalizers
"Success" (really silent failure, only able to replicate with Python 3.6):
[DEBUG/MainProcess] created semlock with handle 139710276231168
[DEBUG/MainProcess] created semlock with handle 139710276227072
[DEBUG/MainProcess] created semlock with handle 139710276222976
[DEBUG/MainProcess] Queue._after_fork()
[DEBUG/Process-1] Queue._after_fork()
[INFO/Process-1] child process calling self.run()
[DEBUG/Process-1] Queue._start_thread()
[DEBUG/Process-1] doing self._thread.start()
[DEBUG/Process-1] starting thread to feed data to pipe
[DEBUG/Process-1] ... done self._thread.start()
[DEBUG/Process-1] telling queue thread to quit
[INFO/Process-1] process shutting down
[INFO/Process-1] error in queue thread: [Errno 32] Broken pipe
[DEBUG/Process-1] running all "atexit" finalizers with priority >= 0
[DEBUG/Process-1] running the remaining "atexit" finalizers
[DEBUG/Process-1] joining queue thread
[DEBUG/Process-1] ... queue thread joined
[INFO/Process-1] process exiting with exitcode 0
[INFO/MainProcess] process shutting down
[DEBUG/MainProcess] running all "atexit" finalizers with priority >= 0
[DEBUG/MainProcess] running the remaining "atexit" finalizers
True success (using either time.sleep(0.01)):
[DEBUG/MainProcess] created semlock with handle 140283921616896
[DEBUG/MainProcess] created semlock with handle 140283921612800
[DEBUG/MainProcess] created semlock with handle 140283921608704
[DEBUG/MainProcess] Queue._after_fork()
[DEBUG/Process-1] Queue._after_fork()
[INFO/Process-1] child process calling self.run()
[DEBUG/Process-1] Queue._start_thread()
[DEBUG/Process-1] doing self._thread.start()
[DEBUG/Process-1] starting thread to feed data to pipe
[DEBUG/Process-1] ... done self._thread.start()
[DEBUG/Process-1] telling queue thread to quit
[INFO/Process-1] process shutting down
[DEBUG/Process-1] feeder thread got sentinel -- exiting
[DEBUG/Process-1] running all "atexit" finalizers with priority >= 0
[DEBUG/Process-1] running the remaining "atexit" finalizers
[DEBUG/Process-1] joining queue thread
[DEBUG/Process-1] ... queue thread joined
[INFO/Process-1] process exiting with exitcode 0
[INFO/MainProcess] process shutting down
[DEBUG/MainProcess] running all "atexit" finalizers with priority >= 0
[DEBUG/MainProcess] running the remaining "atexit" finalizers
The difference seems to be that in the truly successful case, the feeder receives the sentinel object before the atexit handlers.
The primary issue with your code is that nobody is consuming what your worker process has put on the queue. Python queues expect the data in them to be consumed ("flushed to the pipe") before the process that put it there is killed.
In this light, your example doesn't make much sense, but if you want to get it to work:
The key is queue.cancel_join_thread() -- https://docs.python.org/3/library/multiprocessing.html
Warning As mentioned above, if a child process has put items on a
queue (and it has not used JoinableQueue.cancel_join_thread), then
that process will not terminate until all buffered items have been
flushed to the pipe. This means that if you try joining that process
you may get a deadlock unless you are sure that all items which have
been put on the queue have been consumed. Similarly, if the child
process is non-daemonic then the parent process may hang on exit when
it tries to join all its non-daemonic children.
Note that a queue created using a manager does not have this issue
^ relevant bit. The issue is that data is put on the queue by the child process but NOT consumed by anyone. In this case cancel_join_thread() must be called in the CHILD process prior to asking it to join. This code sample will get rid of the error.
import multiprocessing
import time
import logging
import multiprocessing.util
multiprocessing.util.log_to_stderr(level=logging.DEBUG)

queue = multiprocessing.Queue(maxsize=10)

def worker(queue):
    queue.put('abcdefghijklmnop')

    # "Indicate that no more data will be put on this queue by the
    # current process." --Documentation
    # time.sleep(0.01)
    queue.close()

    # Ideally, this would not be here but would rather be a response to a
    # signal (or other IPC message) sent from the main process.
    queue.cancel_join_thread()

proc = multiprocessing.Process(target=worker, args=(queue,))
proc.start()

# "Indicate that no more data will be put on this queue by the current
# process." --Documentation
# time.sleep(0.01)
queue.close()

proc.join()
I didn't bother with IPC for this because there's no consumer at all but I hope the idea is clear.
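Another way to look at it, since the root cause is the missing consumer: if the parent actually reads the item before closing its copy of the queue, the feeder thread can flush its buffer and the error disappears. A small sketch of that variation (my own, not part of the answer above):

import multiprocessing

def worker(queue):
    queue.put('abcdefghijklmnop')
    queue.close()              # no more data will be put by this process

if __name__ == '__main__':
    queue = multiprocessing.Queue(maxsize=10)
    proc = multiprocessing.Process(target=worker, args=(queue,))
    proc.start()
    print(queue.get())         # consume the item so the feeder thread can flush
    proc.join()
    queue.close()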

Python multiprocessing daemon vs non-daemon vs main

I'm learning multiprocessing in Python, and I found this odd behaviour of daemon vs. non-daemon processes with respect to the main process.
My Code:
import multiprocessing
import time

def worker(name, num):
    print name, 'Starting'
    time.sleep(num)
    print name, 'Exiting'

if __name__ == '__main__':
    print 'main starting'
    p1 = multiprocessing.Process(target=worker, args=("p1", 7,))
    p2 = multiprocessing.Process(target=worker, args=("p2", 5,))
    p2.daemon = True
    p1.start()
    p2.start()
    time.sleep(4)
    print 'main exiting'
The output I'm getting is:
main starting
p1 Starting
p2 Starting
main exiting
p1 Exiting
Expected Output:
main starting
p1 Starting
p2 Starting
main exiting
p2 Exiting
p1 Exiting
After a few searches, I found this answer and inserted the following line into my code.
logger = multiprocessing.log_to_stderr(logging.INFO)
And the output I got is,
main starting
[INFO/p1] child process calling self.run()
p1 Starting
[INFO/p2] child process calling self.run()
p2 Starting
main exiting
[INFO/MainProcess] process shutting down
[INFO/MainProcess] calling terminate() for daemon p2
[INFO/MainProcess] calling join() for process p2
[INFO/MainProcess] calling join() for process p1
p1 Exiting
[INFO/p1] process shutting down
[INFO/p1] process exiting with exitcode 0
As per my understanding:
When a main process exits, it terminates all of its daemonic child processes. (Which can be seen here)
A main process cannot exit before all of its non-daemonic child processes have exited.
But here, why is the main process trying to shut down before p1 exits?
p1 starts and sleeps for 7 seconds.
p2 starts and sleeps for 5 seconds.
main process sleeps for 4 seconds.
main process wakes up after 4 seconds and waits for p1 to exit.
p2 wakes up after 5 seconds and exits.
p1 wakes up after 7 seconds and exits.
main process exits.
Wouldn't the above be a normal timeline for this program?
Can anyone please explain what's happening here and why?
EDIT
After adding the line p1.join() at the end of the code, I'm getting the following output:
main starting
[INFO/Process-1] child process calling self.run()
p1 Starting
[INFO/Process-2] child process calling self.run()
p2 Starting
main exiting
p2 Exiting
[INFO/Process-2] process shutting down
[INFO/Process-2] process exiting with exitcode 0
p1 Exiting
[INFO/Process-1] process shutting down
[INFO/Process-1] process exiting with exitcode 0
[INFO/MainProcess] process shutting down
When you see
[INFO/MainProcess] calling join() for process p1
it means the main process has not exited yet -- it's in the process of shutting down, but of course will not be done shutting down until that join returns... which will happen only once the joined process is done.
So the timeline is indeed as you expect it to be -- but since that join on p1 is the very last thing the main process ever does, you don't see that in the output or logging from it. (main has taken all the termination-triggered actions, but as a process it's still alive until then).
To verify, use (on a Unixy system) ps from another terminal while running this (perhaps with slightly longer delays to help you check): you will never see just a single Python process from this program -- there will be two (main and p1) until the very end.
Python Multiprocess | Daemon & Join
The question ends up asking more about the behaviour of the daemon flag and the join() method, hence here is a quick explanation with a simple script.
A daemon process is terminated automatically before the main program exits, to avoid leaving orphaned processes running. So any outstanding work left in the daemonized function won't run!
However, if join() is called on the daemonized process, then the main program will wait for that leftover work to finish.
INPUT
import multiprocessing
import time
import logging

def daemon():
    p = multiprocessing.current_process()
    print('Starting:', p.name, p.pid, flush=True)
    print('---' * 15)
    time.sleep(2)
    print('===' * 15 + ' < ' + f'Exiting={p.name, p.pid}' + ' > ' + '===' * 15, flush=True)

def non_daemon():
    p = multiprocessing.current_process()
    print('Starting:', p.name, p.pid, flush=True)
    print('---' * 15)
    print('===' * 15 + ' < ' + f'Exiting={p.name, p.pid}' + ' > ' + '===' * 15, flush=True)

if __name__ == '__main__':
    print('===' * 15 + ' < ' + 'MAIN PROCESS' + ' > ' + '===' * 15)
    logger = multiprocessing.log_to_stderr(logging.INFO)

    # DAEMON
    daemon_proc = multiprocessing.Process(name='MyDaemon', target=daemon)
    daemon_proc.daemon = True

    # NO-DAEMON
    normal_proc = multiprocessing.Process(name='non-daemon', target=non_daemon)
    normal_proc.daemon = False

    daemon_proc.start()
    time.sleep(2)
    normal_proc.start()
    # daemon_proc.join()
OUTPUT NO JOIN METHOD
============================================= < MAIN PROCESS > =============================================
Starting: MyDaemon 8448
---------------------------------------------
[INFO/MyDaemon] child process calling self.run()
[INFO/MainProcess] process shutting down
[INFO/MainProcess] calling terminate() for daemon MyDaemon
[INFO/MainProcess] calling join() for process MyDaemon
[INFO/MainProcess] calling join() for process non-daemon
Starting: non-daemon 6688
---------------------------------------------
============================================= < Exiting=('non-daemon', 6688) > =============================================
[INFO/non-daemon] child process calling self.run()
[INFO/non-daemon] process shutting down
[INFO/non-daemon] process exiting with exitcode 0
Process finished with exit code 0
The function daemon() is sleeping for 2 seconds, so when Python reaches the bottom of the script the program shuts down and daemon() never finishes.
Note:
[INFO/MainProcess] calling terminate() for daemon MyDaemon
Now, if the last line of the script, daemon_proc.join(), is uncommented:
OUTPUT + JOIN METHOD
============================================= < MAIN PROCESS > =============================================
Starting: MyDaemon 13588
---------------------------------------------
[INFO/MyDaemon] child process calling self.run()
============================================= < Exiting=('MyDaemon', 13588) > =============================================
[INFO/MyDaemon] process shutting down
[INFO/MyDaemon] process exiting with exitcode 0
[INFO/MainProcess] process shutting down
[INFO/MainProcess] calling join() for process non-daemon
Starting: non-daemon 13608
---------------------------------------------
============================================= < Exiting=('non-daemon', 13608) > =============================================
[INFO/non-daemon] child process calling self.run()
[INFO/non-daemon] process shutting down
[INFO/non-daemon] process exiting with exitcode 0
Process finished with exit code 0

Python Multiprocessing: Crash in subprocess?

What happens when a python script opens subprocesses and one process crashes?
https://stackoverflow.com/a/18216437/311901
Will the main process crash?
Will the other subprocesses crash?
Is there a signal or other event that's propagated?
When using multiprocessing.Pool, if one of the subprocesses in the pool crashes, you will not be notified at all, and a new process will immediately be started to take its place:
>>> import multiprocessing
>>> p = multiprocessing.Pool()
>>> p._processes
4
>>> p._pool
[<Process(PoolWorker-1, started daemon)>, <Process(PoolWorker-2, started daemon)>, <Process(PoolWorker-3, started daemon)>, <Process(PoolWorker-4, started daemon)>]
>>> [proc.pid for proc in p._pool]
[30760, 30761, 30762, 30763]
Then in another window:
dan#dantop:~$ kill 30763
Back to the pool:
>>> [proc.pid for proc in p._pool]
[30760, 30761, 30762, 30767] # New pid for the last process
You can continue using the pool as if nothing happened. However, any work item that the killed child process was running at the time it died will not be completed or restarted. If you were running a blocking map or apply call that was relying on that work item to complete, it will likely hang indefinitely. There is a bug filed for this, but the issue was only fixed in concurrent.futures.ProcessPoolExecutor, rather than in multiprocessing.Pool. Starting with Python 3.3, ProcessPoolExecutor will raise a BrokenProcessPool exception if a child process is killed, and disallow any further use of the pool. Sadly, multiprocessing didn't get this enhancement. For now, if you want to guard against a pool call blocking forever due to a sub-process crashing, you have to use ugly workarounds.
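For comparison, here is a rough sketch of the ProcessPoolExecutor behaviour described above (assumes Python 3.3+ on a POSIX system; the worker kills itself to simulate a hard crash):

import os
import signal
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures.process import BrokenProcessPool

def victim():
    # Simulate a crash of the worker process.
    os.kill(os.getpid(), signal.SIGKILL)

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=1) as executor:
        future = executor.submit(victim)
        try:
            future.result()
        except BrokenProcessPool as exc:
            print("Pool is broken, no silent hang:", exc)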
Note: The above only applies to a process in a pool actually crashing, meaning the process completely dies. If a sub-process raises an exception, that will be propagated up the parent process when you try to retrieve the result of the work item:
>>> def f(): raise Exception("Oh no")
...
>>> pool = multiprocessing.Pool()
>>> result = pool.apply_async(f)
>>> result.get()
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 528, in get
    raise self._value
Exception: Oh no
When using a multiprocessing.Process directly, the process object will show that the process has exited with a non-zero exit code if it crashes:
>>> def f(): time.sleep(30)
...
>>> p = multiprocessing.Process(target=f)
>>> p.start()
>>> p.join() # Kill the process while this is blocking, and join immediately ends
>>> p.exitcode
-15
The behavior is similar if an exception is raised:
from multiprocessing import Process

def f(x):
    raise Exception("Oh no")

if __name__ == '__main__':
    p = Process(target=f)
    p.start()
    p.join()
    print(p.exitcode)
    print("done")
Output:
Process Process-1:
Traceback (most recent call last):
  File "/usr/lib/python3.2/multiprocessing/process.py", line 267, in _bootstrap
    self.run()
  File "/usr/lib/python3.2/multiprocessing/process.py", line 116, in run
    self._target(*self._args, **self._kwargs)
TypeError: f() takes exactly 1 argument (0 given)
1
done
As you can see, the traceback from the child is printed, but it doesn't affect execution of the main process, which is able to show that the exit code of the child was 1.

Keyboard Interrupt with python's multiprocessing

I am having issues gracefully handling a KeyboardInterrupt with Python's multiprocessing.
(Yes, I know that Ctrl-C should not guarantee a graceful shutdown -- but let's leave that discussion for a different thread.)
Consider the following code, where I am using a multiprocessing.Manager().list(), which is a ListProxy that, as I understood it, handles multi-process access to a list.
When I Ctrl-C out of this, I get socket.error: [Errno 2] No such file or directory when trying to access the ListProxy.
I would love for the shared list not to be corrupted upon Ctrl-C. Is this possible?!
Note: I want to solve this without using Pools and Queues.
from multiprocessing import Process, Manager
from time import sleep

def f(process_number, shared_array):
    try:
        print "starting thread: ", process_number
        shared_array.append(process_number)
        sleep(3)
        shared_array.append(process_number)
    except KeyboardInterrupt:
        print "Keyboard interrupt in process: ", process_number
    finally:
        print "cleaning up thread", process_number

if __name__ == '__main__':
    processes = []
    manager = Manager()
    shared_array = manager.list()
    for i in xrange(4):
        p = Process(target=f, args=(i, shared_array))
        p.start()
        processes.append(p)
    try:
        for process in processes:
            process.join()
    except KeyboardInterrupt:
        print "Keyboard interrupt in main"
    for item in shared_array:
        # raises "socket.error: [Errno 2] No such file or directory"
        print item
If you run that and then hit Ctrl-C, we get the following:
starting thread: 0
starting thread: 1
starting thread: 3
starting thread: 2
^CKeyboard interupt in process: 3
Keyboard interupt in process: 0
cleaning up thread 3
cleaning up thread 0
Keyboard interupt in process: 1
Keyboard interupt in process: 2
cleaning up thread 1
cleaning up thread 2
Keyboard interupt in main
Traceback (most recent call last):
  File "multi.py", line 33, in <module>
    for item in shared_array:
  File "<string>", line 2, in __getitem__
  File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/managers.py", line 755, in _callmethod
    self._connect()
  File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/managers.py", line 742, in _connect
    conn = self._Client(self._token.address, authkey=self._authkey)
  File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/connection.py", line 169, in Client
    c = SocketClient(address)
  File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/connection.py", line 293, in SocketClient
    s.connect(address)
  File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/socket.py", line 224, in meth
    return getattr(self._sock,name)(*args)
socket.error: [Errno 2] No such file or directory
(Here is another approach using a multiprocessing.Lock with similar effect ... gist)
Similar Questions:
Catch Keyboard Interrupt to stop Python multiprocessing worker from working on queue
Keyboard Interrupts with python's multiprocessing Pool
Shared variable in python's multiprocessing
multiprocessing.Manager() fires up a child process which is responsible for handling your shared list proxy.
netstat output while running:
unix  2      [ ACC ]     STREAM     LISTENING     3921657    8457/python    /tmp/pymp-B9dcij/listener-X423Ml
This child process created by multiprocessing.Manager() is catching your SIGINT and exiting, causing everything related to it to be torn down, hence your "no such file" error (I also got several other errors depending on when I decided to send SIGINT).
To solve this you can declare a SyncManager object directly (instead of letting Manager() do it for you). This requires you to use the start() method to actually fire up the child process. The start() method takes an initialization function as its first argument (you can override the SIGINT handler for the manager there).
Code below, give this a try:
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager, SyncManager
from time import sleep
import signal

# handle SIGINT from SyncManager object
def mgr_sig_handler(signal, frame):
    print 'not closing the mgr'

# initializer for SyncManager
def mgr_init():
    signal.signal(signal.SIGINT, mgr_sig_handler)
    # signal.signal(signal.SIGINT, signal.SIG_IGN) # <- OR do this to just ignore the signal
    print 'initialized manager'

def f(process_number, shared_array):
    try:
        print "starting thread: ", process_number
        shared_array.append(process_number)
        sleep(3)
        shared_array.append(process_number)
    except KeyboardInterrupt:
        print "Keyboard interrupt in process: ", process_number
    finally:
        print "cleaning up thread", process_number

if __name__ == '__main__':
    processes = []

    # using SyncManager directly instead of letting Manager() do it for me
    manager = SyncManager()
    manager.start(mgr_init)  # fire up the child manager process

    shared_array = manager.list()
    for i in xrange(4):
        p = Process(target=f, args=(i, shared_array))
        p.start()
        processes.append(p)
    try:
        for process in processes:
            process.join()
    except KeyboardInterrupt:
        print "Keyboard interrupt in main"
    for item in shared_array:
        print item
As I answered on a similar question (duplicate):
Simplest solution - start the manager with
manager.start(signal.signal, (signal.SIGINT, signal.SIG_IGN))
instead of manager.start().
And check that the signal module is in your imports (import signal).
This catches and ignores SIGINT (Ctrl-C) in the manager process.
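Condensed, the relevant change looks roughly like this (the worker processes and the rest of the script stay exactly as in the question):

import signal
from multiprocessing.managers import SyncManager

if __name__ == '__main__':
    manager = SyncManager()
    # The initializer runs in the manager's server process and makes it ignore
    # SIGINT, so Ctrl-C no longer kills the process serving the list proxy.
    manager.start(signal.signal, (signal.SIGINT, signal.SIG_IGN))
    shared_array = manager.list()
    # ... start and join the worker processes as before ...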
