For some reason I can't access the Queue.Empty exception - what am I doing wrong here?
from multiprocessing import Process, Queue
# ...
try:
    action = action_queue.get(False)
    print "Action: " + action
except Queue.Empty:
    pass
The stack trace:
Traceback (most recent call last):
  File "C:\Program Files\Python27\lib\multiprocessing\process.py", line 258, in _bootstrap
    self.run()
  File "C:\Program Files\Python27\lib\multiprocessing\process.py", line 114, in run
    self._target(*self._args, **self._kwargs)
  File "D:\Development\populate.py", line 39, in permutate
    except Queue.Empty:
AttributeError: 'function' object has no attribute 'Empty'
The Queue.Empty exception lives in the Queue module, not in the multiprocessing.queues.Queue class. The Queue name you imported from multiprocessing is a factory function, which is why Python complains that a 'function' object has no attribute 'Empty'. The multiprocessing module itself raises the Queue module's Empty exception:
from multiprocessing import Queue
from Queue import Empty
q = Queue()
try:
    q.get(False)
except Empty:
    print "Queue was empty"
If you want to be very explicit and verbose, you can do this:
import multiprocessing
import Queue
q = multiprocessing.Queue()
try:
    q.get(False)
except Queue.Empty:
    print "Queue was empty"
Favoring the former approach is probably a better idea: there is only one Queue name to worry about, and you don't have to wonder whether you are working with the class or the module, as in the second example.
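For reference, on Python 3 the Queue module was renamed to queue, so the same pattern (a minimal sketch) becomes:
from multiprocessing import Queue
from queue import Empty  # the Python 3 name of the Queue module

q = Queue()
try:
    q.get(False)
except Empty:
    print("Queue was empty")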
When the initializer throws an error like the one below, the script never stops.
I would like to abort before the main work starts (i.e. never run do_something).
from multiprocessing import Pool
import contextlib
def initializer():
    raise Exception("init failed")

def do_something(args):
    # main process
    pass

pool = Pool(1, initializer=initializer)
with contextlib.closing(pool):
    try:
        pool.map_async(do_something, [1]).get(100)
    except:
        pool.terminate()
The stack trace below repeats forever on the console:
...
Exception: init failed
Process ForkPoolWorker-18:
Traceback (most recent call last):
  File "/home/hoge/anaconda3/lib/python3.6/multiprocessing/process.py", line 249, in _bootstrap
    self.run()
  File "/home/hoge/anaconda3/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "/home/hoge/anaconda3/lib/python3.6/multiprocessing/pool.py", line 103, in worker
    initializer(*initargs)
  File "hoge.py", line 5, in initializer
    raise Exception("init failed")
Exception: init failed
...
My workaround is to suppress the initializer error and return at the beginning of the worker function using a global flag, like below.
But I would like to learn a better way.
def initializer():
    try:
        raise Exception("init failed")
    except:
        global failed
        failed = True

def do_something(args):
    global failed
    if failed:
        # skip when initializer failed
        return
    # main process
After navigating through the implementation of multiprocessing using PyCharm, I'm convinced that there is no better solution: Pool starts a background thread that runs _maintain_pool(), which calls _repopulate_pool() whenever any worker process exits, whether it crashed or simply failed to initialize.
Check this out: Lib/multiprocessing/pool.py line 244
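If you can rely on Python 3.7+, one way to get the abort-early behaviour is concurrent.futures.ProcessPoolExecutor, which, unlike multiprocessing.Pool, marks the pool as broken when an initializer fails instead of endlessly respawning workers. A minimal sketch, not the original poster's code:
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures.process import BrokenProcessPool

def initializer():
    raise Exception("init failed")

def do_something(args):
    pass  # main work

if __name__ == "__main__":
    try:
        with ProcessPoolExecutor(max_workers=1, initializer=initializer) as pool:
            pool.submit(do_something, 1).result()
    except BrokenProcessPool as e:
        # raised instead of hanging forever on a respawn loop
        print("aborting, pool is broken:", e)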
I just came across the same woe. My first solution was to catch the exception and raise it in the worker function (see below). But on second thought it really means that initializer support in multiprocessing.Pool is broken and should not be used. So I now prefer to do the initialization work directly in the worker:
from multiprocessing import Pool
import contextlib, sys
_already_inited = False
def initializer():
    global _already_inited
    if _already_inited:
        return
    _already_inited = True
    raise Exception("init failed")

def do_something(args):
    initializer()
    # main process

pool = Pool(1)
with contextlib.closing(pool):
    pool.map_async(do_something, [1]).get(100)
Both the code and the stack trace are simpler.
Of course, all your worker functions need to call initializer().
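If you worry about forgetting that call, one option (my own sketch, not part of the original answer, reusing the initializer() defined above; with_init is a hypothetical name) is a small decorator that performs the initialization before every worker body:
import functools

def with_init(func):
    # hypothetical helper: run initializer() before the worker body
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        initializer()
        return func(*args, **kwargs)
    return wrapper

@with_init
def do_something(args):
    pass  # main work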
My initial solution was to defer the exception to the worker function.
from multiprocessing import Pool
import contextlib, sys
failed = None
def initializer():
    try:
        raise Exception("init failed")
    except:
        global failed
        failed = sys.exc_info()[1]

def do_something(args):
    global failed
    if failed is not None:
        raise RuntimeError(failed) from failed
    # main process

pool = Pool(1, initializer=initializer)
with contextlib.closing(pool):
    pool.map_async(do_something, [1]).get(100)
That way the caller still gets access to the exception.
(venv) kmkaplan#dev1:~/src/options$ python3 /tmp/try.py
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
  File "/tmp/try.py", line 7, in initializer
    raise Exception("init failed")
Exception: init failed

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/usr/lib/python3.5/multiprocessing/pool.py", line 119, in worker
    result = (True, func(*args, **kwds))
  File "/usr/lib/python3.5/multiprocessing/pool.py", line 44, in mapstar
    return list(map(*args))
  File "/tmp/try.py", line 15, in do_something
    raise RuntimeError(failed) from failed
RuntimeError: init failed
"""

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/tmp/try.py", line 20, in <module>
    pool.map_async(do_something, [1]).get(100)
  File "/usr/lib/python3.5/multiprocessing/pool.py", line 608, in get
    raise self._value
RuntimeError: init failed
I've written a class that inherits from multiprocessing.Process. It holds a serial.Serial() object in an instance attribute. The method self.loop() is supposed to read from and write to the serial port. When self.loop() is called, it is supposed to run as a separate process, which is a requirement from the person who asked me to write this. However, my code produces a strange error.
This is my code:
from multiprocessing import Process
import serial
import time
class MySerialManager(Process):
    def __init__(self, serial_port, baudrate=115200, timeout=1):
        super(MySerialManager, self).__init__(target=self.loop)
        # As soon as you uncomment this, you'll get an error.
        # self.ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)

    def loop(self):
        # Just some simple action for simplicity.
        for i in range(3):
            print("hi")
            time.sleep(1)

if __name__ == "__main__":
    msm = MySerialManager("COM14")
    try:
        msm.start()
    except KeyboardInterrupt:
        print("caught in main")
    finally:
        msm.join()
This is the error:
Traceback (most recent call last):
  File "test.py", line 22, in <module>
    msm.start()
  File "C:\Python\Python36\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
ValueError: ctypes objects containing pointers cannot be pickled

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "test.py", line 26, in <module>
    msm.join()
  File "C:\Python\Python36\lib\multiprocessing\process.py", line 120, in join
    assert self._popen is not None, 'can only join a started process'
AssertionError: can only join a started process

Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Python\Python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
    exitcode = _main(fd)
  File "C:\Python\Python36\lib\multiprocessing\spawn.py", line 115, in _main
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
I've also tried creating a serial port object outside of the class and passing it to the constructor. Furthermore, I've tried not inheriting from multiprocessing.Process but instead putting:
self.proc = Process(target=self.loop)
into the class and
try:
    msm.proc.start()
except KeyboardInterrupt:
    print("caught in main")
finally:
    msm.proc.join()
into the main block. Neither of them solved the problem.
Somebody pointed out that it seems like mixing multiprocessing and serial ports just doesn't work out. Is that true? If it is, could you please explain to me why this isn't working? Any help is greatly appreciated!
On Windows, a serial object cannot be shared between two processes (i.e. parent and child) once it has been created, so create the serial object in the child process and pass it as an argument to the functions that need it.
Try this:
from multiprocessing import Process
import serial
import time
class MySerialManager(Process):
    def __init__(self, serial_port, baudrate=115200, timeout=1):
        super(MySerialManager, self).__init__(target=self.loop_iterator, args=(serial_port, baudrate, timeout))
        # As soon as you uncomment this, you'll get an error.
        # self.ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)

    def loop_iterator(self, serial_port, baudrate, timeout):
        ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)
        self.loop(ser)

    def loop(self, ser):
        # Just some simple action for simplicity.
        # You can use ser here.
        for i in range(3):
            print("hi")
            time.sleep(1)

if __name__ == "__main__":
    msm = MySerialManager("COM4")
    try:
        msm.start()
    except KeyboardInterrupt:
        print("caught in main")
    finally:
        msm.join()
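An alternative with the same effect, if you would rather not pass a target at all, is to override run() and open the port there; run() executes in the child process, so the Serial object never has to be pickled. A minimal sketch (SerialWorker and the port name are placeholders, not code from the question):
from multiprocessing import Process
import serial

class SerialWorker(Process):
    def __init__(self, port, baudrate=115200, timeout=1):
        super(SerialWorker, self).__init__()
        # store only picklable constructor arguments on self
        self.port = port
        self.baudrate = baudrate
        self.timeout = timeout

    def run(self):
        # created in the child process, after start(), so nothing is pickled
        ser = serial.Serial(self.port, baudrate=self.baudrate, timeout=self.timeout)
        try:
            print(ser.readline())
        finally:
            ser.close()

if __name__ == "__main__":
    w = SerialWorker("COM4")
    w.start()
    w.join()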
I have a very simple test fixture that instantiates and closes a test class, APMSim, in different processes; the class is not picklable, so I have to use multiprocessing's Pool.imap to avoid instances being transferred between processes:
class APMSimFixture(TestCase):
    def setUp(self):
        self.pool = multiprocessing.Pool()
        self.sims = self.pool.imap(
            apmSimUp,
            range(numCores)
        )

    def tearDown(self):
        self.pool.map(
            simDown,
            self.sims
        )

    def test_empty(self):
        pass
However, when I run the empty Python unittest I encounter the following error:
Error
Traceback (most recent call last):
  File "/home/peng/git/datapassport/spookystuff/mav/pyspookystuff_test/mav/__init__.py", line 87, in tearDown
    self.sims
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 251, in map
    return self.map_async(func, iterable, chunksize).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
    raise self._value
Why could this happen? Is there a fix?
multiprocessing re-raises an exception from your worker function/child process in the parent process, but it loses the traceback in the transfer from child to parent. Check your worker function; that's the code that's going wrong. It might help to take whatever your worker function is and change:
def apmSimUp(...):
    ... body ...
to:
import traceback

def apmSimUp(...):
    try:
        ... body ...
    except:
        traceback.print_exc()
        raise
This explicitly prints the full, original exception traceback (then lets it propagate normally), so you can see what the real problem is.
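For illustration, here is a self-contained toy version of that pattern; the failing body is made up, and apmSimUp here is just a stand-in for your real worker:
import multiprocessing
import traceback

def apmSimUp(i):
    try:
        return 1 // 0  # stand-in for the real body; fails on purpose
    except Exception:
        traceback.print_exc()  # full traceback goes to the worker's stderr
        raise

if __name__ == "__main__":
    pool = multiprocessing.Pool()
    pool.map(apmSimUp, range(4))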
I'm using the Pool function of the multiprocessing module in order to run the same code in parallel on different data.
It turns out that on some data my code raises an exception, but the precise line in which this happens is not given:
Traceback (most recent call last):
  File "my_wrapper_script.py", line 366, in <module>
    main()
  File "my_wrapper_script.py", line 343, in main
    results = pool.map(process_function, folders)
  File "/usr/lib64/python2.6/multiprocessing/pool.py", line 148, in map
    return self.map_async(func, iterable, chunksize).get()
  File "/usr/lib64/python2.6/multiprocessing/pool.py", line 422, in get
    raise self._value
KeyError: 'some_key'
I am aware of multiprocessing.log_to_stderr(), but it seems to be useful only when concurrency issues arise, which is not my case.
Any ideas?
If you're using a new enough version of Python, you'll actually see the real exception get printed prior to that one. For example, here's a sample that fails:
import multiprocessing
def inner():
    raise Exception("FAIL")

def f():
    print("HI")
    inner()

p = multiprocessing.Pool()
p.apply(f)
p.close()
p.join()
Here's the exception when running this with Python 3.4:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
  File "/usr/local/lib/python3.4/multiprocessing/pool.py", line 119, in worker
    result = (True, func(*args, **kwds))
  File "test.py", line 9, in f
    inner()
  File "test.py", line 4, in inner
    raise Exception("FAIL")
Exception: FAIL
"""

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "test.py", line 13, in <module>
    p.apply(f)
  File "/usr/local/lib/python3.4/multiprocessing/pool.py", line 253, in apply
    return self.apply_async(func, args, kwds).get()
  File "/usr/local/lib/python3.4/multiprocessing/pool.py", line 599, in get
    raise self._value
Exception: FAIL
If using a newer version isn't an option, the easiest thing to do is to wrap your worker function in a try/except block that will print the exception prior to re-raising it:
import multiprocessing
import traceback
def inner():
    raise Exception("FAIL")

def f():
    try:
        print("HI")
        inner()
    except Exception:
        print("Exception in worker:")
        traceback.print_exc()
        raise

p = multiprocessing.Pool()
p.apply(f)
p.close()
p.join()
Output:
HI
Exception in worker:
Traceback (most recent call last):
  File "test.py", line 11, in f
    inner()
  File "test.py", line 5, in inner
    raise Exception("FAIL")
Exception: FAIL
Traceback (most recent call last):
  File "test.py", line 18, in <module>
    p.apply(f)
  File "/usr/local/lib/python2.7/multiprocessing/pool.py", line 244, in apply
    return self.apply_async(func, args, kwds).get()
  File "/usr/local/lib/python2.7/multiprocessing/pool.py", line 558, in get
    raise self._value
Exception: FAIL
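On Python 3 you can also ask Pool to hand the exception to the parent as soon as it happens, via the error_callback parameter of apply_async/map_async, instead of waiting for get() to re-raise it. A small sketch using the same toy functions:
import multiprocessing

def inner():
    raise Exception("FAIL")

def f():
    print("HI")
    inner()

def report(exc):
    # called in the parent as soon as the worker raises
    print("worker failed:", exc)

if __name__ == "__main__":
    pool = multiprocessing.Pool()
    pool.apply_async(f, error_callback=report)
    pool.close()
    pool.join()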
You need to implement your own try/except block in the worker. Depending on how you want to organize your code, you could log to stderr as you mention above, log to some other place like a file, return some sort of error code or even tag the exception with the current traceback and re-raise. Here's an example of the last technique:
import traceback
import multiprocessing as mp
class MyError(Exception):
    pass

def worker():
    try:
        # your real code here
        raise MyError("boom")
    except Exception, e:
        e.traceback = traceback.format_exc()
        raise

def main():
    pool = mp.Pool()
    try:
        print "run worker"
        result = pool.apply_async(worker)
        result.get()
    # handle exceptions you expect
    except MyError, e:
        print e.traceback
    # re-raise the rest
    except Exception, e:
        print e.traceback
        raise

if __name__ == "__main__":
    main()
It prints:
run worker
Traceback (most recent call last):
  File "doit.py", line 10, in worker
    raise MyError("boom")
MyError: boom
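Under Python 3 the same technique still works, since an exception's attribute dict is pickled along with it on the trip back to the parent; only the except syntax changes. A sketch of the Python 3 form:
import traceback
import multiprocessing as mp

class MyError(Exception):
    pass

def worker():
    try:
        raise MyError("boom")  # your real code here
    except Exception as e:
        e.traceback = traceback.format_exc()  # attach the formatted traceback
        raise

def main():
    with mp.Pool() as pool:
        try:
            pool.apply_async(worker).get()
        except MyError as e:
            print(e.traceback)

if __name__ == "__main__":
    main()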
I'm trying to execute a program in a Python subprocess:
class MiThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        try:
            from Queue import Queue, Empty
        except ImportError:
            # from queue import Queue, Empty  # python 3.x
            print "error"
        ON_POSIX = 'posix' in sys.builtin_module_names

        def enqueue_output(out, queue):
            for line in iter(out.readline, b''):
                queue.put(line)
            out.close()

        p = Popen(["java -Xmx256m -jar bin/HelloWorld.jar"], cwd=r'/home/karen/sphinx4-1.0beta5-src/sphinx4-1.0beta5/', stdout=PIPE, shell=True, bufsize=4024)
        q = Queue()
        t = Thread(target=enqueue_output, args=(p.stdout, q))
        print "estoy en el hilo"  # Spanish: "I'm in the thread"
        t.daemon = True  # thread dies with the program
        t.start()
        print l
But when I execute the thread, it fails with the following error:
Exception in thread Thread-1:
Traceback (most recent call last):
  File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
    self.run()
  File "/usr/lib/python2.7/site-packages/GNS3/Workspace.py", line 65, in run
    t = Thread(target=enqueue_output, args=(p.stdout, q))
NameError: global name 'Thread' is not defined

QObject::connect: Cannot queue arguments of type 'QTextCursor'
(Make sure 'QTextCursor' is registered using qRegisterMetaType().)
I have no idea what is happening!
Try changing:
t = Thread(target=enqueue_output, args=(p.stdout, q))
to:
t = threading.Thread(target=enqueue_output, args=(p.stdout, q))
In your current namespace, Thread is only reachable as threading.Thread (an attribute of the imported threading module), so when you write Thread on its own, Python can't find a match and raises that NameError.
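Alternatively, import the name directly so that the bare Thread resolves; a minimal self-contained sketch (tick is just a placeholder target):
from threading import Thread  # binds Thread itself into this namespace
import time

def tick():
    time.sleep(0.1)
    print("tick")

t = Thread(target=tick)
t.daemon = True  # thread dies with the program
t.start()
t.join()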