I'm having some trouble getting the following code to run in Eclipse on Windows. The code is from Doug Hellman:
import random
import multiprocessing
import time
class ActivePool:
    def __init__(self):
        super(ActivePool, self).__init__()
        self.mgr = multiprocessing.Manager()
        self.active = self.mgr.list()
        self.lock = multiprocessing.Lock()

    def makeActive(self, name):
        with self.lock:
            self.active.append(name)

    def makeInactive(self, name):
        with self.lock:
            self.active.remove(name)

    def __str__(self):
        with self.lock:
            return str(self.active)
def worker(s, pool):
    name = multiprocessing.current_process().name
    with s:
        pool.makeActive(name)
        print('Activating {} now running {}'.format(
            name, pool))
        time.sleep(random.random())
        pool.makeInactive(name)
if __name__ == '__main__':
    pool = ActivePool()
    s = multiprocessing.Semaphore(3)
    jobs = [
        multiprocessing.Process(
            target=worker,
            name=str(i),
            args=(s, pool),
        )
        for i in range(10)
    ]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    print('Now running: %s' % str(pool))
I get the following error, which I assume is due to some pickling issue with passing in pool as an argument to Process.
Traceback (most recent call last):
File "E:\Eclipse_Workspace\CodeExamples\FromCodes\CodeTest.py", line 50, in <module>
j.start()
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\connection.py", line 939, in reduce_pipe_connection
dh = reduction.DupHandle(conn.fileno(), access)
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\connection.py", line 170, in fileno
self._check_closed()
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\spawn.py", line 99, in spawn_main
new_handle = reduction.steal_handle(parent_pid, pipe_handle)
File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\reduction.py", line 87, in steal_handle
_winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
PermissionError: [WinError 5] Access is denied
A similar question's answer seems to suggest that I initialize pool with a function call at the top level, but I don't know how to apply that to this example. Do I initialize ActivePool in worker? That seems to defeat the spirit of Hellman's example.
Another answer suggests I use __getstate__ and __setstate__ to remove unpicklable objects and reconstruct them when unpickling, but I don't know a good way to do this with proxy objects like Manager, and I actually don't know what the unpicklable object is.
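For context, the generic pattern those answers describe looks roughly like this (a sketch for an ordinary unpicklable attribute such as an open file, not for Manager proxies):

class Owner:
    def __init__(self, path):
        self.path = path
        self.fh = open(path)           # an attribute that cannot be pickled

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('fh')                # drop the unpicklable attribute
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.fh = open(self.path)      # rebuild it after unpickling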
Is there any way I can make this example work with minimal changes? I really wish to understand what is going on under the hood. Thanks!
Edit - Problem Solved:
The pickling issue was pretty obvious in hindsight. ActivePool's __init__ contained a Manager() object, which seems to be unpicklable. The code runs normally, as in Hellman's example, if we remove self.mgr and initialize the list proxy object in one line:
def __init__(self):
    super(ActivePool, self).__init__()
    self.active = multiprocessing.Manager().list()
    self.lock = multiprocessing.Lock()
Comment: The 'join()' was in the Hellman example, but I forgot to add it into the code snippet. Any other ideas?
I'm running Linux and it works as expected; Windows behaves differently. Read understanding-multiprocessing-shared-memory-management-locks-and-queues-in-pyt
To determine which parameter of args=(s, pool) raises the error, remove one and use it as a global.
Change:
def worker(s):
...
args=(s,),
Note: There is no need to wrap a multiprocessing.Manager().list() in a Lock(); this is not the cause of your error.
Question: Is there any way I can make this example work with minimal changes?
Your __main__ process terminates, and therefore all started processes die at an unpredictable point of execution. Simply add a .join() at the end to make __main__ wait until all processes are done:
for j in jobs:
    j.join()
print('EXIT __main__')
Tested with Python: 3.4.2
Related
Here is my code:
from MyDetector import Helmet_Detector
from multiprocessing import Process

class Processor(Process):
    def __init__(self):
        super().__init__()
        self.helmet_detector = Helmet_Detector()

    def run(self):
        print(111)

if __name__ == '__main__':
    p = Processor()
    p.start()
As you can see, the class 'Processor' inherits from multiprocessing.Process, and Helmet_Detector is a YOLO model using CUDA. But when I ran it, the error occurred as follows:
THCudaCheck FAIL file=C:\w\1\s\tmp_conda_3.7_075911\conda\conda-bld\pytorch_1579075223148\work\torch/csrc/generic/StorageSharing.cpp line=245 error=71 : operation not supported
Traceback (most recent call last):
File "E:/python-tasks/WHU-CSTECH/Processor.py", line 17, in <module>
p.start()
File "C:\Anaconda\lib\multiprocessing\process.py", line 112, in start
self._popen = self._Popen(self)
File "C:\Anaconda\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Anaconda\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Anaconda\lib\multiprocessing\popen_spawn_win32.py", line 89, in __init__
reduction.dump(process_obj, to_child)
File "C:\Anaconda\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "C:\Anaconda\lib\site-packages\torch\multiprocessing\reductions.py", line 242, in reduce_tensor
event_sync_required) = storage._share_cuda_()
RuntimeError: cuda runtime error (71) : operation not supported at C:\w\1\s\tmp_conda_3.7_075911\conda\conda-bld\pytorch_1579075223148\work\torch/csrc/generic/StorageSharing.cpp:245
Then I tried to initialize the Helmet_Detector in the run method:
def run(self):
    print(111)
    self.helmet_detector = Helmet_Detector()
No error occurred. Could anyone please tell me the reason for this and how I could solve this problem? Thank you!
The error occurs because multiprocessing in Python requires Process class objects to be picklable so that their data can be transferred to the process being created, i.e. serialization and deserialization of the object. To overcome the issue, lazily instantiate the Helmet_Detector object (hint: try a property in Python).
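A minimal sketch of that lazy-instantiation idea (illustrative only; Helmet_Detector is the poster's own class from MyDetector):

from multiprocessing import Process

class Processor(Process):
    def __init__(self):
        super().__init__()
        self._helmet_detector = None            # nothing CUDA-related is created in the parent

    @property
    def helmet_detector(self):
        # Built on first access, which happens inside the child process,
        # so the CUDA model never has to be pickled.
        if self._helmet_detector is None:
            from MyDetector import Helmet_Detector
            self._helmet_detector = Helmet_Detector()
        return self._helmet_detector

    def run(self):
        print(111)
        detector = self.helmet_detector         # model is created here, in the child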
Edit:
As per the comment by @jodag, you should use PyTorch's multiprocessing library instead of the standard multiprocessing library.
Example:
import torch.multiprocessing as mp
class Processor(mp.Process):
.
.
.
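Putting both suggestions together, a rough sketch might look like this (untested, since it depends on the poster's Helmet_Detector; the model is still built inside run() so nothing CUDA-related is pickled):

import torch.multiprocessing as mp

class Processor(mp.Process):
    def run(self):
        # Import and build the CUDA model in the child process.
        from MyDetector import Helmet_Detector
        self.helmet_detector = Helmet_Detector()
        print(111)

if __name__ == '__main__':
    p = Processor()
    p.start()
    p.join()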
I'm far from being adept in Python, and for three days I've been trying to figure out how to properly work with multiprocessing, but now I've hit a dead end and need some assistance.
Basically, what the program is supposed to do is control different segments of an LED strip from multiple (semi-random) inputs at the same time. Therefore I came to the conclusion that I probably need to use multiprocessing.
I've written a module for it using an existing module from Adafruit. (I stripped it down for demonstration)
import time
import RPi.GPIO as GPIO
from multiprocessing import Lock
import Adafruit_WS2801
import Adafruit_GPIO.SPI as SPI

class Pixels(object):
    def __init__(self, pixelCount, spiPort, spiDevice):
        self.l = Lock()
        self.pixels = Adafruit_WS2801.WS2801Pixels(pixelCount, spi=SPI.SpiDev(spiPort, spiDevice), gpio=GPIO)
        # Clear all the pixels to turn them off.
        self.pixels.clear()
        self.pixels.show()

    def set_color(self, target_pixel, color=(255,0,0)):
        for k in target_pixel:
            self.l.acquire()
            self.pixels.set_pixel(k, Adafruit_WS2801.RGB_to_color(color[0], color[1], color[2]))
            self.l.release()
        self.l.acquire()
        self.pixels.show()
        self.l.release()

    def blink_color_blank(self, target_pixel, blink_times=1, wait=0.5, color=(255,0,0)):
        for i in range(blink_times):
            self.set_color(target_pixel, color)
            time.sleep(wait)
            self.set_color(target_pixel, (0,0,0))
            time.sleep(wait)
Inside self.pixels, all the information about which LED should have which color is stored.
self.pixels.set_pixel() writes the new values to that storage.
self.pixels.show() actually sends these values to the SPI bus.
Now my attempt at multiprocessing starts like this.
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
import LED_WS2801

if __name__ == '__main__':
    BaseManager.register('LedClass', LED_WS2801.Pixels)
    manager = BaseManager()
    manager.start()
    inst = manager.LedClass(10, 0, 0)
Now my problem arises when I start a process while another is still active.
p = Process(target=inst.blink_color_blank, args=([6,7,8], 10, 0.25, (255,0,0),))
p.start()
p = Process(target=inst.set_color, args=([3,4,5,6],(0,255,0),))
p.start()
p.join()
This gives me following error:
Process Process-3:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "<string>", line 2, in blink_color_blank
File "/usr/lib/python2.7/multiprocessing/managers.py", line 759, in _callmethod
kind, result = conn.recv()
EOFError
But when I do something like this, everything is fine.
p = Process(target=inst.blink_color_blank, args=([6,7,8], 10, 0.25, (255,0,0),))
p.start()
b = Process(target=inst.set_color, args=([3,4,5,6],(0,255,0),))
b.start()
p.join()
b.join()
But I don't know my final number of processes as they get spawned by external inputs, so I need some way to control a variable number of processes. My idea was to use a list like this:
jobs = []
jobs.append(Process(target=inst.set_color, args=([0,1,2],(255,0,255),)))
jobs[0].start()
But much to my disappointment, this returns another error:
Process Process-2:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "<string>", line 2, in set_color
File "/usr/lib/python2.7/multiprocessing/managers.py", line 755, in _callmethod
self._connect()
File "/usr/lib/python2.7/multiprocessing/managers.py", line 742, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
File "/usr/lib/python2.7/multiprocessing/connection.py", line 169, in Client
c = SocketClient(address)
File "/usr/lib/python2.7/multiprocessing/connection.py", line 308, in SocketClient
s.connect(address)
File "/usr/lib/python2.7/socket.py", line 228, in meth
return getattr(self._sock,name)(*args)
error: [Errno 2] No such file or directory
I hope I made my problem as understandable and clear as possible. As I haven't found anything like this I guess I'm doing something fundamentally wrong. So would you help me out, please?
Thank you.
You have to wait for all child processes to finish their jobs. By re-assigning p:
p = Process(...)
p.start()
p = Process(...)
p.start()
p.join()
you are only waiting for the later one assigned to p to finish; the error comes when the master wants to terminate while the first child process is still running. Try this to wait for all children to finish:
p1 = Process(target=inst.blink_color_blank, args=([6,7,8], 10, 0.25, (255,0,0),))
p1.start()
p2 = Process(target=inst.set_color, args=([3,4,5,6],(0,255,0),))
p2.start()
childs = [p1, p2]
while any(p.is_alive() for p in childs):
    for p in childs:
        p.join(1)
Besides, there is a multiprocessing.active_children() API to get all the children of the current process, in case you really can't gather the list from the beginning.
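A small sketch of that variant (not from the original answer), which waits on whatever children happen to be alive:

import multiprocessing

def wait_for_children():
    # active_children() returns the list of still-running child processes
    # (and also joins any that have already finished).
    while multiprocessing.active_children():
        for child in multiprocessing.active_children():
            child.join(1)            # wait up to one second per child, then re-check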
I'm reading and applying code from the Python book, and I can't get multiprocessing to work in the simple example you can see below:
import multiprocessing
def myProcess():
    print("Currently Executing Child Process")
    print("This process has it's own instance of the GIL")
print("Executing Main Process")
print("Creating Child Process")
myProcess = multiprocessing.Process(target=myProcess)
myProcess.start()
myProcess.join()
print("Child Process has terminated, terminating main process")
My platform is Windows 10 64-bit, and using if __name__ == "__main__": doesn't work in this case. What's wrong here? This code should work in Python 3.5 and above; the Python version I use is 3.7. Full error message below:
C:\Users\Xian\AppData\Local\Programs\Python\Python37-32\python.exe "C:/OneDrive/Utilizing sub-process.py"
Traceback (most recent call last):
File "C:/OneDrive/Utilizing sub-process.py", line 25, in <module>
myProcess.start()
File "C:\Users\Xian\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\process.py", line 112, in start
self._popen = self._Popen(self)
File "C:\Users\Xian\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\Xian\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\Xian\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\Xian\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <function myProcess at 0x02B2D420>: it's not the same object as __main__.myProcess
Try this:
def test():
    import multiprocessing
    multiprocessing.set_start_method("fork")
    p = multiprocessing.Process(target=xxx)
    p.start()
python multiprocessing Contexts and start methods
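For reference, a minimal sketch of setting a start method explicitly under the __main__ guard; note that "fork" is only available on Unix-like systems, while Windows supports only "spawn":

import multiprocessing

def child():
    print("Currently executing child process")

if __name__ == "__main__":
    # On Windows the only available method is "spawn"; "fork" exists on Unix.
    multiprocessing.set_start_method("spawn")
    p = multiprocessing.Process(target=child)
    p.start()
    p.join()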
I've written a class that inherits from multiprocessing.Process. It holds a serial.Serial() object in an instance attribute. The method self.loop() is supposed to read from and write to the serial port. When self.loop() is called, it is supposed to run as a separate process, which is a requirement of the person who asked me to write this. However, my code produces a strange error.
This is my code:
from multiprocessing import Process
import serial
import time

class MySerialManager(Process):
    def __init__(self, serial_port, baudrate=115200, timeout=1):
        super(MySerialManager, self).__init__(target=self.loop)
        # As soon as you uncomment this, you'll get an error.
        # self.ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)

    def loop(self):
        # Just some simple action for simplicity.
        for i in range(3):
            print("hi")
            time.sleep(1)

if __name__ == "__main__":
    msm = MySerialManager("COM14")
    try:
        msm.start()
    except KeyboardInterrupt:
        print("caught in main")
    finally:
        msm.join()
This is the error:
Traceback (most recent call last):
File "test.py", line 22, in <module>
msm.start()
File "C:\Python\Python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
ValueError: ctypes objects containing pointers cannot be pickled
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test.py", line 26, in <module>
msm.join()
File "C:\Python\Python36\lib\multiprocessing\process.py", line 120, in join
assert self._popen is not None, 'can only join a started process'
AssertionError: can only join a started process
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Python\Python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Python\Python36\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
I've also tried creating a serial port object outside of the class and passing it to the constructor. Furthermore, I've tried not inheriting from multiprocessing.Process and instead putting:
self.proc = Process(target=self.loop)
into the class and
try:
    msm.proc.start()
except KeyboardInterrupt:
    print("caught in main")
finally:
    msm.proc.join()
into the main block. Neither of them solved the problem.
Somebody pointed out that it seems like mixing multiprocessing and serial ports just doesn't work out. Is that true? If it is, could you please explain to me why this isn't working? Any help is greatly appreciated!
In Windows, the serial object, once created, cannot be shared between two processes (i.e. parent and child), so create the serial object in the child process and pass its reference as an argument to other functions. Try this:
from multiprocessing import Process
import serial
import time

class MySerialManager(Process):
    def __init__(self, serial_port, baudrate=115200, timeout=1):
        super(MySerialManager, self).__init__(target=self.loop_iterator, args=(serial_port, baudrate, timeout))
        # As soon as you uncomment this, you'll get an error.
        # self.ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)

    def loop_iterator(self, serial_port, baudrate, timeout):
        ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)
        self.loop(ser)

    def loop(self, ser):
        # Just some simple action for simplicity.
        # you can use ser here
        for i in range(3):
            print("hi")
            time.sleep(1)

if __name__ == "__main__":
    msm = MySerialManager("COM4")
    try:
        msm.start()
    except KeyboardInterrupt:
        print("caught in main")
    finally:
        msm.join()
The program is designed to set up a process creation listener on various IPs on the network. The code is:
import multiprocessing
from wmi import WMI

dynaIP = ['192.168.165.1','192.168.165.2','192.168.165.3','192.168.165.4',]

class WindowsMachine:
    def __init__(self, ip):
        self.ip = ip
        self.connection = WMI(self.ip)
        self.created_process = multiprocessing.Process(target=self.monitor_created_process, args=(self.connection,))
        self.created_process.start()

    def monitor_created_process(self, remote_pc):
        while True:
            created_process = remote_pc.Win32_Process.watch_for("creation")
            print('Creation:', created_process.Caption, created_process.ProcessId, created_process.CreationDate)
        return created_process

if __name__ == '__main__':
    for ip in dynaIP:
        print('Running', ip)
        WindowsMachine(ip)
When running the code I get the following error:
Traceback (most recent call last):
File "U:/rmarshall/Work For Staff/ROB/_Python/__Python Projects Code/multipro_instance_stack_question.py", line 26, in <module>
WindowsMachine(ip)
File "U:/rmarshall/Work For Staff/ROB/_Python/__Python Projects Code/multipro_instance_stack_question.py", line 14, in __init__
self.created_process.start()
File "C:\Python33\lib\multiprocessing\process.py", line 111, in start
self._popen = Popen(self)
File "C:\Python33\lib\multiprocessing\forking.py", line 248, in __init__
dump(process_obj, to_child, HIGHEST_PROTOCOL)
File "C:\Python33\lib\multiprocessing\forking.py", line 166, in dump
ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <class 'PyIID'>: attribute lookup builtins.PyIID failed
I have looked at other questions surrounding this issue, but I feel none have clearly explained the workaround for pickling class instances.
Is anyone able to demonstrate this?
The problem here is that multiprocessing pickles the arguments to the process in order to pass them around. The WMI class is not pickleable, so cannot be passed as an argument to multiprocessing.Process.
If you want this to work, you can either:
switch to using threads instead of processes (see the threading module)
create the WMI object in monitor_created_process
I'd recommend the former, because there doesn't seem to be much use in creating full-blown processes.
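A rough sketch of the threaded route (untested; the answer does not spell out the details, and with win32com each worker thread typically needs its own COM initialization via pythoncom.CoInitialize() and its own WMI connection, so both are created inside the thread here):

import threading
import pythoncom
from wmi import WMI

class WindowsMachine:
    def __init__(self, ip):
        self.ip = ip
        self.watcher = threading.Thread(target=self.monitor_created_process, daemon=True)
        self.watcher.start()

    def monitor_created_process(self):
        pythoncom.CoInitialize()     # COM must be initialized in each non-main thread
        remote_pc = WMI(self.ip)     # connection created inside the thread, never pickled
        while True:
            created = remote_pc.Win32_Process.watch_for("creation")
            print('Creation:', created.Caption, created.ProcessId, created.CreationDate)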