starting a process with eval - python

**exe.py**
def createProcess(f):
    try:
        from multiprocessing import Process
        newProcess = Process(target=f)
        newProcess.start()
        newProcess.join()
    except:
        print "Error creating process"

def lala():
    print "success creating process"

print "trying to make a process"
from multiprocessing import Process
newProcess = Process(target=lala)
newProcess.start()
**main.py**
if __name__ == '__main__':
    f = open("exe.py", "r")
    b = f.read()
    f.close()
    o = compile(b, "exe.py", "exec")
    eval(o)
I get the following error:
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Program Files\Opsware\agent\lcpython15\lib\multiprocessing\forking.py", line 374, in main
    self = load(from_parent)
  File "C:\Program Files\Opsware\agent\lcpython15\lib\pickle.py", line 1378, in load
    return Unpickler(file).load()
  File "C:\Program Files\Opsware\agent\lcpython15\lib\pickle.py", line 858, in load
    dispatch[key](self)
  File "C:\Program Files\Opsware\agent\lcpython15\lib\pickle.py", line 1090, in load_global
    klass = self.find_class(module, name)
  File "C:\Program Files\Opsware\agent\lcpython15\lib\pickle.py", line 1126, in find_class
    klass = getattr(mod, name)
AttributeError: 'module' object has no attribute 'lala'
Later edit:
I changed exe.py to:
def lala2():
    f = open("C:\\work\\asdfas", "w")
    f.write("dsdfg\r\n")
    f.close()
    print "success creating process"

if __name__ == '__main__':
    print "trying to make a process"
    from multiprocessing import Process, freeze_support
    freeze_support()
    import pickle
    l = pickle.dumps(lala2)
    pickle.loads(l)()
    newProcess = Process(target=pickle.loads(l))
    newProcess.daemon = True
    newProcess.start()
    if newProcess.is_alive():
        print "alive"
    else:
        print "not alive"
    import time
    time.sleep(12)
This should make it importable, and the pickle test shows that my function is picklable. Any suggestions on why it behaves this way?

You're on Windows. Unfortunately, on Windows it is not possible to use a dynamically compiled code object as a target for multiprocessing, because the Windows implementation of multiprocessing must be able to import the main module (the technical reason is that Windows lacks a native fork() equivalent). Please see the multiprocessing programming guidelines for Windows for more details on the applicable restrictions.
The solution is to write the code out to a file, then import it before starting the process.
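A minimal sketch of that approach (the module name generated_worker.py is my own placeholder; lala is the function from the question): write the dynamic code to a real file, then import it, so the child process can find the target function by name.
# Write the dynamic code to a real module file...
code = '''
def lala():
    print("success creating process")
'''

with open("generated_worker.py", "w") as f:
    f.write(code)

if __name__ == '__main__':
    from multiprocessing import Process
    import generated_worker  # ...so the spawned child can re-import it by name
    p = Process(target=generated_worker.lala)
    p.start()
    p.join()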

Related

can't pickle _thread.RLock objects when using a webservice

I am using Python 3.6.
I am trying to use multiprocessing from inside a class method, shown below as SubmitJobsUsingMultiProcessing(), which in turn calls another class method.
I keep running into this error: TypeError: can't pickle _thread.RLock objects.
I have no idea what this means. I suspect the line below that establishes a connection to a webserver API might be responsible, but I am at a loss to understand why.
I am not a professional programmer (I code as part of a portfolio modeling team), so if this is an obvious question please pardon my ignorance, and many thanks in advance.
import multiprocessing as mp
import functools
import pickle
import sys
import time
from collections import OrderedDict
# Client comes from the web-service library in use; its import is not shown in the original snippet.

def SubmitJobsUsingMultiProcessing(self, PartitionsOfAnalysisDates, PickleTheJobIdsDict=True):
    if self.ExportSetResult == "SUCCESS":
        NumPools = mp.cpu_count()
        PoolObj = mp.Pool(NumPools)
        userId, clientId, password, expSetName = self.userId, self.clientId, self.password, self.expSetName
        PartialFunctor = functools.partial(self.SubmitJobsAsOfDate, userId=userId, clientId=clientId, password=password, expSetName=expSetName)
        Result = PoolObj.map(self.SubmitJobsAsOfDate, PartitionsOfAnalysisDates)
        BatchJobIDs = OrderedDict((key, val) for Dct in Result for key, val in Dct.items())
        f_pickle = open(self.JobIdPickleFileName, 'wb')
        pickle.dump(BatchJobIDs, f_pickle, -1)
        f_pickle.close()

def SubmitJobsAsOfDate(self, ListOfDatesForBatchJobs, userId, clientId, password, expSetName):
    client = Client(self.url, proxy=self.proxysettings)
    if self.ExportSetResult != "SUCCESS":
        print("The export set creation was not successful...exiting")
        sys.exit()
    BatchJobIDs = OrderedDict()
    NumJobsSubmitted = 0
    CurrentProcessID = mp.current_process()
    for AnalysisDate in ListOfDatesForBatchJobs:
        jobName = "Foo_" + str(AnalysisDate)
        print('Sending job from process : ', CurrentProcessID, ' : ', jobName)
        jobId = client.service.SubmitExportJob(userId, clientId, password, expSetName, AnalysisDate, jobName, False)
        BatchJobIDs[AnalysisDate] = jobId
        NumJobsSubmitted += 1
        # Sleep for 30 secs every 100 jobs
        if NumJobsSubmitted % 100 == 0:
            print('100 jobs have been submitted thus far from process : ', CurrentProcessID, '---Sleeping for 30 secs to avoid the SSL time out error')
            time.sleep(30)
    self.BatchJobIDs = BatchJobIDs
    return BatchJobIDs
Below is the trace:
Traceback (most recent call last):
  File "C:\Program Files\JetBrains\PyCharm 2017.2.3\helpers\pydev\pydevd.py", line 1599, in <module>
    globals = debugger.run(setup['file'], None, None, is_module)
  File "C:\Program Files\JetBrains\PyCharm 2017.2.3\helpers\pydev\pydevd.py", line 1026, in run
    pydev_imports.execfile(file, globals, locals)  # execute the script
  File "C:\Program Files\JetBrains\PyCharm 2017.2.3\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "C:/Users/trpff85/PycharmProjects/QuantEcon/BDTAPIMultiProcUsingPathos.py", line 289, in <module>
    BDTProcessObj.SubmitJobsUsingMultiProcessing(Partitions)
  File "C:/Users/trpff85/PycharmProjects/QuantEcon/BDTAPIMultiProcUsingPathos.py", line 190, in SubmitJobsUsingMultiProcessing
    Result = PoolObj.map(self.SubmitJobsAsOfDate, PartitionsOfAnalysisDates)
  File "C:\Users\trpff85\AppData\Local\Continuum\anaconda3\lib\multiprocessing\pool.py", line 266, in map
    return self._map_async(func, iterable, mapstar, chunksize).get()
  File "C:\Users\trpff85\AppData\Local\Continuum\anaconda3\lib\multiprocessing\pool.py", line 644, in get
    raise self._value
  File "C:\Users\trpff85\AppData\Local\Continuum\anaconda3\lib\multiprocessing\pool.py", line 424, in _handle_tasks
    put(task)
  File "C:\Users\trpff85\AppData\Local\Continuum\anaconda3\lib\multiprocessing\connection.py", line 206, in send
    self._send_bytes(_ForkingPickler.dumps(obj))
  File "C:\Users\trpff85\AppData\Local\Continuum\anaconda3\lib\multiprocessing\reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
TypeError: can't pickle _thread.RLock objects
I am struggling with a similar problem. There was a bug in Python <= 3.5 whereby _thread.RLock objects did not raise an error when pickled (they cannot actually be pickled). For a Pool object to work, the function and its arguments must be passed to it from the main process, and this relies on pickling (pickling is a means of serialising objects). In my case, the RLock object was somewhere in the logging module. I suspect your code will run without error on 3.5. Good luck. See this bug resolution.
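One common workaround (a hedged sketch, not from the original thread; the class and attribute names are illustrative): when the object whose bound method you hand to Pool.map holds something unpicklable, such as a web-service client or logger containing a _thread.RLock, exclude that attribute from pickling with __getstate__ and rebuild it in __setstate__ inside the worker.
import multiprocessing as mp

class JobSubmitter:
    def __init__(self, url):
        self.url = url
        self.client = self._make_client()  # pretend this object holds an RLock

    def _make_client(self):
        # Stand-in for the real web-service Client(self.url, ...), which is unpicklable.
        return object()

    def __getstate__(self):
        state = self.__dict__.copy()
        del state['client']  # drop the unpicklable attribute before pickling
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.client = self._make_client()  # rebuild it inside the worker process

    def submit(self, date):
        return (date, mp.current_process().name)

if __name__ == '__main__':
    js = JobSubmitter('http://example.invalid')
    with mp.Pool(2) as pool:
        # Pickling the bound method js.submit pickles js, which now succeeds.
        print(pool.map(js.submit, ['2021-01-01', '2021-01-02']))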

python multiprocessing pickling/manager/misc error (from PMOTW)

I'm having some trouble getting the following code to run in Eclipse on Windows. The code is from Doug Hellmann:
import random
import multiprocessing
import time

class ActivePool:
    def __init__(self):
        super(ActivePool, self).__init__()
        self.mgr = multiprocessing.Manager()
        self.active = self.mgr.list()
        self.lock = multiprocessing.Lock()

    def makeActive(self, name):
        with self.lock:
            self.active.append(name)

    def makeInactive(self, name):
        with self.lock:
            self.active.remove(name)

    def __str__(self):
        with self.lock:
            return str(self.active)

def worker(s, pool):
    name = multiprocessing.current_process().name
    with s:
        pool.makeActive(name)
        print('Activating {} now running {}'.format(
            name, pool))
        time.sleep(random.random())
        pool.makeInactive(name)

if __name__ == '__main__':
    pool = ActivePool()
    s = multiprocessing.Semaphore(3)
    jobs = [
        multiprocessing.Process(
            target=worker,
            name=str(i),
            args=(s, pool),
        )
        for i in range(10)
    ]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    print('Now running: %s' % str(pool))
I get the following error, which I assume is due to some pickling issue with passing in pool as an argument to Process.
Traceback (most recent call last):
  File "E:\Eclipse_Workspace\CodeExamples\FromCodes\CodeTest.py", line 50, in <module>
    j.start()
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\connection.py", line 939, in reduce_pipe_connection
    dh = reduction.DupHandle(conn.fileno(), access)
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\connection.py", line 170, in fileno
    self._check_closed()
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\connection.py", line 136, in _check_closed
    raise OSError("handle is closed")
OSError: handle is closed
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\spawn.py", line 99, in spawn_main
    new_handle = reduction.steal_handle(parent_pid, pipe_handle)
  File "C:\Users\Bob\AppData\Local\Programs\Python\Python36-32\lib\multiprocessing\reduction.py", line 87, in steal_handle
    _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
PermissionError: [WinError 5] Access is denied
A similar question's answer seems to suggest that I initialize pool with a function call at the top level, but I don't know how to apply that to this example. Do I initialize ActivePool in worker? That seems to defeat the spirit of Hellmann's example.
Another answer suggests using __getstate__ and __setstate__ to remove unpicklable objects and reconstruct them when unpickling, but I don't know a good way to do this with proxy objects like Manager, and I don't actually know what the unpicklable object is.
Is there any way I can make this example work with minimal changes? I really want to understand what is going on under the hood. Thanks!
Edit - Problem Solved:
The pickling issue was pretty obvious in hindsight. The ActivePool's __init__ contained a Manager() object, which is unpicklable. The code runs normally, as per Hellmann's example, if we remove self.mgr and initialize the list proxy object in one line:
def __init__(self):
    super(ActivePool, self).__init__()
    self.active = multiprocessing.Manager().list()
    self.lock = multiprocessing.Lock()
Comment: The join() was in Hellmann's example, but I forgot to add it to the code snippet. Any other ideas?
I'm running Linux and it works as expected; Windows behaves differently, see understanding-multiprocessing-shared-memory-management-locks-and-queues-in-pyt.
To determine which parameter of args=(s, pool) raises the error, remove one and use it as a global instead.
Change:
def worker(s):
    ...
    args=(s,),
Note: there is no need to guard a multiprocessing.Manager().list() with a Lock(), but that is not the culprit of your error.
Question: Is there any way I can make this example work with minimal changes?
Your __main__ process terminates, and therefore all started processes die at an unpredictable point of execution. Simply add a .join() at the end so that __main__ waits until all processes are done:
for j in jobs:
    j.join()
print('EXIT __main__')
Tested with Python 3.4.2.
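Putting the two fixes together (a consolidated sketch assembled from the corrected __init__ above and the restored join(); not verbatim from the thread), the minimally changed example looks like this:
import multiprocessing
import random
import time

class ActivePool:
    def __init__(self):
        super(ActivePool, self).__init__()
        # No self.mgr: the Manager object itself is unpicklable, so only
        # the list proxy it hands out is kept on the instance.
        self.active = multiprocessing.Manager().list()
        self.lock = multiprocessing.Lock()

    def makeActive(self, name):
        with self.lock:
            self.active.append(name)

    def makeInactive(self, name):
        with self.lock:
            self.active.remove(name)

    def __str__(self):
        with self.lock:
            return str(self.active)

def worker(s, pool):
    name = multiprocessing.current_process().name
    with s:
        pool.makeActive(name)
        print('Activating {} now running {}'.format(name, pool))
        time.sleep(random.random())
        pool.makeInactive(name)

if __name__ == '__main__':
    pool = ActivePool()
    s = multiprocessing.Semaphore(3)
    jobs = [multiprocessing.Process(target=worker, name=str(i), args=(s, pool))
            for i in range(10)]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()  # wait for all workers so __main__ does not exit early
    print('Now running: %s' % str(pool))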

Mixing multiprocessing and serial ports

I've written a class which inherits multiprocessing.Process. It holds a serial.Serial() object in an instance attribute. The method self.loop() is supposed to read from and write to the serial port. When self.loop() is called, it is supposed to run as a separate process, which is a requirement of the person who asked me to write this. However, my code produces a strange error.
This is my code:
from multiprocessing import Process
import serial
import time

class MySerialManager(Process):
    def __init__(self, serial_port, baudrate=115200, timeout=1):
        super(MySerialManager, self).__init__(target=self.loop)
        # As soon as you uncomment this, you'll get an error.
        # self.ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)

    def loop(self):
        # Just some simple action for simplicity.
        for i in range(3):
            print("hi")
            time.sleep(1)

if __name__ == "__main__":
    msm = MySerialManager("COM14")
    try:
        msm.start()
    except KeyboardInterrupt:
        print("caught in main")
    finally:
        msm.join()
This is the error:
Traceback (most recent call last):
  File "test.py", line 22, in <module>
    msm.start()
  File "C:\Python\Python36\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
ValueError: ctypes objects containing pointers cannot be pickled
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "test.py", line 26, in <module>
    msm.join()
  File "C:\Python\Python36\lib\multiprocessing\process.py", line 120, in join
    assert self._popen is not None, 'can only join a started process'
AssertionError: can only join a started process
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Python\Python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
    exitcode = _main(fd)
  File "C:\Python\Python36\lib\multiprocessing\spawn.py", line 115, in _main
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
I've also tried creating a serial port object outside of the class and passing it to the constructor. Furthermore, I've tried not inheriting multiprocessing.Process and instead putting:
self.proc = Process(target=self.loop)
into the class and
try:
    msm.proc.start()
except KeyboardInterrupt:
    print("caught in main")
finally:
    msm.proc.join()
into the main block. Neither of them solved the problem.
Somebody pointed out that it seems like mixing multiprocessing and serial ports just doesn't work out. Is that true? If it is, could you please explain to me why this isn't working? Any help is greatly appreciated!
On Windows, a serial object, once created, cannot be shared between two processes (i.e. parent and child), because it cannot be pickled.
So create the serial object in the child process and pass a reference to it as an argument to the other functions.
Try this:
from multiprocessing import Process
import serial
import time

class MySerialManager(Process):
    def __init__(self, serial_port, baudrate=115200, timeout=1):
        super(MySerialManager, self).__init__(target=self.loop_iterator, args=(serial_port, baudrate, timeout))
        # As soon as you uncomment this, you'll get an error.
        # self.ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)

    def loop_iterator(self, serial_port, baudrate, timeout):
        # Create the serial object in the child process, then hand it to loop().
        ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)
        self.loop(ser)

    def loop(self, ser):
        # Just some simple action for simplicity.
        # You can use ser here.
        for i in range(3):
            print("hi")
            time.sleep(1)

if __name__ == "__main__":
    msm = MySerialManager("COM4")
    try:
        msm.start()
    except KeyboardInterrupt:
        print("caught in main")
    finally:
        msm.join()
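As an aside, a plain-function variant (a hedged sketch under the same assumptions, not from the original answer) avoids subclassing Process altogether, so nothing unpicklable ever has to cross the process boundary:
from multiprocessing import Process
import serial
import time

def serial_worker(serial_port, baudrate=115200, timeout=1):
    # The port is opened here, inside the child process, so the parent
    # never has to pickle a serial.Serial object.
    ser = serial.Serial(serial_port, baudrate=baudrate, timeout=timeout)
    for i in range(3):
        print("hi")  # use ser here
        time.sleep(1)

if __name__ == "__main__":
    p = Process(target=serial_worker, args=("COM4",))
    p.start()
    p.join()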

IPython ipengineapp creation with keyword arguments

I am trying to write a script that will start a new engine.
Using some code from the IPython source, I have:
[engines.py]
def make_engine(**kwargs):
    from IPython.parallel.apps import ipengineapp as app
    app.launch_new_instance(**kwargs)

if __name__ == '__main__':
    make_engine(file='./profiles/security/ipcontroller-engine.json', config='./profiles/e2.py')
If I run this with python engines.py on the command line, I run into a configuration problem, and my traceback is:
Traceback (most recent call last):
  File "engines.py", line 30, in <module>
    make_engine(file='./profiles/security/ipcontroller-engine.json', config='./profiles/e2.py')
  File "engines.py", line 20, in make_engine
    app.launch_new_instance(**kwargs)
  File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 562, in launch_instance
    app = cls.instance(**kwargs)
  File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/configurable.py", line 354, in instance
    inst = cls(*args, **kwargs)
  File "<string>", line 2, in __init__
  File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 94, in catch_config_error
    app.print_help()
  File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 346, in print_help
    self.print_options()
  File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 317, in print_options
    self.print_alias_help()
  File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 281, in print_alias_help
    cls = classdict[classname]
KeyError: 'BaseIPythonApplication'
If I do a super-ugly hack like the following, it works:
def make_engine():
    from IPython.parallel.apps import ipengineapp as app
    app.launch_new_instance()

if __name__ == '__main__':
    import sys
    sys.argv = ['engines.py',  # placeholder program name; only argv[1:] is parsed
                '--file=./profiles/security/ipcontroller-engine.json',
                '--config=./profiles/e2.py']  # OUCH this is ugly!
    make_engine()
Why can't I pass keyword arguments to the launch_new_instance method?
What are the right keyword arguments?
Where is the entry point for entering my configuration options?
Thanks,
Martin
The way to instantiate a new ipengine using the IPEngineApp API is:
def make_engine():
    from IPython.parallel.apps.ipengineapp import IPEngineApp
    lines1 = "a_command()"
    app1 = IPEngineApp()
    app1.url_file = './profiles/security/ipcontroller-engine.json'
    app1.cluster_id = 'e2'
    app1.startup_command = lines1
    app1.init_engine()
    app1.start()
However, this starts a new ipengine process that takes control of the script's execution, so there is no way to start multiple engines in the same script using this method.
Thus I had to fall back on the subprocess module to spawn all additional new ipengines:
import subprocess
import os

pids = []
for num in range(1, 3):
    args = ["ipengine",
            "--config", os.path.abspath("./profiles/e%d.py" % num),
            "--file", os.path.abspath("./profiles/security/ipcontroller-engine.json")]
    pid = subprocess.Popen(args).pid
    pids.append(pid)
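A small follow-up sketch (my own suggestion, not part of the original answer): keeping the Popen objects themselves, rather than bare pids, lets the parent wait for or terminate the engines later:
import subprocess
import os

procs = []
for num in range(1, 3):
    args = ["ipengine",
            "--config", os.path.abspath("./profiles/e%d.py" % num),
            "--file", os.path.abspath("./profiles/security/ipcontroller-engine.json")]
    procs.append(subprocess.Popen(args))

for proc in procs:
    proc.wait()  # block until each engine exits (or call proc.terminate())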

Python multiprocessing on Python 2.6 Win32 (xp)

I tried to copy this example from the multiprocessing lecture by Jesse Noller (as recommended in another SO post) [http://pycon.blip.tv/file/1947354?filename=Pycon-IntroductionToMultiprocessingInPython630.mp4].
But for some reason I'm getting an error, as though it's ignoring my function definitions.
I'm on Windows XP (win32), which I know has restrictions with regard to the multiprocessing library in 2.6 that require everything to be picklable.
from multiprocessing import Process
import time

def sleeper(wait):
    print 'Sleeping for %d seconds' % (wait,)
    time.sleep(wait)
    print 'Sleeping complete'

def doIT():
    p = Process(target=sleeper, args=(9,))
    p.start()
    time.sleep(5)
    p.join()

if __name__ == '__main__':
    doIT()
Output:
Evaluating mypikklez.py
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Python26\lib\multiprocessing\forking.py", line 342, in main
    self = load(from_parent)
  File "C:\Python26\lib\pickle.py", line 1370, in load
    return Unpickler(file).load()
  File "C:\Python26\lib\pickle.py", line 858, in load
    dispatch[key](self)
  File "C:\Python26\lib\pickle.py", line 1090, in load_global
    klass = self.find_class(module, name)
  File "C:\Python26\lib\pickle.py", line 1126, in find_class
    klass = getattr(mod, name)
AttributeError: 'module' object has no attribute 'sleeper'
The error causing the issue is: AttributeError: 'module' object has no attribute 'sleeper'.
As simple as the function is, I can't understand what the holdup would be.
This is just for self-teaching purposes of basic concepts. I'm not trying to pre-optimize any real-world issue.
Thanks.
It seems from the traceback that you are running the code directly in the Python interpreter (REPL). On Windows, the child process must be able to re-import your main module, and there is no module file to import when you type code into the REPL.
Don't do that. Save the code in a file and run it from the file instead, with the command:
python myfile.py
That will solve your issue.
As an unrelated note, this line is wrong:
print 'Sleeping for ' + wait + ' seconds'
It should be:
print 'Sleeping for %d seconds' % (wait,)
because you can't concatenate str and int objects (Python is strongly typed).
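A quick illustration (a hedged sketch; wait stands in for the variable from the example above):
wait = 9
# 'Sleeping for ' + wait + ' seconds'  would raise: TypeError: cannot concatenate 'str' and 'int' objects
print('Sleeping for ' + str(wait) + ' seconds')  # explicit conversion works
print('Sleeping for %d seconds' % (wait,))       # or let %-formatting convert for you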
