I have the following code, in which I try to call a function compute_cluster that does some computations and writes the results to a txt file (each process writes its results to a different txt file independently). However, when I run the following code:
from multiprocessing import Pool

def main():
    p = Pool(19)
    p.map(compute_cluster, [(l, r) for l in range(6, 25) for r in range(1, 4)])
    p.close()

if __name__ == "__main__":
    main()
it crashes with the following error:
File "RMSD_calc.py", line 124, in <module>
main()
File "RMSD_calc.py", line 120, in main
p.map(compute_cluster, [(l, r) for l in range(6, 25) for r in range(1, 4)])
File "/usr/local/lib/python2.7/multiprocessing/pool.py", line 225, in map
return self.map_async(func, iterable, chunksize).get()
File "/usr/local/lib/python2.7/multiprocessing/pool.py", line 522, in get
raise self._value
KeyError: 0
When I searched online for the meaning of "KeyError: 0" I didn't find anything helpful, so any suggestions as to why this error happens are highly appreciated.
The KeyError happens in compute_cluster() in a child process, and p.map() re-raises it for you in the parent:
from multiprocessing import Pool

def f(args):
    d = {}
    d[0]  # <-- raises KeyError

if __name__ == "__main__":
    p = Pool()
    p.map(f, [None])
Output
Traceback (most recent call last):
  File "raise-exception-in-child.py", line 9, in <module>
    p.map(f, [None])
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 227, in map
    return self.map_async(func, iterable, chunksize).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 528, in get
    raise self._value
KeyError: 0
To see the full traceback, catch the exception in the child process:
import logging
from multiprocessing import Pool

def f(args):
    d = {}
    d[0]  # <-- raises KeyError

def f_mp(args):
    try:
        return f(args)
    except Exception:
        logging.exception("f(%r) failed" % (args,))

if __name__ == "__main__":
    p = Pool()
    p.map(f_mp, [None])
Output
ERROR:root:f(None) failed
Traceback (most recent call last):
  File "raise-exception-in-child.py", line 10, in f_mp
    return f(args)
  File "raise-exception-in-child.py", line 6, in f
    d[0]  # <-- raises KeyError
KeyError: 0
It shows that d[0] caused the exception.
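On Python 3.4+ this workaround is mostly unnecessary: the pool re-raises the child's exception chained with a multiprocessing.pool.RemoteTraceback, so the remote traceback is visible in the parent. On Python 3 you can also be notified of a failure as soon as it happens via map_async's error_callback; a small sketch:

from multiprocessing import Pool

def f(args):
    d = {}
    d[0]  # <-- raises KeyError

if __name__ == "__main__":
    with Pool() as p:
        # error_callback runs in the parent with the exception instance
        r = p.map_async(f, [None], error_callback=lambda e: print(repr(e)))
        r.wait()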
Related
I have this ipynb script that applies a function from a different Python script to my dataframe. The "predict_tautomers.py" file contains a function called "get_taut_data".
My script:
import pandas as pd
import multiprocessing as mp
import numpy as np
import predict_tautomers

train_set = pd.read_csv("data/df_train_set.csv", nrows=3)

def parallelize_function(df):
    df["tautomer_dict"] = df["smiles"].apply(predict_tautomers.get_taut_data)
    return df

def parallelize_dataframe(df, func):
    num_processes = 2
    df_split = np.array_split(df, num_processes)
    with mp.Pool(num_processes) as p:
        df = pd.concat(p.map(func, df_split))
    return df

train_set = parallelize_dataframe(train_set, parallelize_function)
However, running this code gives me this attribute error:
Process SpawnPoolWorker-15:
Traceback (most recent call last):
  File "/Users/michel_lim/miniconda3/envs/moltaut2/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/Users/michel_lim/miniconda3/envs/moltaut2/lib/python3.10/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/Users/michel_lim/miniconda3/envs/moltaut2/lib/python3.10/multiprocessing/pool.py", line 114, in worker
    task = get()
  File "/Users/michel_lim/miniconda3/envs/moltaut2/lib/python3.10/multiprocessing/queues.py", line 367, in get
    return _ForkingPickler.loads(res)
AttributeError: Can't get attribute 'parallelize_function' on <module '__main__' (built-in)>
Process SpawnPoolWorker-16:
Traceback (most recent call last):
  ... (same traceback as SpawnPoolWorker-15)
AttributeError: Can't get attribute 'parallelize_function' on <module '__main__' (built-in)>
Edit: The "predict_tautomers" script I am importing contains functions that use multiprocessing themselves, which look like this:
def predict_by_smi(smi, fmax, num_confs):
    pmol = pybel.readstring("smi", smi)
    blocks = gen_confs_set(smi, num_confs)
    params = zip(blocks, [fmax for i in range(len(blocks))])
    pool = Pool()
    score = pool.map(predict_multicore_wrapper, params)
    pool.close()

def predict_by_smis(smis, fmax, num_confs):
    params = []
    for idx, smi in enumerate(smis):
        blocks = gen_confs_set(smi, num_confs)
        for block in blocks:
            params.append([idx, smi, block, fmax])
    pool = Pool()
    score = pool.map(predict_multicore_wrapper, params)
    pool.close()
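The AttributeError itself is the classic spawn-start-method problem: with the spawn start method (the default on macOS since Python 3.8, and always on Windows), each pool worker re-imports the main module, and a function defined in a notebook cell lives in a built-in __main__ that workers cannot re-import, so the pickled reference to parallelize_function cannot be resolved. The usual fix is to move the worker function into a real module file next to the notebook; a minimal sketch, using a hypothetical helper file workers.py:

workers.py

# importable by spawned workers, unlike notebook cells
import predict_tautomers

def parallelize_function(df):
    df["tautomer_dict"] = df["smiles"].apply(predict_tautomers.get_taut_data)
    return df

and in the notebook:

import workers

train_set = parallelize_dataframe(train_set, workers.parallelize_function)

Note also that, if get_taut_data reaches predict_by_smi or predict_by_smis from the edit above, it ends up creating its own Pool inside a pool worker; pool workers are daemonic processes and are not allowed to start children, so that nesting will likely fail with its own error ("daemonic processes are not allowed to have children") even once the import problem is fixed.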
I am trying to use multiprocessing in a class in the following code:
import concurrent.futures
import numpy as np
import pandas as pd

class test:
    def __init__(self):
        return

    global calc_corr
    @staticmethod
    def calc_corr(idx, df1, df2):
        arr1 = df1.iloc[idx:idx+5, :].values.flatten('F')
        arr2 = df2.iloc[idx:idx+5, :].values.flatten('F')
        df_tmp = pd.DataFrame([arr1, arr2]).T
        df_tmp.dropna(how='any', inplace=True)
        corr = df_tmp.corr().iloc[0, 1]
        return corr

    def aa(self):
        df1 = pd.DataFrame(np.random.normal(size=(100, 6)))
        df2 = pd.DataFrame(np.random.normal(size=(100, 6)))
        with concurrent.futures.ProcessPoolExecutor() as executor:
            results = [executor.submit(calc_corr, (i, df1, df2)) for i in range(20)]
            for f in concurrent.futures.as_completed(results):
                print(f.result())

if __name__ == '__main__':
    t = test()
    t.aa()
I am using @staticmethod because it is not related to the class; it's just a computing tool. But using it raises the following error when running the code:
D:\anaconda3\python.exe C:/Users/jonas/Desktop/728_pj/test.py
concurrent.futures.process._RemoteTraceback:
"""
Traceback (most recent call last):
  File "D:\anaconda3\lib\multiprocessing\queues.py", line 245, in _feed
    obj = _ForkingPickler.dumps(obj)
  File "D:\anaconda3\lib\multiprocessing\reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
TypeError: cannot pickle 'staticmethod' object
"""

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:\Users\jonas\Desktop\728_pj\test.py", line 31, in <module>
    t.aa()
  File "C:\Users\jonas\Desktop\728_pj\test.py", line 26, in aa
    print(f.result())
  File "D:\anaconda3\lib\concurrent\futures\_base.py", line 438, in result
    return self.__get_result()
  File "D:\anaconda3\lib\concurrent\futures\_base.py", line 390, in __get_result
    raise self._exception
  File "D:\anaconda3\lib\multiprocessing\queues.py", line 245, in _feed
    obj = _ForkingPickler.dumps(obj)
  File "D:\anaconda3\lib\multiprocessing\reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
TypeError: cannot pickle 'staticmethod' object
Process finished with exit code 1
Can anyone help me fix this?
I think it is somehow caused by the staticmethod being declared global. When I tried removing the global calc_corr line and changing
results = [executor.submit(calc_corr, (i, df1, df2)) for i in range(20)]
to
results = [executor.submit(self.calc_corr, i, df1, df2) for i in range(20)]
it seemed to work fine. I'm not actually sure why what you wrote doesn't work, but hopefully this will.
Note: removing the tuple around the arguments is unrelated to this issue, but it was causing another problem afterwards.
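For reference, here is a minimal runnable version of the working arrangement described above, with the same names as in the question (the key points being that calc_corr is a plain @staticmethod with no global declaration, and that the arguments go to submit() individually rather than as one tuple):

import concurrent.futures
import numpy as np
import pandas as pd

class test:
    @staticmethod
    def calc_corr(idx, df1, df2):
        # correlate two 5-row windows of the frames, flattened column-major
        arr1 = df1.iloc[idx:idx+5, :].values.flatten('F')
        arr2 = df2.iloc[idx:idx+5, :].values.flatten('F')
        df_tmp = pd.DataFrame([arr1, arr2]).T
        df_tmp.dropna(how='any', inplace=True)
        return df_tmp.corr().iloc[0, 1]

    def aa(self):
        df1 = pd.DataFrame(np.random.normal(size=(100, 6)))
        df2 = pd.DataFrame(np.random.normal(size=(100, 6)))
        with concurrent.futures.ProcessPoolExecutor() as executor:
            results = [executor.submit(self.calc_corr, i, df1, df2)
                       for i in range(20)]
            for f in concurrent.futures.as_completed(results):
                print(f.result())

if __name__ == '__main__':
    test().aa()

Pickling self.calc_corr works here because looking a staticmethod up on an instance yields the underlying plain function, which pickle transmits by reference, whereas the original code passed the raw staticmethod descriptor object to submit(), and that object is what cannot be pickled.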
Current Implementation which needs optimization
import subprocess

childprocess = subprocess.Popen(
    ['python',
     '/full_path_to_directory/called_script.py',
     'arg1',
     'arg2'],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE)
returnVal = childprocess.communicate()[0]
print(returnVal)
Is this a correct way to call another script (called_script.py) within the current working directory?
Is there a better way to call the other script? I used import script, but it gives me the error below.
called_script.py
def func(arg1, arg2, arg3):
    # doSomething
    # sys.stdout.write(returnVal)
    pass

if __name__ == "__main__":
    func(arg1, arg2, arg3)
Implementation 2 (throws an exception and errors out)
caller_script.py
Both of them are under the same path (i.e. /home/bin)
import called_script

returnVal = called_script.func(arg1, arg2, arg3)
print(returnVal)
Output:
nullNone
Traceback (most recent call last):
  File "/path_to_caller/caller_script.py", line 89, in <module>
    l.simple_bind_s(binddn, pw)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 206, in simple_bind_s
    msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 200, in simple_bind
    return self._ldap_call(self._l.simple_bind,who,cred,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 96, in _ldap_call
    result = func(*args,**kwargs)
TypeError: argument 2 must be string or read-only buffer, not None
Another alternative I used, which also gave me an error:
Implementation 3 (throws an exception and errors out)
caller_script.py
import sys
import subprocess
import ldap

returnVal = subprocess.call(['python', 'called_script.py', 'arg1', 'arg2'])
print(returnVal)
l = ldap.initialize(cp.get('some_config_ref', 'some_url'))
try:
    l.protocol_version = ldap.VERSION3
    l.simple_bind_s(binddn, returnVal)
except ldap.INVALID_CREDENTIALS:
    sys.stderr.write("Your username or password is incorrect.")
    sys.exit(1)
except ldap.LDAPError, e:
    if type(e.message) == dict and e.message.has_key('xyz'):
        sys.stderr.write(e.message['xyz'])
    else:
        sys.stderr.write(e)
    sys.exit(1)
Output:
returnVal0Traceback (most recent call last):
  File "./path_to_script/caller_script.py", line 88, in <module>
    l.simple_bind_s(binddn, pw)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 206, in simple_bind_s
    msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 200, in simple_bind
    return self._ldap_call(self._l.simple_bind,who,cred,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
  File "/usr/lib64/python2.6/site-packages/ldap/ldapobject.py", line 96, in _ldap_call
    result = func(*args,**kwargs)
TypeError: argument 2 must be string or read-only buffer, not int
Here is an example where you call a function from another file: you pass one value, a list, which can hold an arbitrary amount of numbers, and you get back the sum. Make sure the two files are in the same directory, or you will need the path. The function in your example called_script.py does not allow you to pass a value.
called_script.py
def add_many(list_add):
    the_sum = sum(list_add)
    return the_sum
caller_script.py
import called_script

a_list = [1, 2, 3, 4]
the_sum = called_script.add_many(a_list)
print(the_sum)
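If you do need to run the other script as a separate process and use what it prints, two details in the failing implementations are worth noting: in Implementation 2, called_script.func has no return statement, so returnVal is None (hence the "not None" TypeError), and in Implementation 3, subprocess.call() returns the child's exit status, an int (hence the "not int" TypeError). A sketch of capturing the child's stdout as a string instead:

import subprocess

# check_output returns the child's stdout as bytes;
# subprocess.call() only returns the exit status.
out = subprocess.check_output(['python', 'called_script.py', 'arg1', 'arg2'])
returnVal = out.decode().strip()
print(returnVal)

subprocess.check_output exists from Python 2.7 on; on the 2.6 shown in your tracebacks, Popen with stdout=subprocess.PIPE and communicate()[0], as in your first implementation, does the same job. Either way, called_script.py must actually write the value to stdout for the parent to receive it.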
I'm trying to create a Python decorator that takes a function with args and kwargs, executes it in a new process, shuts that process down, and returns whatever the function returned, re-raising the same exception, if any.
For now, my decorator handles functions okay if they raise no exceptions, but it fails to provide the traceback. How do I pass it back to the parent process?
from functools import wraps
from multiprocessing import Process, Queue
import sys

def process_wrapper(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # queue for communicating between parent and child processes
        q = Queue()

        def func_to_q(_q: Queue, *_args, **_kwargs):
            # do the same as func, but put result into the queue. Also put
            # there an exception if any.
            try:
                _res = func(*_args, **_kwargs)
                _q.put(_res)
            except:
                _q.put(sys.exc_info())

        # start another process and wait for it to join
        p = Process(target=func_to_q, args=(q,) + args, kwargs=kwargs)
        p.start()
        p.join()

        # get result from the queue and return it, or raise if it's an exception
        res = q.get(False)
        if isinstance(res, tuple) and isinstance(res[0], Exception):
            raise res[1].with_traceback(res[2])
        else:
            return res

    return wrapper

if __name__ == '__main__':
    @process_wrapper
    def ok():
        return 'ok'

    @process_wrapper
    def trouble():
        def inside():
            raise UserWarning
        inside()

    print(ok())
    print(trouble())
I expect the result to be something like:
ok
Traceback (most recent call last):
  File "/temp.py", line 47, in <module>
    print(trouble())
  File "/temp.py", line 44, in trouble
    inside()
  File "/temp.py", line 43, in inside
    raise UserWarning
UserWarning
Process finished with exit code 1
But it seems the child process cannot put the stack trace into the queue, and I get the following:
ok
Traceback (most recent call last):
  File "/temp.py", line 47, in <module>
    print(trouble())
  File "/temp.py", line 26, in wrapper
    res = q.get(False)
  File "/usr/lib/python3.6/multiprocessing/queues.py", line 107, in get
    raise Empty
queue.Empty
Process finished with exit code 1
Also, if the child puts only the exception instance itself into the queue, _q.put(sys.exc_info()[1]), the parent gets it and raises it, but with a new stack trace (note the missing call to inside()):
ok
Traceback (most recent call last):
  File "/temp.py", line 47, in <module>
    print(trouble())
  File "/temp.py", line 28, in wrapper
    raise res
UserWarning
Process finished with exit code 1
Traceback objects cannot be pickled, so the child's _q.put(sys.exc_info()) fails inside the queue's feeder thread and nothing ever arrives in the parent, which is why q.get(False) finds the queue empty. Take a look at multiprocessing/pool.py and the stringification hack it uses for sending exceptions to the parent: you can reuse multiprocessing.pool.ExceptionWithTraceback from there.
Here's just enough code to demonstrate the basic principle:
from multiprocessing import Process, Queue
from multiprocessing.pool import ExceptionWithTraceback

def worker(outqueue):
    try:
        result = (True, 1 / 0)  # will raise ZeroDivisionError
    except Exception as e:
        e = ExceptionWithTraceback(e, e.__traceback__)
        result = (False, e)
    outqueue.put(result)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    success, value = q.get()
    p.join()

    if success:
        print(value)
    else:
        raise value  # raise again
Output:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
  File "/home/...", line 7, in worker
    result = (True, 1 / 0)  # will raise ZeroDivisionError
ZeroDivisionError: division by zero
"""

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/...", line 23, in <module>
    raise value
ZeroDivisionError: division by zero

Process finished with exit code 1
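Applied to the decorator from the question, the child wraps the exception before putting it on the queue, and the parent does a blocking get before join. A sketch (note that ExceptionWithTraceback is a private helper inside CPython's multiprocessing.pool and may move between versions, and, as in the question, the nested target function means this relies on the fork start method):

from functools import wraps
from multiprocessing import Process, Queue
from multiprocessing.pool import ExceptionWithTraceback

def process_wrapper(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        q = Queue()

        def func_to_q(_q, *_args, **_kwargs):
            try:
                # success: ship the result itself
                _q.put((True, func(*_args, **_kwargs)))
            except Exception as e:
                # failure: wrap so the traceback survives pickling
                _q.put((False, ExceptionWithTraceback(e, e.__traceback__)))

        p = Process(target=func_to_q, args=(q,) + args, kwargs=kwargs)
        p.start()
        ok, res = q.get()  # blocking get, before join, to avoid the race
        p.join()
        if not ok:
            raise res  # re-raised with the child's traceback attached
        return res

    return wrapper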
Here is my code:
import multiprocessing
import dill

class Some_class():
    class_var = 'Foo'

    def __init__(self, param):
        self.name = param

    def print_name(self):
        print("we are in object " + self.name)
        print(Some_class.class_var)

def run_dill_encoded(what):
    fun, args = dill.loads(what)
    return fun(*args)

def apply_async(pool, fun, args):
    return pool.apply_async(run_dill_encoded, (dill.dumps((fun, args)),))

if __name__ == '__main__':
    list_names = [Some_class('object_1'), Some_class('object_2')]
    pool = multiprocessing.Pool(processes=4)
    results = [apply_async(pool, Some_class.print_name, args=(x,)) for x in list_names]
    output = [p.get() for p in results]
    print(output)
It returns this error:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
  File "C:\Python34\lib\multiprocessing\pool.py", line 119, in worker
    result = (True, func(*args, **kwds))
  File "C:\...\temp_obj_output_standard.py", line 18, in run_dill_encoded
    return fun(*args)
  File "C:/...temp_obj_output_standard.py", line 14, in print_name
    print(Some_class.class_var)
NameError: name 'Some_class' is not defined
"""

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "C:/...temp_obj_output_standard.py", line 31, in <module>
    output = [p.get() for p in results]
  File "C:/...temp_obj_output_standard.py", line 31, in <listcomp>
    output = [p.get() for p in results]
  File "C:\Python34\lib\multiprocessing\pool.py", line 599, in get
    raise self._value
NameError: name 'Some_class' is not defined
The code works fine without the line print(Some_class.class_var). What is wrong with accessing class variables? Both objects should have it, and I don't think the processes should conflict over it. Am I missing something? Any suggestions on how to troubleshoot it?
Do not worry about run_dill_encoded and apply_async; I am using this solution until I compile multiprocess on Python 3.x.
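For what it's worth, the usual explanation for this NameError is that dill serializes print_name by value, but by default it does not bring along the module globals the function body refers to, so the name Some_class does not exist in the worker that runs it (self.name still works because the instance travels with the pickled arguments). One thing worth trying, assuming dill's recurse setting behaves as documented:

import dill

# ask dill to also serialize the global objects a pickled function
# refers to, so Some_class travels to the worker with print_name
dill.settings['recurse'] = True

placed before the dill.dumps call.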