ProcessPoolExecutor does not start - python

I am working in a Jupyter notebook. I'm new to multiprocessing in Python, and I'm trying to parallelize the calculation of a function over a grid of parameters. Here is a snippet of code quite representative of what I'm doing:
import os
import numpy as np
from concurrent.futures import ProcessPoolExecutor

def f(x, y):
    print(os.getpid(), x, y, x + y)
    return x + y

xs = np.linspace(5, 7, 3).astype(int)
ys = np.linspace(1, 3, 3).astype(int)

func = lambda p: f(*p)

with ProcessPoolExecutor() as executor:
    args = (arg for arg in zip(xs, ys))
    results = executor.map(func, args)
    for res in results:
        print(res)
The executor doesn't even start.
There is no problem whatsoever if I execute the same thing serially, e.g. with a list comprehension:
args = (arg for arg in zip(xs,ys))
results = [func(arg) for arg in args]

Are you running on Windows? I think your main problem is that each process is trying to re-execute your whole script, so you should include an if __name__ == "__main__" check. I think you have a second issue: you are using a lambda function, which can't be pickled, and the processes communicate by pickling data. There are work-arounds for that, but in this case it looks like you don't really need the lambda. Try something like this:
import os
import numpy as np
from concurrent.futures import ProcessPoolExecutor

def f(x, y):
    print(os.getpid(), x, y, x + y)
    return x + y

if __name__ == '__main__':
    xs = np.linspace(5, 7, 3).astype(int)
    ys = np.linspace(1, 3, 3).astype(int)

    with ProcessPoolExecutor() as executor:
        results = executor.map(f, xs, ys)
        for res in results:
            print(res)
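If you really do want to pass the (x, y) pairs around as single arguments, one common workaround, shown here only as a sketch and not as part of the fix above, is a module-level helper that unpacks the tuple, since top-level functions pickle fine (assuming the code runs as a script rather than being defined inside the notebook):

import os
from concurrent.futures import ProcessPoolExecutor

def f(x, y):
    print(os.getpid(), x, y, x + y)
    return x + y

def f_unpack(pair):
    # module-level wrapper: picklable, unlike a lambda defined on the fly
    return f(*pair)

if __name__ == '__main__':
    args = list(zip([5, 6, 7], [1, 2, 3]))
    with ProcessPoolExecutor() as executor:
        for res in executor.map(f_unpack, args):
            print(res)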

Related

How to map a function to an array of dataframes in parallel in python?

My code has a list of dataframes, each of which I want to apply a function to; they are all in the same format. The code I have for regular mapping is this:
def return_stat(df):
    return np.random.choice(df.iloc[:, 1], p=df.iloc[:, 0])

weather_df_list = [weather_df1, weather_df2, weather_df3, weather_df4]
expected_values = list(map(lambda i: return_stat(i), weather_df_list))
but I have 16 cores on my computer and I want to make use of them to make this code much faster.
How would I implement this same code using parallel computing in Python?
Thanks!
Using multiprocessing.Pool can help to occupy all your cores.
import pandas as pd, numpy as np, multiprocessing

def return_stat(df):
    return np.random.choice(df.iloc[:, 1], p = df.iloc[:, 0])

if __name__ == '__main__':
    weather_df = pd.DataFrame({'rain_probability': [0.1, 0.2, 0.7], 'rain_inches': [1, 2, 3]})
    weather_df_list = [weather_df, weather_df, weather_df, weather_df]
    with multiprocessing.Pool() as pool:
        expected_values = pool.map(return_stat, weather_df_list)
    print(expected_values)
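For completeness, and only as a sketch of an equivalent alternative rather than part of the answer above, the same thing can be written with concurrent.futures; the executor also defaults to using all available cores, and max_workers=16 makes the count explicit on a 16-core machine:

import pandas as pd, numpy as np
from concurrent.futures import ProcessPoolExecutor

def return_stat(df):
    return np.random.choice(df.iloc[:, 1], p=df.iloc[:, 0])

if __name__ == '__main__':
    weather_df = pd.DataFrame({'rain_probability': [0.1, 0.2, 0.7], 'rain_inches': [1, 2, 3]})
    weather_df_list = [weather_df, weather_df, weather_df, weather_df]
    with ProcessPoolExecutor(max_workers=16) as executor:  # one process per core
        expected_values = list(executor.map(return_stat, weather_df_list))
    print(expected_values)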
Another fancy and also efficient way to solve the problem is Numba. It compiles Python into efficient machine code and also has a parallelization feature. Numba has no choice() variant that supports a probabilities array, hence I had to implement choice() myself. You need to install Numba once via python -m pip install numba.
import pandas as pd, numpy as np
from numba import njit

@njit(parallel = True, fastmath = True)
def choices(l):
    rnds = np.random.random((len(l),))
    def choice(i, a, p):
        assert p.shape == a.shape
        p = p.cumsum()
        p = p / p[-1]
        r = rnds[i]
        i = np.sum((p <= r).astype(np.int64))
        return a[i]
    res = np.empty((len(l),), dtype = np.float64)
    for i in range(len(l)):
        res[i] = choice(i, l[i][:, 1], l[i][:, 0])
    return res

weather_df = pd.DataFrame({'rain_probability': [0.1, 0.2, 0.3, 0.4], 'rain_inches': [0, 1, 2, 3]})
weather_df_list = [weather_df, weather_df, weather_df, weather_df, weather_df, weather_df, weather_df, weather_df]
weather_df_arrays = [e.values[:, :2] for e in weather_df_list]
print(choices(weather_df_arrays))
You may try the Numba variant on your side and tell me how fast it is; if it is not faster than the multiprocessing variant, then I have some extra ideas for how to improve its speed.
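One of those extra ideas is worth spelling out, as a hypothetical sketch of my own rather than the answer's code: parallel=True only takes effect for loops written with numba.prange, so stacking the (equal-shaped) arrays and using prange over the outer loop lets Numba split the work across threads.

import numpy as np
from numba import njit, prange

@njit(parallel=True, fastmath=True)
def choices_parallel(stacked):
    # stacked has shape (n_frames, n_rows, 2): column 0 = probabilities, column 1 = values
    n = stacked.shape[0]
    rnds = np.random.random(n)
    res = np.empty(n, dtype=np.float64)
    for i in prange(n):                      # prange distributes iterations across threads
        p = stacked[i, :, 0].cumsum()
        p = p / p[-1]                        # normalise the cumulative probabilities
        idx = np.sum((p <= rnds[i]).astype(np.int64))
        res[i] = stacked[i, idx, 1]
    return res

# usage, assuming every dataframe has the same number of rows:
# stacked = np.stack([df.values[:, :2].astype(np.float64) for df in weather_df_list])
# print(choices_parallel(stacked))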

Python multiprocessing producing unstable results

Can anyone help me understand why this simple example of trying to speed up a for loop using Python's multiprocessing module produces unstable results? I use a Manager().list() to store the values from the child processes.
Clearly I'm doing at least one thing wrong. What would be the correct way to do this?
import numpy as np
import multiprocessing
from matplotlib import pyplot as plt
from functools import partial
from multiprocessing import Manager

def run_parallel(x_val, result):
    val = np.arctan(x_val)
    result.append(val)

def my_func(x_array, parallel=False):
    if not parallel:
        result = []
        for k in x_array:
            result.append(np.arctan(k))
        return result
    else:
        manager = Manager()
        m_result = manager.list()
        pool = multiprocessing.Pool(4)
        pool.map(partial(run_parallel, result=m_result), x_array)
        return list(m_result)

test_x = np.linspace(0.1, 1, 50)
serial = my_func(test_x, parallel=False)
parallel = my_func(test_x, parallel=True)

plt.figure()
plt.plot(test_x, serial, label='serial')
plt.plot(test_x, parallel, label='parallel')
plt.legend(loc='best')
plt.show()
The output plot I'm getting looks different every time this runs.
I added some print statements and it turned out that the order in which elements from x_array are processed is arbitrary... That's why the plot looks so weird. I think you should keep pairs of the argument and its arctan value and then order them by the argument value.
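A rough sketch of that sorting idea, my own illustration rather than the answerer's code (the edit below shows the simpler fix): store each argument next to its arctan value and sort by the argument before plotting.

import numpy as np
import multiprocessing
from functools import partial
from multiprocessing import Manager

def run_parallel(x_val, result):
    # keep the argument together with its value so the order can be recovered later
    result.append((x_val, np.arctan(x_val)))

if __name__ == '__main__':
    test_x = np.linspace(0.1, 1, 50)
    manager = Manager()
    m_result = manager.list()
    with multiprocessing.Pool(4) as pool:
        pool.map(partial(run_parallel, result=m_result), test_x)
    pairs = sorted(m_result)            # sort by x_val to undo the arbitrary append order
    xs, parallel = zip(*pairs)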
EDIT
I read more and it turned out that map returns values in order... This works as you wanted:
import numpy as np
import multiprocessing
from matplotlib import pyplot as plt
from functools import partial
from multiprocessing import Manager

def run_parallel(x_val, result):
    val = np.arctan(x_val)
    return val

def my_func(x_array, parallel=False):
    if not parallel:
        result = []
        for k in x_array:
            result.append(np.arctan(k))
        return result
    else:
        manager = Manager()
        m_result = manager.list()
        pool = multiprocessing.Pool(4)
        x = pool.map(partial(run_parallel, result=m_result), x_array)
        return list(x)

test_x = np.linspace(0.1, 1, 50)
parallel = my_func(test_x, parallel=True)

plt.figure()
plt.plot(test_x, parallel, label='parallel')
plt.legend(loc='best')
plt.show()
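Taking the edit one step further, here is a slimmer variant of the same idea, my own sketch rather than part of the answer: once the worker simply returns its value, the Manager list and functools.partial can be dropped entirely, because Pool.map already collects results in input order.

import numpy as np
import multiprocessing

def run_parallel(x_val):
    return np.arctan(x_val)

if __name__ == '__main__':
    test_x = np.linspace(0.1, 1, 50)
    with multiprocessing.Pool(4) as pool:
        parallel = pool.map(run_parallel, test_x)   # results come back in the order of test_x
    print(parallel)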

Python - ProcessPoolExecutor hangs when called from a mpl_connect handler

I'm using parallel processing to generate a plot of functions using complex numbers. My script allows you to zoom in on an area of the plot using the standard matplotlib controls and then regenerate the plot within the new limits to improve resolution.
This is my first foray into parallel processing, and I've got as far as understanding that I need to guard the entry point with if __name__ == "__main__": so that the module can be imported properly. When running my script, the first plot is successfully generated and appears as expected. However, when the plotting function is called again from my event handler, it instead hangs indefinitely. I assume the hang is caused by an issue similar to the one that requires the if __name__ == "__main__": guard, since the parallel processes are being spawned from outside the main body of the script, but I haven't figured out anything beyond this.
import numpy as np
import matplotlib.pyplot as plt
from concurrent.futures import ProcessPoolExecutor
import multiprocessing

res = [1000, 1000]
base_factor = 2.
cpuNum = multiprocessing.cpu_count()

def brot(c, depth=200):
    z = complex(0)
    for i in range(depth):
        z = pow(z, 2) + c
        if abs(z) > 2:
            return i
    return -1

def brot_gen(span):
    re_span = span[0]
    im_span = span[1]
    mset = np.zeros([len(im_span), len(re_span)])
    for re in range(len(re_span)):
        for im in range(len(im_span)):
            mset[im][re] = brot(complex(re_span[re], im_span[im]))
    return mset

def brot_gen_parallel(re_lim, im_lim):
    re_span = np.linspace(re_lim[0], re_lim[1], res[0])
    im_span = np.linspace(im_lim[0], im_lim[1], res[1])
    split_re_span = np.array_split(re_span, cpuNum)
    packages = [(sec, im_span) for sec in split_re_span]
    print("Generating set between", re_lim, "and", im_lim, "...")
    with ProcessPoolExecutor(max_workers = cpuNum) as executor:
        result = executor.map(brot_gen, packages)
        mset = np.concatenate(list(result), axis=1)
    print("Set generated")
    return mset

def handler(ax):
    def action(event):
        if event.button == 2:
            cur_re_lim = ax.get_xlim()
            cur_im_lim = ax.get_ylim()
            mset = brot_gen_parallel(cur_re_lim, cur_im_lim)
            ax.cla()
            ax.imshow(mset, extent=[cur_re_lim[0], cur_re_lim[1], cur_im_lim[0], cur_im_lim[1]], origin="lower", vmin=0, vmax=200, interpolation="bilinear")
            plt.draw()
    fig = ax.get_figure()
    fig.canvas.mpl_connect('button_release_event', action)
    return action

if __name__ == "__main__":
    re_lim = np.array([-2.5, 2.5])
    im_lim = res[1]/res[0] * re_lim
    mset = brot_gen_parallel(re_lim, im_lim)
    plt.imshow(mset, extent=[re_lim[0], re_lim[1], im_lim[0], im_lim[1]], origin="lower", vmin=0, vmax=200, interpolation="bilinear")
    ax = plt.gca()
    f = handler(ax)
    plt.show()
EDIT: I wondered if there was a bug in the code causing an exception that was not being passed back to the console, but I tested this by running the same task without splitting it into parallel tasks, and it completed successfully.
I have discovered the answer to my own question. The answer lies in the IDE I was using. In my experience, in most IDEs plt.show() blocks execution by default; however, in Spyder the default seems to be the equivalent of plt.show(block=False), meaning that the script completed, and so whatever was required to successfully start the parallel processes was no longer available, causing the hang. This was solved by simply changing the statement to plt.show(block=True), so that the script stays live.
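For reference, if that explanation is right, the only change needed in the script above is the final line:

plt.show(block=True)   # keep the script alive so the handler can still launch worker processes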
I'm still very new to parallel processing so I'd be very interested in any more information anyone can give on what was lacking to stop the parallel processing from working.

How to map a list of (x,y) pairs to function f(x,y) in Pool.map?

Suppose I want to plot the density on the x-y plane, where the density is defined as:
def density(x, y):
    return x**2 + y**2
I have many points (x1,y1), (x2,y2), ... to calculate, so I want to do it in parallel. I found the multiprocessing docs and tried the following:
pointsList = [(1,1), (2,2), (3,3)]

from multiprocessing import Pool
if __name__ == '__main__':
    with Pool() as p:
        print(p.map(density, pointsList))
An error occurs, and it seems that I failed to pass the arguments to the function. How do I do this?
Edit:
the error is:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-647-1e2a1f0007fb> in <module>()
5 from multiprocessing import Pool
6 if __name__ == '__main__':
----> 7 with Pool() as p:
8 print(p.map(density,pointsList ))
AttributeError: __exit__
Edit 2:
If I can't do this simple parallelism in Python 2.7, how can I do it in Python 3.5, for instance?
The use of Pool in a context manager was added in Python 3.3. Since you tagged Python 2.7, you can't use the with syntax.
Documentation:
    New in version 3.3: Pool objects now support the context management protocol – see Context Manager Types. __enter__() returns the pool object, and __exit__() calls terminate().
Here's the working example you wanted, for Python 3.3+:
def density(args):
    x, y = args
    return x**2 + y**2

pointsList = [(1,1), (2,2), (3,3)]

from multiprocessing import Pool
if __name__ == '__main__':
    with Pool() as p:
        print(p.map(density, pointsList))
And since you're also using Python 2.7, you just need to not use the context manager and call p.terminate() instead:
def density(args):
    x, y = args
    return x**2 + y**2

pointsList = [(1,1), (2,2), (3,3)]

from multiprocessing import Pool
if __name__ == '__main__':
    p = Pool()
    print(p.map(density, pointsList))
    p.terminate()
You need to change the density function to unpack the tuple argument:
def density(z):
    (x, y) = z
    return x**2 + y**2
Try not using with, and close the pool yourself after you are done with it. This way it should be compatible with both Python 2 and 3:
from multiprocessing import Pool

pointsList = [(1,1), (2,2), (3,3)]
p = Pool()
print(p.map(density, pointsList))
p.close()
Or use the contextlib module:
from multiprocessing import Pool
import contextlib

pointsList = [(1,1), (2,2), (3,3)]
with contextlib.closing(Pool()) as p:
    print(p.map(density, pointsList))
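One more option worth noting, not mentioned in the answers above but available since Python 3.3: Pool.starmap unpacks each tuple into positional arguments, so density(x, y) can keep its original two-argument signature.

from multiprocessing import Pool

def density(x, y):
    return x**2 + y**2

pointsList = [(1, 1), (2, 2), (3, 3)]

if __name__ == '__main__':
    with Pool() as p:
        print(p.starmap(density, pointsList))   # calls density(1, 1), density(2, 2), ...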

returning a two dimensional array by multiprocessing

In the following code, which is an example of my main code, I have tried to use pathos.multiprocessing to speed up the iterations of a loop. The output of each iteration, which is implemented with multiprocessing, is a 2-D array. I used pathos.multiprocessing instead of multiprocessing because I wanted to use it in my class method. I have used the apipe method of pathos.multiprocessing to collect the output in a list, but it returns an empty list. I have no idea why it fails.
import numpy as np
import random
import pathos.multiprocessing as mp

class Testsystematics(object):
    def __init__(self, x, y, NTH = None, THMIN = None, THMAX = None, NRESAMPLE = None):
        self.x = x
        self.y = y
        self.nbins = NTH
        self.bmin = THMIN
        self.bmax = THMAX
        self.nresample = NRESAMPLE
        self.bins = np.linspace(self.bmin, self.bmax, self.nbins+1, True).astype(np.float)
        self.sample = np.array([[random.choice(range(len(self.y))) for _ in xrange(len(self.y))] for i in range(self.nresample)])
        self.result_list = []

    def log_result(self, result):
        self.result_list.append(result)

    def bootstrapping(self, k):
        xi_p = np.zeros(self.nbins, float)
        xi_m = np.zeros(self.nbins, float)
        nind = np.zeros(self.nbins, float)
        for i in range(len(self.x)):
            for j in range(len(self.x)):
                if (i != j):
                    sep = np.sqrt(self.x[i]**2 + self.x[j]**2)
                    index = np.searchsorted(self.bins, sep, side='right') - 1
                    sind = np.sin(sep)
                    if ((sep < self.bins[-1]) and (sep >= self.bins[0])):
                        xi_p[index] += sind*(np.mean(y) - np.median(y))
                        xi_m[index] += sind*np.std(y)
                        nind[index] += 1.0
        for i in range(self.nbins):
            xi_p[i] = xi_p[i]/nind[i]
            xi_m[i] = xi_m[i]/nind[i]
        return np.vstack((xi_p, xi_m))

    def twopcf(self):
        if (self.sys_type == 1):
            pool = mp.ProcessingPool(16)
            for n in range(self.nresample):
                pool.apipe(self.bootstrapping, args=(n,), callback=self.log_result)

shape, scale = 0.5, 0.6
x = np.random.gamma(shape, scale, 10000)
mu1, sigma1 = 0, 0.5    # mean and standard deviation
mu2, sigma2 = 0.1, 0.7  # mean and standard deviation
y = np.random.normal(mu1, sigma1, 1000) + np.random.normal(mu2, sigma2, 1000)
sysTest = Testsystematics(x, y, NTH = 10, THMIN = 0, THMAX = 5, NRESAMPLE = 100)
Any suggestions?
I'm the pathos author. I tried your code, and it runs, but produces no error and produces no result in result_list. I believe that is because you are using apipe incorrectly. The correct use of apipe is as follows:
>>> import pathos
>>> def squared(x):
...     return x**2
... 
>>> pool = pathos.multiprocessing.ProcessingPool()
>>> res = pool.apipe(squared, 5)
>>> res.get()
25
self.bootstrapping takes self and k, so you have to provide a k in the apipe call when you call it as an instance method. There is no callback argument -- if you want a callback, you'd need to add one to your function.
Note that the return value is retrieved by (1) getting a return object, and (2) by calling get on the return object.
From your use of apipe within a for loop, I'd suggest you use pool.amap (or pool.imap) instead -- then you can do the for loop in parallel.
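To make that last suggestion concrete, here is a rough sketch of the amap variant, my own illustration rather than the pathos author's code, assuming the rest of the class stays exactly as posted:

    def twopcf(self):
        if (self.sys_type == 1):
            pool = mp.ProcessingPool(16)
            res = pool.amap(self.bootstrapping, range(self.nresample))  # asynchronous map over all resamplings
            self.result_list = res.get()                                # blocks until every 2-D array is ready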
