Python in AWS Lambda: multiprocessing, terminate other processes when one finishes

From this other question I learned that AWS Lambda does not support multiprocessing.Pool and multiprocessing.Queue.
I'm also working on Python multiprocessing in AWS Lambda, but my question is: how do we terminate the main process as soon as the first child process returns? (All the child processes return after different execution times.)
What I have here:
import time
from multiprocessing import Process, Pipe

class run_func():
    number = 0

    def __init__(self, number):
        self.number = number

    def subrun(self, input, conn):
        # subprocess function with different execution time based on input.
        response = subprocess(input)
        conn.send([input, response])
        conn.close()

    def run(self):
        number = self.number
        processes = []
        parent_connections = []
        for i in range(0, number):
            parent_conn, child_conn = Pipe()
            parent_connections.append(parent_conn)
            process = Process(target=self.subrun, args=(i, child_conn,))
            processes.append(process)
        for process in processes:
            process.start()
        for process in processes:
            process.join()
        results = []
        for parent_connection in parent_connections:
            resp = parent_connection.recv()
            print(resp)
            results.append((resp[0], resp[1]))
        return results

def lambda_handler(event, context):
    starttime = time.time()
    results = []
    work = run_func(int(event['number']))
    results = work.run()
    print("Results : {}".format(results))
    print('Time: {} seconds'.format(time.time() - starttime))
    return results
The current program does not return until all child processes finish (because of the for parent_connection in parent_connections loop). But how can I make it return as soon as the first child process finishes? (Terminating the main process is enough; it's fine to leave the other child processes running.)
Added:
To be clear, I mean the first child process to return, which may not be the first one created.

The join() loop is the one that waits for all of the child processes to complete.
If we break out of that loop after the first child completes, and terminate the other processes forcefully, it will do what you want:
class run_func():
    number = 0

    def __init__(self, number):
        self.number = number

    def subrun(self, input, conn):
        # subprocess function with different execution time based on input.
        response = subprocess(input)
        conn.send([input, response])
        conn.close()

    def run(self):
        number = self.number
        processes = []
        parent_connections = []
        for i in range(0, number):
            parent_conn, child_conn = Pipe()
            parent_connections.append(parent_conn)
            process = Process(target=self.subrun, args=(i, child_conn,))
            processes.append(process)
        for process in processes:
            process.start()
        for process in processes:
            process.join()
            break
        results = []
        for parent_connection in parent_connections:
            resp = parent_connection.recv()
            print(resp)
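A different sketch, not from the answer above: on Python 3.3+ you can use multiprocessing.connection.wait() to block until at least one parent connection has data ready, so the main process picks up whichever child returns first, regardless of creation order (the subrun body below is a hypothetical stand-in for the real work):

import time
from multiprocessing import Process, Pipe
from multiprocessing.connection import wait

def subrun(i, conn):
    # hypothetical worker: later children finish sooner here
    time.sleep(1.0 / (i + 1))
    conn.send([i, i * i])
    conn.close()

def run_until_first(number):
    processes, parent_connections = [], []
    for i in range(number):
        parent_conn, child_conn = Pipe()
        parent_connections.append(parent_conn)
        processes.append(Process(target=subrun, args=(i, child_conn)))
    for p in processes:
        p.start()
    # wait() returns as soon as at least one connection is readable
    ready = wait(parent_connections)
    first_result = ready[0].recv()
    # optional: stop the remaining children instead of leaving them running
    for p in processes:
        if p.is_alive():
            p.terminate()
    return first_result

if __name__ == '__main__':
    print(run_until_first(4))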

Related

Python multiprocessing: possible deadlock with two queues in a producer-consumer pattern?

I'm wondering if there can be a sort of deadlock in the following code. I have to read each element of a database (about 1 million items), process it, and then collect the results in a single file.
I've parallelized the execution with multiprocessing, using two Queues and three types of processes:
Reader: the main process, which reads the database and puts the items it reads on task_queue
Worker: a pool of processes. Each worker gets an item from task_queue, processes the item, saves the result in an intermediate file stored in item_name/item_name.txt, and puts the item_name on completed_queue
Writer: a process which gets an item_name from completed_queue, reads the intermediate result from item_name/item_name.txt, and writes it to results.txt
from multiprocessing import Pool, Process, Queue

class Computation():
    def __init__(self, K):
        self.task_queue = Queue()
        self.completed_queue = Queue()
        self.n_cpus = K

    def reader(self,):
        with open(db, "r") as db:
            ... # Read an item
            self.task_queue.put(item)

    def worker(self,):
        while True:
            item = self.task_queue.get(True)
            if item == "STOP":
                break
            self.process_item(item)

    def writer_process(self,):
        while True:
            f = self.completed_queue.get(True)
            if f == "DONE":
                break
            self.write_f(f)

    def run(self,):
        pool = Pool(n_cpus, self.worker, args=())
        writer = Process(target=self.writer_process, args=())
        writer.start()
        self.reader()
        pool.close()
        pool.join()
        self.completed_queue.put("DONE")
        writer.join()
The code works, but it seems that sometimes the writer or the pool stops working (or they are very slow). Is a deadlock possible in this scenario?
There are a couple of issues with your code. First, by using the queues as you are, you are in effect creating your own process pool and have no need for the multiprocessing.Pool class at all. You are using a pool initializer as the actual pool worker, which is a bit of a misuse of this class; you would be better off just using regular Process instances (my opinion, anyway).
Second, although it is well and good that you put the message DONE on the completed_queue to signal writer_process to terminate, you have not done the same for the self.n_cpus worker processes, which are looking for 'STOP' messages; the reader function therefore needs to put self.n_cpus 'STOP' messages on the task queue:
from multiprocessing import Process, Queue

class Computation():
    def __init__(self, K):
        self.task_queue = Queue()
        self.completed_queue = Queue()
        self.n_cpus = K

    def reader(self,):
        with open(db, "r") as db:
            ... # Read an item
            self.task_queue.put(item)
        # signal to the worker processes to terminate:
        for _ in range(self.n_cpus):
            self.task_queue.put('STOP')

    def worker(self,):
        while True:
            item = self.task_queue.get(True)
            if item == "STOP":
                break
            self.process_item(item)

    def writer_process(self,):
        while True:
            f = self.completed_queue.get(True)
            if f == "DONE":
                break
            self.write_f(f)

    def run(self):
        processes = [Process(target=self.worker) for _ in range(self.n_cpus)]
        for p in processes:
            p.start()
        writer = Process(target=self.writer_process, args=())
        writer.start()
        self.reader()
        for p in processes:
            p.join()
        self.completed_queue.put("DONE")
        writer.join()
Personally, instead of using 'STOP' and 'DONE' as the sentinel messages, I would use None, assuming that is not a valid actual message. I tested the above code with a reader that just processed strings in a list and a self.process_item(item) that simply appended ' done' to each of those strings and put the modified string on the completed_queue, with self.write_f in the writer_process replaced by a print call. I did not see any problems with the code as is.
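As a small illustration of that suggestion, a None sentinel works nicely with iter() (a sketch, assuming None is never a real work item):

from multiprocessing import Process, Queue

def worker(task_queue):
    # iter() with a sentinel loops until None comes off the queue
    for item in iter(task_queue.get, None):
        print('processing', item)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    for item in ('a', 'b', 'c'):
        q.put(item)
    q.put(None)  # sentinel instead of 'STOP'
    p.join()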
Update to use a Managed Queue
Disclaimer: I have had no experience using mpi4py and have no idea how the queue proxies would get distributed across different computers. The above code may not be sufficient as suggested by the following article, How to share mutliprocessing queue object between multiple computers. However, that code is creating instances of Queue.Queue (that code is Python 2 code) and not the proxies that are returned by the multiprocessing.SyncManager. The documentation on this is very poor. Try the above change to see if it works better (it will be slower).
Because the proxy returned by manager.Queue() has to be passed explicitly to the child processes, I have had to rearrange the code a bit; the queues are now passed as arguments to the process functions:
from multiprocessing import Process, Manager

class Computation():
    def __init__(self, K):
        self.n_cpus = K

    def reader(self, task_queue):
        with open(db, "r") as db:
            ... # Read an item
            task_queue.put(item)
        # signal to the worker processes to terminate:
        for _ in range(self.n_cpus):
            task_queue.put('STOP')

    def worker(self, task_queue, completed_queue):
        while True:
            item = task_queue.get(True)
            if item == "STOP":
                break
            self.process_item(item)

    def writer_process(self, completed_queue):
        while True:
            f = completed_queue.get(True)
            if f == "DONE":
                break
            self.write_f(f)

    def run(self):
        with Manager() as manager:
            task_queue = manager.Queue()
            completed_queue = manager.Queue()
            processes = [Process(target=self.worker, args=(task_queue, completed_queue))
                         for _ in range(self.n_cpus)]
            for p in processes:
                p.start()
            writer = Process(target=self.writer_process, args=(completed_queue,))
            writer.start()
            self.reader(task_queue)
            for p in processes:
                p.join()
            completed_queue.put("DONE")
            writer.join()
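A minimal, self-contained illustration of the same idea (queue proxies created by a Manager and passed explicitly to the child process), separate from the class above:

from multiprocessing import Process, Manager

def worker(task_queue, completed_queue):
    # both arguments are manager proxies; they pickle cleanly to the child
    item = task_queue.get()
    completed_queue.put(item.upper())

if __name__ == '__main__':
    with Manager() as manager:
        task_queue = manager.Queue()
        completed_queue = manager.Queue()
        p = Process(target=worker, args=(task_queue, completed_queue))
        p.start()
        task_queue.put('hello')
        print(completed_queue.get())  # prints HELLO
        p.join()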

Python multiprocessing processes terminating?

I've read a number of answers here on Stack Overflow about Python multiprocessing, and I think this one is the most useful for my purposes: python multiprocessing queue implementation.
Here is what I'd like to do: poll the database for new work, put it in the queue, and have 4 processes continuously do the work. What I'm unclear on is what happens when an item in the queue is done being processed. In the question above, the process terminates when the queue is empty. In my case, however, I'd like to keep waiting until there is data in the queue. So do I just sleep and periodically check the queue? Then my worker processes would never die; is that good practice?
def mp_worker(queue):
    while True:
        if (queue.qsize() == 0):
            time.sleep(20)
        else:
            db_record = queue.get()
            process_file(db_record)

def mp_handler():
    num_workers = 4
    processes = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()

if __name__ == '__main__':
    db_conn = db.create_postgre_connection(DB_CONFIG)
    while True:
        db_records = db.retrieve_received_files(DB_CONN)
        if (len(db_records) > 0):
            for db_record in db_records:
                queue.put(db_record)
            mp_handler()
        else:
            time.sleep(20)
    db_conn.close()
Does it make sense?
Thanks.
Figured it out. Workers have to die, since otherwise they never return. But I start a new set of workers when there is data anyway, so that's not a problem. Updated code:
def mp_worker(queue):
    while queue.qsize() > 0:
        db_record = queue.get()
        process_file(db_record)

def mp_handler():
    num_workers = 4
    if (queue.qsize() < num_workers):
        num_workers = queue.qsize()
    processes = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()

if __name__ == '__main__':
    while True:
        db_records = db.retrieve_received_files(DB_CONN)
        print(db_records)
        if (len(db_records) > 0):
            for db_record in db_records:
                queue.put(db_record)
            mp_handler()
        else:
            time.sleep(20)
    DB_CONN.close()
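An alternative not taken in this thread: keep the four workers alive for the whole run and feed them through a blocking queue.get() with a sentinel, which avoids both the qsize() race and the cost of respawning workers on every poll. A sketch, where process_file and poll_for_records are hypothetical stand-ins for the question's process_file and db.retrieve_received_files:

import time
from multiprocessing import Process, Queue

def process_file(db_record):
    # hypothetical stand-in for the real work in the question
    time.sleep(0.1)

def poll_for_records():
    # hypothetical stand-in for db.retrieve_received_files(DB_CONN)
    return ['record-a', 'record-b']

def mp_worker(queue):
    # block on get(); exit only when the None sentinel arrives
    for db_record in iter(queue.get, None):
        process_file(db_record)

if __name__ == '__main__':
    num_workers = 4
    queue = Queue()
    workers = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for w in workers:
        w.start()
    try:
        while True:                    # in the question this polls forever
            for record in poll_for_records():
                queue.put(record)
            time.sleep(20)
    except KeyboardInterrupt:
        pass
    finally:
        for _ in workers:
            queue.put(None)            # one sentinel per worker
        for w in workers:
            w.join()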

Python Multiprocessing Pipe "Deadlock"

I'm facing problems with the following example code:
from multiprocessing import Lock, Process, Queue, current_process

def worker(work_queue, done_queue):
    for item in iter(work_queue.get, 'STOP'):
        print("adding ", item, "to done queue")
        #this works: done_queue.put(item*10)
        done_queue.put(item*1000) #this doesnt!
    return True

def main():
    workers = 4
    work_queue = Queue()
    done_queue = Queue()
    processes = []
    for x in range(10):
        work_queue.put("hi" + str(x))
    for w in range(workers):
        p = Process(target=worker, args=(work_queue, done_queue))
        p.start()
        processes.append(p)
        work_queue.put('STOP')
    for p in processes:
        p.join()
    done_queue.put('STOP')
    for item in iter(done_queue.get, 'STOP'):
        print(item)

if __name__ == '__main__':
    main()
When the done queue becomes big enough (a limit of about 64k, I think), the whole thing freezes without any further notice.
What is the general approach for a situation like this, when the queue becomes too big? Is there some way to remove elements on the fly once they are processed? The Python docs recommend removing the p.join(), but in a real application I cannot estimate when the processes have finished. Is there a simple solution for this problem besides looping infinitely and using .get_nowait()?
This works for me with 3.4.0alpha4, 3.3, 3.2, 3.1 and 2.6. It tracebacks with 2.7 and 3.0. I pylint'd it, BTW.
#!/usr/local/cpython-3.3/bin/python
'''SSCCE for a queue deadlock'''
import sys
import multiprocessing

def worker(workerno, work_queue, done_queue):
    '''Worker function'''
    #reps = 10 # this worked for the OP
    #reps = 1000 # this worked for me
    reps = 10000 # this didn't
    for item in iter(work_queue.get, 'STOP'):
        print("adding", item, "to done queue")
        #this works: done_queue.put(item*10)
        for thing in item * reps:
            #print('workerno: {}, adding thing {}'.format(workerno, thing))
            done_queue.put(thing)
    done_queue.put('STOP')
    print('workerno: {0}, exited loop'.format(workerno))
    return True

def main():
    '''main function'''
    workers = 4
    work_queue = multiprocessing.Queue(maxsize=0)
    done_queue = multiprocessing.Queue(maxsize=0)
    processes = []
    for integer in range(10):
        work_queue.put("hi" + str(integer))
    for workerno in range(workers):
        dummy = workerno
        process = multiprocessing.Process(target=worker, args=(workerno, work_queue, done_queue))
        process.start()
        processes.append(process)
        work_queue.put('STOP')
    itemno = 0
    stops = 0
    while True:
        item = done_queue.get()
        itemno += 1
        sys.stdout.write('itemno {0}\r'.format(itemno))
        if item == 'STOP':
            stops += 1
            if stops == workers:
                break
    print('exited done_queue empty loop')
    for workerno, process in enumerate(processes):
        print('attempting process.join() of workerno {0}'.format(workerno))
        process.join()
    done_queue.put('STOP')

if __name__ == '__main__':
    main()
HTH
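For context on why the original code froze: as the multiprocessing docs note under "Joining processes that use queues", a child that has put items on a queue will not exit until those items are flushed to the underlying pipe, so once the pipe buffer fills (the roughly 64k the question mentions) the children block on put() while the parent blocks on join(). The answer above avoids this by draining done_queue before joining; a condensed sketch of just that fix:

from multiprocessing import Process, Queue

def worker(work_queue, done_queue):
    for item in iter(work_queue.get, 'STOP'):
        for part in item * 10000:
            done_queue.put(part)
    done_queue.put('STOP')  # each worker announces it is finished

if __name__ == '__main__':
    workers = 4
    work_queue, done_queue = Queue(), Queue()
    for x in range(10):
        work_queue.put("hi" + str(x))
    procs = []
    for _ in range(workers):
        p = Process(target=worker, args=(work_queue, done_queue))
        p.start()
        procs.append(p)
        work_queue.put('STOP')
    # drain the results *before* joining, otherwise the pipe can fill and deadlock
    stops, results = 0, []
    while stops < workers:
        item = done_queue.get()
        if item == 'STOP':
            stops += 1
        else:
            results.append(item)
    for p in procs:
        p.join()
    print(len(results))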

Why does my Python multiprocessing code run so slowly?

# multi-processes
from multiprocessing import Process, Queue

class Worker(object):
    def __init__(self, queue):
        self.queue = queue
        self.process_num = 10   # <------------ 10 processes
        self.count = 0

    def start(self):
        for i in range(self.process_num):
            p = Process(target = self.run)
            p.start()
            p.join()

    def run(self):
        while True:
            self.count += 1
            user = self.queue.get()
            # do something not so fast like time.sleep(1)
            print self.count
            if self.queue.empty():
                break
I use Worker().start(queue) to start the program, but the output is not as fast as I expected (it seems only one process is running).
Is there any problem in my code?
Yes, you're only running one process at a time, because you wait for each process to terminate before starting the next:

def start(self):
    for i in range(self.process_num):
        p = Process(target = self.run)
        p.start()    # <-- starts a new process
        p.join()     # <-- waits for the process to terminate

In other words, you're starting 10 processes, but the second one won't start until the first one terminates, and so on.
For what you're trying to do, it may be better not to use Process manually and instead use a Process Pool.
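A rough sketch of that Pool suggestion, assuming the work items can come from a plain iterable rather than a Queue (handle_user is a hypothetical stand-in for the real per-user work):

from multiprocessing import Pool
import time

def handle_user(user):
    # stand-in for the "do something not so fast" step
    time.sleep(1)
    return user

if __name__ == '__main__':
    users = range(100)                 # hypothetical work items
    with Pool(processes=10) as pool:   # all 10 workers run concurrently
        results = pool.map(handle_user, users)
    print(len(results))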

Dynamically calculate the number of processes to be spawned

I have a list of about 15 years in the year_queue, and I need to spawn one process for each year. But the number of processors varies depending on which server I run the code on. How do I dynamically vary the variable num_processes depending on the number of processors in the server?
If I set num_processes > the number of processors, will it automatically spawn accordingly? When I test this, it creates 15 processes and splits the CPU power between them. I am looking for a way to first create n processes, where n = the number of processors in the server, and then, as each of those processes finishes, spawn the next one.
for i in range(num_processes):
    worker = ForEachPerson(year_queue, result_queue, i, dict_of_files)
    print "worker spawned for " + str(i)
    worker.start()
results = []
while len(results) < len(years):
    result = result_queue.get()
    results.append(result)
Has anyone had the same issue?
while year_queue.empty() != True:
    for i in range(num_processes):
        worker = ForEachPerson(year_queue, result_queue, i, dict_of_files)
        print "worker spawned for " + str(i)
        worker.start()
# collect results off the queue
print "results being collected"
results = []
while len(results) < len(num_processes):
    result = result_queue.get()
    results.append(result)
Use a multiprocessing Pool. The class does all the tedious work of selecting the right number of processes and running them for you. It also doesn't spawn a new process for each task, but reuses processes once they're done.
def process_year(year):
    ...
    return result

pool = multiprocessing.Pool()
# note: map() expects an iterable of years (e.g. a plain list), not a multiprocessing.Queue
results = pool.map(process_year, year_queue)
from multiprocessing import Process, Queue, cpu_count
from Queue import Empty

class ForEachPerson(Process):
    def __init__(self, year_queue, result_queue, i, dict_of_files):
        self.year_queue = year_queue
        self.result_queue = result_queue
        self.i = i
        self.dict_of_files = dict_of_files
        super(ForEachPerson, self).__init__()

    def run(self):
        while True:
            try:
                year = self.year_queue.get()
                ''' Do something '''
                self.result_queue.put(year)
            except Empty:
                self.result_queue.close()
                return

if __name__ == '__main__':
    year_queue = Queue()
    result_queue = Queue()
    dict_of_files = {}
    start_year = 1996
    num_years = 15

    for year in range(start_year, start_year + num_years):
        year_queue.put(year)

    workers = []
    for i in range(cpu_count()):
        worker = ForEachPerson(year_queue, result_queue, i, dict_of_files)
        print 'worker spawned for', str(i)
        worker.start()
        workers.append(worker)

    results = []
    while len(results) < num_years:
        try:
            year = result_queue.get()
            results.append(year)
            print 'Result:', year
        except Empty:
            pass

    for worker in workers:
        worker.terminate()
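Tying the two answers together: a Pool sized with cpu_count() gives exactly the behaviour asked for, since n worker processes are created up front and each picks up the next year as soon as it finishes the previous one, while imap_unordered yields results in completion order (a Python 3 sketch; process_year is a hypothetical stand-in):

from multiprocessing import Pool, cpu_count

def process_year(year):
    # hypothetical per-year work
    return year, year % 7

if __name__ == '__main__':
    years = range(1996, 1996 + 15)
    with Pool(processes=cpu_count()) as pool:
        # results arrive in completion order, not submission order
        for year, result in pool.imap_unordered(process_year, years):
            print('Result:', year, result)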
