Dynamically calculate the number of processes to be spawned - Python

I have a list of about 15 years in the year_queue and I need to spawn one process for each year. But the number of processors varies depending on which server the code runs on. How do I dynamically set the variable num_processes to the number of processors on the server?
If I set num_processes > the number of processors, does it automatically spawn accordingly? When I test this, it creates 15 processes and splits the CPU power between them. What I want instead is to first create n processes, where n = the number of processors on the server, and then spawn the next process as each of those finishes.
for i in range(num_processes):
    worker = ForEachPerson(year_queue, result_queue, i, dict_of_files)
    print "worker spawned for " + str(i)
    worker.start()

results = []
while len(results) < len(years):
    result = result_queue.get()
    results.append(result)
Has anyone had the same issue? Here is what I tried next:
while year_queue.empty() != True:
    for i in range(num_processes):
        worker = ForEachPerson(year_queue, result_queue, i, dict_of_files)
        print "worker spawned for " + str(i)
        worker.start()

    # collect results off the queue
    print "results being collected"
    results = []
    while len(results) < num_processes:
        result = result_queue.get()
        results.append(result)

Use a multiprocessing Pool. The class does all the tedious work of selecting the right number of processes and running them for you; by default it starts one worker process per CPU. It also doesn't spawn a new process for each task, but reuses processes once they're done.
def process_year(year):
    ...
    return result

pool = multiprocessing.Pool()
# map() needs an ordinary iterable (e.g. the list of years), not a Queue
results = pool.map(process_year, years)
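If you also want to handle results as individual years finish, rather than waiting for the whole batch, Pool.imap_unordered yields them in completion order. A minimal sketch, assuming process_year stands in for the real per-year work:

import multiprocessing

def process_year(year):
    # stand-in for the real per-year work
    return year

if __name__ == '__main__':
    years = range(1996, 2011)        # 15 years, as in the question
    # Pool() defaults to one worker process per CPU; pass processes=n to override
    pool = multiprocessing.Pool()
    # imap_unordered hands one year to each idle worker and yields results
    # in whatever order they finish
    for result in pool.imap_unordered(process_year, years):
        print(result)
    pool.close()
    pool.join()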

from multiprocessing import Process, Queue, cpu_count
from Queue import Empty

class ForEachPerson(Process):
    def __init__(self, year_queue, result_queue, i, dict_of_files):
        self.year_queue = year_queue
        self.result_queue = result_queue
        self.i = i
        self.dict_of_files = dict_of_files
        super(ForEachPerson, self).__init__()
    def run(self):
        while True:
            try:
                # use a timeout so Empty is raised once the queue runs dry;
                # a plain get() would block forever
                year = self.year_queue.get(timeout=1)
                ''' Do something '''
                self.result_queue.put(year)
            except Empty:
                self.result_queue.close()
                return
if __name__ == '__main__':
    year_queue = Queue()
    result_queue = Queue()
    dict_of_files = {}
    start_year = 1996
    num_years = 15

    for year in range(start_year, start_year + num_years):
        year_queue.put(year)

    workers = []
    for i in range(cpu_count()):
        worker = ForEachPerson(year_queue, result_queue, i, dict_of_files)
        print 'worker spawned for', str(i)
        worker.start()
        workers.append(worker)
    results = []
    while len(results) < num_years:
        year = result_queue.get()    # blocking get; Empty is never raised here
        results.append(year)
        print 'Result:', year

    for worker in workers:
        worker.terminate()

Related

Python in AWS Lambda: multiprocessing, terminate other processes when one finished

I learned from another question that AWS Lambda does not support multiprocessing.Pool and multiprocessing.Queue.
I'm also working on Python multiprocessing in AWS Lambda, but my question is: how do we terminate the main process when the first child process returns? (The child processes all return after different execution times.)
Here is what I have:
import time
from multiprocessing import Process, Pipe

class run_func():
    number = 0

    def __init__(self, number):
        self.number = number

    def subrun(self, input, conn):
        # subprocess function with different execution time based on input.
        response = subprocess(input)
        conn.send([input, response])
        conn.close()

    def run(self):
        number = self.number
        processes = []
        parent_connections = []
        for i in range(0, number):
            parent_conn, child_conn = Pipe()
            parent_connections.append(parent_conn)
            process = Process(target=self.subrun, args=(i, child_conn,))
            processes.append(process)
        for process in processes:
            process.start()
        for process in processes:
            process.join()
        results = []
        for parent_connection in parent_connections:
            resp = parent_connection.recv()
            print(resp)
            results.append((resp[0], resp[1]))
        return results
def lambda_handler(event, context):
    starttime = time.time()
    results = []
    work = run_func(int(event['number']))
    results = work.run()
    print("Results : {}".format(results))
    print('Time: {} seconds'.format(time.time() - starttime))
    return results
The current program only returns after all child processes finish (because of the for parent_connection in parent_connections loop). But how do I terminate as soon as the first child process finishes? (Terminating the main process is enough; it's OK to leave the other children running.)
Added:
To be clear, I mean the first child process to return (which may not be the first child created).
The join() loop is what waits for all the child processes to complete.
If you break out of it after the first join() returns and forcefully terminate the remaining processes, it will do what you want:
class run_func():
    number = 0

    def __init__(self, number):
        self.number = number

    def subrun(self, input, conn):
        # subprocess function with different execution time based on input.
        response = subprocess(input)
        conn.send([input, response])
        conn.close()

    def run(self):
        number = self.number
        processes = []
        parent_connections = []
        for i in range(0, number):
            parent_conn, child_conn = Pipe()
            parent_connections.append(parent_conn)
            process = Process(target=self.subrun, args=(i, child_conn,))
            processes.append(process)
        for process in processes:
            process.start()
        for process in processes:
            process.join()
            # stop waiting as soon as the first join() returns
            break
        results = []
        for parent_connection in parent_connections:
            resp = parent_connection.recv()
            print(resp)
            results.append((resp[0], resp[1]))
        return results
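If the goal is to react to whichever child finishes first (not the first one created, which is what joining processes[0] waits for), multiprocessing.connection.wait() blocks until any of the parent-side pipe ends has data. A minimal self-contained sketch; child() here is a stand-in for subrun():

import random
import time
from multiprocessing import Process, Pipe
from multiprocessing.connection import wait

def child(i, conn):
    # stand-in for subrun(): work for a random amount of time, then report back
    time.sleep(random.uniform(0.1, 2.0))
    conn.send([i, 'done'])
    conn.close()

def run_until_first(number):
    processes, parent_conns = [], []
    for i in range(number):
        parent_conn, child_conn = Pipe()
        parent_conns.append(parent_conn)
        p = Process(target=child, args=(i, child_conn))
        processes.append(p)
        p.start()
    # wait() returns as soon as at least one connection is ready to read
    first = wait(parent_conns)[0].recv()
    # the remaining children are no longer needed; stop them forcefully
    for p in processes:
        if p.is_alive():
            p.terminate()
    return first

if __name__ == '__main__':
    print(run_until_first(4))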

Problem trying to make two child processes share the load of processing the same resource

I'm messing around with the Python multiprocessing module, but something is not working as I expected, so now I'm a little confused.
In a Python script, I create two child processes so they can work on the same resource. I was expecting them to share the load more or less equally, but it seems that instead, one of the processes executes just once while the other one processes almost everything.
To test it, I wrote the following code:
#!/usr/bin/python
import os
import multiprocessing

# Worker function
def worker(queueA, queueB):
    while(queueA.qsize() != 0):
        item = queueA.get()
        item = "item: " + item + ". processed by worker " + str(os.getpid())
        queueB.put(item)
    return

# IPC Manager
manager = multiprocessing.Manager()
queueA = multiprocessing.Queue()
queueB = multiprocessing.Queue()

# Fill queueA with data
for i in range(0, 10):
    queueA.put("hello" + str(i+1))

# Create processes
process1 = multiprocessing.Process(target = worker, args = (queueA, queueB,))
process2 = multiprocessing.Process(target = worker, args = (queueA, queueB,))

# Call processes
process1.start()
process2.start()

# Wait for processes to stop processing
process1.join()
process2.join()

for i in range(0, queueB.qsize()):
    print queueB.get()
And that prints the following:
item: hello1. processed by worker 11483
item: hello3. processed by worker 11483
item: hello4. processed by worker 11483
item: hello5. processed by worker 11483
item: hello6. processed by worker 11483
item: hello7. processed by worker 11483
item: hello8. processed by worker 11483
item: hello9. processed by worker 11483
item: hello10. processed by worker 11483
item: hello2. processed by worker 11482
As you can see, one of the processes works on just one of the elements and doesn't go back for more, while the other one has to work on everything else.
I think this is not correct, or at least not what I expected. Could you tell me the correct way of implementing this idea?
You're right that they won't be exactly equal, but that's mostly because your test sample is so small. It takes time for each process to start up and begin processing, and the time it takes to process an item in the queue is extremely low, so one process can easily get through 9 items before the other gets through one.
I tested this below (in Python 3, but it should apply to 2.7 as well; just change the print() function to a print statement):
import os
import multiprocessing

# Worker function
def worker(queueA, queueB):
    for item in iter(queueA.get, 'STOP'):
        out = str(os.getpid())
        queueB.put(out)
    return

# IPC Manager
manager = multiprocessing.Manager()
queueA = multiprocessing.Queue()
queueB = multiprocessing.Queue()

# Fill queueA with data
for i in range(0, 1000):
    queueA.put("hello" + str(i+1))

# Create processes
process1 = multiprocessing.Process(target = worker, args = (queueA, queueB,))
process2 = multiprocessing.Process(target = worker, args = (queueA, queueB,))

# Call processes
process1.start()
process2.start()

queueA.put('STOP')
queueA.put('STOP')

# Wait for processes to stop processing
process1.join()
process2.join()

all = {}
for i in range(1000):
    item = queueB.get()
    if item not in all:
        all[item] = 1
    else:
        all[item] += 1
print(all)
My output (a count of how many were done from each process):
{'18376': 537,
'18377': 463}
While they aren't exactly the same, with longer runs the two counts approach an even split.
Edit:
Another way to confirm this is to add a time.sleep(3) inside the worker function
import time

def worker(queueA, queueB):
    for item in iter(queueA.get, 'STOP'):
        time.sleep(3)
        out = str(os.getpid())
        queueB.put(out)
    return
I ran a range(10) test like in your original example and got:
{'18428': 5,
'18429': 5}
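For comparison, a Pool gives the same kind of even-ish split without hand-rolled workers; chunksize=1 hands out one item at a time so an idle process can always grab the next. A minimal sketch (tag_with_pid is a made-up stand-in for real work, and the printed counts are illustrative):

import os
import multiprocessing
from collections import Counter

def tag_with_pid(item):
    # stand-in for real work: just report which process handled the item
    return os.getpid()

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=2)
    # chunksize=1 dispatches items one at a time instead of in large batches
    pids = pool.map(tag_with_pid, range(1000), chunksize=1)
    pool.close()
    pool.join()
    print(Counter(pids))          # e.g. Counter({12345: 520, 12346: 480})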

Python multiprocessing processes terminating?

I've read a number of answers here on Stackoverflow about Python multiprocessing, and I think this one is the most useful for my purposes: python multiprocessing queue implementation.
Here is what I'd like to do: poll the database for new work, put it in the queue, and have 4 processes continuously do the work. What I'm unclear on is what happens once an item in the queue has been processed. In the question above, the process terminates when the queue is empty. However, in my case I'd like to keep waiting until there is data in the queue. So do I just sleep and periodically check the queue? Then my worker processes would never die; is that good practice?
def mp_worker(queue):
    while True:
        if (queue.qsize() == 0):
            time.sleep(20)
        else:
            db_record = queue.get()
            process_file(db_record)

def mp_handler():
    num_workers = 4
    processes = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()

if __name__ == '__main__':
    queue = Queue()    # shared work queue (definition was missing from the snippet)
    db_conn = db.create_postgre_connection(DB_CONFIG)
    while True:
        db_records = db.retrieve_received_files(db_conn)
        if (len(db_records) > 0):
            for db_record in db_records:
                queue.put(db_record)
            mp_handler()
        else:
            time.sleep(20)
    db_conn.close()
Does it make sense?
Thanks.
Figured it out. Workers have to die, since otherwise they never return. But I start a new set of workers when there is data anyway, so that's not a problem. Updated code:
def mp_worker(queue):
    while queue.qsize() > 0:
        db_record = queue.get()
        process_file(db_record)

def mp_handler():
    num_workers = 4
    if (queue.qsize() < num_workers):
        num_workers = queue.qsize()
    processes = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()

if __name__ == '__main__':
    queue = Queue()    # shared work queue (definition was missing from the snippet)
    while True:
        db_records = db.retrieve_received_files(DB_CONN)
        print(db_records)
        if (len(db_records) > 0):
            for db_record in db_records:
                queue.put(db_record)
            mp_handler()
        else:
            time.sleep(20)
    DB_CONN.close()
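For what it's worth, another option is a shutdown sentinel: push one None per worker so each worker exits cleanly when it sees it, instead of polling qsize() (which can race when the queue empties while another worker is mid-get). A minimal sketch, with process_file as a stand-in for the real work:

from multiprocessing import Process, Queue

def process_file(db_record):
    # stand-in for the real per-record work
    print(db_record)

def mp_worker(queue):
    while True:
        db_record = queue.get()       # blocks until something arrives
        if db_record is None:         # shutdown sentinel
            break
        process_file(db_record)

def mp_handler(db_records, num_workers=4):
    queue = Queue()
    for db_record in db_records:
        queue.put(db_record)
    for _ in range(num_workers):
        queue.put(None)               # one sentinel per worker
    processes = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()

if __name__ == '__main__':
    mp_handler(['record-%d' % i for i in range(10)])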

How to kill finished threads in Python

My multi-threading script raises this error:
thread.error : can't start new thread
when it reaches 460 threads:
threading.active_count() = 460
I assume the old threads keep stacking up, since the script doesn't kill them. This is my code:
import threading
import Queue
import time
import os
import csv

def main(worker):
    # Do Work
    print worker
    return

def threader():
    while True:
        worker = q.get()
        main(worker)
        q.task_done()

def main_threader(workers):
    global q
    global city
    q = Queue.Queue()
    for x in range(20):
        t = threading.Thread(target=threader)
        t.daemon = True
        print "\n\nthreading.active_count() = " + str(threading.active_count()) + "\n\n"
        t.start()
    for worker in workers:
        q.put(worker)
    q.join()
How do I kill the old threads when their job is done? (Is return not enough?)
Your threader function never exits, so your threads never die. Since you're just processing one fixed set of work and never adding items after you start working, you could set the threads up to exit when the queue is empty.
See the following altered version of your code and the comments I added:
def threader(q):
    # let the thread die when all work is done
    while not q.empty():
        worker = q.get()
        main(worker)
        q.task_done()

def main_threader(workers):
    # you don't want global variables
    #global q
    #global city
    q = Queue.Queue()
    # make sure you fill the queue *before* starting the worker threads
    for worker in workers:
        q.put(worker)
    for x in range(20):
        t = threading.Thread(target=threader, args=[q])
        t.daemon = True
        print "\n\nthreading.active_count() = " + str(threading.active_count()) + "\n\n"
        t.start()
    q.join()
Notice that I removed global q and instead pass q to the thread function. You don't want threads created by a previous call to end up sharing a q with new threads (edit: although q.join() prevents this anyway, it's still better to avoid globals).
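If you can use Python 3 (or the futures backport on PyPI for Python 2), concurrent.futures.ThreadPoolExecutor manages a fixed set of worker threads for you, so there is nothing to kill by hand; the threads are reused and torn down when the executor exits. A minimal sketch, with main() standing in for the real work:

from concurrent.futures import ThreadPoolExecutor

def main(worker):
    # stand-in for the real per-item work
    return worker

def main_threader(workers):
    # 20 threads are created once, reused for every item, and
    # shut down automatically when the with-block exits
    with ThreadPoolExecutor(max_workers=20) as executor:
        for result in executor.map(main, workers):
            print(result)

if __name__ == '__main__':
    main_threader(range(100))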

Python multiprocessing and handling exceptions in workers

I use the Python multiprocessing library for an algorithm in which many workers process certain data and return results to the parent process. I use one multiprocessing.Queue for passing jobs to the workers, and a second one to collect the results.
It all works pretty well, until a worker fails to process some chunk of data. In the simplified example below each worker has two phases:
initialization - can fail; in this case the worker should be destroyed
data processing - processing a chunk of data can fail; in this case the worker should skip this chunk and continue with the next data.
When either of these phases fails I get a deadlock after script completion. This code simulates my problem:
import multiprocessing as mp
import random

workers_count = 5
# Probability of failure, change to simulate failures
fail_init_p = 0.2
fail_job_p = 0.3


#========= Worker =========
def do_work(job_state, arg):
    if random.random() < fail_job_p:
        raise Exception("Job failed")
    return "job %d processed %d" % (job_state, arg)

def init(args):
    if random.random() < fail_init_p:
        raise Exception("Worker init failed")
    return args

def worker_function(args, jobs_queue, result_queue):
    # INIT
    # What to do when init() fails?
    try:
        state = init(args)
    except:
        print "!Worker %d init fail" % args
        return

    # DO WORK
    # Process data in the jobs queue
    for job in iter(jobs_queue.get, None):
        try:
            # Can throw an exception!
            result = do_work(state, job)
            result_queue.put(result)
        except:
            print "!Job %d failed, skip..." % job
        finally:
            jobs_queue.task_done()

    # Telling that we are done with processing stop token
    jobs_queue.task_done()


#========= Parent =========
jobs = mp.JoinableQueue()
results = mp.Queue()
for i in range(workers_count):
    mp.Process(target=worker_function, args=(i, jobs, results)).start()

# Populate jobs queue
results_to_expect = 0
for j in range(30):
    jobs.put(j)
    results_to_expect += 1

# Collecting the results
# What if some workers failed to process the job and we have
# less results than expected
for r in range(results_to_expect):
    result = results.get()
    print result

# Signal all workers to finish
for i in range(workers_count):
    jobs.put(None)

# Wait for them to finish
jobs.join()
I have two questions about this code:
When init() fails, how do I detect that the worker is invalid and avoid waiting for it to finish?
When do_work() fails, how do I notify the parent process that fewer results should be expected in the results queue?
Thank you for the help!
I changed your code slightly to make it work (see explanation below).
import multiprocessing as mp
import random

workers_count = 5
# Probability of failure, change to simulate failures
fail_init_p = 0.5
fail_job_p = 0.4


#========= Worker =========
def do_work(job_state, arg):
    if random.random() < fail_job_p:
        raise Exception("Job failed")
    return "job %d processed %d" % (job_state, arg)

def init(args):
    if random.random() < fail_init_p:
        raise Exception("Worker init failed")
    return args

def worker_function(args, jobs_queue, result_queue):
    # INIT
    # What to do when init() fails?
    try:
        state = init(args)
    except:
        print "!Worker %d init fail" % args
        result_queue.put('init failed')
        return

    # DO WORK
    # Process data in the jobs queue
    for job in iter(jobs_queue.get, None):
        try:
            # Can throw an exception!
            result = do_work(state, job)
            result_queue.put(result)
        except:
            print "!Job %d failed, skip..." % job
            result_queue.put('job failed')


#========= Parent =========
jobs = mp.Queue()
results = mp.Queue()
for i in range(workers_count):
    mp.Process(target=worker_function, args=(i, jobs, results)).start()

# Populate jobs queue
results_to_expect = 0
for j in range(30):
    jobs.put(j)
    results_to_expect += 1

init_failures = 0
job_failures = 0
successes = 0
while job_failures + successes < 30 and init_failures < workers_count:
    result = results.get()
    init_failures += int(result == 'init failed')
    job_failures += int(result == 'job failed')
    successes += int(result != 'init failed' and result != 'job failed')
    #print init_failures, job_failures, successes

for ii in range(workers_count):
    jobs.put(None)
My changes:
Changed jobs to be just a normal Queue (instead of a JoinableQueue).
Workers now communicate special result strings "init failed" and "job failed" back to the parent (see the sketch below for a variant that uses tagged tuples instead).
The master process keeps watching for those special results until its stop conditions are met.
In the end, put "stop" requests (i.e. None jobs) for however many workers you have, regardless. Note that not all of these may be pulled from the queue (in case a worker failed to initialize).
By the way, your original code was nice and easy to work with. The random probabilities bit is pretty cool.
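If matching on magic strings feels fragile, the same bookkeeping works with tagged tuples, and the worker can ship the actual traceback text back to the parent. A minimal self-contained sketch; flaky() and the 10-job run are made up for illustration:

import multiprocessing as mp
import traceback

def flaky(x):
    if x % 3 == 0:
        raise ValueError("bad input %d" % x)
    return x * x

def worker(jobs, results):
    for job in iter(jobs.get, None):
        try:
            results.put(('ok', flaky(job)))
        except Exception:
            # ship the traceback text back instead of a bare magic string
            results.put(('error', traceback.format_exc()))

if __name__ == '__main__':
    jobs, results = mp.Queue(), mp.Queue()
    workers = [mp.Process(target=worker, args=(jobs, results)) for _ in range(2)]
    for w in workers:
        w.start()
    for j in range(10):
        jobs.put(j)
    for _ in workers:
        jobs.put(None)              # one stop sentinel per worker
    for _ in range(10):             # exactly one tagged result per job
        status, payload = results.get()
        print("%s %s" % (status, payload))
    for w in workers:
        w.join()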
