Python Locking Critical Section

I am trying to use the multiprocessing library in Python to process "tests" concurrently. I have a list of tests stored in the variable test_files. I want the workers to remove a test from test_files and call the process_test function on it. However, when I run this code, both processes run the same test. It seems that I am not accessing test_files in a thread-safe manner. What am I doing wrong?
Code
def process_worker(lock, test_files):
    # Keep going until we run out of tests
    while True:
        test_file = None
        # Critical section of code
        lock.acquire()
        try:
            if len(test_files) != 0:
                test_file = test_files.pop()
        finally:
            lock.release()
        # End critical section of code
        # If there is another test in the queue process it
        if test_file is not None:
            print "Running test {0} on worker {1}".format(test_file, multiprocessing.current_process().name)
            process_test(test_file)
        else:
            # No more tests to process
            return

# Mutex for workers
lock = multiprocessing.Lock()
# Declare our workers
p1 = multiprocessing.Process(target = process_worker, name = "Process 1", args=(lock, test_files))
p2 = multiprocessing.Process(target = process_worker, name = "Process 2", args=(lock, test_files))
# Start processing
p1.start()
p2.start()
# Block until both workers finish
p1.join()
p2.join()
Output
Running test "BIT_Test" on worker Process 1
Running test "BIT_Test" on worker Process 2

Trying to share a list like this is not the right approach here. You should use a process-safe data structure, like multiprocessing.Queue, or better yet, use a multiprocessing.Pool and let it handle the queuing for you. What you're doing is perfectly suited for Pool.map:
import multiprocessing

def process_worker(test_file):
    print "Running test {0} on worker {1}".format(test_file, multiprocessing.current_process().name)
    process_test(test_file)

p = multiprocessing.Pool(2) # 2 processes in the pool
# map puts each item from test_files in a Queue, lets the
# two processes in our pool pull each item from the Queue,
# and then execute process_worker with that item as an argument.
p.map(process_worker, test_files)
p.close()
p.join()
Much simpler!
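If you would rather manage the workers yourself than use a Pool, a multiprocessing.Queue is the process-safe structure mentioned above. A minimal sketch under the question's assumptions (process_test and test_files come from the original code; the None sentinels are my own addition):

import multiprocessing

def process_worker(queue):
    # Each worker pulls test files from the shared queue until it sees the sentinel.
    while True:
        test_file = queue.get()
        if test_file is None:  # sentinel: no more tests
            break
        print("Running test {0} on worker {1}".format(test_file, multiprocessing.current_process().name))
        process_test(test_file)  # assumed to exist, as in the question

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    for test_file in test_files:  # test_files as defined in the question
        queue.put(test_file)
    workers = [multiprocessing.Process(target=process_worker, args=(queue,)) for _ in range(2)]
    for _ in workers:
        queue.put(None)  # one sentinel per worker
    for w in workers:
        w.start()
    for w in workers:
        w.join()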

You could also use multiprocessing.Manager
import multiprocessing

def process_worker(lock, test_files):
    # Keep going until we run out of tests
    while True:
        test_file = None
        # Critical section of code
        lock.acquire()
        try:
            if len(test_files) != 0:
                test_file = test_files.pop()
        finally:
            lock.release()
        # End critical section of code
        # If there is another test in the queue process it
        if test_file is not None:
            print "Running test %s on worker %s" % (test_file, multiprocessing.current_process().name)
            #process_test(test_file)
        else:
            # No more tests to process
            return

# Mutex for workers
lock = multiprocessing.Lock()
manager = multiprocessing.Manager()
test_files = manager.list(['f1', 'f2', 'f3'])
# Declare our workers
p1 = multiprocessing.Process(target = process_worker, name = "Process 1", args=(lock, test_files))
p2 = multiprocessing.Process(target = process_worker, name = "Process 2", args=(lock, test_files))
# Start processing
p1.start()
p2.start()
# Block until both workers finish
p1.join()
p2.join()
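For what it's worth, the reason the original version runs the same test in both workers is that a plain list passed in args is copied into each child process (pickled under the spawn start method), so each worker pops from its own private copy and neither the parent nor the sibling ever sees the change. A small illustrative sketch (the list and names here are made up for the demo):

import multiprocessing

def pop_one(items):
    # The child modifies only its own copy of the list.
    items.pop()
    print("child sees {0} items".format(len(items)))

if __name__ == '__main__':
    items = ['a', 'b', 'c']
    p = multiprocessing.Process(target=pop_one, args=(items,))
    p.start()
    p.join()
    # The parent's list is untouched: still 3 items.
    print("parent sees {0} items".format(len(items)))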

Related

Stop a process when an error occurs in multiprocessing

I have created 3 processes in Python; the code is attached below.
Now I want to stop the execution of the running p2 and p3 processes because I got an error due to the p1 process. I have an idea to add p2.terminate(), but I don't know where to add it in this case. Thanks in advance.
def table(a):
    try:
        for i in range(100):
            print(i,'x',a,'=',a*i)
    except:
        print("error")

processes = []
p1 = multiprocessing.Process(target = table, args=['s'])
p2 = multiprocessing.Process(target = table, args=[5])
p3 = multiprocessing.Process(target = table, args=[2])
p1.start()
p2.start()
p3.start()
processes.append(p1)
processes.append(p2)
processes.append(p3)
for process in processes:
    process.join()
To stop any given process once one of the processes terminates due to an error, first set up your target table() to exit with an appropriate exitcode > 0:
import sys

def table(a):
    try:
        for i in range(100):
            print(i,'x', a ,'=', a*i)
    except:
        sys.exit(1)
    sys.exit(0)
Then you can start your processes and poll the processes to see if any one has terminated.
#!/usr/bin/env python3
# coding: utf-8

import multiprocessing
import time
import logging
import sys

logging.basicConfig(level=logging.INFO, format='[%(asctime)-15s] [%(processName)-10s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')

def table(args):
    try:
        for i in range(5):
            logging.info('{} x {} = {}'.format(i, args, i*args))
            if isinstance(args, str):
                raise ValueError()
            time.sleep(5)
    except:
        logging.error('Done in Error Path: {}'.format(args))
        sys.exit(1)
    logging.info('Done in Success Path: {}'.format(args))
    sys.exit(0)

if __name__ == '__main__':
    p1 = multiprocessing.Process(target=table, args=('s',))
    p2 = multiprocessing.Process(target=table, args=(5,))
    p3 = multiprocessing.Process(target=table, args=(2,))
    processes = [p1, p2, p3]
    for process in processes:
        process.start()
    while True:
        failed = []
        completed = []
        for process in processes:
            if process.exitcode is not None and process.exitcode != 0:
                failed.append(process)
            elif process.exitcode == 0:
                # track processes that finished successfully
                completed.append(process)
        if failed:
            for process in processes:
                if process not in failed:
                    logging.info('Terminating Process: {}'.format(process))
                    process.terminate()
            break
        if len(completed) == len(processes):
            break
        time.sleep(1)
Essentially, you are using terminate() to stop the remaining processes that are still running.
To stop all cores when one core has faced an error, I use this code block:
processes = []
for j in range(0, n_core):
    p = multiprocessing.Process(target=table, args=('some input',))
    processes.append(p)
    time.sleep(0.1)
    p.start()

flag = True
while flag:
    flag = False
    for p in processes:
        if p.exitcode == 1:
            for z in processes:
                z.kill()
            sys.exit(1)
        elif p.is_alive():
            flag = True

for p in processes:
    p.join()
First, I have modified function table to throw an exception that is not caught when the argument passed to it is 's', and to delay .1 seconds otherwise before printing, giving the main process a chance to realize that the sub-process threw an exception and to cancel the other processes before they have started printing. Otherwise, the other processes will have completed before you can cancel them. Here I am using a process pool, which supports a terminate method that conveniently terminates all submitted, uncompleted tasks without having to cancel each one individually (although that is also an option).
The code creates a multiprocessing pool of size 3, since that is the number of "tasks" being submitted, and then uses method apply_async to submit the 3 tasks to run in parallel (assuming you have at least 3 processors). apply_async returns an AsyncResult instance whose get method can be called to wait for the completion of the submitted task and to get the return value from the worker function table, which is None for the second and third tasks submitted and of no interest, or to re-raise the exception if the worker function had an uncaught exception, which is the case with the first task submitted:
import multiprocessing
import time

def table(a):
    if a == 's':
        raise Exception('I am "s"')
    time.sleep(.1)
    for i in range(100):
        print(i,'x',a,'=',a*i)

# required for Windows:
if __name__ == '__main__':
    pool = multiprocessing.Pool(3) # create a pool of 3 processes
    result1 = pool.apply_async(table, args=('s',))
    result2 = pool.apply_async(table, args=(5,))
    result3 = pool.apply_async(table, args=(2,))
    try:
        result1.get() # wait for completion of first task
    except Exception as e:
        print(e)
        pool.terminate() # kill all processes in the pool
    else:
        # wait for all submitted tasks to complete:
        pool.close()
        pool.join()
        """
        # or alternatively:
        result2.get() # wait for second task to finish
        result3.get() # wait for third task to finish
        """
Prints:
I am "s"

Python multiprocessing possible deadlock with two queues as producer-consumer pattern?

I'm wondering if there can be a sort of deadlock in the following code. I have to read each element of a database (about 1 million items), process it, then collect the results in a single file.
I've parallelized the execution with multiprocessing using two Queues and three types of processes:
Reader: Main process which reads the database and adds the read items to a task_queue
Worker: Pool of processes. Each worker gets an item from task_queue, processes the item, saves the results in an intermediate file stored in item_name/item_name.txt and puts the item_name in a completed_queue
Writer: Process which gets an item_name from completed_queue, gets the intermediate result from item_name/item_name.txt and writes it in results.txt
from multiprocessing import Pool, Process, Queue

class Computation():

    def __init__(self, K):
        self.task_queue = Queue()
        self.completed_queue = Queue()
        self.n_cpus = K

    def reader(self,):
        with open(db, "r") as db:
            ... # Read an item
            self.task_queue.put(item)

    def worker(self,):
        while True:
            item = self.task_queue.get(True)
            if item == "STOP":
                break
            self.process_item(item)

    def writer_process(self,):
        while True:
            f = self.completed_queue.get(True)
            if f == "DONE":
                break
            self.write_f(f)

    def run(self,):
        pool = Pool(n_cpus, self.worker, args=())
        writer = Process(target=self.writer_process, args=())
        writer.start()
        self.reader()
        pool.close()
        pool.join()
        self.completed_queue.put("DONE")
        writer.join()
The code works, but it seems that sometimes the writer or the pool stops working (or they are very slow). Is a deadlock possible in this scenario?
There are a couple of issues with your code. First, by using the queues as you are, you are in effect creating your own process pool and have no need for using the multiprocessing.Pool class at all. You are using a pool initializer as an actual pool worker and it's a bit of a misuse of this class; you would be better off to just use regular Process instances (my opinion, anyway).
Second, although it is well and good that you are putting the message DONE on the completed_queue to signal the writer_process to terminate, you have not done similarly for the self.n_cpus worker processes, which are looking for 'STOP' messages; therefore the reader function needs to put self.n_cpus STOP messages in the task queue:
from multiprocessing import Process, Queue

class Computation():

    def __init__(self, K):
        self.task_queue = Queue()
        self.completed_queue = Queue()
        self.n_cpus = K

    def reader(self,):
        with open(db, "r") as db:
            ... # Read an item
            self.task_queue.put(item)
        # signal to the worker processes to terminate:
        for _ in range(self.n_cpus):
            self.task_queue.put('STOP')

    def worker(self,):
        while True:
            item = self.task_queue.get(True)
            if item == "STOP":
                break
            self.process_item(item)

    def writer_process(self,):
        while True:
            f = self.completed_queue.get(True)
            if f == "DONE":
                break
            self.write_f(f)

    def run(self):
        processes = [Process(target=self.worker) for _ in range(self.n_cpus)]
        for p in processes:
            p.start()
        writer = Process(target=self.writer_process, args=())
        writer.start()
        self.reader()
        for p in processes:
            p.join()
        self.completed_queue.put("DONE")
        writer.join()
Personally, instead of using 'STOP' and 'DONE' as the sentinel messages, I would use None instead, assuming that is not a valid actual message. I have tested the above code where reader just processed strings in a list and self.process_item(item) simply appended ' done' to each of those strings and put the modified string on the completed_queue, and replaced self.write_f in the writer_process with a print call. I did not see any problems with the code as is.
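A minimal sketch of that None-sentinel variant, with process_item and write_f reduced to stand-ins so it runs on its own:

from multiprocessing import Process, Queue

def worker(task_queue, completed_queue):
    while True:
        item = task_queue.get()
        if item is None:          # sentinel: no more work
            break
        completed_queue.put(item) # stand-in for process_item + handoff

def writer(completed_queue):
    while True:
        f = completed_queue.get()
        if f is None:             # sentinel: all workers are done
            break
        print(f)                  # stand-in for write_f

if __name__ == '__main__':
    n_cpus = 3
    task_queue, completed_queue = Queue(), Queue()
    workers = [Process(target=worker, args=(task_queue, completed_queue)) for _ in range(n_cpus)]
    w = Process(target=writer, args=(completed_queue,))
    for p in workers:
        p.start()
    w.start()
    for item in ['a', 'b', 'c', 'd']:
        task_queue.put(item)
    for _ in range(n_cpus):
        task_queue.put(None)      # one sentinel per worker
    for p in workers:
        p.join()
    completed_queue.put(None)     # now the writer can stop
    w.join()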
Update to use a Managed Queue
Disclaimer: I have had no experience using mpi4py and have no idea how the queue proxies would get distributed across different computers. The above code may not be sufficient, as suggested by the following article, How to share mutliprocessing queue object between multiple computers. However, that code is creating instances of Queue.Queue (that code is Python 2 code) and not the proxies that are returned by the multiprocessing.SyncManager. The documentation on this is very poor. Try the change below to see if it works better (it will be slower).
Because the queues are now proxies returned by manager.Queue(), I have had to rearrange the code a bit; the queues are being passed explicitly as arguments to the process functions:
from multiprocessing import Process, Manager

class Computation():

    def __init__(self, K):
        self.n_cpus = K

    def reader(self, task_queue):
        with open(db, "r") as db:
            ... # Read an item
            task_queue.put(item)
        # signal to the worker processes to terminate:
        for _ in range(self.n_cpus):
            task_queue.put('STOP')

    def worker(self, task_queue, completed_queue):
        while True:
            item = task_queue.get(True)
            if item == "STOP":
                break
            self.process_item(item)

    def writer_process(self, completed_queue):
        while True:
            f = completed_queue.get(True)
            if f == "DONE":
                break
            self.write_f(f)

    def run(self):
        with Manager() as manager:
            task_queue = manager.Queue()
            completed_queue = manager.Queue()
            processes = [Process(target=self.worker, args=(task_queue, completed_queue)) for _ in range(self.n_cpus)]
            for p in processes:
                p.start()
            writer = Process(target=self.writer_process, args=(completed_queue,))
            writer.start()
            self.reader(task_queue)
            for p in processes:
                p.join()
            completed_queue.put("DONE")
            writer.join()

Python multiprocessing processes terminating?

I've read a number of answers here on Stackoverflow about Python multiprocessing, and I think this one is the most useful for my purposes: python multiprocessing queue implementation.
Here is what I'd like to do: poll the database for new work, put it in the queue and have 4 processes continuously do the work. What I'm unclear on is what happens when an item in the queue is done being processed. In the question above, the process terminates when the queue is empty. However, in my case, I'd just like to keep waiting until there is data in the queue. So do I just sleep and periodically check the queue? So my worker processes will never die? Is that good practice?
def mp_worker(queue):
    while True:
        if (queue.qsize() == 0):
            time.sleep(20)
        else:
            db_record = queue.get()
            process_file(db_record)

def mp_handler():
    num_workers = 4
    processes = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()

if __name__ == '__main__':
    db_conn = db.create_postgre_connection(DB_CONFIG)
    while True:
        db_records = db.retrieve_received_files(DB_CONN)
        if (len(db_records) > 0):
            for db_record in db_records:
                queue.put(db_record)
            mp_handler()
        else:
            time.sleep(20)
    db_conn.close()
Does it make sense?
Thanks.
Figured it out. Workers have to die, since otherwise they never return. But I start a new set of workers when there is data anyway, so that's not a problem. Updated code:
def mp_worker(queue):
    while queue.qsize() > 0:
        db_record = queue.get()
        process_file(db_record)

def mp_handler():
    num_workers = 4
    if (queue.qsize() < num_workers):
        num_workers = queue.qsize()
    processes = [Process(target=mp_worker, args=(queue,)) for _ in range(num_workers)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()

if __name__ == '__main__':
    while True:
        db_records = db.retrieve_received_files(DB_CONN)
        print(db_records)
        if (len(db_records) > 0):
            for db_record in db_records:
                queue.put(db_record)
            mp_handler()
        else:
            time.sleep(20)
    DB_CONN.close()
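One caveat with draining a queue by checking qsize() first: two workers can both see a non-empty queue, and one of them then blocks forever in get() after the other takes the last item. A minimal sketch of a safer drain loop using get_nowait (process_file is assumed from the question):

from queue import Empty  # on Python 2 this is: from Queue import Empty

def mp_worker(queue):
    # Drain the queue without blocking; exit once it is empty.
    while True:
        try:
            db_record = queue.get_nowait()
        except Empty:
            return
        process_file(db_record)  # assumed from the question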

Timeouts for multiprocessing?

I've searched StackOverflow and although I've found many questions on this, I haven't found an answer that fits my situation; I'm not a strong Python programmer, so I haven't been able to adapt the answers to fit my need.
I've looked here to no avail:
kill a function after a certain time in windows
Python: kill or terminate subprocess when timeout
signal.alarm replacement in Windows [Python]
I am using multiprocessing to run multiple SAP windows at once to pull reports. This is set up to run on a schedule every 5 minutes. Every once in a while, one of the reports gets stalled due to the GUI interface and never ends. I don't get an error or exception; it just stalls forever. What I would like is a timeout so that, during the part of the code that is executed in SAP, if it takes longer than 4 minutes, it times out, closes SAP, skips the rest of the code, and waits for the next scheduled report time.
I am using Python 2.7 on Windows.
import multiprocessing
from multiprocessing import Manager, Process
import time
import datetime

### OPEN SAP ###
def start_SAP():
    print 'opening SAP program'

### REPORTS IN SAP ###
def report_1(q, lock):
    while True: # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report 1'

def report_2(q, lock):
    while True: # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report 2'

def report_3(q, lock):
    while True: # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    time.sleep(60000) #mimicking the stall for report 3 that takes longer than allotted time
    print 'running report 3'

def report_N(q, lock):
    while True: # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report N'

### CLOSES SAP ###
def close_SAP():
    print 'closes SAP'

def format_file():
    print 'formatting files'

def multi_daily_pull():
    lock = multiprocessing.Lock() # creating a lock in multiprocessing
    shared_list = range(6) # creating a shared list for all functions to use
    q = multiprocessing.Queue() # creating an empty queue in multiprocessing
    for n in shared_list: # putting list into the queue
        q.put(n)
    print 'Starting process at ', time.strftime('%m/%d/%Y %H:%M:%S')
    print 'Starting SAP Pulls at ', time.strftime('%m/%d/%Y %H:%M:%S')
    StartSAP = Process(target=start_SAP)
    StartSAP.start()
    StartSAP.join()
    report1 = Process(target=report_1, args=(q, lock))
    report2 = Process(target=report_2, args=(q, lock))
    report3 = Process(target=report_3, args=(q, lock))
    reportN = Process(target=report_N, args=(q, lock))
    report1.start()
    report2.start()
    report3.start()
    reportN.start()
    report1.join()
    report2.join()
    report3.join()
    reportN.join()
    EndSAP = Process(target=close_SAP)
    EndSAP.start()
    EndSAP.join()
    formatfile = Process(target=format_file)
    formatfile.start()
    formatfile.join()

if __name__ == '__main__':
    multi_daily_pull()
One way to do what you want would be to use the optional timeout argument that the Process.join() method accepts. This will make it block the calling thread for at most that length of time.
I also set the daemon attribute of each Process instance so your main thread will be able to terminate even if one of the processes it started is still "running" (or has hung up).
One final point: you don't need a multiprocessing.Lock to control access to a multiprocessing.Queue, because it handles that aspect of things automatically, so I removed it. You may still want to have one for some other reason, such as controlling access to stdout so printing to it from the various processes doesn't overlap and mess up what is output to the screen.
import multiprocessing
from multiprocessing import Process
import time
import datetime

def start_SAP():
    print 'opening SAP program'

### REPORTS IN SAP ###
def report_1(q):
    while True: # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report 1 finished'

def report_2(q):
    while True: # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report 2 finished'

def report_3(q):
    while True: # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(60000) # Take longer than allotted time
            break
    print 'report 3 finished'

def report_N(q):
    while True: # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report N finished'

def close_SAP():
    print 'closing SAP'

def format_file():
    print 'formatting files'

def multi_daily_pull():
    shared_list = range(6) # creating a shared list for all functions to use
    q = multiprocessing.Queue() # creating an empty queue in multiprocessing
    for n in shared_list: # putting list into the queue
        q.put(n)
    print 'Starting process at ', time.strftime('%m/%d/%Y %H:%M:%S')
    print 'Starting SAP Pulls at ', time.strftime('%m/%d/%Y %H:%M:%S')
    StartSAP = Process(target=start_SAP)
    StartSAP.start()
    StartSAP.join()
    report1 = Process(target=report_1, args=(q,))
    report1.daemon = True
    report2 = Process(target=report_2, args=(q,))
    report2.daemon = True
    report3 = Process(target=report_3, args=(q,))
    report3.daemon = True
    reportN = Process(target=report_N, args=(q,))
    reportN.daemon = True
    report1.start()
    report2.start()
    report3.start()
    reportN.start()
    report1.join(30)
    report2.join(30)
    report3.join(30)
    reportN.join(30)
    EndSAP = Process(target=close_SAP)
    EndSAP.start()
    EndSAP.join()
    formatfile = Process(target=format_file)
    formatfile.start()
    formatfile.join()

if __name__ == '__main__':
    multi_daily_pull()
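One point worth noting about join(timeout): it only stops the parent from waiting; a hung report process keeps running (here it is a daemon, so it is killed when the main process exits). If you want to actively stop it after the timeout instead, a minimal sketch under that assumption:

report3.join(4 * 60)        # wait at most 4 minutes
if report3.is_alive():      # still stuck in SAP
    report3.terminate()     # force it to stop
    report3.join()          # reap the terminated process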

Cannot obtain values while parallelizing 2 for loops

I am trying to run the following snippet, which appends data to the lists 'tests1' and 'tests2'. But when I print 'tests1' and 'tests2', the displayed lists are empty. Anything incorrect here?
import multiprocessing as mp

tests1 = []
tests2 = []

def func1():
    for i in range(25,26):
        tests1.append(test_loader.get_tests(test_prefix=new_paths[i], tags=params.get('tags', None),
                      exclude=params.get('exclude', False)))

def func2():
    for i in range(26,27):
        tests2.append(test_loader.get_tests(test_prefix=new_paths[i], tags=params.get('tags', None),
                      exclude=params.get('exclude', False)))

p1 = mp.Process(target=func1)
p2 = mp.Process(target=func2)
p1.start()
p2.start()
p1.join()
p2.join()
print tests1
print tests2
The worker processes don't actually share the same object. It gets copied (pickled).
You can send values between processes using a multiprocessing.Queue (or by various other means). See my simple example (in which I've made your tests into integers for simplicity).
from multiprocessing import Process, Queue

def add_tests1(queue):
    for i in range(10):
        queue.put(i)
    queue.put(None)

def add_tests2(queue):
    for i in range(100,110):
        queue.put(i)
    queue.put(None)

def run_tests(queue):
    while True:
        test = queue.get()
        if test is None:
            break
        print test

if __name__ == '__main__':
    queue1 = Queue()
    queue2 = Queue()
    add_1 = Process(target = add_tests1, args = (queue1,))
    add_2 = Process(target = add_tests2, args = (queue2,))
    run_1 = Process(target = run_tests, args = (queue1,))
    run_2 = Process(target = run_tests, args = (queue2,))
    add_1.start(); add_2.start(); run_1.start(); run_2.start()
    add_1.join(); add_2.join(); run_1.join(); run_2.join()
Note that the parent program can also access the queues.
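If the goal is specifically to get the appended results back into the parent (as in the original tests1 list), the worker can put its results on a queue and the parent can drain it. A minimal sketch, again reduced to simple values (the real get_tests(...) call would replace the stand-in):

from multiprocessing import Process, Queue

def func1(results):
    for i in range(25, 26):
        results.put(i)   # stand-in for the real get_tests(...) result
    results.put(None)    # sentinel: nothing more to send

if __name__ == '__main__':
    results = Queue()
    p1 = Process(target=func1, args=(results,))
    p1.start()
    tests1 = []
    while True:          # parent drains the queue before joining
        item = results.get()
        if item is None:
            break
        tests1.append(item)
    p1.join()
    print tests1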
