python multiprocessing main process waiting for daemon process

I'm trying to understand the multiprocessing module. Below is my code:
from multiprocessing import Process, current_process
#from time import time
import time

def work(delay):
    p = current_process()
    print p.name, p.pid, p.deamon
    time.sleep(delay)
    print 'Finished daemon work'

def main():
    print 'Starting Main Process'
    p = Process(target=work, args=(2,))
    p.deamon = True
    p.start()
    print 'Exiting Main Process'

if __name__ == '__main__':
    main()
Output:
Starting Main Process
Exiting Main Process
Process-1 7863 True
Finished daemon work
I expect the main process to exit before the daemon process (which sleeps for 2 seconds). Since the main process exits, the daemon process should also exit. But the output is confusing me.
Expected Output:
Starting Main Process
Exiting Main Process
Process-1 7863 True
Is my understanding of the multiprocessing module wrong?
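Note the spelling: the code above sets p.deamon, while the real attribute is Process.daemon, so the child here is never actually daemonic and the parent waits for it at shutdown. For comparison, here is a minimal sketch of the documented behaviour with the attribute spelled correctly (my example, Python 3, assuming CPython's multiprocessing; not from the original post):

from multiprocessing import Process
import time

def work(delay):
    time.sleep(delay)
    print('Finished daemon work')  # never printed: the parent exits first

def main():
    print('Starting Main Process')
    p = Process(target=work, args=(2,))
    p.daemon = True  # correct spelling: the child is terminated when the parent exits
    p.start()
    print('Exiting Main Process')

if __name__ == '__main__':
    main()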

Related

Python Process Pool signal handling

I have a multiprocessing.Pool running tasks that I want to exit gracefully in case of termination, by handling the SIGTERM signal.
This is my code example (using Python 3.9):
import os
import signal
import time
from multiprocessing import Pool

class SigTermException(Exception):
    pass

def sigtermhandler(signum, frame):
    raise SigTermException('sigterm')

def f():
    print(os.getpid())
    try:
        while True:
            print("loop")
            time.sleep(5)
    except SigTermException:
        print("Received SIGTERM")

def main():
    signal.signal(signal.SIGTERM, sigtermhandler)
    pool = Pool()
    pool.apply_async(f)
    print("wait 5")
    time.sleep(5)
    print("Terminating")
    pool.terminate()
    print("Joining")
    pool.join()
    print("Exiting")

if __name__ == '__main__':
    main()
I was expecting it to print:
...
Terminating
Received SIGTERM
Joining
Exiting
However, it seems it doesn't get past pool.terminate().
Here's an example:
wait 5
92363
loop
Terminating
loop
Received SIGTERM
Performing a ps, I see the following:
92362 pts/0 S+ 0:00 | | \_ python signal_pool.py
92363 pts/0 S+ 0:00 | | \_ python signal_pool.py
So it looks like the child process is still 'alive'.
I also tested the solution mentioned here, to no avail.
Any hints or help are appreciated.
Your worker function, f, runs forever, yet your main process sleeps for just 5 seconds and then calls terminate on the pool, which kills any running tasks. This contradicts your stated goal of having tasks exit gracefully upon receiving a SIGTERM, because as it stands they will not exit gracefully even in the absence of a SIGTERM.
So I would think the main process should wait as long as necessary for the submitted task or tasks to complete -- this is the usual situation, right? When I tried this and issued a kill -15, the worker function alone handled the signal; perhaps because the main process was simply blocked waiting for the submitted task to complete, the signal was never passed to the main process. I therefore did not need a try/except block in the main process.
import os
import signal
import time
from multiprocessing import Pool

class SigTermException(Exception):
    pass

def sigtermhandler(signum, frame):
    raise SigTermException('sigterm')

def f():
    print(os.getpid())
    try:
        while True:
            print("loop")
            time.sleep(5)
    except SigTermException:
        print("Received SIGTERM")

def main():
    signal.signal(signal.SIGTERM, sigtermhandler)
    pool = Pool()
    async_result = pool.apply_async(f)
    print("waiting for task to complete ...")
    async_result.get()  # wait for task to complete
    pool.close()
    print("Joining")
    pool.join()
    print("Exiting")

if __name__ == '__main__':
    main()
Printed:
waiting for task to complete ...
98
loop
Received SIGTERM
Joining
Exiting
You can also just do:
def main():
    signal.signal(signal.SIGTERM, sigtermhandler)
    pool = Pool()
    pool.apply_async(f)
    print("waiting for all tasks to complete ...")
    pool.close()
    pool.join()
    print("Exiting")

os._exit(1) does not kill non-daemonic sibling processes

I am writing a Python script which has two child processes. The main logic occurs in one process, and the other waits for some time and then kills the main process even if the logic is not done.
I read that calling os._exit(1) stops the interpreter, so the entire script is killed automatically. I've used it as shown below:
import os
import time
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Array

# Main process
def main_process(shared_variable):
    shared_variable.value = b"mainprc"
    time.sleep(20)
    print("Task finished normally.")
    os._exit(1)

# Timer process
def timer_process(shared_variable):
    threshold_time_secs = 5
    time.sleep(threshold_time_secs)
    print("Timeout reached")
    print("Shared variable", shared_variable.value)
    print("Task is shutdown.")
    os._exit(1)

if __name__ == "__main__":
    lock = Lock()
    shared_variable = Array('c', b"initial", lock=lock)
    process_main = Process(target=main_process, args=(shared_variable,))
    process_timer = Process(target=timer_process, args=(shared_variable,))
    process_main.start()
    process_timer.start()
    process_timer.join()
The timer process calls os._exit, but the script still waits for the main process to print "Task finished normally." before exiting.
How do I make it so that, if the timer process exits, the entire program shuts down (including the main process)?
Thanks.
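For what it's worth, os._exit in one child only ends that child, never its siblings. One common pattern (a sketch of my own, not from the original thread) is to let the parent do the killing: join the timer process, then terminate the main-logic process from the parent:

# Sketch (not from the original post): the parent supervises both children.
from multiprocessing import Process
import time

def main_logic():
    time.sleep(20)
    print("Task finished normally.")

def timer():
    time.sleep(5)
    print("Timeout reached")

if __name__ == "__main__":
    process_main = Process(target=main_logic)
    process_timer = Process(target=timer)
    process_main.start()
    process_timer.start()
    process_timer.join()          # wait for the timer to elapse
    if process_main.is_alive():   # logic still running past the deadline
        process_main.terminate()  # send SIGTERM to the main-logic child
        process_main.join()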

How to execute code just before terminating the process in python?

This question concerns multiprocessing in Python. I want to execute some code when I terminate the process, to be more specific, just before it is terminated. I'm looking for a solution which works like atexit.register does for a Python program.
I have a method worker which looks like this:
def worker():
    while True:
        print('work')
        time.sleep(2)
    return
I run it by:
proc = multiprocessing.Process(target=worker, args=())
proc.start()
My goal is to execute some extra code just before terminating it, which I do by:
proc.terminate()
Use signal handling and intercept SIGTERM:
import multiprocessing
import time
import sys
from signal import signal, SIGTERM

def before_exit(*args):
    print('Hello')
    sys.exit(0)  # don't forget to exit!

def worker():
    signal(SIGTERM, before_exit)
    time.sleep(10)

proc = multiprocessing.Process(target=worker, args=())
proc.start()
time.sleep(3)
proc.terminate()
This produces the desired output just before the subprocess terminates.

exiting python with hanged thread

When you import and use a package, that package can start non-daemon threads. Until those threads are finished, Python cannot exit properly (for example with sys.exit(0)). Imagine that thread t below comes from some package. When an unhandled exception occurs in the main thread, you want to terminate, but this won't exit immediately; it will wait 60 seconds until the thread terminates.
import sys
import time, threading

def main():
    t = threading.Thread(target=time.sleep, args=(60,))
    t.start()
    a = 5 / 0

if __name__ == '__main__':
    try:
        main()
    except:
        sys.exit(1)
So I came up with two options: replace sys.exit(1) with os._exit(1), or enumerate all threads and make them daemonic. Both seem to work, but which do you think is better? os._exit won't flush stdio buffers, while setting the daemon attribute on running threads seems like a hack, and maybe it isn't guaranteed to work all the time.
import sys
import time, threading

def main():
    t = threading.Thread(target=time.sleep, args=(60,))
    t.start()
    a = 5 / 0

if __name__ == '__main__':
    try:
        main()
    except:
        for t in threading.enumerate():
            if not t.daemon and t.name != "MainThread":
                t._daemonic = True
        sys.exit(1)
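If the main worry about os._exit is that it skips flushing stdio buffers, one workaround is to flush explicitly before the hard exit. A minimal sketch of my own (hard_exit is just an illustrative name, not from the original post):

import os
import sys

def hard_exit(code=1):
    # Flush what the interpreter would otherwise flush at shutdown,
    # then exit immediately without waiting for non-daemon threads.
    sys.stdout.flush()
    sys.stderr.flush()
    os._exit(code)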

python subprocess: find out in the signal handler which child terminated

I have a Python script which starts multiple commands using subprocess.Popen. I added a signal handler which is called when a child exits. I want to check which child terminated. I can do this by iterating over all children:
#!/usr/bin/env python
import subprocess
import signal

procs = []

def signal_handler(signum, frame):
    for proc in procs:
        proc.poll()
        if proc.returncode is not None:
            print "%s returned %s" % (proc.pid, proc.returncode)
            procs.remove(proc)

def main():
    signal.signal(signal.SIGCHLD, signal_handler)
    procs.append(subprocess.Popen(["/bin/sleep", "2"]))
    procs.append(subprocess.Popen(["/bin/sleep", "5"]))
    # wait so the main process does not terminate immediately
    procs[1].wait()

if __name__ == "__main__":
    main()
I would like to avoid querying all subprocesses. Is there a way to determine in the signal handler which child terminated?
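For what it's worth, on POSIX systems the handler itself can reap whichever children exited, without polling every Popen object, using os.waitpid with os.WNOHANG. A sketch of my own (Python 3, Unix only; not the answer below):

import os

def signal_handler(signum, frame):
    # Reap every child that has exited since the last SIGCHLD.
    # waitpid(-1, WNOHANG) returns (0, 0) when no exited child remains.
    while True:
        try:
            pid, status = os.waitpid(-1, os.WNOHANG)
        except ChildProcessError:  # no children left at all
            break
        if pid == 0:
            break
        print("pid %s exited with status %s" % (pid, status))

One caveat: reaping children here competes with subprocess's own poll()/wait() bookkeeping, so the two approaches shouldn't be mixed on the same children.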
You could achieve a similar result using multiprocessing. You could use the threading package instead if you didn't want to spawn the extra processes. It has pretty much the exact same interface. Basically, each subprocess call happens in a new process, which then launches your sleep processes.
import subprocess
import multiprocessing

def callback(result):
    # do something with result
    pid, returncode = result
    print pid, returncode

def call_process(cmd):
    p = subprocess.Popen(cmd)
    p.wait()
    return p.pid, p.returncode

def main():
    pool = multiprocessing.Pool()
    pool.apply_async(call_process, [["/bin/sleep", "2"]], callback=callback)
    pool.apply_async(call_process, [["/bin/sleep", "5"]], callback=callback)
    pool.close()
    pool.join()

main()
