python how to run process in detached mode

Here is an example:
from multiprocessing import Process
import time

def func():
    print('sub process is running')
    time.sleep(5)
    print('sub process finished')

if __name__ == '__main__':
    p = Process(target=func)
    p.start()
    print('done')
What I expect is that the main process terminates right after it starts the subprocess. But after printing 'done', the terminal is still waiting. Is there any way to make the main process exit right after printing 'done' instead of waiting for the subprocess? I'm confused here because I'm not calling p.join().

Python will not exit while a non-daemon process is still running.
By setting the daemon attribute before the start() call, you can make the process daemonic.
p = Process(target=func)
p.daemon = True # <-----
p.start()
print('done')
NOTE: There will be no 'sub process finished' message printed, because the main process terminates its daemonic children at exit. This may not be what you want.
In that case, you should do a double fork:
import os
import time
from multiprocessing import Process

def func():
    if os.fork() != 0:  # <--
        return          # <--
    print('sub process is running')
    time.sleep(5)
    print('sub process finished')

if __name__ == '__main__':
    p = Process(target=func)
    p.start()
    p.join()
    print('done')

Following the excellent answer from @falsetru, I wrote out a quick generalization in the form of a decorator.
import os
from multiprocessing import Process

def detachify(func):
    """Decorate a function so that its calls are async in a detached process.

    Usage
    -----

    .. code::

        import time

        @detachify
        def f(message):
            time.sleep(5)
            print(message)

        f('Async and detached!!!')
    """
    # create a process fork and run the function
    def forkify(*args, **kwargs):
        if os.fork() != 0:
            return
        func(*args, **kwargs)

    # wrapper to run the forkified function
    def wrapper(*args, **kwargs):
        proc = Process(target=lambda: forkify(*args, **kwargs))
        proc.start()
        proc.join()
        return

    return wrapper
Usage (copied from docstring):
import time

@detachify
def f(message):
    time.sleep(5)
    print(message)

f('Async and detached!!!')
Or if you like,
def f(message):
    time.sleep(5)
    print(message)

detachify(f)('Async and detached!!!')

Related

How to execute code just before terminating the process in python?

This question concerns multiprocessing in Python. I want to execute some code when I terminate the process, or more specifically, just before it is terminated. I'm looking for a solution that works like atexit.register does for a Python program.
I have a worker method that looks like this:
def worker():
    while True:
        print('work')
        time.sleep(2)
    return
I run it by:
proc = multiprocessing.Process(target=worker, args=())
proc.start()
My goal is to execute some extra code just before terminating it, which I do by:
proc.terminate()
Use signal handling and intercept SIGTERM:
import multiprocessing
import time
import sys
from signal import signal, SIGTERM

def before_exit(*args):
    print('Hello')
    sys.exit(0)  # don't forget to exit!

def worker():
    signal(SIGTERM, before_exit)
    time.sleep(10)

proc = multiprocessing.Process(target=worker, args=())
proc.start()
time.sleep(3)
proc.terminate()
This produces the desired output just before the subprocess terminates.
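Because the handler exits via sys.exit(0), a SystemExit is raised inside the worker, so a try/finally around the worker body is another way to run cleanup just before termination. A minimal sketch of that variation (the cleanup message is illustrative):

import multiprocessing
import sys
import time
from signal import signal, SIGTERM

def worker():
    # turn SIGTERM into SystemExit so the finally block below runs
    signal(SIGTERM, lambda *args: sys.exit(0))
    try:
        while True:
            print('work')
            time.sleep(2)
    finally:
        print('cleanup just before termination')

if __name__ == '__main__':
    proc = multiprocessing.Process(target=worker, args=())
    proc.start()
    time.sleep(3)
    proc.terminate()
    proc.join()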

Terminate external program run through asyncio with specific signal

I need to terminate external programs which are run from an asyncio Python script with a specific signal, say SIGTERM. My problem is that the programs always receive SIGINT even if I send them a SIGTERM signal.
Here is a test case; the source code for the fakeprg used in the test below can be found here.
import asyncio
import traceback
import os
import os.path
import sys
import time
import signal
import shlex
from functools import partial

class ExtProgramRunner:
    run = True
    processes = []

    def __init__(self):
        pass

    def start(self, loop):
        self.current_loop = loop
        self.current_loop.add_signal_handler(signal.SIGINT, lambda: asyncio.async(self.stop('SIGINT')))
        self.current_loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.async(self.stop('SIGTERM')))
        asyncio.async(self.cancel_monitor())
        asyncio.Task(self.run_external_programs())

    @asyncio.coroutine
    def stop(self, sig):
        print("Got {} signal".format(sig))
        self.run = False
        for process in self.processes:
            print("sending SIGTERM signal to the process with pid {}".format(process.pid))
            process.send_signal(signal.SIGTERM)
        print("Canceling all tasks")
        for task in asyncio.Task.all_tasks():
            task.cancel()

    @asyncio.coroutine
    def cancel_monitor(self):
        while True:
            try:
                yield from asyncio.sleep(0.05)
            except asyncio.CancelledError:
                break
        print("Stopping loop")
        self.current_loop.stop()

    @asyncio.coroutine
    def run_external_programs(self):
        os.makedirs("/tmp/files0", exist_ok=True)
        os.makedirs("/tmp/files1", exist_ok=True)
        # schedule tasks for execution
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files0 1000"))
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files1 5000"))

    @asyncio.coroutine
    def run_cmd_forever(self, cmd):
        args = shlex.split(cmd)
        while self.run:
            process = yield from asyncio.create_subprocess_exec(*args)
            self.processes.append(process)
            exit_code = yield from process.wait()
            for idx, p in enumerate(self.processes):
                if process.pid == p.pid:
                    self.processes.pop(idx)
            print("External program '{}' exited with exit code {}, relaunching".format(cmd, exit_code))

def main():
    loop = asyncio.get_event_loop()
    try:
        daemon = ExtProgramRunner()
        loop.call_soon(daemon.start, loop)
        # start main event loop
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    except asyncio.CancelledError as exc:
        print("asyncio.CancelledError")
    except Exception as exc:
        print(exc, file=sys.stderr)
        print("====", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
    finally:
        print("Stopping daemon...")
        loop.close()

if __name__ == '__main__':
    main()
The reason for this is: when you start your Python program (the parent) and it starts its /tmp/fakeprg processes (the children), they are all separate processes, each with its own pid, but they all run in the same foreground process group. Your shell is bound to this group, so when you hit Ctrl-C (SIGINT), Ctrl-Y (SIGTSTP) or Ctrl-\ (SIGQUIT), the signal is sent to every process in the foreground process group.
In your code this happens before the parent can even send SIGTERM to its children through send_signal, so that line ends up signalling an already dead process (and should fail, so IMO that's an issue with asyncio).
To solve that, you can explicitly put your child process into a separate process group, like this:
asyncio.create_subprocess_exec(*args, preexec_fn=os.setpgrp)
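For illustration, here is a minimal sketch of that fix using the modern async/await syntax (the sleep command is just a hypothetical stand-in for /tmp/fakeprg):

import asyncio
import os
import signal

async def main():
    # put the child in its own process group, so the shell's Ctrl-C
    # (delivered to the foreground group) no longer reaches it
    proc = await asyncio.create_subprocess_exec(
        "sleep", "30",
        preexec_fn=os.setpgrp,   # start_new_session=True also works
    )
    await asyncio.sleep(1)
    proc.send_signal(signal.SIGTERM)   # the child now sees SIGTERM, not SIGINT
    await proc.wait()

asyncio.run(main())

With the child in its own group, the only signals it receives are the ones the parent sends explicitly.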

interrupt python multiprocessing.Process using signals in Windows

I have a Python script that spawns a new process using the multiprocessing.Process class. This process is supposed to run forever to monitor stuff. On Unix I can use os.kill() to send a signal to that specific process and signal.signal(...) within that process to implement my specific interrupt handler. On Windows, things don't work.
I read how to do it using Popen. Can I specify the CREATE_NEW_PROCESS_GROUP flag for the Process class as well? And how?
Here is my example code:
import multiprocessing as mp
import time
import signal
import os
import platform

def my_h(signal, frame):
    print("received signal", signal)
    raise InterruptedError

def a_task():
    signal.signal(signal.SIGINT, my_h)
    print("this is 'a_task'", os.getpid())
    try:
        while True:
            print(time.time())
            time.sleep(1)
    except Exception as e:
        print(type(e), e)
    print("'a_task' is at end")

if __name__ == '__main__':
    p = mp.Process(target=a_task)
    p.start()
    time.sleep(1)
    if platform.system() == 'Windows':
        print("send CTRL_C_EVENT")
        os.kill(p.pid, signal.CTRL_C_EVENT)
    elif platform.system() == 'Linux':
        print("send SIGINT")
        os.kill(p.pid, signal.SIGINT)
    time.sleep(3)
    try:
        os.kill(p.pid, signal.SIGTERM)
    except:
        pass
I found a workaround that sort of implements signaling using the multiprocessing.Event class.
The key was finding the interrupt_main() method (which lives in thread in Python 2 and _thread in Python 3), which raises KeyboardInterrupt in the main thread, i.e. in the process I want to interrupt.
import multiprocessing as mp
import time
import signal
import os
import threading
import _thread

def interrupt_handler(interrupt_event):
    print("before wait")
    interrupt_event.wait()
    print("after wait")
    _thread.interrupt_main()

def a_task(interrupt_event, *args):
    task = threading.Thread(target=interrupt_handler, args=(interrupt_event,))
    task.start()
    print("this is 'a_task'", os.getpid())
    try:
        while True:
            print(time.time())
            time.sleep(1)
    except KeyboardInterrupt:
        print("got KeyboardInterrupt")
    print("'a_task' is at end")

if __name__ == '__main__':
    interrupt_event = mp.Event()
    p = mp.Process(target=a_task, args=(interrupt_event, tuple()))
    p.start()
    time.sleep(2)
    print("set interrupt_event")
    interrupt_event.set()
    time.sleep(3)
    try:
        os.kill(p.pid, signal.SIGTERM)
    except:
        pass

Extending mp.Process in Python 3

import multiprocessing as mp
import time as t

class MyProcess(mp.Process):
    def __init__(self, target, args, name):
        mp.Process.__init__(self, target=target, args=args)
        self.exit = mp.Event()
        self.name = name
        print("{0} initiated".format(self.name))

    def run(self):
        while not self.exit.is_set():
            pass
        print("Process {0} exited.".format(self.name))

    def shutdown(self):
        print("Shutdown initiated for {0}.".format(self.name))
        self.exit.set()

def f(x):
    while True:
        print(x)
        x = x+1

if __name__ == "__main__":
    p = MyProcess(target=f, args=[3], name="function")
    p.start()
    # p.join()
    t.sleep(2)
    p.shutdown()
I'm trying to extend the multiprocessing.Process class to add a shutdown method, so that I can exit a function that might have to run for an undefined amount of time. Following the instructions from Python Multiprocessing Exit Elegantly How? and adding the argument passing I came up with myself, I only get this output:
function initiated
Shutdown initiated for function.
Process function exited.
But there is no actual output from the method f(x). It seems the process target never gets started. I'm obviously doing something wrong but just can't figure out what. Any ideas?
Thanks!
The sane way to handle this situation is, where possible, to have the background task cooperate in the exit mechanism by periodically checking the exit event. For that, there's no need to subclass Process: you can rewrite your background task to include that check. For example, here's your code rewritten using that approach:
import multiprocessing as mp
import time as t

def f(x, exit_event):
    while not exit_event.is_set():
        print(x)
        x = x+1
    print("Exiting")

if __name__ == "__main__":
    exit_event = mp.Event()
    p = mp.Process(target=f, args=(3, exit_event), name="function")
    p.start()
    t.sleep(2)
    exit_event.set()
    p.join()
If that's not an option (for example because you can't modify the code that's being run in the background job), then you can use the Process.terminate method. But you should be aware that using it is dangerous: the child process won't have an opportunity to clean up properly, so for example if it's shut down while holding a multiprocessing lock, no other process will be able to acquire that lock, giving a risk of deadlock. It's far better to have the child cooperate in the shutdown if possible.
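As a rough illustration of that fallback, here is a minimal sketch (the worker name is made up; on Unix, terminate() delivers SIGTERM and the negative exit code reflects that):

import multiprocessing as mp
import time

def uncooperative_worker():
    # stands in for background code you cannot modify
    while True:
        time.sleep(1)

if __name__ == "__main__":
    p = mp.Process(target=uncooperative_worker)
    p.start()
    time.sleep(2)
    p.terminate()       # forcibly stop the child; no cleanup runs inside it
    p.join()            # reap the child so it does not linger as a zombie
    print(p.exitcode)   # -15 on Unix: killed by SIGTERM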
The solution to this problem is to call super().run() in your class's run method.
Of course, your function will then run forever because of the while True loop, and the event will no longer end it.
You can use the Process.terminate() method to end the process instead.
import multiprocessing as mp
import time as t

class MyProcess(mp.Process):
    def __init__(self, target, args, name):
        mp.Process.__init__(self, target=target, args=args)
        self.name = name
        print("{0} initiated".format(self.name))

    def run(self):
        print("Process {0} started.".format(self.name))
        super().run()

    def shutdown(self):
        print("Shutdown initiated for {0}.".format(self.name))
        self.terminate()

def f(x):
    while True:
        print(x)
        t.sleep(1)
        x += 1

if __name__ == "__main__":
    p = MyProcess(target=f, args=(3,), name="function")
    p.start()
    # p.join()
    t.sleep(5)
    p.shutdown()

python subprocess: find out in the signal handler which child terminated

I have a Python script that starts multiple commands using subprocess.Popen. I added a signal handler that is called when a child exits, and I want to check which child terminated. I can do this by iterating over all children:
#!/usr/bin/env python
import subprocess
import signal

procs = []

def signal_handler(signum, frame):
    for proc in procs:
        proc.poll()
        if proc.returncode is not None:
            print "%s returned %s" % (proc.pid, proc.returncode)
            procs.remove(proc)

def main():
    signal.signal(signal.SIGCHLD, signal_handler)
    procs.append(subprocess.Popen(["/bin/sleep", "2"]))
    procs.append(subprocess.Popen(["/bin/sleep", "5"]))
    # wait so the main process does not terminate immediately
    procs[1].wait()

if __name__ == "__main__":
    main()
I would like to avoid querying all subprocesses. Is there a way to determine in the signal handler which child terminated?
You could achieve a similar result using multiprocessing. You could use the threading package instead if you didn't want to spawn the extra processes. It has pretty much the exact same interface. Basically, each subprocess call happens in a new process, which then launches your sleep processes.
import subprocess
import multiprocessing

def callback(result):
    # do something with result
    pid, returncode = result
    print pid, returncode

def call_process(cmd):
    p = subprocess.Popen(cmd)
    p.wait()
    return p.pid, p.returncode

def main():
    pool = multiprocessing.Pool()
    pool.apply_async(call_process, [["/bin/sleep", "2"]], callback=callback)
    pool.apply_async(call_process, [["/bin/sleep", "5"]], callback=callback)
    pool.close()
    pool.join()

main()
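If you would rather not spawn the extra processes, the thread-based variant mentioned above keeps essentially the same interface; a minimal sketch (written for Python 3) using multiprocessing.dummy, which exposes the Pool API on top of threads:

import subprocess
from multiprocessing.dummy import Pool  # same Pool API, backed by threads

def callback(result):
    pid, returncode = result
    print(pid, returncode)

def call_process(cmd):
    p = subprocess.Popen(cmd)
    p.wait()
    return p.pid, p.returncode

def main():
    pool = Pool()
    pool.apply_async(call_process, [["/bin/sleep", "2"]], callback=callback)
    pool.apply_async(call_process, [["/bin/sleep", "5"]], callback=callback)
    pool.close()
    pool.join()

main()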
