interrupt python multiprocessing.Process using signals in Windows - python

I have a Python script that spawns a new process using the multiprocessing.Process class. This process is supposed to run forever to monitor stuff. On Unix I can use os.kill() to send a signal to that specific process, and signal.signal(...) within that process to install my interrupt handler. On Windows this does not work.
I read how to do it using Popen. Can I specify the CREATE_NEW_PROCESS_GROUP flag for the Process class as well, and how?
Here is my example code:
import multiprocessing as mp
import time
import signal
import os
import platform

def my_h(signal, frame):
    print("received signal", signal)
    raise InterruptedError

def a_task():
    signal.signal(signal.SIGINT, my_h)
    print("this is 'a_task'", os.getpid())
    try:
        while True:
            print(time.time())
            time.sleep(1)
    except Exception as e:
        print(type(e), e)
    print("'a_task' is at end")

if __name__ == '__main__':
    p = mp.Process(target=a_task)
    p.start()
    time.sleep(1)
    if platform.system() == 'Windows':
        print("send CTRL_C_EVENT")
        os.kill(p.pid, signal.CTRL_C_EVENT)
    elif platform.system() == 'Linux':
        print("send SIGINT")
        os.kill(p.pid, signal.SIGINT)
    time.sleep(3)
    try:
        os.kill(p.pid, signal.SIGTERM)
    except OSError:
        pass
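For reference, the Popen-based approach I read about would look roughly like this (an untested sketch; "worker.py" stands in for the monitoring script):

import subprocess
import signal
import sys

# CREATE_NEW_PROCESS_GROUP puts the child into its own process group,
# so a CTRL_BREAK_EVENT can be delivered to it without hitting the parent.
proc = subprocess.Popen(
    [sys.executable, "worker.py"],  # "worker.py" is a placeholder
    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
)
# ... later, to interrupt it:
proc.send_signal(signal.CTRL_BREAK_EVENT)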

I found a workaround: implementing the signaling with the multiprocessing.Event class.
The key was finding the interrupt_main() function (in the thread module on Python 2, _thread on Python 3), which raises KeyboardInterrupt in the main thread, which is exactly the process I want to interrupt.
import multiprocessing as mp
import time
import signal
import os
import threading
import _thread

def interrupt_handler(interrupt_event):
    print("before wait")
    interrupt_event.wait()
    print("after wait")
    _thread.interrupt_main()

def a_task(interrupt_event, *args):
    task = threading.Thread(target=interrupt_handler, args=(interrupt_event,))
    task.start()
    print("this is 'a_task'", os.getpid())
    try:
        while True:
            print(time.time())
            time.sleep(1)
    except KeyboardInterrupt:
        print("got KeyboardInterrupt")
    print("'a_task' is at end")

if __name__ == '__main__':
    interrupt_event = mp.Event()
    p = mp.Process(target=a_task, args=(interrupt_event, tuple()))
    p.start()
    time.sleep(2)
    print("set interrupt_event")
    interrupt_event.set()
    time.sleep(3)
    try:
        os.kill(p.pid, signal.SIGTERM)
    except OSError:
        pass

Related

How do I make the input() function run if no response after a set amount of time? [duplicate]

I'm trying to find a way to shorten a time.sleep(600) if the user presses a key, without resorting to an ugly polling hack like:
key_pressed = False
for i in range(600):
    key_pressed = key_was_pressed()
    if not key_pressed:
        time.sleep(1)
    else:
        break
This is a cross-platform adaptation of an implementation using a signal.alarm interrupt (an approach that is not available on Windows). This code should work on Linux, macOS, and Windows. The third-party helper library readchar can be installed with pip install readchar.
import os
import signal
import sys
from threading import Timer
from readchar import readkey

def wait_for(key="x", timeout=600):
    pid = os.getpid()
    sig = signal.CTRL_C_EVENT if os.name == "nt" else signal.SIGINT
    timer = Timer(timeout, lambda: os.kill(pid, sig))
    print(f"waiting {timeout}s for user to press {key!r} ...")
    timer.start()  # spawn a worker thread to interrupt us later
    while True:
        k = readkey()
        print(f"received {k!r}")
        if k == key:
            timer.cancel()  # cancel the timer
            print("breaking")
            break

def main():
    try:
        wait_for(key=sys.argv[1], timeout=int(sys.argv[2]))
    except KeyboardInterrupt:
        print("user took too long")

if __name__ == "__main__":
    main()
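A typical invocation, assuming the script is saved as wait_for.py (a placeholder name), would wait up to 10 seconds for the user to press 'x':

python wait_for.py x 10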

(Unit) Test python signal handler

I have a simple Python service, where there is a loop that performs some action infinitely. On various signals, sys.exit(0) is called, which causes SystemExit to be raised and then some cleanup should happen if it can.
In a test, i.e. a standard unittest.TestCase, I would like to verify that this cleanup happens and the loop exits. However, I'm stuck on even getting the signal to be triggered / SystemExit to be raised.
# service.py
import signal
import sys
import time

def main():
    def signal_handler(signalnum, _):
        # How to get this block to run in a test?
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    while True:
        try:
            print("Some action here")
            time.sleep(10)
        except SystemExit:
            # How to get this block to run in a test?
            print("Some cleanup")
            break

if __name__ == '__main__':
    main()

How can the code enter the SystemExit handler / signal handler in the test environment? An alternative pattern would also be welcome.
You can trigger a SIGINT (or any signal) from another thread after some delay, which is received in the main thread. You can then assert on its effects just as in any other test, as below.
import os
import signal
import time
import threading
import unittest
from unittest.mock import patch

import service

class TestService(unittest.TestCase):

    @patch('service.print')
    def test_signal_handling(self, mock_print):
        pid = os.getpid()

        def trigger_signal():
            while len(mock_print.mock_calls) < 1:
                time.sleep(0.2)
            os.kill(pid, signal.SIGINT)

        thread = threading.Thread(target=trigger_signal)
        thread.daemon = True
        thread.start()

        service.main()

        self.assertEqual(mock_print.mock_calls[1][1][0], 'Some cleanup')

if __name__ == '__main__':
    unittest.main()
Let's refactor that to make it easier to test:
import signal
import sys
import time

def loop_body():
    try:
        print("Some action here")
    except:
        # clean up and re-raise
        print("Some cleanup")
        raise

def main():
    def signal_handler(signalnum, _):
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    while True:
        try:
            loop_body()
            time.sleep(10)
        except SystemExit:
            break

if __name__ == '__main__':
    main()
This doesn't make the signal-handling code itself easy to test, though. However, that code is so small, changes so rarely, and depends so strongly on the environment that it is possible, and perhaps even better, to test it manually.
For clarity, it could be useful to use a context manager, which is usually a good idea when you have setup/shutdown code. You don't mention the setup code, but my Crystal Ball (tm) tells me it exists. It could then be called like this:
try:
    with my_service() as service:
        while True:
            service.run()
            sleep(10)
except SystemExit:
    # perform graceful shutdown on signal
    pass
I'll leave the implementation of that context manager to you, but check out contextlib, which makes it easy and fun.
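For illustration, a minimal sketch of such a context manager built on contextlib (the Service class and the setup/cleanup prints are hypothetical stand-ins):

from contextlib import contextmanager

class Service:
    # hypothetical stand-in for the real service object
    def run(self):
        print("Some action here")

@contextmanager
def my_service():
    svc = Service()  # setup code would go here
    try:
        yield svc
    finally:
        print("Some cleanup")  # runs even when SystemExit unwinds the with block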

python how to run process in detached mode

Here is an example:
from multiprocessing import Process
import time

def func():
    print('sub process is running')
    time.sleep(5)
    print('sub process finished')

if __name__ == '__main__':
    p = Process(target=func)
    p.start()
    print('done')
What I expect is that the main process will terminate right after it starts the subprocess. But after printing 'done', the terminal is still waiting... Is there any way to make the main process exit right after printing 'done', instead of waiting for the subprocess? I'm confused because I'm not calling p.join().
Python will not exit while a non-daemon process is still alive.
By setting the daemon attribute before the start() call, you can make the process daemonic:
p = Process(target=func)
p.daemon = True  # <-----
p.start()
print('done')
NOTE: There will be no 'sub process finished' message printed, because the main process will terminate the sub-process at exit. This may not be what you want.
Instead, you can do a double fork: the child that multiprocessing starts forks a grandchild and returns immediately, so p.join() returns right away while the orphaned grandchild keeps running:
import os
import time
from multiprocessing import Process

def func():
    if os.fork() != 0:  # <-- the parent side of the fork returns immediately
        return          # <--
    print('sub process is running')
    time.sleep(5)
    print('sub process finished')

if __name__ == '__main__':
    p = Process(target=func)
    p.start()
    p.join()
    print('done')
Following the excellent answer from @falsetru, I wrote a quick generalization in the form of a decorator.
import os
from multiprocessing import Process

def detachify(func):
    """Decorate a function so that its calls are async in a detached process.

    Usage
    -----

    .. code::

        import time

        @detachify
        def f(message):
            time.sleep(5)
            print(message)

        f('Async and detached!!!')
    """
    # create a process fork and run the function
    def forkify(*args, **kwargs):
        if os.fork() != 0:
            return
        func(*args, **kwargs)

    # wrapper to run the forkified function
    def wrapper(*args, **kwargs):
        proc = Process(target=lambda: forkify(*args, **kwargs))
        proc.start()
        proc.join()
        return

    return wrapper
Usage (copied from the docstring):
import time

@detachify
def f(message):
    time.sleep(5)
    print(message)

f('Async and detached!!!')
Or if you like,
def f(message):
    time.sleep(5)
    print(message)

detachify(f)('Async and detached!!!')
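Note that os.fork() is POSIX-only, so the double-fork trick above will not work on Windows. There, one option is to start a detached child through subprocess instead (a rough sketch; "script.py" is a placeholder):

import subprocess
import sys

# DETACHED_PROCESS starts the child without inheriting our console;
# CREATE_NEW_PROCESS_GROUP keeps it out of our Ctrl-C process group.
flags = subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
subprocess.Popen([sys.executable, "script.py"], creationflags=flags)
print('done')  # the parent may now exit without waiting for the child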

Terminate external program run through asyncio with specific signal

I need to terminate external programs which run from an asyncio Python script with a specific signal, say SIGTERM. My problem is that the programs always receive SIGINT even if I send them SIGTERM.
Here is a test case; the source code for the fakeprg used in the test below can be found here.
import asyncio
import traceback
import os
import os.path
import sys
import time
import signal
import shlex
from functools import partial

class ExtProgramRunner:

    run = True
    processes = []

    def __init__(self):
        pass

    def start(self, loop):
        self.current_loop = loop
        self.current_loop.add_signal_handler(signal.SIGINT, lambda: asyncio.async(self.stop('SIGINT')))
        self.current_loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.async(self.stop('SIGTERM')))
        asyncio.async(self.cancel_monitor())
        asyncio.Task(self.run_external_programs())

    @asyncio.coroutine
    def stop(self, sig):
        print("Got {} signal".format(sig))
        self.run = False
        for process in self.processes:
            print("sending SIGTERM signal to the process with pid {}".format(process.pid))
            process.send_signal(signal.SIGTERM)
        print("Canceling all tasks")
        for task in asyncio.Task.all_tasks():
            task.cancel()

    @asyncio.coroutine
    def cancel_monitor(self):
        while True:
            try:
                yield from asyncio.sleep(0.05)
            except asyncio.CancelledError:
                break
        print("Stopping loop")
        self.current_loop.stop()

    @asyncio.coroutine
    def run_external_programs(self):
        os.makedirs("/tmp/files0", exist_ok=True)
        os.makedirs("/tmp/files1", exist_ok=True)
        # schedule tasks for execution
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files0 1000"))
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files1 5000"))

    @asyncio.coroutine
    def run_cmd_forever(self, cmd):
        args = shlex.split(cmd)
        while self.run:
            process = yield from asyncio.create_subprocess_exec(*args)
            self.processes.append(process)
            exit_code = yield from process.wait()
            for idx, p in enumerate(self.processes):
                if process.pid == p.pid:
                    self.processes.pop(idx)
            print("External program '{}' exited with exit code {}, relaunching".format(cmd, exit_code))

def main():
    loop = asyncio.get_event_loop()
    try:
        daemon = ExtProgramRunner()
        loop.call_soon(daemon.start, loop)
        # start main event loop
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    except asyncio.CancelledError as exc:
        print("asyncio.CancelledError")
    except Exception as exc:
        print(exc, file=sys.stderr)
        print("====", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
    finally:
        print("Stopping daemon...")
        loop.close()

if __name__ == '__main__':
    main()
The reason for this is: when you start your Python program (the parent) and it starts its /tmp/fakeprg processes (the children), they are all separate processes with their own pids, but they all run in the same foreground process group. Your shell is bound to this group, so when you hit Ctrl-C (SIGINT), Ctrl-Z (SIGTSTP), or Ctrl-\ (SIGQUIT), the signal is sent to every process in the foreground process group.
In your code this happens before the parent can even send the signal to its children through send_signal, so that line sends a signal to an already dead process (and should fail, so IMO that's an issue with asyncio).
To solve this, explicitly put your child processes into a separate process group, like this:
asyncio.create_subprocess_exec(*args, preexec_fn=os.setpgrp)
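On Python 3.2+, passing start_new_session=True (the documented subprocess equivalent of preexec_fn=os.setsid, which creates a new session and therefore a new process group) should achieve the same isolation:

# sketch: start_new_session is forwarded by asyncio to subprocess.Popen
process = yield from asyncio.create_subprocess_exec(*args, start_new_session=True)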

How to gracefully terminate python process on windows

I have a python 2.7 process running in the background on Windows 8.1.
Is there a way to gracefully terminate this process and perform cleanup on shutdown or log off?
Try using win32api.GenerateConsoleCtrlEvent.
I solved this for a multiprocessing python program here:
Gracefully Terminate Child Python Process On Windows so Finally clauses run
I tested this solution using subprocess.Popen, and it also works.
Here is a code example:
import time
import win32api
import win32con
from multiprocessing import Process

def foo():
    try:
        while True:
            print("Child process still working...")
            time.sleep(1)
    except KeyboardInterrupt:
        print("Child process: caught ctrl-c")

if __name__ == "__main__":
    p = Process(target=foo)
    p.start()
    time.sleep(2)
    print("sending ctrl c...")
    try:
        win32api.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0)
        while p.is_alive():
            print("Child process is still alive.")
            time.sleep(1)
    except KeyboardInterrupt:
        print("Main process: caught ctrl-c")
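If pywin32 is not available, the same console control event can be sent with ctypes from the standard library (a minimal sketch; group id 0 targets every process attached to the current console, including the sender):

import ctypes

CTRL_C_EVENT = 0  # same value as win32con.CTRL_C_EVENT
ctypes.windll.kernel32.GenerateConsoleCtrlEvent(CTRL_C_EVENT, 0)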
