Stopping Threads - getting invalid thread id - python

This question has been answered before, and I'm trying to implement the second solution in the first answer outlined here. (NB: the first solution does not concern me; my thread is running a server from an external library and can't be modified to check a flag.)
I've tried to implement the simplest case that corresponds to my circumstances: I have a class that spawns a thread, and that thread should be stopped externally (the thread never finishes naturally, as in this example). NB: _async_raise and ThreadWithExc are copied verbatim from the accepted answer to this question on SO:
import threading
import inspect
import ctypes
import time

# https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python
def _async_raise(tid, exctype):
    if not inspect.isclass(exctype):
        raise TypeError("Only types can be raised (not instances)")
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid,
                                                     ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")

# https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python
class ThreadWithExc(threading.Thread):
    def _get_my_tid(self):
        if not self.isAlive():
            raise threading.ThreadError("the thread is not active")
        if hasattr(self, "_thread_id"):
            return self._thread_id
        for tid, tobj in threading._active.items():
            if tobj is self:
                self._thread_id = tid
                return tid
        raise AssertionError("could not determine the thread's id")

    def raiseExc(self, exctype):
        _async_raise(self._get_my_tid(), exctype)

def work():
    while True:
        print('work')
        time.sleep(1)

class Server:
    def __init__(self):
        self.thread = ThreadWithExc(target=work)

    def start(self):
        self.thread.start()

    def stop(self):
        _async_raise(self.thread.raiseExc(TypeError))

server = Server()
server.start()
server.stop()
This gives a ValueError: invalid thread id exception. I also tried threading.get_ident() instead of the answer's _get_my_tid(); that gives me another ID but that one is also invalid.

I think the fundamental problem you have is that you're not calling _async_raise() correctly and should replace the line:
_async_raise(self.thread.raiseExc(TypeError))
in Server.stop() with:
self.thread.raiseExc(TypeError)
If you do just that, however, you'll get an "Exception in thread Thread-1" traceback, because there's no exception handler in the work() function to handle the exception that raiseExc() raises.
The following fixes that and uses a custom Exception subclass to make things more clear:
import threading
import inspect
import ctypes
import time

# https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python
def _async_raise(tid, exctype):
    if not inspect.isclass(exctype):
        raise TypeError("Only types can be raised (not instances)")
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid,
                                                     ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")

# https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python
class ThreadWithExc(threading.Thread):
    def _get_my_tid(self):
        if not self.isAlive():
            raise threading.ThreadError("the thread is not active")
        if hasattr(self, "_thread_id"):
            return self._thread_id
        for tid, tobj in threading._active.items():
            if tobj is self:
                self._thread_id = tid
                return tid
        raise AssertionError("could not determine the thread's id")

    def raiseExc(self, exctype):
        _async_raise(self._get_my_tid(), exctype)

def work():
    try:
        while True:
            print('work')
            time.sleep(1)
    except Server.ThreadStopped:
        pass
    print('exiting work() function')

class Server:
    class ThreadStopped(Exception): pass

    def __init__(self):
        self.thread = ThreadWithExc(target=work)

    def start(self):
        self.thread.start()

    def stop(self):
        # _async_raise(self.thread.raiseExc(TypeError))
        self.thread.raiseExc(self.ThreadStopped)

server = Server()
server.start()
server.stop()
Output:
work
exiting work() function

The thread id also has to be wrapped in the matching ctypes integer type, because the C signature of PyThreadState_SetAsyncExc changed between versions.
Python 3.6:
    self._thread_id = ctypes.c_long(tid)
Python 3.7:
    self._thread_id = ctypes.c_ulong(tid)
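A version-aware helper makes that explicit. A minimal sketch (the _wrap_tid name is illustrative, not from the answers above): ctypes passes a plain Python int as a C int, which can truncate a 64-bit thread id and produce exactly the ValueError: invalid thread id seen above.

import ctypes
import sys

def _wrap_tid(tid):
    # ctypes converts a bare int argument to a 32-bit C int, which can
    # truncate a 64-bit thread id and make PyThreadState_SetAsyncExc
    # report "invalid thread id". Wrap it in the type the interpreter
    # expects: long up to 3.6, unsigned long from 3.7 on.
    if sys.version_info >= (3, 7):
        return ctypes.c_ulong(tid)
    return ctypes.c_long(tid)

# usage inside _async_raise:
# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
#     _wrap_tid(tid), ctypes.py_object(exctype))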

Related

Sending KeyboardInterrupt to child process

Is there a way to raise an exception in the child process when the main process gets a KeyboardInterrupt exception (instead of a loop polling for an event or queue value)?
For now I am using a Queue to communicate the KeyboardInterrupt triggered in the main process to the child processes. It gets noticed in the child process's while loop, and so far that lets me do a clean shutdown of the child process.
However, when the KeyboardInterrupt is triggered during the child's initialization, I have to check after every statement whether the user has aborted the main process. Another option would be to free the connection resource - which is used later - so that a (general or connection-related) exception gets raised at that point.
Are there better ways to get a clean shutdown (daemon processes will not allow a clean shutdown, I think)?
def connect(self):
    self.conn = mysql.connector.connect(
        host="192.168.10.10",
        user="homestead",
        password="xxxx",
        database="xxxx"
    )
    self.cursor = self.conn.cursor()

def dispose(self):
    self.cursor.close()
    self.conn.close()

def init(self):
    # set up root logger
    # ...
    root_logger = logging.getLogger()
    root_logger.addHandler(fh)
    # ...
    try:  # init check 1 for KeyboardInterrupt exception in
          # main process (*1)
        row = self.task_queue.get(timeout=5)  # or something like using an
        # Event.is_set() whenever KeyboardInterrupt is raised
        # in main process could be possible too
        if row is None:  # None is sent when KeyboardInterrupt exception
                         # in main process
            self.task_queue.task_done()
            return False
        # for example, calling self.dispose() here generates an exception at
        # self.connect() because connection gets closed / freed (*2)
        # or raise CustomException (*2b)?
    except:
        pass
    # ...
    self.connect()
    # ...
    try:  # init check 2 for KeyboardInterrupt exception in main process (*3)
        row = self.task_queue.get(timeout=5)
        if row is None:
            self.task_queue.task_done()
            self.dispose()
            return False
        # raise CustomException?
    except:
        pass
    return True

def run(self):
    try:
        self.init()
    except KeyboardInterrupt:  # would something like this be possible
        # (or disrupt the code flow to elicit another exception like in *2,
        # or raise a CustomException in *2b, both of which get caught here,
        # as an alternative)?
        ...
        # this would be handy instead of checking after each statement
        # in the init parts (*1, *3)
    except:
        logging.error("Something went wrong during initialization")
        self.task_queue.task_done()
        self.dispose()
        return
    while True:
        if not self.conn.is_connected():
            # ....
            pass
        row = None
        empty = False
        leave = False
        try:
            row = self.task_queue.get(timeout=5)
            if row is None:
                self.task_queue.task_done()
                leave = True
            else:
                # save item
                pass
        except:
            empty = True
        if leave:
            break
    self.dispose()
BTW: I have read some other topics like Python: while KeyboardInterrupt is forwarded to multiprocessing child process? and How to use KeyboardInterrupt from the main process to stop child processes?
Edit (added from main()):
def manage_ctrlC(*args):
    sqlDataSaver.exit.set()

def main():
    global tasks, sqlDataSaver

    # Manage Ctrl_C keyboard event
    signal.signal(signal.SIGINT, manage_ctrlC)  # dummy, not used yet

    # ...
    tasks = multiprocessing.JoinableQueue()
    sqlDataSaver = sqlExecutor(tasks)  # inherits from multiprocessing.Process
    sqlDataSaver.start()
@Tim Roberts:
You mean something like this? So each process gets its own SIGINT handler, and its own clean-up step is triggered by the exception raised in that handler?
from multiprocessing import *
import signal
import time
import sys

class SigInt(Exception):
    """SIG INT"""
    pass

class MyProcess(Process):
    def __init__(self, toExecute, sighandler):
        Process.__init__(self)
        self.toExecute = toExecute
        self.sighandler = sighandler

    def run(self):
        # set up custom handler
        signal.signal(signal.SIGINT, self.sighandler)
        while True:
            try:
                self.toExecute()
            except SigInt:
                # clean up
                break
        print(current_process().name, " process exited")

def manage_ctrlC_main(*args):
    print('main crtl-c')
    sys.exit()

def toExecute1():
    time.sleep(1)
    print("exec1")

def toExecute2():
    time.sleep(1)
    print("exec2")

def sigh1(signal, frame):
    print('sig 1 handler')
    raise SigInt

def sigh2(signal, frame):
    print('sig 2 handler')
    raise SigInt

def main():
    global myProcess1, myProcess2
    signal.signal(signal.SIGINT, manage_ctrlC_main)

    myProcess1 = MyProcess(toExecute1, sigh1)
    myProcess1.start()
    time.sleep(4)

    myProcess2 = MyProcess(toExecute2, sigh2)
    myProcess2.start()

    myProcess1.join()
    myProcess2.join()

if __name__ == '__main__':
    main()
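As an alternative to installing a SIGINT handler in every child, a multiprocessing.Event set by the parent gives each worker a single flag to poll, so clean-up always runs on the normal code path. A minimal sketch with illustrative worker/stop_event names (the child ignores SIGINT so that only the parent decides when to stop):

import multiprocessing
import signal
import time

def worker(stop_event):
    # The child ignores Ctrl+C entirely; shutdown is requested solely
    # through the shared Event, so clean-up always runs normally.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not stop_event.is_set():
        print("working")
        time.sleep(1)
    print("worker cleaned up")

def main():
    stop_event = multiprocessing.Event()
    proc = multiprocessing.Process(target=worker, args=(stop_event,))
    proc.start()
    try:
        proc.join()
    except KeyboardInterrupt:
        stop_event.set()  # ask the child to leave its loop
        proc.join()       # then wait for its clean-up to finish

if __name__ == '__main__':
    main()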

catch exceptions raised by main in other threads python

I am trying to end a thread's execution without directly referencing the thread, because that is not possible in the full program.
For reference, the main program is for the Raspberry Pi, and I need it to stop executing a function/thread immediately once a button is pressed.
I have tried raising an exception from main, but the other threads do not catch it for some reason.
Here is the scrap program that I have been testing on:
import threading
import time

class Thread_Exception(Exception):
    def __init__(self, msg):
        return super().__init__(msg)

def thread_function(index):
    bool = True
    try:
        while bool:
            print("Print from thread #", index)
            time.sleep(4)
    except Thread_Exception:
        print('Exception thrown, thread #', index)
        bool = False

if __name__ == "__main__":
    try:
        for index in range(3):
            x = threading.Thread(target=thread_function, args=(index,))
            x.start()
        time.sleep(20)
        raise Thread_Exception("intr")
        while True:
            continue
    except KeyboardInterrupt:
        print('Interrupted main')
An example of how it can be done:
import threading
import ctypes
import time

class thread_with_exception(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        # target function of the thread class
        try:
            while True:
                print('running ' + self.name)
        finally:
            print('ended')

    def get_id(self):
        # returns id of the respective thread
        if hasattr(self, '_thread_id'):
            return self._thread_id
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def raise_exception(self):
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,
            ctypes.py_object(SystemExit))
        if res > 1:
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            print('Exception raise failure')

t1 = thread_with_exception('Thread 1')
t1.start()
time.sleep(2)
t1.raise_exception()
t1.join()
The article this came from can currently be found here:
https://www.geeksforgeeks.org/python-different-ways-to-kill-a-thread/
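One caveat worth knowing about this technique: PyThreadState_SetAsyncExc only schedules the exception, which is raised the next time the target thread executes Python bytecode. A thread blocked inside a C call such as time.sleep() will not see it until that call returns, as this small sketch illustrates:

import ctypes
import threading
import time

def sleeper():
    try:
        # This C-level sleep cannot be interrupted by the async
        # exception; the full 5 seconds elapse before anything happens.
        time.sleep(5)
        print("this line may not be reached")
    except SystemExit:
        print("SystemExit delivered only after sleep() returned")

t = threading.Thread(target=sleeper)
t.start()
time.sleep(0.5)
ctypes.pythonapi.PyThreadState_SetAsyncExc(
    ctypes.c_ulong(t.ident), ctypes.py_object(SystemExit))
t.join()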

Infinite BrokenPipeError's when interrupting a custom multiprocessing pool

I like the default python multiprocessing.Pool, but it's still a pain that it isn't easy to show the current progress being made during the pool's execution. In lieu of that, I attempted to create my own custom multiprocess pool mapper, and it looks like this:
from multiprocessing import Process, Pool, cpu_count
from iterable_queue import IterableQueue

def _proc_action(f, in_queue, out_queue):
    try:
        for val in in_queue:
            out_queue.put(f(val))
    except (KeyboardInterrupt, EOFError):
        pass

def progress_pool_map(f, ls, n_procs=cpu_count()):
    in_queue = IterableQueue()
    out_queue = IterableQueue()
    err = None
    try:
        procs = [Process(target=_proc_action, args=(f, in_queue, out_queue)) for _ in range(n_procs)]
        [p.start() for p in procs]
        for elem in ls:
            in_queue.put(elem)
        in_queue.close()
        bar = 0
        for _ in ls:
            elem = next(out_queue)
            bar += 1
            if bar % 1000 == 0:
                print(bar)
            yield elem
        out_queue.close()
    except (KeyboardInterrupt, EOFError) as e:
        in_queue.close()
        out_queue.close()
        print("Joining processes")
        [p.join() for p in procs]
        print("Closing processes")
        [p.close() for p in procs]
        err = e
    if err:
        raise err
It works fairly well, and prints a value to the console for every 1000 items processed. The progress display itself is something I can worry about in future. Right now, however, my issue is that when cancelled, the operation does anything but fail gracefully. When I try to interrupt the map, it hangs on Joining Processes, and never makes it to Closing Processes. If I try hitting Ctrl+C again, it causes an infinite spew of BrokenPipeErrors to fill the console until I send an EOF and stop my program.
Here's iterable_queue.py, for reference;
from multiprocessing.queues import Queue
from multiprocessing import get_context, Value
import queue

class QueueClosed(Exception):
    pass

class IterableQueue(Queue):
    def __init__(self, maxsize=0, *, ctx=None):
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )
        self.closed = Value('b', False)

    def close(self):
        with self.closed.get_lock():
            if not self.closed.value:
                self.closed.value = True
                super().put((None, False))
                # throws BrokenPipeError in another thread without this sleep in between
                # terrible hack, must fix at some point
                import time; time.sleep(0.01)
                super().close()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except QueueClosed:
            raise StopIteration

    def get(self, *args, **kwargs):
        try:
            result, is_open = super().get(*args, **kwargs)
        except OSError:
            raise QueueClosed
        if not is_open:
            super().put((None, False))
            raise QueueClosed
        return result

    def __bool__(self):
        return bool(self.closed.value)

    def put(self, val, *args, **kwargs):
        with self.closed.get_lock():
            if self.closed.value:
                raise QueueClosed
            super().put((val, True), *args, **kwargs)

    def get_nowait(self):
        return self.get(block=False)

    def put_nowait(self, val):
        return self.put(val, block=False)

    def empty_remaining(self, block=False):
        try:
            while True:
                yield self.get(block=block)
        except (queue.Empty, QueueClosed):
            pass

    def clear(self):
        for _ in self.empty_remaining():
            pass

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
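As an aside, if the only goal is a progress display, the stock multiprocessing.Pool may already suffice: imap_unordered yields results as they complete, so the consuming loop can count them. A minimal sketch of that baseline (the work function and chunk size are placeholders):

from multiprocessing import Pool

def work(x):
    return x * x  # placeholder for the real task

def main():
    items = range(100_000)
    done = 0
    with Pool() as pool:
        # imap_unordered yields each result as soon as a worker
        # finishes it, so the consumer can count results for progress.
        for _ in pool.imap_unordered(work, items, chunksize=100):
            done += 1
            if done % 1000 == 0:
                print(done, "items processed")

if __name__ == "__main__":
    main()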

Break Main Calling Thread If Child Thread Throws An Exception

I'm using threading.Thread and t.start() with a List of Callables to do long-running multithreaded processing. My main thread blocks until all threads have finished. However, I'd like it to return immediately if one of the Callables throws an exception, and to terminate the other threads.
Using t.join() to check that a thread finished provides no information about failures caused by exceptions.
Here is the code:
import json
import requests
import threading

class ThreadServices:
    def __init__(self):
        self.obj = ""

    def execute_services(self, arg1, arg2):
        try:
            result = call_some_process(arg1, arg2)  # some method
            # save results somewhere
        except Exception as e:
            # raise exception
            print(e)

    def invoke_services(self, stubs):
        """
        Thread Spawning Function
        """
        try:
            p1 = ""  # some value
            p2 = ""  # some value
            # Call service 1
            t1 = threading.Thread(target=self.execute_services, args=(p1, p2))
            # Start thread
            t1.start()
            # Block till thread completes execution
            t1.join()
            thread_pool = list()
            for stub in stubs:
                # Start parallel execution of threads
                t = threading.Thread(target=self.execute_services,
                                     args=(p1, p2))
                t.start()
                thread_pool.append(t)
            for thread in thread_pool:
                # Block till all the threads complete execution:
                # wait for all the parallel tasks to complete
                thread.join()
            # Start another process thread
            t2 = threading.Thread(target=self.execute_services,
                                  args=(p1, p2))
            t2.start()
            # Block till this thread completes execution
            t2.join()
            requests.post(url, data=json.dumps({"status_code": 200}))
        except Exception as e:
            print(e)
            requests.post(url, data=json.dumps({"status_code": 500}))
        # Don't return anything as this function is invoked as a thread from
        # main calling function

class Service(ThreadServices):
    """
    Service Class
    """
    def main_thread(self, request, context):
        """
        Main Thread: Invokes Task Execution Sequence in ThreadedService
        :param request:
        :param context:
        :return:
        """
        try:
            main_thread = threading.Thread(target=self.invoke_services,
                                           args=(request,))
            main_thread.start()
            return True
        except Exception as e:
            return False
When I call Service().main_thread(request, context) and there is some exception executing t1, I need to get it raised in main_thread and return False. How can I implement this for this structure? Thanks!!
For one thing, you are complicating matters too much. I would do it this way:
from _thread import start_new_thread as thread
from time import sleep

class Task:
    """One thread per task.
    This you should do with subclassing threading.Thread().
    This is just a conceptual example.
    """
    def __init__(self, func, args=(), kwargs={}):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.error = None
        self.done = 0
        self.result = None

    def _run(self):
        self.done = 0
        self.error = None
        self.result = None
        # So this is what you should do in subclassed Thread():
        try:
            self.result = self.func(*self.args, **self.kwargs)
        except Exception as e:
            self.error = e
        self.done = 1

    def start(self):
        thread(self._run, ())

    def wait(self, retrexc=1):
        """Used in place of threading.Thread.join(), but it returns the
        result of the function self.func() and manages errors."""
        while not self.done:
            sleep(0.001)
        if self.error:
            if retrexc:
                return self.error
            raise self.error
        return self.result

# And this is how you should use your pool:
def do_something(tasknr):
    print(tasknr - 20)
    if tasknr % 7 == 0:
        raise Exception("Dummy exception!")
    return tasknr**120 / 82.0

pool = []
for task in range(20, 50):
    t = Task(do_something, (task,))
    pool.append(t)
    t.start()  # without starting each task, wait() below would spin forever

# And only then wait for each one:
results = []
for task in pool:
    results.append(task.wait())
print(results)
This way you can make task.wait() raise the error instead. The thread will already have stopped. So all you need to do is remove the task references from the pool, or the whole pool, after you are done. You can even:

results = []
for task in pool:
    try:
        results.append(task.wait(0))
    except Exception as e:
        print(task.args, "Error:", str(e))
print(results)
Now, do not use this Task() class as-is (it needs a lot of things added before it is usable for real).
Just subclass threading.Thread() and implement a similar concept by overriding run() and join(), or add new methods like wait().
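For instance, a minimal Python 3 sketch of that subclassing approach (the Task name and wait() method mirror the conceptual example above; this is not a drop-in class):

import threading

class Task(threading.Thread):
    """Thread that stores its target's result or exception,
    and can re-raise the exception in the caller on wait()."""
    def __init__(self, func, args=(), kwargs=None):
        super().__init__()
        self.func, self.args, self.kwargs = func, args, kwargs or {}
        self.result = None
        self.error = None

    def run(self):
        try:
            self.result = self.func(*self.args, **self.kwargs)
        except Exception as e:
            self.error = e

    def wait(self, reraise=True):
        self.join()  # real blocking join, no sleep-polling
        if self.error and reraise:
            raise self.error
        return self.result

def do_something(n):
    if n % 7 == 0:
        raise ValueError("dummy exception")
    return n * 2

pool = [Task(do_something, (n,)) for n in range(20, 30)]
for t in pool:
    t.start()
for t in pool:
    try:
        print(t.wait())
    except ValueError as e:
        print("error:", e)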

Cancel asynchronous iterator by timeout

I have a process running with asyncio which should run forever.
I can interact with that process with a ProcessIterator, which can (left out here) send data to stdin and fetch from stdout.
I can access the data with async for fd, data in ProcessIterator(...):.
The problem now is that the execution of this async iterator must be time-limited. If the time runs out, the timeout() function is called,
but the exception does not originate from the __anext__ function to notify the caller of the timeout.
How can I raise this exception in the async iterator?
I found no way of calling awaitable.throw(something) or similar for it.
class ProcessIterator:
    def __init__(self, process, loop, run_timeout):
        self.process = process
        self.loop = loop
        self.run_timeout = run_timeout
        # set the global timer
        self.overall_timer = self.loop.call_later(
            self.run_timeout, self.timeout)

    def timeout(self):
        # XXX: how do i pass this exception into the iterator?
        raise ProcTimeoutError(
            self.process.args,
            self.run_timeout,
            was_global,
        )

    async def __aiter__(self):
        return self

    async def __anext__(self):
        if self.process.exited:
            raise StopAsyncIteration()
        else:
            # fetch output from the process asyncio.Queue()
            entry = await self.process.output_queue.get()
            if entry == StopIteration:
                raise StopAsyncIteration()
            return entry
The usage of the async iterator is now roughly:
async def test_coro(loop):
    code = 'print("rofl"); time.sleep(5); print("lol")'
    proc = Process([sys.executable, '-u', '-c', code])
    await proc.create()
    try:
        async for fd, line in ProcessIterator(proc, loop, run_timeout=1):
            print("%d: %s" % (fd, line))
    except ProcessTimeoutError as exc:
        # XXX This is the exception I'd like to get here! How can I throw it?
        print("timeout: %s" % exc)
    await proc.wait()
tl;dr: How can I throw a timed exception so that it originates from an async iterator?
EDIT: Added solution 2
Solution 1:
Can the timeout() callback store the ProcTimeoutError exception in an instance variable? Then __anext__() can check the instance variable and raise the exception if it is set.
class ProcessIterator:
    def __init__(self, process, loop, run_timeout):
        self.process = process
        self.loop = loop
        self.error = None
        self.run_timeout = run_timeout
        # set the global timer
        self.overall_timer = self.loop.call_later(
            self.run_timeout, self.timeout)

    def timeout(self):
        # XXX: set instance variable
        self.error = ProcTimeoutError(
            self.process.args,
            self.run_timeout,
            was_global
        )

    async def __aiter__(self):
        return self

    async def __anext__(self):
        # XXX: if error is set, then raise the exception
        if self.error:
            raise self.error
        elif self.process.exited:
            raise StopAsyncIteration()
        else:
            # fetch output from the process asyncio.Queue()
            entry = await self.process.output_queue.get()
            if entry == StopIteration:
                raise StopAsyncIteration()
            return entry
Solution 2:
Put the exception on the process.output_queue.
....
def timeout(self):
    # XXX: put the exception on the queue
    self.process.output_queue.put(ProcTimeoutError(
        self.process.args,
        self.run_timeout,
        was_global
    ))
....
# fetch output from the process asyncio.Queue()
entry = await self.process.output_queue.get()
if entry == StopIteration:
    raise StopAsyncIteration()
elif isinstance(entry, ProcTimeoutError):
    raise entry
....
If there may be entries on the queue, use a priority queue. Assign ProcTimeoutError a higher priority than the other entries, e.g., (0, ProcTimeoutError) vs (1, other_entry).
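A minimal sketch of that priority idea with asyncio.PriorityQueue, using illustrative tuple shapes and the built-in TimeoutError in place of ProcTimeoutError:

import asyncio

async def demo():
    q = asyncio.PriorityQueue()
    # Regular output entries carry priority 1; the timeout error
    # carries priority 0, so the consumer always sees it first.
    await q.put((1, "some output line"))
    await q.put((0, TimeoutError("run timeout")))
    priority, entry = await q.get()
    if isinstance(entry, Exception):
        raise entry
    print(entry)

try:
    asyncio.run(demo())
except TimeoutError as exc:
    print("timeout surfaced first:", exc)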
Please check out the timeout context manager from asyncio:

async with asyncio.timeout(10):
    async for i in get_iter():
        process(i)

It was not released at the time of writing, but you could copy-paste the implementation from the asyncio master branch; it has since shipped in the standard library as asyncio.timeout in Python 3.11.
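Applied to this question's shape, a small runnable sketch (requires Python 3.11+ for asyncio.timeout; the ticker generator is a stand-in for the process-output iterator):

import asyncio

async def ticker():
    # stand-in for the process output iterator
    for i in range(100):
        await asyncio.sleep(0.5)
        yield i

async def main():
    try:
        async with asyncio.timeout(1.2):
            async for value in ticker():
                print(value)
    except TimeoutError:
        # the timeout cancels the pending await inside the loop and
        # converts the cancellation into TimeoutError here
        print("iteration cancelled by the timeout")

asyncio.run(main())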
You could use get_nowait, which will return an entry or raise asyncio.QueueEmpty immediately. Wrapping it in a while loop on self.error, with some async sleep in between, should do the trick. Something like:

async def __anext__(self):
    if self.process.exited:
        raise StopAsyncIteration()
    else:
        while self.error is None:
            try:
                entry = self.process.output_queue.get_nowait()
                if entry == StopIteration:
                    raise StopAsyncIteration()
                return entry
            except asyncio.QueueEmpty:
                # some sleep to give back control to the ioloop,
                # since we are using nowait
                await asyncio.sleep(0.1)
        else:
            raise self.error
And as a hint, this is the approach used in Tornado's Queue.get implementation with a timeout:
def get(self, timeout=None):
    """Remove and return an item from the queue.
    Returns a Future which resolves once an item is available, or raises
    `tornado.gen.TimeoutError` after a timeout.
    """
    future = Future()
    try:
        future.set_result(self.get_nowait())
    except QueueEmpty:
        self._getters.append(future)
        _set_timeout(future, timeout)
    return future
This is the solution I have come up with for now.
See https://github.com/SFTtech/kevin (kevin/process.py) for the upstream version.
It also features line counting and output timeouts, which I stripped from this example.
class Process:
    def __init__(self, command, loop=None):
        self.loop = loop or asyncio.get_event_loop()
        self.created = False
        self.killed = asyncio.Future()
        self.proc = self.loop.subprocess_exec(
            lambda: WorkerInteraction(self),  # see upstream repo
            *command)
        self.transport = None
        self.protocol = None

    async def create(self):
        self.transport, self.protocol = await self.proc

    def communicate(self, timeout):
        if self.killed.done():
            raise Exception("process was already killed "
                            "and no output is waiting")
        return ProcessIterator(self, self.loop, timeout)

class ProcessIterator:
    """
    Asynchronous iterator for the process output.
    Use like `async for (fd, data) in ProcessIterator(...):`
    """
    def __init__(self, process, loop, run_timeout):
        self.process = process
        self.loop = loop
        self.run_timeout = run_timeout
        self.overall_timer = None

        if self.run_timeout < INF:
            # set the global timer
            self.overall_timer = self.loop.call_later(
                self.run_timeout,
                functools.partial(self.timeout, was_global=True))

    def timeout(self, was_global=False):
        # was_global is unused in this stripped-down version;
        # kept so the call_later partial above matches the signature.
        if not self.process.killed.done():
            self.process.killed.set_exception(ProcTimeoutError(
                self.process.args,
                self.run_timeout,
            ))

    async def __aiter__(self):
        return self

    async def __anext__(self):
        # either the process exits,
        # there's an exception (process killed, timeout, ...)
        # or the queue gives us the next data item.
        # wait for the first of those events.
        done, pending = await asyncio.wait(
            [self.process.protocol.queue.get(), self.process.killed],
            return_when=asyncio.FIRST_COMPLETED)

        # at least one of them is done now:
        for future in done:
            # if something failed, cancel the pending futures
            # and raise the exception
            # this happens e.g. for a timeout.
            if future.exception():
                for future_pending in pending:
                    future_pending.cancel()
                # kill the process before throwing the error!
                await self.process.pwn()
                raise future.exception()

            # fetch output from the process
            entry = future.result()

            # it can be StopIteration to indicate the last data chunk,
            # as the process exited on its own.
            if entry == StopIteration:
                if not self.process.killed.done():
                    self.process.killed.set_result(entry)
                # raise the stop iteration
                await self.stop_iter(enough=False)
            return entry

        raise Exception("internal fail: no future was done!")

    async def stop_iter(self, enough=False):
        # stop the timer
        if self.overall_timer:
            self.overall_timer.cancel()
        retcode = self.process.returncode()
        raise StopAsyncIteration()
The magic function is this:
done, pending = await asyncio.wait(
    [self.process.protocol.queue.get(), self.process.killed],
    return_when=asyncio.FIRST_COMPLETED)
When the timeout occurs, the queue fetching is aborted reliably.
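The same race can be demonstrated in isolation. A minimal runnable sketch under modern asyncio, where the queue getter is wrapped in a task and the built-in TimeoutError stands in for ProcTimeoutError:

import asyncio

async def demo():
    queue = asyncio.Queue()
    killed = asyncio.get_running_loop().create_future()

    # simulate the global timer firing before any output arrives
    asyncio.get_running_loop().call_later(
        0.1, killed.set_exception, TimeoutError("run timeout"))

    getter = asyncio.ensure_future(queue.get())
    done, pending = await asyncio.wait(
        [getter, killed], return_when=asyncio.FIRST_COMPLETED)

    for future in done:
        if future.exception():
            # cancel the still-pending queue fetch, then surface the error
            for p in pending:
                p.cancel()
            raise future.exception()
        print(future.result())

try:
    asyncio.run(demo())
except TimeoutError as exc:
    print("timeout surfaced from the race:", exc)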
