I have a decorator written as such:
import threading
from time import sleep
from functools import wraps
import sys
import os

def repeat_periodically(f):
    """ Repeat wrapped function every second """
    @wraps(f)
    def wrap(self, *args, **kwargs):
        def wrap_helper(*args, **kwargs):
            try:
                threading.Timer(1.0, wrap_helper).start()
                f(self)
            except KeyboardInterrupt:
                try:
                    sys.exit(1)
                except:
                    os._exit(1)
        wrap_helper()
    return wrap
I'm not sure if it continues to open a new thread every single time it calls itself, but regardless, I'm unable to kill the process when I hit CTRL + C. I've also added the same try-except block in the function that I've decorated:
@repeat_periodically
def get_stats(self):
    try:
        # log some state information
    except KeyboardInterrupt:
        try:
            sys.exit(1)
        except:
            os._exit(1)
My program just continues to run and all I see in the terminal is
^C <the stuff that I am logging>
<the stuff that I am logging>
<the stuff that I am logging>
In other words, it just keeps logging, even though I'm trying to kill it with CTRL + C.
Update:
I should mention that the above process is spun up from another thread:
tasks = [
    {'target': f, 'args': (arg1,)},
    {'target': g},
]

for task in tasks:
    t = threading.Thread(**task)
    t.start()
Specifically it is the second task that spins up the Timer. However, if I set t.daemon = True, the process just runs once and exits. The first task uses watchdog. I've essentially used the example code from the watchdog documentation:
def watch_for_event_file(Event):
    path = sys.argv[1] if len(sys.argv) > 1 else '.'
    event_handler = LoggingCreateHandler(Event)
    observer = Observer()
    observer.schedule(event_handler, path)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
(Sorry for all the updates)
From the Thread documentation:
The entire Python program exits when no alive non-daemon threads are left.
Making your Timer threads daemon threads should solve your problem, so replace:
threading.Timer(1.0, wrap_helper).start()
with:
t = threading.Timer(1.0, wrap_helper)
t.daemon = True
t.start()
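Putting it together, a minimal sketch of the decorator with daemon timers (the rest of the question's code unchanged):

import threading
from functools import wraps

def repeat_periodically(f):
    """ Repeat wrapped function every second """
    @wraps(f)
    def wrap(self, *args, **kwargs):
        def wrap_helper():
            t = threading.Timer(1.0, wrap_helper)
            t.daemon = True  # daemon timers die with the main thread
            t.start()
            f(self)
        wrap_helper()
    return wrap

Keep in mind that the interpreter exits once only daemon threads are left, so the main thread has to stay alive (for example in a sleep loop) for as long as you want the periodic calls to keep firing; that same property is what lets Ctrl+C terminate the program.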
Related
I have a function that downloads videos from specific URLs, and I launch this function in a thread to avoid freezing the GUI, but I want a way to stop or pause the download. How can I do this?
Here is the code:
def download_videos(self):
    ydl1 = youtube_dl.YoutubeDL(self.get_opts())
    if self.get_Urls().__len__() > 0:
        ydl1.download(self.get_Urls())

def downloadVideoThrd(self):
    self.t1 = threading.Thread(target=self.download_videos())
    self.t1.start()
You can use one of these two options: use the multiprocessing library instead of threading, or raise a SystemExit exception in the subthread.
Note: when I tested youtube-dl, it resumed downloading from where it left off. So when we start downloading the same URL again, youtube-dl will resume, but the partially downloaded file needs to still be in the filesystem.
Here is the first option. In this solution we don't use a thread; we use a subprocess, because we can send any signal to the subprocess and handle that signal however we want.
import multiprocessing
import time
import ctypes, signal, os
import youtube_dl

class Stop_Download(Exception):  # This is our own special Exception class
    pass

def usr1_handler(signum, frame):  # When SIGUSR1 arrives, run this handler
    raise Stop_Download

def download_videos(resume_event):
    signal.signal(signal.SIGUSR1, usr1_handler)

    def download(link):
        ydl1 = youtube_dl.YoutubeDL()
        ydl1.download([link])

    try:
        download(link)  # 'link' is assumed to hold the URL to download
    except Stop_Download:  # catch this special exception and do whatever you want
        print("Stop_Download exception")
        resume_event.wait()  # wait for the event to resume, or kill this process with sys.exit()
        print("resuming")
        download(link)

def downloadVideoThrd():
    resume_event = multiprocessing.Event()
    p1 = multiprocessing.Process(target=download_videos, args=(resume_event,))
    p1.start()
    print("mp start")
    time.sleep(5)
    os.kill(p1.pid, signal.SIGUSR1)  # pause the download
    time.sleep(5)
    resume_event.set()  # resume the download
    time.sleep(5)
    os.kill(p1.pid, signal.SIGUSR2)  # stop the download (no handler installed, so the default action terminates the process)

downloadVideoThrd()
Here is the second solution; you can also check this for more detail about killing a thread. We will raise a SystemExit exception in the subthread from the main thread. We can either stop or pause the thread: to pause it you can use the threading.Event class (see the sketch after the code below), and to stop the thread (not the process) you can use sys.exit().
import ctypes, time, threading
import youtube_dl

def download_videos(self):
    try:
        ydl1 = youtube_dl.YoutubeDL(self.get_opts())
        if self.get_Urls().__len__() > 0:
            ydl1.download(self.get_Urls())
    except SystemExit:
        print("stopped")
        # do whatever here

def downloadVideoThrd(self):
    self.t1 = threading.Thread(target=self.download_videos)
    self.t1.start()
    time.sleep(4)
    # raise a SystemExit exception in the self.t1 thread
    ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self.t1.ident),
                                               ctypes.py_object(SystemExit))
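As for pausing with threading.Event, here is a minimal sketch; the pause_event name and the worker loop are illustrative assumptions, not part of youtube-dl:

import threading
import time

pause_event = threading.Event()
pause_event.set()  # start in the "running" state

def worker():
    while True:
        pause_event.wait()  # blocks whenever the event is cleared
        # ... do one unit of work here ...
        time.sleep(1)

t = threading.Thread(target=worker)
t.daemon = True
t.start()

pause_event.clear()  # pause the worker at its next wait()
pause_event.set()    # resume it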
From the picamera docs I took the example for fast capture and processing and added a SIGINT handler to catch the keyboard interrupt:
import io
import time
import threading
import signal
import sys
import picamera

# Create a pool of image processors
done = False
lock = threading.Lock()
pool = []

def signal_handler(signal, frame):
    global done
    print 'You pressed Ctrl+C!'
    done = True
    sys.exit()

signal.signal(signal.SIGINT, signal_handler)
class ImageProcessor(threading.Thread):
    def __init__(self):
        super(ImageProcessor, self).__init__()
        self.stream = io.BytesIO()
        self.event = threading.Event()
        self.terminated = False
        self.daemon = True
        self.start()

    def run(self):
        # This method runs in a separate thread
        global done
        while not self.terminated:
            # Wait for an image to be written to the stream
            if self.event.wait(1):
                try:
                    self.stream.seek(0)
                    # Read the image and do some processing on it
                    #Image.open(self.stream)
                    #...
                    #...
                    # Set done to True if you want the script to terminate
                    # at some point
                    #done=True
                finally:
                    # Reset the stream and event
                    self.stream.seek(0)
                    self.stream.truncate()
                    self.event.clear()
                    # Return ourselves to the pool
                    with lock:
                        pool.append(self)

def streams():
    while not done:
        with lock:
            if pool:
                processor = pool.pop()
            else:
                processor = None
        if processor:
            yield processor.stream
            processor.event.set()
        else:
            # When the pool is starved, wait a while for it to refill
            time.sleep(0.1)

with picamera.PiCamera() as camera:
    pool = [ImageProcessor() for i in range(4)]
    camera.resolution = (640, 480)
    camera.framerate = 30
    camera.start_preview()
    time.sleep(2)
    camera.capture_sequence(streams(), use_video_port=True)

# Shut down the processors in an orderly fashion
while pool:
    with lock:
        processor = pool.pop()
    processor.terminated = True
    processor.join()
but the interrupt signal is never caught.
Until camera.capture_sequence(streams(), use_video_port=True) runs, the signal is caught; after capture_sequence has started, the signal handler is no longer called.
I'm new to Python, so maybe the answer is simple. What am I doing wrong here?
EDIT:
If I remove the following line, the signal is caught:
yield processor.stream
The problem there is that you are using thread.join(): it blocks the main thread, which means your program has to wait until the thread you joined finishes before it can continue.
The signals will always be caught by the main thread, because it is the one that receives the signals; it is the process that has the threads.
There are plenty of answers about how to deal with the main thread and CTRL+C, so here are three options.
First, add a timeout to the join() call, e.g. thread1.join(60), so the main thread regularly gets control back (a sketch follows).
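For example, a minimal sketch that keeps the main thread responsive to Ctrl+C (thread1 is assumed to be the threading.Thread you started):

# join in short slices so KeyboardInterrupt can be raised in the main thread between calls
while thread1.is_alive():
    thread1.join(0.5)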
Second, start a new process whose job is to catch the signal and kill the program:
import os
import signal
import sys

class Watcher():
    def __init__(self):
        self.child = os.fork()
        if self.child == 0:
            return
        else:
            self.watch()

    def watch(self):
        try:
            os.wait()
        except KeyboardInterrupt:
            self.kill()
        sys.exit()

    def kill(self):
        try:
            os.kill(self.child, signal.SIGKILL)
        except OSError:
            pass
Start a Watcher before you start your worker threads, like this:
def main():
    init()
    Watcher()
    start_your_thread1()
    start_your_thread2()
    start_your_thread3()
The final option is your original way, the more complicated producer-and-consumer way: just delete the final join() and give the main thread some task to do instead, as in the sketch below.
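A sketch of that third option (worker_threads is an assumed list of your threading.Thread objects):

import time

# instead of blocking forever in join(), keep the main thread busy so it can see Ctrl+C
while any(t.is_alive() for t in worker_threads):
    time.sleep(0.1)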
I prefer the second option: it is easy to use, and it solves two problems with multithreaded programs in Python: (1) a signal might be delivered to any thread (which is just a misfeature) and (2) if the thread that gets the signal is waiting, the signal is ignored (which is a bug).
More detail about the Watcher is in Appendix A of The Little Book of Semaphores.
In your code, the done variable is a global variable.
So whenever you want to modify it inside a function, you need to use the global keyword; otherwise the assignment just creates a local variable with the same name.
You should fix your code like this:
import signal
import sys

done = False

def signal_handler(signal, frame):
    global done
    print('You pressed Ctrl+C!')
    done = True
    sys.exit()

signal.signal(signal.SIGINT, signal_handler)
I've read a lot of questions on SO and elsewhere on this topic but can't get it working. Perhaps it's because I'm using Windows, I don't know.
What I'm trying to do is download a bunch of files (whose URLs are read from a CSV file) in parallel. I've tried using multiprocessing and concurrent.futures for this with no success.
The main problem is that I can't stop the program on Ctrl-C - it just keeps running. This is especially bad in the case of processes instead of threads (I used multiprocessing for that) because I have to kill each process manually every time.
Here is my current code:
import concurrent.futures
import signal
import sys
import urllib.request

class Download(object):
    def __init__(self, url, filename):
        self.url = url
        self.filename = filename

def perform_download(download):
    print('Downloading {} to {}'.format(download.url, download.filename))
    return urllib.request.urlretrieve(download.url, filename=download.filename)

def main(argv):
    args = parse_args(argv)
    queue = []
    with open(args.results_file, 'r', encoding='utf8') as results_file:
        # Irrelevant CSV parsing...
        queue.append(Download(url, filename))

    def handle_interrupt():
        print('CAUGHT SIGINT!!!!!!!!!!!!!!!!!!!11111111')
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_interrupt)

    with concurrent.futures.ThreadPoolExecutor(max_workers=args.num_jobs) as executor:
        futures = {executor.submit(perform_download, d): d for d in queue}
        try:
            concurrent.futures.wait(futures)
        except KeyboardInterrupt:
            print('Interrupted')
            sys.exit(1)
I'm trying to catch Ctrl-C in two different ways here, but neither of them works. The latter one (the except KeyboardInterrupt) actually gets run, but the process won't exit after calling sys.exit.
Before this I used the multiprocessing module like this:
try:
    pool = multiprocessing.Pool(processes=args.num_jobs)
    pool.map_async(perform_download, queue).get(1000000)
except Exception as e:
    pool.close()
    pool.terminate()
    sys.exit(0)
So what is the proper way to add the ability to terminate all worker threads or processes once you hit Ctrl-C in the terminal?
System information:
Python version: 3.6.1 32-bit
OS: Windows 10
You are catching the SIGINT signal in a signal handler and re-routing it as a SystemExit exception. This prevents the KeyboardInterrupt exception from ever reaching your main loop.
Moreover, if the SystemExit is not raised in the main thread, it will just kill the child thread in which it is raised.
Jesse Noller, the author of the multiprocessing library, explains how to deal with CTRL+C in an old blog post.
import signal
from multiprocessing import Pool

def initializer():
    """Ignore CTRL+C in the worker process."""
    signal.signal(SIGINT, SIG_IGN)

pool = Pool(initializer=initializer)

try:
    pool.map(perform_download, downloads)
except KeyboardInterrupt:
    pool.terminate()
    pool.join()
I don't believe the accepted answer works under Windows, certainly not under current versions of Python (I am running 3.8.5). In fact, it won't run at all since SIGINT and SIG_IGN will be undefined (what is needed is signal.SIGINT and signal.SIG_IGN).
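For reference, the corrected initializer would read:

import signal
from multiprocessing import Pool

def initializer():
    """Ignore CTRL+C in the worker process."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)

pool = Pool(initializer=initializer)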
This is a known problem under Windows. A solution I have come up with is essentially the reverse of the accepted solution: the main process must ignore keyboard interrupts, and we initialize the process pool so that each worker initially sets a global flag ctrl_c_entered to False and installs a handler that sets this flag to True if Ctrl-C is entered. Then any multiprocessing worker function (or method) is decorated with a special decorator, handle_ctrl_c, that first tests the ctrl_c_entered flag; only if it is False does it run the worker function, after re-enabling keyboard interrupts and establishing a try/except handler for KeyboardInterrupt. If the ctrl_c_entered flag was True, or if a keyboard interrupt occurs during the execution of the worker function, the value returned is an instance of KeyboardInterrupt, which the main process can check to determine whether a Ctrl-C was entered.
Thus all submitted tasks will be allowed to start, but will immediately terminate with a return value of a KeyboardInterrupt exception, and the actual worker function will never be called by the decorator once a Ctrl-C has been entered.
import signal
from multiprocessing import Pool
from functools import wraps
import time

def handle_ctrl_c(func):
    """
    Decorator function.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        global ctrl_c_entered
        if not ctrl_c_entered:
            # re-enable keyboard interrupts:
            signal.signal(signal.SIGINT, default_sigint_handler)
            try:
                return func(*args, **kwargs)
            except KeyboardInterrupt:
                ctrl_c_entered = True
                return KeyboardInterrupt()
            finally:
                signal.signal(signal.SIGINT, pool_ctrl_c_handler)
        else:
            return KeyboardInterrupt()
    return wrapper

def pool_ctrl_c_handler(*args, **kwargs):
    global ctrl_c_entered
    ctrl_c_entered = True

def init_pool():
    # set global variables for each process in the pool:
    global ctrl_c_entered
    global default_sigint_handler
    ctrl_c_entered = False
    default_sigint_handler = signal.signal(signal.SIGINT, pool_ctrl_c_handler)

@handle_ctrl_c
def perform_download(download):
    print('begin')
    time.sleep(2)
    print('end')
    return True

if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = Pool(initializer=init_pool)
    results = pool.map(perform_download, range(20))
    if any(map(lambda x: isinstance(x, KeyboardInterrupt), results)):
        print('Ctrl-C was entered.')
    print(results)
I need to terminate external programs which are run from an asyncio Python script with a specific signal, say SIGTERM. My problem is that the programs always receive SIGINT, even if I send them SIGTERM.
Here is a test case, source code for a fakeprg used in the test below can be found here.
import asyncio
import traceback
import os
import os.path
import sys
import time
import signal
import shlex
from functools import partial

class ExtProgramRunner:

    run = True
    processes = []

    def __init__(self):
        pass

    def start(self, loop):
        self.current_loop = loop
        self.current_loop.add_signal_handler(signal.SIGINT, lambda: asyncio.async(self.stop('SIGINT')))
        self.current_loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.async(self.stop('SIGTERM')))
        asyncio.async(self.cancel_monitor())
        asyncio.Task(self.run_external_programs())

    @asyncio.coroutine
    def stop(self, sig):
        print("Got {} signal".format(sig))
        self.run = False
        for process in self.processes:
            print("sending SIGTERM signal to the process with pid {}".format(process.pid))
            process.send_signal(signal.SIGTERM)
        print("Canceling all tasks")
        for task in asyncio.Task.all_tasks():
            task.cancel()

    @asyncio.coroutine
    def cancel_monitor(self):
        while True:
            try:
                yield from asyncio.sleep(0.05)
            except asyncio.CancelledError:
                break
        print("Stopping loop")
        self.current_loop.stop()

    @asyncio.coroutine
    def run_external_programs(self):
        os.makedirs("/tmp/files0", exist_ok=True)
        os.makedirs("/tmp/files1", exist_ok=True)
        # schedule tasks for execution
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files0 1000"))
        asyncio.Task(self.run_cmd_forever("/tmp/fakeprg /tmp/files1 5000"))

    @asyncio.coroutine
    def run_cmd_forever(self, cmd):
        args = shlex.split(cmd)
        while self.run:
            process = yield from asyncio.create_subprocess_exec(*args)
            self.processes.append(process)
            exit_code = yield from process.wait()
            for idx, p in enumerate(self.processes):
                if process.pid == p.pid:
                    self.processes.pop(idx)
            print("External program '{}' exited with exit code {}, relaunching".format(cmd, exit_code))

def main():
    loop = asyncio.get_event_loop()
    try:
        daemon = ExtProgramRunner()
        loop.call_soon(daemon.start, loop)
        # start main event loop
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    except asyncio.CancelledError as exc:
        print("asyncio.CancelledError")
    except Exception as exc:
        print(exc, file=sys.stderr)
        print("====", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
    finally:
        print("Stopping daemon...")
        loop.close()

if __name__ == '__main__':
    main()
The reason for this is: when you start your Python program (the parent) and it starts its /tmp/fakeprg processes (the children), they are all separate processes with their own pids, but they all run in the same foreground process group. Your shell is bound to this group, so when you hit Ctrl-C (SIGINT), Ctrl-Z (SIGTSTP) or Ctrl-\ (SIGQUIT), the signal is sent to every process in the foreground process group.
In your code this happens before the parent can even send SIGTERM to its children through send_signal, so that line sends a signal to an already dead process (and should fail, so IMO that's an issue with asyncio).
To solve this, you can explicitly put your child processes into a separate process group, like this:
asyncio.create_subprocess_exec(*args, preexec_fn=os.setpgrp)
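On Python 3.2+ the same effect is available via start_new_session=True (it runs setsid() in the child), which the subprocess docs recommend over preexec_fn when threads are involved; a sketch in the same run_cmd_forever context as above:

process = yield from asyncio.create_subprocess_exec(*args, start_new_session=True)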
I have a very simple example that prints out names, but the problem is that when I press Ctrl+C, the program doesn't return to the normal command-line prompt:
^CStopping
After that I only see my cursor blinking, and I can't do anything, so I have to close the window and open it up again.
I'm running Ubuntu 12.10.
Here is my code:
import threading
import random
import time
import Queue
import urllib2
import sys

queue = Queue.Queue()
keep_running = True

class MyThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.names = ['Sophia', 'Irina', 'Tanya', 'Cait', 'Jess']

    def run(self):
        while keep_running:
            time.sleep(0.25)
            line = self.names[random.randint(0, len(self.names) - 1)]
            queue.put(line)
            self.queue.task_done()

class Starter():
    def __init__(self):
        self.queue = queue
        t = MyThread(self.queue)
        t.start()
        self.next()

    def next(self):
        while True:
            time.sleep(0.2)
            if not self.queue.empty():
                line = self.queue.get()
                print line, self.queue.qsize()
            else:
                print 'waiting for queue'

def main():
    try:
        Starter()
        queue.join()
    except KeyboardInterrupt, e:
        print 'Stopping'
        keep_running = False
        sys.exit(1)

main()
Your main problem is that you didn't declare keep_running as global, so main is just creating a local variable with the same name.
If you fix that, it will usually exit on some platforms.
If you want it to always exit on all platforms, you need to do two more things:
1. join the thread that you created.
2. protect the shared global variable with a Lock or other sync mechanism.
However, a shared global keep_running flag isn't really needed here anyway. You've already got a queue. Just define a special "shutdown" message you can post on the queue as a signal to shut down (see the sketch below).
While we're at it, unless you're trying to simulate a slow network or something, there is no need for that time.sleep in your code. Just call self.queue.get(timeout=0.2). That way, instead of always taking 0.2 seconds to get each entry, it will take up to 0.2 seconds, but as little as 0 if there's already something there.
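A minimal sketch of the sentinel idea, matching the question's Python 2 code (the SHUTDOWN name is an assumption; any unique object works):

import Queue

SHUTDOWN = object()  # unique sentinel; the producer put()s this when it is time to stop

def next(self):
    while True:
        try:
            line = self.queue.get(timeout=0.2)  # no time.sleep needed
        except Queue.Empty:
            continue
        if line is SHUTDOWN:
            break
        print line, self.queue.qsize()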
Your main thread is stuck in Starter.next. The interrupt then is called there and propagates up to the first line of the try statement and is caught, jumping to the except clause before join can be called. Try putting the join call in a finally block (with the sys.exit) or simply moving it to th exception handler