Cancellable threading.Timer in Python - python

I am trying to write a method that counts down to a given time and, unless a restart command is given, executes the task. But I don't think the Python threading.Timer class allows the timer to be cancelled.
import threading

def countdown(action):
    def printText():
        print 'hello!'

    t = threading.Timer(5.0, printText)
    if (action == 'reset'):
        t.cancel()
    t.start()
I know the above code is wrong somehow; I would appreciate some guidance here.

You would call the cancel method after you start the timer:
import time
import threading

def hello():
    print "hello, world"
    time.sleep(2)

t = threading.Timer(3.0, hello)
t.start()
var = 'something'
if var == 'something':
    t.cancel()
You might consider using a while-loop on a Thread, instead of using a Timer.
Here is an example appropriated from Nikolaus Gradwohl's answer to another question:
import threading
import time

class TimerClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.event = threading.Event()
        self.count = 10

    def run(self):
        while self.count > 0 and not self.event.is_set():
            print self.count
            self.count -= 1
            self.event.wait(1)

    def stop(self):
        self.event.set()

tmr = TimerClass()
tmr.start()
time.sleep(3)
tmr.stop()

I'm not sure if I understand correctly. Do you want to write something like in this example?
>>> import threading
>>> t = None
>>>
>>> def sayHello():
...     global t
...     print "Hello!"
...     t = threading.Timer(0.5, sayHello)
...     t.start()
...
>>> sayHello()
Hello!
Hello!
Hello!
Hello!
Hello!
>>> t.cancel()
>>>

The threading.Timer class does have a cancel method, and although it won't kill the timer's thread, it will stop the timer from actually firing. What actually happens is that the cancel method sets a threading.Event, and the thread executing the threading.Timer checks that event after it has finished waiting, before it actually executes the callback.
That said, timers are usually implemented without using a separate thread for each one. The best way to do it depends on what your program is actually doing (while waiting for this timer), but anything with an event loop, such as GUI and network frameworks, has a way to request a timer that is hooked into the event loop.
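As an illustration of the event-loop style with just the standard library, here is a minimal sketch using the sched module (GUI and network frameworks expose equivalent hooks, e.g. Tkinter's after() or asyncio's call_later()):

# Minimal sketch (Python 3, standard library only): sched gives event-loop style
# timers without one thread per timer.
import sched
import time

s = sched.scheduler(time.time, time.sleep)

def fire(name):
    print('timer %r fired at %.1f' % (name, time.time()))

keep = s.enter(1, 1, fire, ('keep',))   # fires after 1 second
drop = s.enter(2, 1, fire, ('drop',))   # scheduled, but cancelled below
s.cancel(drop)                          # analogous to Timer.cancel()
s.run()                                 # the "event loop": blocks until the queue is empty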

I'm not sure it is the best option, but for me it works like this:
Create each timer with t = timer_mgr(.....), append it to a list with timers.append(t), and once they are all created you can call:
for tm in timers:  # threading.enumerate():
    print "********", tm.cancel()
My timer_mgr() class is this:
# requires: from datetime import datetime, plus a schedule_fixed_times() helper defined elsewhere
class timer_mgr():
    def __init__(self, st, t, hFunction, id, name):
        self.is_list = (type(st) is list)
        self.st = st
        self.t = t
        self.id = id
        self.hFunction = hFunction
        self.thread = threading.Timer(t, self.handle_function, [id])
        self.thread.name = name

    def handle_function(self, id):
        if self.is_list:
            print "run_at_time:", datetime.now()
            self.hFunction(id)
            dt = schedule_fixed_times(datetime.now(), self.st)
            print "next:", dt
            self.t = (dt - datetime.now()).total_seconds()
        else:
            self.t = self.st
            print "run_every", self.t, datetime.now()
            self.hFunction(id)
        self.thread = threading.Timer(self.t, self.handle_function, [id])
        self.thread.start()

    def start(self):
        self.thread.start()

    def cancel(self):
        self.thread.cancel()

Inspired by the above post: a cancellable, resettable timer in Python, implemented with a thread.
Features: start, stop, restart, and a callback function.
Input: a timeout, a sleep_chunk value, and a callback function.
You can use or inherit this class in any other program, and you can also pass arguments to the callback function.
The timer should also respond mid-countdown, not just after the full sleep time has elapsed, so instead of one long sleep it sleeps in small chunks and keeps checking the event object in a loop.
import threading
import time

class TimerThread(threading.Thread):
    def __init__(self, timeout=3, sleep_chunk=0.25, callback=None, *args):
        threading.Thread.__init__(self)

        self.timeout = timeout
        self.sleep_chunk = sleep_chunk
        if callback == None:
            self.callback = None
        else:
            self.callback = callback
        self.callback_args = args

        self.terminate_event = threading.Event()
        self.start_event = threading.Event()
        self.reset_event = threading.Event()
        self.count = self.timeout/self.sleep_chunk

    def run(self):
        while not self.terminate_event.is_set():
            while self.count > 0 and self.start_event.is_set():
                # print self.count
                # time.sleep(self.sleep_chunk)
                # if self.reset_event.is_set():
                if self.reset_event.wait(self.sleep_chunk):  # wait for a small chunk of timeout
                    self.reset_event.clear()
                    self.count = self.timeout/self.sleep_chunk  # reset
                self.count -= 1
            if self.count <= 0:
                self.start_event.clear()
                # print 'timeout. calling function...'
                self.callback(*self.callback_args)
                self.count = self.timeout/self.sleep_chunk  # reset

    def start_timer(self):
        self.start_event.set()

    def stop_timer(self):
        self.start_event.clear()
        self.count = self.timeout / self.sleep_chunk  # reset

    def restart_timer(self):
        # reset only if timer is running. otherwise start timer afresh
        if self.start_event.is_set():
            self.reset_event.set()
        else:
            self.start_event.set()

    def terminate(self):
        self.terminate_event.set()
#=================================================================
def my_callback_function():
    print 'timeout, do this...'

timeout = 6  # sec
sleep_chunk = .25  # sec

tmr = TimerThread(timeout, sleep_chunk, my_callback_function)
tmr.start()

quit = '0'
while True:
    quit = raw_input("Proceed or quit: ")
    if quit == 'q':
        tmr.terminate()
        tmr.join()
        break

    tmr.start_timer()
    if raw_input("Stop ? : ") == 's':
        tmr.stop_timer()
    if raw_input("Restart ? : ") == 'r':
        tmr.restart_timer()

Related

how to terminate a thread from within another thread [duplicate]

How can I start and stop a thread with my poor thread class?
It runs in a loop, and I want to restart it again from the beginning of the code. How can I do start-stop-restart-stop-restart?
My class:
import threading
import time

class Concur(threading.Thread):
    def __init__(self):
        self.stopped = False
        threading.Thread.__init__(self)

    def run(self):
        i = 0
        while not self.stopped:
            time.sleep(1)
            i = i + 1
In the main code, I want:
inst = Concur()
while condition:
    inst.start()
    # After some operation
    inst.stop()
    # Some other operation
You can't actually stop and then restart a thread since you can't call its start() method again after its run() method has terminated. However you can make one pause and then later resume its execution by using a threading.Condition variable to avoid concurrency problems when checking or changing its running state.
threading.Condition objects have an associated threading.Lock object and methods to wait until they are notified, plus a way to notify any threads that are waiting. Here's an example derived from the code in your question which shows this being done. In the example code I've made the Condition variable a part of the Thread subclass instances to better encapsulate the implementation and avoid needing to introduce additional global variables:
from __future__ import print_function
import threading
import time

class Concur(threading.Thread):
    def __init__(self):
        super(Concur, self).__init__()
        self.iterations = 0
        self.daemon = True  # Allow main to exit even if still running.
        self.paused = True  # Start out paused.
        self.state = threading.Condition()

    def run(self):
        self.resume()
        while True:
            with self.state:
                if self.paused:
                    self.state.wait()  # Block execution until notified.
            # Do stuff...
            time.sleep(.1)
            self.iterations += 1

    def pause(self):
        with self.state:
            self.paused = True  # Block self.

    def resume(self):
        with self.state:
            self.paused = False
            self.state.notify()  # Unblock self if waiting.


class Stopwatch(object):
    """ Simple class to measure elapsed times. """
    def start(self):
        """ Establish reference point for elapsed time measurements. """
        self.start_time = time.time()
        return self

    @property
    def elapsed_time(self):
        """ Seconds since started. """
        try:
            return time.time() - self.start_time
        except AttributeError:  # Wasn't explicitly started.
            self.start_time = time.time()
            return 0


MAX_RUN_TIME = 5  # Seconds.
concur = Concur()
stopwatch = Stopwatch()

print('Running for {} seconds...'.format(MAX_RUN_TIME))
concur.start()
while stopwatch.elapsed_time < MAX_RUN_TIME:
    concur.resume()
    # Can also do other concurrent operations here...
    concur.pause()
    # Do some other stuff...

# Show Concur thread executed.
print('concur.iterations: {}'.format(concur.iterations))
This is David Heffernan's idea fleshed-out. The example below runs for 1 second, then stops for 1 second, then runs for 1 second, and so on.
import time
import threading
import datetime as DT
import logging
logger = logging.getLogger(__name__)

def worker(cond):
    i = 0
    while True:
        with cond:
            cond.wait()
            logger.info(i)
            time.sleep(0.01)
            i += 1

logging.basicConfig(level=logging.DEBUG,
                    format='[%(asctime)s %(threadName)s] %(message)s',
                    datefmt='%H:%M:%S')

cond = threading.Condition()
t = threading.Thread(target=worker, args=(cond, ))
t.daemon = True
t.start()

start = DT.datetime.now()
while True:
    now = DT.datetime.now()
    if (now-start).total_seconds() > 60: break
    if now.second % 2:
        with cond:
            cond.notify()
The implementation of stop() would look like this:
def stop(self):
    self.stopped = True
If you want to restart, then you can just create a new instance and start that.
while condition:
    inst = Concur()
    inst.start()
    # After some operation
    inst.stop()
    # Some other operation
The documentation for Thread makes it clear that the start() method can only be called once for each instance of the class.
If you want to pause and resume a thread, then you'll need to use a condition variable.

python multiprocessing/threading cleanup

I have a python tool, that has basically this kind of setup:
main process (P1)   -> spawns a process (P2) that starts a tcp connection
                    -> spawns a thread (T1) that starts a loop to receive
                       messages that are sent from P2 to P1 via a Queue (Q1)

server process (P2) -> spawns two threads (T2 and T3) that start loops to
                       receive messages that are sent from P1 to P2 via Queues (Q2 and Q3)
The problem I'm having is that when I stop my program (with Ctrl+C), it doesn't quit. The server process is ended, but the main process just hangs there and I have to kill it.
The thread loop functions all look the same:
def _loop(self):
    while self.running:
        res = self.Q1.get()
        if res is None:
            break
        self._handle_msg(res)
All threads are started as daemon:
t = Thread(target=self._loop)
t.setDaemon(True)
t.start()
In my main process, I use atexit, to perform clean-up tasks:
atexit.register(self.on_exit)
Those clean-up tasks are essentially the following:
1) set self.running in P1 to False and send None to Q1, so that the thread T1 will finish
self.running = False
self.Q1.put(None)
2) send a message to P2 via Q2 to inform this process that it is ending
self.Q2.put("stop")
3) In P2, react to the "stop" message and do what we did in P1
self.running = False
self.Q2.put(None)
self.Q3.put(None)
That is it, and in my understanding it should make everything shut down nicely, but it doesn't.
The main code of P1 also contains the following endless loop, because otherwise the program would end prematurely:
while running:
    sleep(1)
Maybe that has something to do with the problem, but I cannot see why it should.
So what did I do wrong? Does my setup have major design flaws? Did I forget to shut down something?
EDIT
OK, I modified my code and managed to make it shut down correctly most of the time. Unfortunately, it still gets stuck now and then.
I managed to write a small working example of my code. To demonstrate what happens, you simply need to start the script and then use Ctrl + C to stop it. The issue usually appears if you press Ctrl + C as soon as possible after starting the tool.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import signal
import sys
import logging
from multiprocessing import Process, Queue
from threading import Thread
from time import sleep
logger = logging.getLogger("mepy-client")
class SocketClientProtocol(object):
def __init__(self, q_in, q_out, q_binary):
self.q_in = q_in
self.q_out = q_out
self.q_binary = q_binary
self.running = True
t = Thread(target=self._loop)
#t.setDaemon(True)
t.start()
t = Thread(target=self._loop_binary)
#t.setDaemon(True)
t.start()
def _loop(self):
print "start of loop 2"
while self.running:
res = self.q_in.get()
if res is None:
break
self._handle_msg(res)
print "end of loop 2"
def _loop_binary(self):
print "start of loop 3"
while self.running:
res = self.q_binary.get()
if res is None:
break
self._handle_binary(res)
print "end of loop 3"
def _handle_msg(self, msg):
msg_type = msg[0]
if msg_type == "stop2":
print "STOP RECEIVED"
self.running = False
self.q_in.put(None)
self.q_binary.put(None)
def _put_msg(self, msg):
self.q_out.put(msg)
def _handle_binary(self, data):
pass
def handle_element(self):
self._put_msg(["something"])
def run_twisted(q_in, q_out, q_binary):
s = SocketClientProtocol(q_in, q_out, q_binary)
while s.running:
sleep(2)
s.handle_element()
class MediatorSender(object):
def __init__(self):
self.q_in = None
self.q_out = None
self.q_binary = None
self.p = None
self.running = False
def start(self):
if self.running:
return
self.running = True
self.q_in = Queue()
self.q_out = Queue()
self.q_binary = Queue()
print "!!!!START"
self.p = Process(target=run_twisted, args=(self.q_in, self.q_out, self.q_binary))
self.p.start()
t = Thread(target=self._loop)
#t.setDaemon(True)
t.start()
def stop(self):
print "!!!!STOP"
if not self.running:
return
print "STOP2"
self.running = False
self.q_out.put(None)
self.q_in.put(["stop2"])
#self.q_in.put(None)
#self.q_binary.put(None)
try:
if self.p and self.p.is_alive():
self.p.terminate()
except:
pass
def _loop(self):
print "start of loop 1"
while self.running:
res = self.q_out.get()
if res is None:
break
self._handle_msg(res)
print "end of loop 1"
def _handle_msg(self, msg):
self._put_msg(msg)
def _put_msg(self, msg):
self.q_in.put(msg)
def _put_binary(self, msg):
self.q_binary.put(msg)
def send_chunk(self, chunk):
self._put_binary(chunk)
running = True
def signal_handler(signal, frame):
global running
if running:
running = False
ms.stop()
else:
sys.exit(0)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
ms = MediatorSender()
ms.start()
for i in range(100):
ms.send_chunk("some chunk of data")
while running:
sleep(1)
I think you're corrupting your multiprocessing.Queue by calling p.terminate() on on the child process. The docs have a warning about this:
Warning: If this method is used when the associated process is using a
pipe or queue then the pipe or queue is liable to become corrupted and
may become unusable by other process. Similarly, if the process has
acquired a lock or semaphore etc. then terminating it is liable to
cause other processes to deadlock.
In some cases, it looks like p is terminating before your MediatorSender._loop method can consume the sentinel you loaded into it to let it know that it should exit.
Also, you're installing a signal handler that expects to work in the main process only, but the SIGINT is actually received by both the parent and the child processes, which means signal_handler gets called in both processes. That can result in ms.stop getting called twice, due to a race condition in the way you handle setting ms.running to False.
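A minimal sketch, separate from your code, showing why: on POSIX, Ctrl+C delivers SIGINT to the whole foreground process group, so the parent and the multiprocessing child each get their own KeyboardInterrupt:

import time
from multiprocessing import Process

def child():
    try:
        time.sleep(60)
    except KeyboardInterrupt:
        print("child caught KeyboardInterrupt")

if __name__ == "__main__":
    p = Process(target=child)
    p.start()
    try:
        p.join()                     # Ctrl+C interrupts this join in the parent...
    except KeyboardInterrupt:
        print("parent caught KeyboardInterrupt")
    p.join()                         # ...while the child handles its own interrupt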
I would recommend just exploiting the fact that both processes receive the SIGINT, and having both the parent and the child handle KeyboardInterrupt directly. That way, each can shut itself down cleanly, rather than having the parent terminate the child. The following code demonstrates that, and in my testing it never hung. I've simplified your code in a few places, but functionally it's exactly the same:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from multiprocessing import Process, Queue
from threading import Thread
from time import sleep
logger = logging.getLogger("mepy-client")
class SocketClientProtocol(object):
def __init__(self, q_in, q_out, q_binary):
self.q_in = q_in
self.q_out = q_out
self.q_binary = q_binary
t = Thread(target=self._loop)
t.start()
t = Thread(target=self._loop_binary)
t.start()
def _loop(self):
print("start of loop 2")
for res in iter(self.q_in.get, None):
self._handle_msg(res)
print("end of loop 2")
def _loop_binary(self):
print("start of loop 3")
for res in iter(self.q_binary.get, None):
self._handle_binary(res)
print("end of loop 3")
def _handle_msg(self, msg):
msg_type = msg[0]
if msg_type == "stop2":
self.q_in.put(None)
self.q_binary.put(None)
def _put_msg(self, msg):
self.q_out.put(msg)
def stop(self):
print("STOP RECEIVED")
self.q_in.put(None)
self.q_binary.put(None)
def _handle_binary(self, data):
pass
def handle_element(self):
self._put_msg(["something"])
def run_twisted(q_in, q_out, q_binary):
s = SocketClientProtocol(q_in, q_out, q_binary)
try:
while True:
sleep(2)
s.handle_element()
except KeyboardInterrupt:
s.stop()
class MediatorSender(object):
def __init__(self):
self.q_in = None
self.q_out = None
self.q_binary = None
self.p = None
self.running = False
def start(self):
if self.running:
return
self.running = True
self.q_in = Queue()
self.q_out = Queue()
self.q_binary = Queue()
print("!!!!START")
self.p = Process(target=run_twisted,
args=(self.q_in, self.q_out, self.q_binary))
self.p.start()
self.loop = Thread(target=self._loop)
self.loop.start()
def stop(self):
print("!!!!STOP")
if not self.running:
return
print("STOP2")
self.running = False
self.q_out.put(None)
def _loop(self):
print("start of loop 1")
for res in iter(self.q_out.get, None):
self._handle_msg(res)
print("end of loop 1")
def _handle_msg(self, msg):
self._put_msg(msg)
def _put_msg(self, msg):
self.q_in.put(msg)
def _put_binary(self, msg):
self.q_binary.put(msg)
def send_chunk(self, chunk):
self._put_binary(chunk)
if __name__ == "__main__":
ms = MediatorSender()
try:
ms.start()
for i in range(100):
ms.send_chunk("some chunk of data")
# You actually have to join w/ a timeout in a loop on
# Python 2.7. If you just call join(), SIGINT won't be
# received by the main process, and the program will
# hang. This is a bug, and is fixed in Python 3.x.
while True:
ms.loop.join()
except KeyboardInterrupt:
ms.stop()
Edit:
If you prefer to use a signal handler rather than catching KeyboardInterrupt, you just need to make sure the child process uses its own signal handler, rather than inheriting the parent's:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import signal
import logging
from functools import partial
from multiprocessing import Process, Queue
from threading import Thread
from time import sleep
logger = logging.getLogger("mepy-client")
class SocketClientProtocol(object):
def __init__(self, q_in, q_out, q_binary):
self.q_in = q_in
self.q_out = q_out
self.q_binary = q_binary
self.running = True
t = Thread(target=self._loop)
t.start()
t = Thread(target=self._loop_binary)
t.start()
def _loop(self):
print("start of loop 2")
for res in iter(self.q_in.get, None):
self._handle_msg(res)
print("end of loop 2")
def _loop_binary(self):
print("start of loop 3")
for res in iter(self.q_binary.get, None):
self._handle_binary(res)
print("end of loop 3")
def _handle_msg(self, msg):
msg_type = msg[0]
if msg_type == "stop2":
self.q_in.put(None)
self.q_binary.put(None)
def _put_msg(self, msg):
self.q_out.put(msg)
def stop(self):
print("STOP RECEIVED")
self.running = False
self.q_in.put(None)
self.q_binary.put(None)
def _handle_binary(self, data):
pass
def handle_element(self):
self._put_msg(["something"])
def run_twisted(q_in, q_out, q_binary):
s = SocketClientProtocol(q_in, q_out, q_binary)
signal.signal(signal.SIGINT, partial(signal_handler_child, s))
while s.running:
sleep(2)
s.handle_element()
class MediatorSender(object):
def __init__(self):
self.q_in = None
self.q_out = None
self.q_binary = None
self.p = None
self.running = False
def start(self):
if self.running:
return
self.running = True
self.q_in = Queue()
self.q_out = Queue()
self.q_binary = Queue()
print("!!!!START")
self.p = Process(target=run_twisted,
args=(self.q_in, self.q_out, self.q_binary))
self.p.start()
self.loop = Thread(target=self._loop)
self.loop.start()
def stop(self):
print("!!!!STOP")
if not self.running:
return
print("STOP2")
self.running = False
self.q_out.put(None)
def _loop(self):
print("start of loop 1")
for res in iter(self.q_out.get, None):
self._handle_msg(res)
print("end of loop 1")
def _handle_msg(self, msg):
self._put_msg(msg)
def _put_msg(self, msg):
self.q_in.put(msg)
def _put_binary(self, msg):
self.q_binary.put(msg)
def send_chunk(self, chunk):
self._put_binary(chunk)
def signal_handler_main(ms, *args):
ms.stop()
def signal_handler_child(s, *args):
s.stop()
if __name__ == "__main__":
ms = MediatorSender()
signal.signal(signal.SIGINT, partial(signal_handler_main, ms))
ms.start()
for i in range(100):
ms.send_chunk("some chunk of data")
while ms.loop.is_alive():
ms.loop.join(9999999)
print('done main')
Maybe you should try to capture the SIGINT signal, which is generated by Ctrl + C, using signal.signal like this:
#!/usr/bin/env python
import signal
import sys

def signal_handler(signal, frame):
    print('You pressed Ctrl+C!')
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C')
signal.pause()
Code stolen from here
This usually works for me if I am using the threading module. It will not work if you use the multiprocessing one though. If you are running the script from the terminal try running it in the background, like this.
python scriptFoo.py &
After you run the process it will output the PID like this
[1] 23107
Whenever you need to quit the script you just type kill and the script PID like this.
kill 23107
Hit enter again and it should kill all the subprocesses and output this.
[1]+ Terminated python scriptFoo.py
As far as I know you cannot kill all the subprocesses with 'Ctrl+C'

Why doesn’t this thread terminate?

I am trying to use the Python threading module. As I am a sysadmin, I struggle a little when developing, and this concept is kind of new to me. I launch two threads and I want to stop them when the main thread sets a flag to False:
import threading
import time
import sys

class My_Thread(threading.Thread):
    def __init__(self, thread_id, thread_name, count):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.thread_name = thread_name
        self.count = count

    def run(self):
        do_job(self.thread_name, self.thread_id, self.count)

def do_job(t_name, t_id, count):
    while not get_kill():
        print "It is "+str(time.time())+" and I am "+str(t_name)
        print get_kill()
        time.sleep(count)

kill = False

def get_kill():
    return kill

def set_kill(state):
    kill = state

if __name__ == '__main__':
    a = My_Thread(1, "Thread-1", 2)
    b = My_Thread(2, "Thread-2", 1)
    a.start()
    b.start()
    while(True):
        try:
            pass
        except KeyboardInterrupt, ki:
            set_kill(True)
            sys.exit(0)
But neither thread ever sees the value change, so they don't exit. Why is this value not properly read by the threads?
The problem
In set_kill(), you are creating a new local variable kill, setting it to state, and returning from the function. You are not actually updating the value of kill in the global scope.
To do that, you would need to have:
def set_kill(state):
    global kill
    kill = state
A better way
Using globals like that is generally considered bad practice, so you probably want to convert your kill variable and functions into an object, to encapsulate that data and behaviour together:
class Kill(object):
    kill = False

    def get(self):
        return self.kill

    def set(self, value):
        self.kill = value
Which you would use like this:
class MyThread(Thread):
    def __init__(self, thread_id, thread_name, count, kill):
        self.kill = kill
        ...

    def do_job(self, ...):
        while not self.kill.get():
            ...

if __name__ == '__main__':
    kill = Kill()
    a = My_Thread(1, "Thread-1", 2, kill)
    b = My_Thread(2, "Thread-2", 1, kill)
    ...
    kill.set(True)
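For what it's worth, the standard library's threading.Event already provides this kind of shared, thread-safe stop flag, with a built-in interruptible wait. A minimal sketch of the same structure (names adapted from the question's code):

import threading
import time

stop_event = threading.Event()      # shared, thread-safe "kill" flag

def do_job(t_name, count):
    while not stop_event.is_set():
        print("It is %s and I am %s" % (time.time(), t_name))
        stop_event.wait(count)      # sleeps, but wakes up early if the event is set

a = threading.Thread(target=do_job, args=("Thread-1", 2))
b = threading.Thread(target=do_job, args=("Thread-2", 1))
a.start()
b.start()
time.sleep(5)
stop_event.set()                    # both threads exit promptly
a.join()
b.join()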

Most efficient way to perform a deferred action in Python

Consider a system where I have events coming in at unpredictable points in time. I want to be able to perform a "deferred" action that executes a fixed amount of time, X units, after the last event has come in. An event is considered "last" if it is the only event to have occurred in the last X units of time. What is the most efficient way to do this in Python?
One solution I have considered is using a threading.Event:
# This solution has the drawback that the deferred event may actually occur
# up to 2*X units of time after the last event.
# Also, it kinda sucks that the thread is basically polling once the first
# event comes in.
from threading import Thread
from threading import Event
import time
import sys
evt = Event()
die = False
X = 1
def thread_func_event():
while True:
evt.wait()
if die:
break
while True:
evt.clear()
time.sleep(X)
if not evt.is_set():
# No more events came in. Good.
break
# Looks like more events came in. Let's try again.
if die:
return
print('Deferred action performed.')
sys.stdout.flush()
def event_occurred():
evt.set()
t = Thread(target=thread_func_event)
t.start()
for _ in range(0, 1000000):
event_occurred()
print('First batch of events done.')
sys.stdout.flush()
time.sleep(3)
for _ in range(0, 1000000):
event_occurred()
print('Second batch of events done.')
sys.stdout.flush()
time.sleep(3)
die = True
evt.set()
t.join()
I've done something like this before.
import threading
import time
class waiter(object):
def __init__(self, action, delay = 0.5, *args, **kwargs):
self.action_lockout_timeout = threading.Thread()
self.action_lockout_event = threading.Event()
self.action = action
self.delay = delay
self.action_prevent()
def action_prevent(self):
def action_enable():
self.action_lockout_event.wait(self.delay)
if not self.action_lockout_event._Event__flag:
self.action()
if self.action_lockout_timeout.isAlive():
self.action_lockout_event.set()
self.action_lockout_timeout.join()
self.action_lockout_event.clear()
self.action_lockout_timeout = threading.Thread(target = action_enable)
self.action_lockout_timeout.setDaemon(True)
self.action_lockout_timeout.start()
def thanks():
print("Person 2: Thank you ...")
polite = waiter(thanks, 3)
print("Person 1: After you")
polite.action_prevent()
time.sleep(2)
print("Person 2: No, after you")
polite.action_prevent()
time.sleep(2)
print("Person 1: No I insist")
polite.action_prevent()
time.sleep(2)
print("Person 2: But it would be rude")
polite.action_prevent()
time.sleep(2)
print("---Akward Silence---")
time.sleep(2)
If you want to run a function with arguments, just wrap it with a lambda expression.
def thanks(person):
    print("%s: Thank you ..." % person)

polite = waiter(lambda: thanks("Person 2"), 3)
EDIT:
Turns out that threading.Event is pretty slow. Here's a solution that replaces the Event with time.sleep and a bool. It also uses __slots__ to speed up attribute accesses
import sys
import threading
import time
class waiter(object):
__slots__ = \
[
"action",
"delay",
"undelayed",
"delay_timeout",
]
def __init__(self, action, delay = 0.5, *args, **kwargs):
self.action = action
self.delay = delay
self.undelayed = False
self.delay_timeout = threading.Thread(target = self.action_enable)
self.delay_timeout.start()
def action_prevent(self):
self.undelayed = False
def action_enable(self):
while True:
time.sleep(self.delay)
if self.undelayed:
self.action()
break
else:
self.undelayed = True
def thanks():
print("Person 2: Thank you ...")
polite = waiter(thanks, 1)
for _ in range(0, 1000000):
polite.action_prevent()
print("First batch of events done.")
time.sleep(2)
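For comparison, the same deferred-action ("debounce") behaviour is often built by cancelling and recreating a threading.Timer on every event. A minimal sketch (the Debouncer name is hypothetical, and it has not been benchmarked against the versions above):

import threading

class Debouncer(object):
    """Run `action` once, `delay` seconds after the most recent event."""
    def __init__(self, action, delay):
        self.action = action
        self.delay = delay
        self.timer = None
        self.lock = threading.Lock()

    def event_occurred(self):
        with self.lock:
            if self.timer is not None:
                self.timer.cancel()              # forget the previously scheduled run
            self.timer = threading.Timer(self.delay, self.action)
            self.timer.start()                   # fires only if no newer event arrives

d = Debouncer(lambda: print("deferred action performed"), 1.0)
d.event_occurred()
d.event_occurred()   # only this one results in the action, one second later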

Python Equivalent of setInterval()?

Does Python have a function similar to JavaScript's setInterval()?
I would like to have:
def set_interval(func, interval):
...
That will call func every interval time units.
This might be the correct snippet you were looking for:
import threading

def set_interval(func, sec):
    def func_wrapper():
        set_interval(func, sec)
        func()
    t = threading.Timer(sec, func_wrapper)
    t.start()
    return t
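One caveat with the snippet above: the Timer it returns is only the first one. A small usage sketch (with a hypothetical tick function) showing the limitation:

def tick():
    print("tick")

t = set_interval(tick, 1)

# This stops the repetition only while the first timer is still pending;
# after the first tick, the live Timer is a new object created inside
# func_wrapper, and this handle no longer controls it.
t.cancel()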
This is a version where you can start and stop.
It is not blocking.
There is also no drift, because the execution time of the action is not accumulated into the interval (important for long runs with a very short interval, such as audio, for example).
import time, threading

StartTime = time.time()

def action():
    print('action ! -> time : {:.1f}s'.format(time.time()-StartTime))

class setInterval:
    def __init__(self, interval, action):
        self.interval = interval
        self.action = action
        self.stopEvent = threading.Event()
        thread = threading.Thread(target=self.__setInterval)
        thread.start()

    def __setInterval(self):
        nextTime = time.time() + self.interval
        while not self.stopEvent.wait(nextTime - time.time()):
            nextTime += self.interval
            self.action()

    def cancel(self):
        self.stopEvent.set()

# start action every 0.6s
inter = setInterval(0.6, action)
print('just after setInterval -> time : {:.1f}s'.format(time.time()-StartTime))

# will stop interval in 5s
t = threading.Timer(5, inter.cancel)
t.start()
Output is :
just after setInterval -> time : 0.0s
action ! -> time : 0.6s
action ! -> time : 1.2s
action ! -> time : 1.8s
action ! -> time : 2.4s
action ! -> time : 3.0s
action ! -> time : 3.6s
action ! -> time : 4.2s
action ! -> time : 4.8s
Just keep it nice and simple.
import threading

def setInterval(func, time):
    e = threading.Event()
    while not e.wait(time):
        func()

def foo():
    print "hello"

# using
setInterval(foo, 5)

# output:
hello
hello
.
.
.
EDIT : This code is non-blocking
import threading

class ThreadJob(threading.Thread):
    def __init__(self, callback, event, interval):
        '''runs the callback function after interval seconds

        :param callback: callback function to invoke
        :param event: external event for controlling the update operation
        :param interval: time in seconds after which are required to fire the callback
        :type callback: function
        :type interval: int
        '''
        self.callback = callback
        self.event = event
        self.interval = interval
        super(ThreadJob, self).__init__()

    def run(self):
        while not self.event.wait(self.interval):
            self.callback()


event = threading.Event()

def foo():
    print "hello"

k = ThreadJob(foo, event, 2)
k.start()

print "It is non-blocking"
Change Nailxx's answer a bit and you got the answer!
from threading import Timer

def hello():
    print "hello, world"
    Timer(30.0, hello).start()

Timer(30.0, hello).start()  # after 30 seconds, "hello, world" will be printed
The sched module provides these abilities for general Python code. However, as its documentation suggests, if your code is multithreaded it might make more sense to use the threading.Timer class instead.
I think this is what you're after:
# timertest.py
import sched, time

def dostuff():
    print "stuff is being done!"
    s.enter(3, 1, dostuff, ())

s = sched.scheduler(time.time, time.sleep)
s.enter(3, 1, dostuff, ())
s.run()
If you add another entry to the scheduler at the end of the repeating method, it'll just keep going.
I use sched to create a setInterval function (gist):
import functools
import sched, time

s = sched.scheduler(time.time, time.sleep)

def setInterval(sec):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*argv, **kw):
            setInterval(sec)(func)
            func(*argv, **kw)
        s.enter(sec, 1, wrapper, ())
        return wrapper
    s.run()
    return decorator

@setInterval(sec=3)
def testInterval():
    print("test Interval ")

testInterval()
Simple setInterval utils
from threading import Timer

def setInterval(timer, task):
    isStop = task()
    if not isStop:
        Timer(timer, setInterval, [timer, task]).start()

def hello():
    print "do something"
    return False  # return True if you want to stop

if __name__ == "__main__":
    setInterval(2.0, hello)  # every 2 seconds, "do something" will be printed
The above method didn't quite do it for me as I needed to be able to cancel the interval. I turned the function into a class and came up with the following:
import threading

class setInterval():
    def __init__(self, func, sec):
        def func_wrapper():
            self.t = threading.Timer(sec, func_wrapper)
            self.t.start()
            func()
        self.t = threading.Timer(sec, func_wrapper)
        self.t.start()

    def cancel(self):
        self.t.cancel()
Most of the answers above do not shut down the thread properly. While using a Jupyter notebook I noticed that when an explicit interrupt was sent, the threads were still running and, worse, they kept multiplying: starting at 1 running thread, then 2, 4, etc. My method below is based on the answer by @doom above, but it cleanly handles interrupts by running an infinite loop in the main thread to listen for SIGINT and SIGTERM events.
No drift
Cancelable
Handles SIGINT and SIGTERM very well
Doesnt make a new thread for every run
Feel free to suggest improvements
import time
import threading
import signal
# Record the time for the purposes of demonstration
start_time=time.time()
class ProgramKilled(Exception):
"""
An instance of this custom exception class will be thrown everytime we get an SIGTERM or SIGINT
"""
pass
# Raise the custom exception whenever SIGINT or SIGTERM is triggered
def signal_handler(signum, frame):
raise ProgramKilled
# This function serves as the callback triggered on every run of our IntervalThread
def action() :
print('action ! -> time : {:.1f}s'.format(time.time()-start_time))
# https://stackoverflow.com/questions/2697039/python-equivalent-of-setinterval
class IntervalThread(threading.Thread) :
def __init__(self,interval,action, *args, **kwargs) :
super(IntervalThread, self).__init__()
self.interval=interval
self.action=action
self.stopEvent=threading.Event()
self.start()
def run(self) :
nextTime=time.time()+self.interval
while not self.stopEvent.wait(nextTime-time.time()) :
nextTime+=self.interval
self.action()
def cancel(self) :
self.stopEvent.set()
def main():
# Handle SIGINT and SIFTERM with the help of the callback function
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# start action every 1s
inter=IntervalThread(1,action)
print('just after setInterval -> time : {:.1f}s'.format(time.time()-start_time))
# will stop interval in 500s
t=threading.Timer(500,inter.cancel)
t.start()
# https://www.g-loaded.eu/2016/11/24/how-to-terminate-running-python-threads-using-signals/
while True:
try:
time.sleep(1)
except ProgramKilled:
print("Program killed: running cleanup code")
inter.cancel()
break
if __name__ == "__main__":
main()
In the above solutions, if the program is shut down there is no guarantee that it will shut down gracefully; it is always recommended to stop a program via a soft kill. Most of them also had no function to stop. I found a nice article on Medium written by Sankalp which solves both of these issues (run periodic tasks in python); refer to the attached link for a deeper insight.
In the sample below, the signal module is used to catch soft kills (SIGTERM and SIGINT) so that cleanup code can run before exit.
import threading, time, signal
from datetime import timedelta
WAIT_TIME_SECONDS = 1
class ProgramKilled(Exception):
pass
def foo():
print time.ctime()
def signal_handler(signum, frame):
raise ProgramKilled
class Job(threading.Thread):
def __init__(self, interval, execute, *args, **kwargs):
threading.Thread.__init__(self)
self.daemon = False
self.stopped = threading.Event()
self.interval = interval
self.execute = execute
self.args = args
self.kwargs = kwargs
def stop(self):
self.stopped.set()
self.join()
def run(self):
while not self.stopped.wait(self.interval.total_seconds()):
self.execute(*self.args, **self.kwargs)
if __name__ == "__main__":
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
job = Job(interval=timedelta(seconds=WAIT_TIME_SECONDS), execute=foo)
job.start()
while True:
try:
time.sleep(1)
except ProgramKilled:
print "Program killed: running cleanup code"
job.stop()
break
#output
#Tue Oct 16 17:47:51 2018
#Tue Oct 16 17:47:52 2018
#Tue Oct 16 17:47:53 2018
#^CProgram killed: running cleanup code
setInterval should run on a separate thread and not freeze the task while its loop is running.
Here is my runtime package that supports this multithreaded behaviour:
setTimeout(F, ms): fire a function after a delay, in an independent thread.
delayF(F, ms): similar to setTimeout(F, ms).
setInterval(F, ms): an asynchronous loop.
.pause, .resume: pause and resume the interval.
clearInterval(interval): clear the interval.
It's short and simple. Note that Python needs a lambda if you pass the function inline, but a lambda cannot contain a block of statements, so you should define the function body before passing it to setInterval.
### DEMO PYTHON MULTITHREAD ASYNCHRONOUS LOOP ###
import time;
import threading;
import random;
def delay(ms):time.sleep(ms/1000); # Controil while speed
def setTimeout(R,delayMS):
t=threading.Timer(delayMS/1000,R)
t.start();
return t;
def delayF(R,delayMS):
t=threading.Timer(delayMS/1000,R)
t.start();
return t;
class THREAD:
def __init__(this):
this.R_onRun=None;
this.thread=None;
def run(this):
this.thread=threading.Thread(target=this.R_onRun);
this.thread.start();
def isRun(this): return this.thread.isAlive();
class setInterval :
def __init__(this,R_onRun,msInterval) :
this.ms=msInterval;
this.R_onRun=R_onRun;
this.kStop=False;
this.thread=THREAD();
this.thread.R_onRun=this.Clock;
this.thread.run();
def Clock(this) :
while not this.kStop :
this.R_onRun();
delay(this.ms);
def pause(this) :
this.kStop=True;
def stop(this) :
this.kStop=True;
def resume(this) :
if (this.kStop) :
this.kStop=False;
this.thread.run();
def clearInterval(Timer): Timer.stop();
# EXAMPLE
def p():print(random.random());
tm=setInterval(p,20);
tm2=setInterval(lambda:print("AAAAA"),20);
delayF(tm.pause,1000);
delayF(tm.resume,2000);
delayF(lambda:clearInterval(tm),3000);
Save it to a .py file and run it. You will see it print both random numbers and the string "AAAAA". The number-printing thread will pause after 1 second, resume printing for another second, and then stop, while the string-printing thread keeps printing without corruption.
If you use OpenCV for graphic animation with these setInterval calls to boost the animation speed, you must keep one main thread for waitKey; otherwise the window will freeze no matter how long the delay is or where you call waitKey in a sub-thread:
def p(): ...                  # Your drawing task
setInterval(p, 1)             # Subthread1 running draw
setInterval(p, 1)             # Subthread2 running draw
setInterval(p, 1)             # Subthread3 running draw
while True: cv2.waitKey(10)   # Main thread where waitKey has effect
You can also try out this method:
import time

while True:
    time.sleep(5)
    print("5 seconds has passed")
So it will print "5 seconds has passed" every 5 seconds.
The function sleep() suspends execution for the given number of seconds. The argument may be a floating point number to indicate a more precise sleep time.
Recently I had the same issue as you, and I found these solutions:
1. You can use threading.Timer (introduced above).
2. You can use the sched library (also introduced above).
3. You can use the Advanced Python Scheduler library (recommended; a minimal sketch follows below).
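If you go with option 3, here is a minimal sketch of periodic execution, assuming APScheduler 3.x and its BackgroundScheduler/add_job API (pip install apscheduler):

from time import sleep
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print("tick")

scheduler = BackgroundScheduler()
scheduler.add_job(tick, "interval", seconds=3)  # run tick() every 3 seconds
scheduler.start()
try:
    sleep(10)                                   # main thread keeps doing other work
finally:
    scheduler.shutdown()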
Some answers above that use func_wrapper and threading.Timer do indeed work, except that they spawn a new thread every time an interval fires, which causes memory problems.
The basic example below roughly implements a similar mechanism by putting the interval on a separate thread. It sleeps at the given interval. Before jumping into the code, here are some limitations you need to be aware of:
JavaScript is single threaded, so when the function inside setInterval fires, nothing else runs at the same time (excluding worker threads, but let's stick to the general use case of setInterval); threading there is safe. In this implementation, however, you may encounter race conditions unless you use a threading.RLock.
The implementation below uses time.sleep to simulate intervals, but once you add the execution time of func, the total time for one interval may be greater than you expect. So depending on the use case, you may want to "sleep less" (subtracting the time taken to call func).
I only roughly tested this, and you should definitely not use global variables the way I did; feel free to tweak it so that it fits your system.
Enough talking, here is the code:
# Python 2.7
import threading
import time
class Interval(object):
def __init__(self):
self.daemon_alive = True
self.thread = None # keep a reference to the thread so that we can "join"
def ticktock(self, interval, func):
while self.daemon_alive:
time.sleep(interval)
func()
num = 0
def print_num():
global num
num += 1
print 'num + 1 = ', num
def print_negative_num():
global num
print '-num = ', num * -1
intervals = {} # keep track of intervals
g_id_counter = 0 # roughly generate ids for intervals
def set_interval(interval, func):
global g_id_counter
interval_obj = Interval()
# Put this interval on a new thread
t = threading.Thread(target=interval_obj.ticktock, args=(interval, func))
t.setDaemon(True)
interval_obj.thread = t
t.start()
# Register this interval so that we can clear it later
# using roughly generated id
interval_id = g_id_counter
g_id_counter += 1
intervals[interval_id] = interval_obj
# return interval id like it does in JavaScript
return interval_id
def clear_interval(interval_id):
# terminate this interval's while loop
intervals[interval_id].daemon_alive = False
# kill the thread
intervals[interval_id].thread.join()
# pop out the interval from registry for reusing
intervals.pop(interval_id)
if __name__ == '__main__':
num_interval = set_interval(1, print_num)
neg_interval = set_interval(3, print_negative_num)
time.sleep(10) # Sleep 10 seconds on main thread to let interval run
clear_interval(num_interval)
clear_interval(neg_interval)
print "- Are intervals all cleared?"
time.sleep(3) # check if both intervals are stopped (not printing)
print "- Yup, time to get beers"
Expected output:
num + 1 = 1
num + 1 = 2
-num = -2
num + 1 = 3
num + 1 = 4
num + 1 = 5
-num = -5
num + 1 = 6
num + 1 = 7
num + 1 = 8
-num = -8
num + 1 = 9
num + 1 = 10
-num = -10
Are intervals all cleared?
Yup, time to get beers
My Python 3 module jsinterval.py will be helpful! Here it is:
"""
Threaded intervals and timeouts from JavaScript
"""
import threading, sys
__all__ = ['TIMEOUTS', 'INTERVALS', 'setInterval', 'clearInterval', 'setTimeout', 'clearTimeout']
TIMEOUTS = {}
INTERVALS = {}
last_timeout_id = 0
last_interval_id = 0
class Timeout:
"""Class for all timeouts."""
def __init__(self, func, timeout):
global last_timeout_id
last_timeout_id += 1
self.timeout_id = last_timeout_id
TIMEOUTS[str(self.timeout_id)] = self
self.func = func
self.timeout = timeout
self.threadname = 'Timeout #%s' %self.timeout_id
def run(self):
func = self.func
delx = self.__del__
def func_wrapper():
func()
delx()
self.t = threading.Timer(self.timeout/1000, func_wrapper)
self.t.name = self.threadname
self.t.start()
def __repr__(self):
return '<JS Timeout set for %s seconds, launching function %s on timeout reached>' %(self.timeout, repr(self.func))
def __del__(self):
self.t.cancel()
class Interval:
"""Class for all intervals."""
def __init__(self, func, interval):
global last_interval_id
self.interval_id = last_interval_id
INTERVALS[str(self.interval_id)] = self
last_interval_id += 1
self.func = func
self.interval = interval
self.threadname = 'Interval #%s' %self.interval_id
def run(self):
func = self.func
interval = self.interval
def func_wrapper():
timeout = Timeout(func_wrapper, interval)
self.timeout = timeout
timeout.run()
func()
self.t = threading.Timer(self.interval/1000, func_wrapper)
self.t.name = self.threadname
self.t.run()
def __repr__(self):
return '<JS Interval, repeating function %s with interval %s>' %(repr(self.func), self.interval)
def __del__(self):
self.timeout.__del__()
def setInterval(func, interval):
"""
Create a JS Interval: func is the function to repeat, interval is the interval (in ms)
of executing the function.
"""
temp = Interval(func, interval)
temp.run()
idx = int(temp.interval_id)
del temp
return idx
def clearInterval(interval_id):
try:
INTERVALS[str(interval_id)].__del__()
del INTERVALS[str(interval_id)]
except KeyError:
sys.stderr.write('No such interval "Interval #%s"\n' %interval_id)
def setTimeout(func, timeout):
"""
Create a JS Timeout: func is the function to timeout, timeout is the timeout (in ms)
of executing the function.
"""
temp = Timeout(func, timeout)
temp.run()
idx = int(temp.timeout_id)
del temp
return idx
def clearTimeout(timeout_id):
try:
TIMEOUTS[str(timeout_id)].__del__()
del TIMEOUTS[str(timeout_id)]
except KeyError:
sys.stderr.write('No such timeout "Timeout #%s"\n' %timeout_id)
CODE EDIT:
Fixed the memory leak (spotted by @benjaminz). Now ALL threads are cleaned up on exit. Why does this leak happen? It happens because of the implicit (or even explicit) references. In my case, TIMEOUTS and INTERVALS. Timeouts self-clean automatically (after this patch) because they use a function wrapper which calls the function and then self-kills. But how does this happen? Objects can't be deleted from memory unless all references to them are deleted too or the gc module is used. Explaining: there is no way (in my code) to create unwanted references to timeouts/intervals. They have only ONE referrer: the TIMEOUTS/INTERVALS dicts. And, when interrupted or finished (only timeouts can finish uninterrupted), they delete the only existing reference to themselves: their corresponding dict element. Classes are perfectly encapsulated using __all__, so there is no space for memory leaks.
Here is a low time drift solution that uses a thread to periodically signal an Event object. The thread's run() does almost nothing while waiting for a timeout; hence the low time drift.
# Example of low drift (time) periodic execution of a function.
import threading
import time
# Thread that sets 'flag' after 'timeout'
class timerThread (threading.Thread):
def __init__(self , timeout , flag):
threading.Thread.__init__(self)
self.timeout = timeout
self.stopFlag = False
self.event = threading.Event()
self.flag = flag
# Low drift run(); there is only the 'if'
# and 'set' methods between waits.
def run(self):
while not self.event.wait(self.timeout):
if self.stopFlag:
break
self.flag.set()
def stop(self):
stopFlag = True
self.event.set()
# Data.
printCnt = 0
# Flag to print.
printFlag = threading.Event()
# Create and start the timer thread.
printThread = timerThread(3 , printFlag)
printThread.start()
# Loop to wait for flag and print time.
while True:
global printCnt
# Wait for flag.
printFlag.wait()
# Flag must be manually cleared.
printFlag.clear()
print(time.time())
printCnt += 1
if printCnt == 3:
break;
# Stop the thread and exit.
printThread.stop()
printThread.join()
print('Done')
Fall asleep until the next interval of `seconds` length starts (not concurrent):
import time

def sleep_until_next_interval(seconds):
    now = time.time()
    fall_asleep = seconds - now % seconds
    time.sleep(fall_asleep)

while True:
    sleep_until_next_interval(10)  # 10 seconds - worktime
    # work here
simple and no drift.
I have written my code to make a very very flexible setInterval in python. Here you are:
import threading
class AlreadyRunning(Exception):
pass
class IntervalNotValid(Exception):
pass
class setInterval():
def __init__(this, func=None, sec=None, args=[]):
this.running = False
this.func = func # the function to be run
this.sec = sec # interval in second
this.Return = None # The returned data
this.args = args
this.runOnce = None # asociated with run_once() method
this.runOnceArgs = None # asociated with run_once() method
if (func is not None and sec is not None):
this.running = True
if (not callable(func)):
raise TypeError("non-callable object is given")
if (not isinstance(sec, int) and not isinstance(sec, float)):
raise TypeError("A non-numeric object is given")
this.TIMER = threading.Timer(this.sec, this.loop)
this.TIMER.start()
def start(this):
if (not this.running):
if (not this.isValid()):
raise IntervalNotValid("The function and/or the " +
"interval hasn't provided or invalid.")
this.running = True
this.TIMER = threading.Timer(this.sec, this.loop)
this.TIMER.start()
else:
raise AlreadyRunning("Tried to run an already run interval")
def stop(this):
this.running = False
def isValid(this):
if (not callable(this.func)):
return False
cond1 = not isinstance(this.sec, int)
cond2 = not isinstance(this.sec, float)
if (cond1 and cond2):
return False
return True
def loop(this):
if (this.running):
this.TIMER = threading.Timer(this.sec, this.loop)
this.TIMER.start()
function_, Args_ = this.func, this.args
if (this.runOnce is not None): # someone has provide the run_once
runOnce, this.runOnce = this.runOnce, None
result = runOnce(*(this.runOnceArgs))
this.runOnceArgs = None
# if and only if the result is False. not accept "None"
# nor zero.
if (result is False):
return # cancel the interval right now
this.Return = function_(*Args_)
def change_interval(this, sec):
cond1 = not isinstance(sec, int)
cond2 = not isinstance(sec, float)
if (cond1 and cond2):
raise TypeError("A non-numeric object is given")
# prevent error when providing interval to a blueprint
if (this.running):
this.TIMER.cancel()
this.sec = sec
# prevent error when providing interval to a blueprint
# if the function hasn't provided yet
if (this.running):
this.TIMER = threading.Timer(this.sec, this.loop)
this.TIMER.start()
def change_next_interval(this, sec):
if (not isinstance(sec, int) and not isinstance(sec, float)):
raise TypeError("A non-numeric object is given")
this.sec = sec
def change_func(this, func, args=[]):
if (not callable(func)):
raise TypeError("non-callable object is given")
this.func = func
this.args = args
def run_once(this, func, args=[]):
this.runOnce = func
this.runOnceArgs = args
def get_return(this):
return this.Return
You get many features and a lot of flexibility. Running this code won't freeze your program; you can change the interval at run time, change the function at run time, pass arguments, get the object returned by your function, and more. You can build your own tricks on top of it too!
here's a very simple and basic example to use it:
import time

def interval(name="world"):
    print(f"Hello {name}!")

# function named interval will be called every two seconds
# output: "Hello world!"
interval1 = setInterval(interval, 2)

# function named interval will be called every 1.5 seconds
# output: "Hello Jane!"
interval2 = setInterval(interval, 1.5, ["Jane"])

time.sleep(5)  # stop all intervals after 5 seconds
interval1.stop()
interval2.stop()
Check out my Github project to see more examples and follow next updates :D
https://github.com/Hzzkygcs/setInterval-python
Here's something easy peazy:
import time

delay = 10  # Seconds

def setInterval():
    print('I print in intervals!')
    time.sleep(delay)
    setInterval()  # note: plain recursion like this will eventually hit Python's recursion limit

setInterval()
Things work differently in Python: you need to either sleep() (if you want to block the current thread) or start a new thread. See http://docs.python.org/library/threading.html
From Python Documentation:
from threading import Timer

def hello():
    print "hello, world"

t = Timer(30.0, hello)
t.start()  # after 30 seconds, "hello, world" will be printed
