Start Python APScheduler at the next full second

I've tested APScheduler with the provided example:
from datetime import datetime
import time
import os

from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
The output:
Tick! The time is: 2020-09-14 00:56:23.225999
Tick! The time is: 2020-09-14 00:56:28.226864
...
What is the best way to start the scheduler at the next full second/the next full 10s/next full minute/... so the output is more readable?
Example output, when it starts at the next round second:
Tick! The time is: 2020-09-14 00:56:23.000000
Tick! The time is: 2020-09-14 00:56:28.000000
...

Referring to the documentation, you could set a start date:
https://apscheduler.readthedocs.io/en/stable/modules/triggers/interval.html
scheduler.add_job(tick, 'interval', seconds=3,
                  start_date='2021-06-23 11:36:00', end_date='2021-06-23 11:37:00')
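If you do not want to hard-code a date, here is a small sketch (the next_round helper is made up for illustration) that computes the next round boundary at runtime and passes it as start_date:

from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler


def next_round(seconds):
    # Next wall-clock instant that is a whole multiple of `seconds` past
    # the minute: next_round(1) -> next full second, next_round(10) -> :00/:10/:20, ...
    now = datetime.now().replace(microsecond=0)
    return now + timedelta(seconds=seconds - now.second % seconds)


def tick():
    print('Tick! The time is: %s' % datetime.now())


scheduler = BackgroundScheduler()
# The first run lands on a round second; the fixed interval keeps later runs aligned.
scheduler.add_job(tick, 'interval', seconds=3, start_date=next_round(1))
scheduler.start()

Alternatively, the cron trigger fires on round seconds by construction, e.g. scheduler.add_job(tick, 'cron', second='*/3').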

Related

callback does not work in pool.map_async()

In the following simple program, the callback passed to pool.map_async() does not seem to work properly. Could someone point out what is wrong?
import os
import multiprocessing
import time


def cube(x):
    return "{}^3={}".format(x, x**3)


def prt(value):
    print(value)


if __name__ == "__main__":
    pool = multiprocessing.Pool(3)
    start_time = time.perf_counter()
    result = pool.map_async(cube, range(1, 1000), callback=prt)
    finish_time = time.perf_counter()
    print(f"Program finished in {finish_time-start_time} seconds")
$ python3 /var/tmp/cube_map_async_callback.py
Program finished in 0.0001492840237915516 seconds
$
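For what it's worth, a likely explanation: map_async() returns immediately, and the script reaches the end before the workers finish, so the callback never gets a chance to run. A sketch of the same program that waits on the AsyncResult before exiting:

import multiprocessing
import time


def cube(x):
    return "{}^3={}".format(x, x**3)


def prt(value):
    print(value)


if __name__ == "__main__":
    pool = multiprocessing.Pool(3)
    start_time = time.perf_counter()
    result = pool.map_async(cube, range(1, 1000), callback=prt)
    result.wait()   # block until all tasks finish; the callback then fires once, with the full result list
    pool.close()
    pool.join()
    finish_time = time.perf_counter()
    print(f"Program finished in {finish_time-start_time} seconds")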

Most efficient way to limit function execution in Python

I have a function that periodically does some stuff. The period is set to one minute, hence I need to somehow limit the execution time. I have tried to use multiprocessing; however, it drastically increased the execution time (from <1 sec to 2-10 seconds).
Is there a better approach to configuring the maximum execution time (of example_function in the code snippet below)?
I have tried using signal, however it did not work well together with scheduling.
I am using Python 3.9 in this project (with a planned update to 3.10).
Example code:
from apscheduler.schedulers.blocking import BlockingScheduler
from multiprocessing import Process


def example_function():
    ...
    # here is some processing
    ...


def scheduled_function():
    limit = 4
    p = Process(target=example_function, name='Example Function')
    p.start()
    p.join(timeout=limit)
    p.terminate()


scheduler = BlockingScheduler()
scheduler.add_job(scheduled_function, 'cron', minute='*/1')
scheduler.start()
Thanks
EDIT
I found an example using threading instead of multiprocessing. It seems that the execution time is better; it just needs a different approach to handle the timeout/success result.
from apscheduler.schedulers.blocking import BlockingScheduler
from threading import Thread

finished = False


def example_function():
    global finished
    ...
    # here is some processing
    ...
    finished = True


def scheduled_function():
    global finished
    limit = 4
    p = Thread(target=example_function)
    p.start()
    p.join(timeout=limit)
    if finished:
        return "Finished OK"
    else:
        return "Timeout"


scheduler = BlockingScheduler()
scheduler.add_job(scheduled_function, 'cron', minute='*/1')
scheduler.start()
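A possible variant (not from the original post) that avoids the shared finished flag: concurrent.futures reports success or timeout directly through the future. Note that, as with the Thread version, a timed-out worker keeps running in the background, because Python threads cannot be killed:

from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import TimeoutError as FutureTimeout

from apscheduler.schedulers.blocking import BlockingScheduler

executor = ThreadPoolExecutor(max_workers=1)


def example_function():
    ...  # here is some processing


def scheduled_function():
    future = executor.submit(example_function)
    try:
        future.result(timeout=4)  # wait at most 4 seconds
        return "Finished OK"
    except FutureTimeout:
        return "Timeout"


scheduler = BlockingScheduler()
scheduler.add_job(scheduled_function, 'cron', minute='*/1')
scheduler.start()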
I have tried using signal, however it did not work well together with scheduling.
I generally use signal for timeouts, but since it does not work with BlockingScheduler (ValueError: signal only works in main thread), here is a workaround that triggers scheduled_function every x seconds:
def example_function(i, limit=4):
    with timeout(limit):
        try:
            ...
        except TimeoutError:
            ...


def scheduled_function(cron):
    with Timer() as t:
        with timeout(cron - 1):
            with Pool(1) as p:
                p.map(partial(example_function, limit=4), range(1))
    # sleep for remaining time
    time.sleep(cron - t.secs)


while True:
    scheduled_function(cron=10)  # every 10s
Here is the complete code:
import random
import time
import signal
from functools import partial
from contextlib import contextmanager
from multiprocessing import Pool
from datetime import datetime


class Timer:
    def __enter__(self):
        self.start_time = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        end_time = time.perf_counter()
        self.secs = end_time - self.start_time  # seconds


@contextmanager
def timeout(t):
    """https://www.jujens.eu/posts/en/2018/Jun/02/python-timeout-function/"""
    signal.signal(signal.SIGALRM, raise_timeout)
    signal.alarm(t)
    try:
        yield
    except TimeoutError:
        pass
    finally:
        signal.signal(signal.SIGALRM, signal.SIG_IGN)


def raise_timeout(signum, frame):
    raise TimeoutError


def example_function(i, limit=4):
    with timeout(limit):
        try:
            a = random.uniform(1, 5)
            print(f"running task {i} for {a} seconds")
            time.sleep(a)
            print(f"[OK] task {i} finished")
        except TimeoutError:
            print(f"[NOT OK] task {i} did not finish: {a}>{limit}")
    print(f"timeout={limit}s has expired -> exiting task {i}")


def scheduled_function(cron):
    print("starting scheduled_function at", datetime.now().strftime("%Mmin%Ss"))
    with Timer() as t:
        with timeout(cron - 1):
            print("running small 4-secs jobs")
            with Pool(1) as p:
                p.map(partial(example_function, limit=4), range(1))
    # sleep for remaining time
    time.sleep(cron - t.secs)
    print("10s -> exiting")


def main():
    while True:
        scheduled_function(cron=10)  # every 10s


if __name__ == "__main__":
    main()
And output:
starting scheduled_function at '10s' <-- starting every 10s
running small 4-secs jobs
running task 0 for 3.0037614230572447 seconds
[OK] task 0 finished
timeout=4s has expired -> exiting task 0 <-- inner task timeout=4s
10s -> exiting <-- outer job timeout=10s
starting scheduled_function at '20s' <-- starting every 10s
running small 4-secs jobs
running task 0 for 4.198487250169565 seconds
[NOT OK] task 0 did not finish: 4.198487250169565>4 <-- caught timeout error
timeout=4s has expired -> exiting task 0
starting scheduled_function at '30s'
running small 4-secs jobs
running task 0 for 3.489094988621927 seconds
[OK] task 0 finished
timeout=4s has expired -> exiting task 0

How to make a function run for a specific duration periodically

I want to take a screenshot every second for 10 seconds.
I have tried using threading and schedule, but I have not been able to come up with a solution that satisfies my problem.
from datetime import datetime, timedelta

import cv2
from PIL import ImageGrab


def fun(original):
    # original and screenshot_comparison() are defined elsewhere
    end_time = datetime.now() + timedelta(seconds=10)
    while datetime.now() < end_time:
        current = ImageGrab.grab()
        current.save("current.png")
        current = cv2.imread("current.png")
        found = screenshot_comparison(original, current)
        if found:
            print("matched")
        else:
            print("didntMATCH")


fun(original)
I want to take screenshots every second for 10 secs and match it with an already grabbed screenshot.
I would suggest utilizing the Advanced Python Scheduler and, more specifically, its interval trigger, for example:
sched = BlockingScheduler()
sched.add_job(yourFunction, 'interval', seconds=10)
sched.start()
EDIT
Here's a more complete example:
from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()


def myFunction(testParam):
    print("Message: {}".format(testParam))


if __name__ == '__main__':
    sched.add_job(myFunction, 'interval', seconds=10, args=["Works!"])
    sched.start()
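To match the original goal (one run per second, stopping after ten seconds), here is a sketch using the interval trigger's end_date parameter; the screenshot work is stubbed out:

from datetime import datetime, timedelta

from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()


def take_screenshot():
    # grab and compare the screenshot here
    print("tick at", datetime.now())


if __name__ == '__main__':
    # Fire every second, but only until end_date, ten seconds from now.
    sched.add_job(take_screenshot, 'interval', seconds=1,
                  end_date=datetime.now() + timedelta(seconds=10))
    sched.start()  # keeps blocking after the last run; shut it down if needed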

Make python script execute some functions every hour

So let's say I have this code:
...
connect()
find_links()
find_numbers()
In fact, what it does is log in to an account and get some numbers and one link:
example:
1.23, 1.32 , 32.1, 2131.3 link.com/stats/
1.32, 1.41 , 3232.1, 21211.3 link.com/stats/
All I want to do is make these functions run every hour,
and then print the time so I can compare results. I tried:
sched = BlockingScheduler()


@sched.scheduled_job('interval', seconds=3600)
def do_that():
    connect()
    find_links()
    find_numbers()
    print(datetime.datetime.now())
but this just executes the functions once and then just prints the date.
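For reference, a plausible cause: the snippet never calls sched.start(), so the job is registered but the scheduler never begins firing. A minimal sketch, with the account functions stubbed out:

import datetime

from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()


@sched.scheduled_job('interval', seconds=3600)
def do_that():
    # connect(), find_links(), find_numbers() would go here
    print(datetime.datetime.now())


sched.start()  # blocks and runs do_that() every hour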
This calls the function once, then waits 3600 seconds (an hour), calls the function again, waits, and so on. It does not require anything outside of the standard library.
from time import sleep
from threading import Thread
from datetime import datetime


def func():
    connect()
    find_links()
    find_numbers()
    print(datetime.now())


if __name__ == '__main__':
    Thread(target=func).start()
    while True:
        sleep(3600)
        Thread(target=func).start()
Your code may take some time to run. If you want to execute your function precisely an hour from the previous start time, try this:
from datetime import datetime
import time


def do_that():
    connect()
    find_links()
    find_numbers()
    print(datetime.now())


if __name__ == '__main__':
    starttime = time.time()
    while True:
        do_that()
        time.sleep(3600.0 - ((time.time() - starttime) % 3600.0))

Python multiprocessing - check status of each process

I wonder if it is possible to check how long each process takes.
For example, there are four workers and the job should take no more than 10 seconds, but one of the workers takes more than 10 seconds. Is there a way to raise an alert after 10 seconds, before the process finishes the job?
My initial thought was to use a Manager, but it seems I have to wait until the process has finished.
Many thanks.
You can check whether a process is alive after you have tried to join it. Don't forget to set a timeout, otherwise join() will wait until the job is finished.
Here is a simple example:
from multiprocessing import Process
import time


def task():
    time.sleep(5)


if __name__ == '__main__':
    procs = []
    for x in range(2):
        proc = Process(target=task)
        procs.append(proc)
        proc.start()

    time.sleep(2)
    for proc in procs:
        proc.join(timeout=0)  # returns immediately
        if proc.is_alive():
            print("Job is not finished!")
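To map this onto the 10-second requirement from the question, here is a sketch (names are illustrative) that raises the alert as soon as the deadline passes, while the job keeps running:

from multiprocessing import Process
import time


def long_task():
    time.sleep(12)  # stand-in for a job that overruns


if __name__ == '__main__':
    deadline = 10  # seconds
    proc = Process(target=long_task)
    proc.start()

    proc.join(timeout=deadline)  # returns when the job ends or after 10 s
    if proc.is_alive():
        print(f"alert: job still running after {deadline} seconds")
        proc.join()  # keep waiting; the job was not killed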
I found this solution some time ago (somewhere here on Stack Overflow) and I am very happy with it.
Basically, it uses signal to raise an exception if a process takes more time than expected.
All you need to do is add this class to your code:
import signal


class Timeout:
    def __init__(self, seconds=1, error_message='TimeoutError'):
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, type, value, traceback):
        signal.alarm(0)
Here is a general example of how it works:
import time

with Timeout(seconds=3, error_message='JobX took too much time'):
    try:
        time.sleep(10)  # your job
    except TimeoutError as e:
        print(e)
In your case, I would add the with statement to the job that your worker needs to perform. Then you catch the exception and do what you think is best.
Alternatively, you can periodically check if a process is alive:
timeout = 3  # seconds
start = time.time()
while time.time() - start < timeout:
    if any(process.is_alive() for process in processes):
        time.sleep(1)
    else:
        print('All processes done')
        break  # skip the while-else "Timeout!" branch
else:
    print("Timeout!")
    # do something
Use Pipe and messages
from multiprocessing import Process, Pipe
import numpy as np

caller, worker = Pipe()
val1 = ['der', 'die', 'das']


def worker_function(info):
    print(info.recv())
    for i in range(10):
        print(val1[np.random.choice(3, 1)[0]])
    info.send(['job finished'])
    info.close()


def request(data):
    caller.send(data)
    task = Process(target=worker_function, args=(worker,))
    if not task.is_alive():
        print("task is requested")
        task.start()
    if caller.recv() == ['job finished']:
        task.join()
        print("finished")


if __name__ == '__main__':
    data = {'input': 'here'}
    request(data)
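One possible refinement of the Pipe approach (not in the original answer): Connection.poll(timeout) lets the caller wait for the 'job finished' message with a deadline instead of blocking on recv(), which fits the 10-second alert from the question. A sketch:

from multiprocessing import Process, Pipe
import time


def slow_worker(conn):
    time.sleep(12)  # stand-in for a job that overruns
    conn.send(['job finished'])
    conn.close()


if __name__ == '__main__':
    caller, worker = Pipe()
    task = Process(target=slow_worker, args=(worker,))
    task.start()

    if caller.poll(10):  # True if a message arrives within 10 seconds
        print(caller.recv())
        task.join()
    else:
        print("alert: job exceeded 10 seconds")
        task.terminate()
        task.join()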
