Make python script execute some functions every hour - python

So let's say I have this code:
...
connect()
find_links()
find_numbers()
What it actually does is log in to an account and get some numbers and one link. Example:
1.23, 1.32, 32.1, 2131.3 link.com/stats/
1.32, 1.41, 3232.1, 21211.3 link.com/stats/
All I want to do is make these functions run every hour and then print the time so I can compare the results. I tried:
sched = BlockingScheduler()

@sched.scheduled_job('interval', seconds=3600)
def do_that():
    connect()
    find_links()
    find_numbers()
    print(datetime.datetime.now())
but this just executes the functions once and then prints the date.
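The usual cause of this symptom is that the job is registered but the scheduler is never started. A minimal sketch of the corrected script, assuming APScheduler is installed and that connect(), find_links(), and find_numbers() are defined as in the question:

import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()

@sched.scheduled_job('interval', seconds=3600)
def do_that():
    connect()
    find_links()
    find_numbers()
    print(datetime.datetime.now())

sched.start()  # blocks here and fires do_that() every hour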

This will call the function once, then wait 3600 seconds (an hour), call it again, wait, etc. It does not require anything outside of the standard library.
from time import sleep
from threading import Thread
from datetime import datetime

def func():
    connect()
    find_links()
    find_numbers()
    print(datetime.now())

if __name__ == '__main__':
    Thread(target=func).start()
    while True:
        sleep(3600)
        Thread(target=func).start()

Your code may take some time to run. If you want to execute your function precisely an hour from the previous start time, try this:
from datetime import datetime
import time

def do_that():
    connect()
    find_links()
    find_numbers()
    print(datetime.now())

if __name__ == '__main__':
    starttime = time.time()
    while True:
        do_that()
        # sleep for whatever remains of the hour, so do_that's own runtime doesn't drift the schedule
        time.sleep(3600.0 - ((time.time() - starttime) % 3600.0))

Related

Better way to execute code at a specific time?

I need to execute code at exact time, for example 10:00:00.000.
import time
from datetime import datetime

while True:
    now = datetime.utcnow()
    if now.hour == 10 and now.minute == 0 and now.second == 0:
        pass  # execute code here
    time.sleep(1)
So far it seems to work, but if I launch the code, for example, an hour ahead of the target time, I feel like there is a lag in the execution.
Is this the best way to achieve what I want?
Using datetime and threading.Timer:
from datetime import datetime, time
from threading import Timer

def do_the_thing():
    print("execute code here")

Timer(
    (datetime.combine(
        datetime.today(), time(10, 0, 0)
    ) - datetime.now()).total_seconds(),
    do_the_thing
).start()
Since Timer runs in a background thread, your script can go on to do other things immediately, and do_the_thing will get called as soon as the timer is up.
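Note that this fires only once. A minimal sketch of a self-rescheduling variant, under the assumption that do_the_thing should run at 10:00 every day (the helper names here are mine, not from the answer):

from datetime import datetime, time, timedelta
from threading import Timer

def schedule_daily(target=time(10, 0, 0)):
    now = datetime.now()
    run_at = datetime.combine(now.date(), target)
    if run_at <= now:  # 10:00 already passed today, aim for tomorrow
        run_at += timedelta(days=1)
    Timer((run_at - now).total_seconds(), _run_and_reschedule, [target]).start()

def _run_and_reschedule(target):
    do_the_thing()          # the function defined in the answer above
    schedule_daily(target)  # queue the next day's run

schedule_daily()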
Simply sleep for the total amount of time wanted:
from datetime import datetime
import time

target_date = datetime(day=12, month=12, year=2021, hour=10)
time.sleep((target_date - datetime.now()).total_seconds())
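One caveat: if target_date is already in the past, total_seconds() is negative and time.sleep raises ValueError. A small guard, keeping the answer's example date:

from datetime import datetime
import time

target_date = datetime(day=12, month=12, year=2021, hour=10)
delay = (target_date - datetime.now()).total_seconds()
time.sleep(max(delay, 0))  # never pass a negative value to sleep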

Why using Asyncio is not reducing the overall execution time in Python and run functions concurrently?

I am trying to run a piece of code using asyncio to reduce the overall execution time. Below is my code, which takes around 6 seconds to execute fully.
Normal function calls (approach 1)
from time import time, sleep
import asyncio

def find_div(range_, divide_by):
    lis_ = []
    for i in range(range_):
        if i % divide_by == 0:
            lis_.append(i)
    print("found numbers for range {}, divided by {}".format(range_, divide_by))
    return lis_

if __name__ == "__main__":
    start = time()
    find_div(50800000, 341313)
    find_div(10005200, 32110)
    find_div(50000340, 31238)
    print(time() - start)
The output of the above code is just the total execution time, which is 6 secs.
Multithreaded approach (approach 2)
I used multithreading for this, but surprisingly the time increased:
from time import time, sleep
import asyncio
import threading

def find_div(range_, divide_by):
    lis_ = []
    for i in range(range_):
        if i % divide_by == 0:
            lis_.append(i)
    print("found numbers for range {}, divided by {}".format(range_, divide_by))
    return lis_

if __name__ == "__main__":
    start = time()
    t1 = threading.Thread(target=find_div, args=(50800000, 341313))
    t2 = threading.Thread(target=find_div, args=(10005200, 32110))
    t3 = threading.Thread(target=find_div, args=(50000340, 31238))
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    t2.join()
    t3.join()
    print(time() - start)
The output of the above code is 12 secs.
Multiprocessing approach (approach 3)
from time import time, sleep
import asyncio
from multiprocessing import Pool

def multi_run_wrapper(args):
    return find_div(*args)

def find_div(range_, divide_by):
    lis_ = []
    for i in range(range_):
        if i % divide_by == 0:
            lis_.append(i)
    print("found numbers for range {}, divided by {}".format(range_, divide_by))
    return lis_

if __name__ == "__main__":
    start = time()
    with Pool(3) as p:
        p.map(multi_run_wrapper, [(50800000, 341313), (10005200, 32110), (50000340, 31238)])
    print(time() - start)
The output of the multiprocessing code is 3 secs, which is better than the normal function call approach.
Asyncio approach (approach 4)
from time import time, sleep
import asyncio

async def find_div(range_, divide_by):
    lis_ = []
    for i in range(range_):
        if i % divide_by == 0:
            lis_.append(i)
    print("found numbers for range {}, divided by {}".format(range_, divide_by))
    return lis_

async def task():
    tasks = [find_div(50800000, 341313), find_div(10005200, 32110), find_div(50000340, 31238)]
    result = await asyncio.gather(*tasks)
    print(result)

if __name__ == "__main__":
    start = time()
    asyncio.run(task())
    print(time() - start)
The above code also takes around 6 seconds, the same as the normal function calls in approach 1.
Problem:
Why is my asyncio approach not working as expected and reducing the overall time? What is wrong with the code?
You have code that exclusively uses the CPU.
Code like this cannot be sped up using async.
Async shines when you have tasks that are waiting on something not CPU related, e.g. a network request or reading from disk. This is generally true for all languages.
In Python, the thread-based approach will not help you either, as it still restricts you to a single core rather than true parallel execution. This is due to the Global Interpreter Lock (GIL). The overhead of starting and switching between threads makes it slower than the simple version.
In this regard, threads are similar to async in Python: they only help if the time you are waiting is not spent mainly on the CPU, or if you are calling code that is not bound by the GIL, e.g. C extensions.
Using multiprocessing really does use multiple CPU cores, so it is faster than the normal solution.
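If you want to keep the asyncio style and still use multiple cores, one common pattern is to hand the CPU-bound calls to a process pool from inside the event loop. A sketch, assuming the plain (non-async) find_div from approach 1 is defined at module top level so it can be pickled:

import asyncio
from concurrent.futures import ProcessPoolExecutor

async def main():
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as pool:
        results = await asyncio.gather(
            loop.run_in_executor(pool, find_div, 50800000, 341313),
            loop.run_in_executor(pool, find_div, 10005200, 32110),
            loop.run_in_executor(pool, find_div, 50000340, 31238),
        )
    print([len(r) for r in results])

if __name__ == "__main__":
    asyncio.run(main())  # should land near the multiprocessing timing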
import asyncio

async def run(time):
    await asyncio.sleep(time)
This code takes 1 min 40 seconds
from datetime import datetime

async def main():
    now = datetime.now()
    for i in range(10):
        await run(10)  # each call waits for the previous one to finish
    print(datetime.now() - now)

asyncio.run(main())
Optimized using async, this takes only 10 seconds:
from datetime import datetime

async def main():
    now = datetime.now()
    tasks = []
    for i in range(10):
        tasks.append(asyncio.create_task(run(10)))
    await asyncio.gather(*tasks)  # all ten sleeps run concurrently
    print(datetime.now() - now)

asyncio.run(main())

How to make a function run for specific duration periodically

I want to take a screenshot every second for 10 seconds.
I have tried using threading and schedule, but I've not been able to come up with a solution that satisfies my problem.
from datetime import datetime, timedelta
from PIL import ImageGrab  # imports assumed; the post omitted them
import cv2

def fun(original):
    end_time = datetime.now() + timedelta(seconds=10)
    while datetime.now() < end_time:
        current = ImageGrab.grab()
        current.save("current.png")
        current = cv2.imread("current.png")
        found = screenshot_comparison(original, current)
        if found:
            print("matched")
        else:
            print("didntMATCH")

fun(original)  # original is the reference screenshot grabbed earlier
I want to take screenshots every second for 10 seconds and match each one against an already grabbed screenshot.
I would suggest using the Advanced Python Scheduler (APScheduler), and more specifically its interval scheduler, for example:
sched = BlockingScheduler()
sched.add_job(yourFunction, 'interval', seconds=10)
sched.start()
EDIT
Here's a more complete example:
from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()

def myFunction(testParam):
    print("Message: {}".format(testParam))

if __name__ == '__main__':
    sched.add_job(myFunction, 'interval', seconds=10, args=["Works!"])
    sched.start()
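To map that onto the original ask (a screenshot every second for 10 seconds), the interval trigger also accepts an end_date. A sketch under that assumption, with a placeholder in place of the ImageGrab logic:

from datetime import datetime, timedelta
from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()

def grab_and_compare():
    print("screenshot at", datetime.now())  # placeholder for the grab-and-compare logic

if __name__ == '__main__':
    sched.add_job(grab_and_compare, 'interval', seconds=1,
                  end_date=datetime.now() + timedelta(seconds=10))
    sched.start()  # Ctrl+C to exit once the job has expired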

How to rerun a program (while executing multiple tasks) every hour in Python?

I have a main function where all the tasks are located. The first task I want to run continuously. My second task I want to run on a specific date (for example, every Monday at 12 AM). The last task must rerun the main function, but with other parameters (I want to run this task every hour).
I'm using Python on Ubuntu 18.
I've tried to use the module 'schedule' and searched for an answer here on Stack Overflow, Google, ... but I didn't find anything useful.
import schedule, time

def main(par1, par2, par3):
    def task1():
        # Do something
        print("Executing task1")

    def task2():
        # Do something different
        print("Executing task2")

    def rerunTask():
        print("Rerunning main task")
        main(1, 2, 3)  # Rerun main function with other parameters

    schedule.every().monday.at("12:00").do(task2)
    schedule.every(0.5).seconds.do(task1)
    schedule.every().hour.do(rerunTask)

main(2, 3, 1)
# the schedule library only fires jobs from a run loop like this
# (presumably omitted from the post):
while True:
    schedule.run_pending()
    time.sleep(0.1)
When I tried this code, everything worked fine until the rerun task: after it executes that task, it continuously reruns the function for the rest of the time.
Can someone please help me?
You could use the time library together with the threading library; based on epoch values, the functions will be executed.
Warning: because of the use of threads, you might have to kill the terminal to exit.
import time, threading

def main(par1, par2, par3):
    def task1():
        # Do something
        print("Executing task1")

    def task2():
        # Do something different
        print("Executing task2")

    def run_task1():
        while True:
            task1()
            time.sleep(0.5)

    def run_task2():
        while True:
            task2()
            time.sleep(3600)

    def run_task3():
        week_diff = 604800           # seconds in a week
        curr_time = time.time()
        first_monday_epoch = 345600  # epoch of the first Monday 00:00
        total_monday_crossed = int((curr_time - first_monday_epoch) / week_diff)
        next_monday = (total_monday_crossed + 1) * week_diff + first_monday_epoch
        time.sleep(next_monday - time.time())
        while True:
            task2()
            time.sleep(604800)  # week time difference

    t1 = threading.Thread(target=run_task1, args=())
    t2 = threading.Thread(target=run_task2, args=())
    t3 = threading.Thread(target=run_task3, args=())
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    t2.join()
    t3.join()

main(2, 3, 1)
Note: I used an epoch converter to calculate the epoch of the first Monday 00:00 and the other epoch values.
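An aside on the original bug: every call to main() registers a fresh set of jobs, so each hourly rerun multiplies the schedule. A sketch that keeps the schedule library but registers the jobs once, outside main (the run loop at the bottom is the usual schedule idiom):

import schedule, time

def main(par1, par2, par3):
    print("Running main with", par1, par2, par3)

def task1():
    print("Executing task1")

def task2():
    print("Executing task2")

schedule.every().monday.at("12:00").do(task2)
schedule.every(0.5).seconds.do(task1)
schedule.every().hour.do(main, 1, 2, 3)  # rerun main with other parameters

main(2, 3, 1)
while True:
    schedule.run_pending()
    time.sleep(0.1)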

executing specific statement at a given rate in python

I want to write code which executes a statement a specified number of times per second; many of you might be familiar with the term rate.
Here I want the rate to be 30 per second. Say I want to execute a function 30 times per second for 60 seconds, meaning rate = 30/sec and duration = 60 sec.
Can anyone tell me if there is an API available in Python to do this?
The sched module is intended for exactly this:
import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)

def schedule_it(interval, duration, func, *args):
    no_of_events = int(duration / interval)
    priority = 1  # not used here; lets you order events scheduled for the same time
    for i in range(no_of_events):
        delay = i * interval
        scheduler.enter(delay, priority, func, args)

def printer(x):
    print(x)

# execute printer 30 times a second for 60 seconds
schedule_it(1/30, 60, printer, 'hello')
scheduler.run()
For a threaded environment, the use of sched.scheduler can be replaced by threading.Timer:
import threading

def schedule_it(interval, duration, func, *args, **kwargs):
    no_of_events = int(duration / interval)
    for i in range(no_of_events):
        delay = i * interval
        threading.Timer(delay, func, args=args, kwargs=kwargs).start()

def printer(x):
    print(x)

# execute printer every 5 seconds over a 10-second window
schedule_it(5, 10, printer, 'hello')
Try using threading.Timer:
from threading import Timer

def hello():
    print("hello, world")

t = Timer(30.0, hello)
t.start()  # after 30 seconds, "hello, world" will be printed
You can use time.time() to do what you want:
import time

def your_function():
    pass  # do something...

while True:
    start = time.time()  # current time in seconds since Jan 1, 1970 (on Unix)
    your_function()
    while True:
        current_time = time.time()
        if current_time - start >= 1.0 / 30.0:
            break
This will make sure that the delay between calls of your_function is very close to 1/30 of a second, even if your_function takes some time to run.
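The inner loop above busy-waits and burns a full core while it spins. A gentler sketch of the same pacing, sleeping away whatever is left of each 1/30-second slot (same your_function as above):

import time

PERIOD = 1.0 / 30.0

while True:
    start = time.time()
    your_function()
    remaining = PERIOD - (time.time() - start)
    if remaining > 0:
        time.sleep(remaining)  # skip the sleep if your_function overran the slot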
There is another way: using Python's built-in scheduling module, sched. I never used it, so I can't help you there, but have a look at it.
After spending some time on it, I discovered how to do it; I used multiprocessing in Python to achieve it. Here's my solution:
#!/usr/bin/env python
from multiprocessing import Process
import time
import datetime

def sleeper(name, seconds):
    time.sleep(seconds)
    print("PNAME:- %s" % name)

if __name__ == '__main__':
    pros = {}
    processes = []
    i = 0
    time2 = 0
    time1 = datetime.datetime.now()
    for sec in range(5):
        flag = 0
        # busy-wait until a full second has elapsed
        while flag != 1:
            time2 = datetime.datetime.now()
            if (time2 - time1).seconds == 1:
                time1 = time2
                flag = 1
        print("Executing per second")
        for no in range(5):
            i += 1
            pros[i] = Process(target=sleeper, args=("Thread-%d" % i, 1))
        j = i - 5
        for no in range(5):
            j += 1
            pros[j].start()
        j = i - 5
        for no in range(5):
            j += 1
            processes.append(pros[j])
    for p in processes:
        p.join()
