Repeating a function at timed intervals with multiple threads - Python

I want to repeat a function at timed intervals. The issue I have is that the function runs another function in a separate process, and that doesn't seem to work with my code.
In the example below, I want to repeat function1 every 60 seconds:
from multiprocessing import Process
from threading import Event

def function2(type):
    print("Function2")

def function1():
    print("Function1")
    if __name__ == '__main__':
        p = Process(target=function2, args=('type',))
        p.daemon = True
        p.start()
        p.join()

function1()
To repeat the function I attempted to use the following code:
from threading import Thread, Event

class TimedThread(Thread):
    def __init__(self, event, wait_time, tasks):
        Thread.__init__(self)
        self.stopped = event
        self.wait_time = wait_time
        self.tasks = tasks

    def run(self):
        # wait() returns False on timeout (run the task again)
        # and True once the event is set (stop the loop)
        while not self.stopped.wait(self.wait_time):
            self.tasks()

stopFlag = Event()
thread = TimedThread(stopFlag, 60, function1)
thread.start()
Both snippets combined print "Function1" in a timed loop, but they also produce the following error:
AttributeError: Can't get attribute 'function2' on <module '__main__' (built-in)>
Any help would be greatly appreciated.

You can wrap your function1, like:
import time

def main():
    while True:
        time.sleep(60)
        function1()
or you can have it run in a separate thread:
import threading
import time

def main():
    while True:
        time.sleep(60)
        t = threading.Thread(target=function1)
        t.start()
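If you also need to stop that loop cleanly, a variant (just a sketch; stop_event is assumed to be a threading.Event created and set by the caller) is to wait on an event instead of sleeping:

import threading

def main(stop_event):
    # wait(60) returns False on timeout (keep looping) and True
    # once stop_event is set, so the loop exits promptly on request
    while not stop_event.wait(60):
        t = threading.Thread(target=function1)
        t.start()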

It actually works for me, printing Function1 and Function2 over and over. Are these two snippets in the same file?
If you import function1 from a different module, then the if __name__ == '__main__' check will fail.
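If that is the case, a sketch of the usual restructure: keep the process-spawning code in function1 unconditionally, and put the __main__ guard only around the top-level startup code, so the guard no longer interferes when function1 is called from a thread or another module:

from multiprocessing import Process

def function2(type):
    print("Function2")

def function1():
    print("Function1")
    p = Process(target=function2, args=('type',))
    p.daemon = True
    p.start()
    p.join()

if __name__ == '__main__':
    # only the startup code is guarded; spawned children that
    # re-import this module won't re-run it
    function1()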

I managed to find an alternative, working solution. Instead of using processes, I achieved the desired result using threads. The differences between the two are well explained here.
from threading import Event, Thread

class TimedThread(Thread):
    def __init__(self, event, wait_time):
        Thread.__init__(self)
        self.stopped = event
        self.wait_time = wait_time

    def run(self):
        while not self.stopped.wait(self.wait_time):
            self.function1()

    def function2(self):
        print("Function2 started from thread")
        # Do something

    def function1(self):
        print("Function1 started from thread")
        # Do something
        temp_thread = Thread(target=self.function2)
        temp_thread.start()

stopFlag = Event()
thread = TimedThread(stopFlag, 60)
thread.start()
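A nice property of waiting on the event instead of sleeping: the loop can be stopped promptly from outside at any time:

stopFlag.set()  # wait() then returns True and run() exits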

Related

Integrating multiprocessing.Process with concurrent.futures._base.Future

I have a requirement to create child processes, receive results using a Future, and then kill some of them when required.
For this I have subclassed multiprocessing.Process and return a Future object from the start() method.
The problem is that I never receive the result in the cb() function; it never gets called.
Please suggest whether this can be done some other way, or point out what I am missing in my current implementation.
Following is my current approach:
from multiprocessing import Process, Queue
from concurrent.futures import _base
import threading
from time import sleep

def foo(x, q):
    print('result {}'.format(x*x))
    result = x*x
    sleep(5)
    q.put(result)

class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = _base.Future()

    def run(self):
        q = Queue()
        worker_thread = threading.Thread(target=self.target, args=self.args + (q,))
        worker_thread.start()
        r = q.get(block=True)
        print('setting result {}'.format(r))
        self.f.set_result(result=r)
        print('done setting result')

    def start(self):
        f = _base.Future()
        run_thread = threading.Thread(target=self.run)
        run_thread.start()
        return f

def cb(future):
    print('received result in callback {}'.format(future))

def main():
    p1 = MyProcess(target=foo, args=(2,))
    f = p1.start()
    f.add_done_callback(fn=cb)
    sleep(10)

if __name__ == '__main__':
    main()
    print('Main thread dying')
In your start method you create a new Future which you then return. This is a different future than the one you set the result on; the returned future is simply never used. Try:
def start(self):
    run_thread = threading.Thread(target=self.run)
    run_thread.start()
    return self.f
However, there are more problems with your code. You override the start method of the Process, replacing it with execution on a worker thread, thereby actually bypassing multiprocessing. Also, you shouldn't import the _base module; as the leading underscore indicates, it is an implementation detail. Import concurrent.futures.Future instead (it's the same class, but through the public API).
This really uses multiprocessing:
from multiprocessing import Process, Queue
from concurrent.futures import Future
import threading
from time import sleep

def foo(x, q):
    print('result {}'.format(x*x))
    result = x*x
    sleep(5)
    q.put(result)

class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = Future()

    def run(self):
        q = Queue()
        worker_thread = threading.Thread(target=self.target, args=self.args + (q,))
        worker_thread.start()
        r = q.get(block=True)
        print('setting result {}'.format(r))
        self.f.set_result(result=r)
        print('done setting result')

def cb(future):
    print('received result in callback {}: {}'.format(future, future.result()))

def main():
    p1 = MyProcess(target=foo, args=(2,))
    p1.f.add_done_callback(fn=cb)
    p1.start()
    p1.join()
    sleep(10)

if __name__ == '__main__':
    main()
    print('Main thread dying')
And since run() is already executing in a new process at that point, spawning a worker thread to run your target function shouldn't really be necessary: you can just call the target function directly. Moreover, should the target function raise an exception, you wouldn't know about it; your callback is only called on success. If you fix that as well, you're left with:
from multiprocessing import Process
from concurrent.futures import Future
from time import sleep

def foo(x):
    print('result {}'.format(x*x))
    result = x*x
    sleep(5)
    return result

class MyProcess(Process):
    def __init__(self, target, args):
        super().__init__()
        self.target = target
        self.args = args
        self.f = Future()

    def run(self):
        try:
            r = self.target(*self.args)
            print('setting result {}'.format(r))
            self.f.set_result(result=r)
            print('done setting result')
        except Exception as ex:
            self.f.set_exception(ex)

def cb(future):
    print('received result in callback {}: {}'.format(future, future.result()))

def main():
    p1 = MyProcess(target=foo, args=(2,))
    p1.f.add_done_callback(fn=cb)
    p1.start()
    p1.join()
    sleep(10)

if __name__ == '__main__':
    main()
    print('Main thread dying')
This is basically what a ProcessPoolExecutor does.
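For comparison, here is a rough sketch of the same flow written against the public API; this is plain ProcessPoolExecutor usage, not the code from the question:

from concurrent.futures import ProcessPoolExecutor
from time import sleep

def foo(x):
    result = x * x
    sleep(5)
    return result

def cb(future):
    # invoked with the finished Future, whether it succeeded or failed
    print('received result in callback: {}'.format(future.result()))

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=1) as executor:
        f = executor.submit(foo, 2)  # returns a Future immediately
        f.add_done_callback(cb)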

Multiprocessing Class in Subprocess

I want to use Python's multiprocessing module in a class which itself uses subprocesses so as not to block the main call.
The minimal example looks like this:
import multiprocessing as mp

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.Pool = mp.Pool(processes=2)
        self.alive = True
        self.p = mp.Process(target=self.sub, args=())

    def worker(self):
        print 'Alive'

    def sub(self):
        print self.alive
        for i in range(2):
            print i
            self.Pool.apply_async(self.worker, args=())
        print 'done'
        self.Pool.close()
        # self.Pool.join()
I commented out the last line, as it raises an AssertionError (can only join a child process).
When I do:
m = mpo()
m.p.start()
The output is
True
0
1
done
My main question is: why is the print statement in the worker never reached?
Update:
The updated code looks like this.
import multiprocessing as mp

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.alive = True
        self.p = mp.Process(target=self.sub, args=())
        self.result = []

    def worker(self):
        self.result.append(1)
        print 'Alive'

    def sub(self):
        print self.alive
        Pool = mp.Pool(processes=2)
        for i in range(2):
            print i
            Pool.apply_async(self.worker, args=())
        print 'done'
        Pool.close()
        Pool.join()
The pool no longer has to be inherited, because it is created in the subprocess. Instead of the print statement, the result is appended to the calling object, and the pool is properly joined. Nevertheless, no result shows up.
So I think this may correspond to a simple example of what you are looking for. (As to why your worker never printed anything: apply_async has to pickle its target, and in Python 2 a bound method like self.worker cannot be pickled; the resulting error is silently swallowed unless you call .get() on the AsyncResult. Using a module-level worker function avoids this.)
import multiprocessing as mp

def worker(arg):
    #print 'Alive'+str(arg)
    return "Alive and finished {0}".format(arg)

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.alive = True
        self.pool = mp.Pool(processes=2)

    def sub(self, arguments):
        self.results = self.pool.map_async(worker, arguments)
        return self.results

if __name__ == "__main__":
    s = mpo()
    s.sub(range(10))
    print s.results.get()
Additionally, you can call self.results.ready() to find out whether the processes have finished their work. You do not have to put this inside another process, because the map_async call does not block the rest of your program.
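For example, a hypothetical polling loop (do_other_work is just a placeholder for your own code):

s = mpo()
res = s.sub(range(10))
while not res.ready():
    do_other_work()  # the main process stays responsive meanwhile
print res.get()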
EDIT:
Concerning your comment: I do not really see the value of putting the calculation in a separate process, because the worker function already runs in separate processes (in the pool). You only add complexity by nesting it in another subprocess, but it is possible:
import multiprocessing as mp

def worker(arg):
    #print 'Alive'+str(arg)
    return "Alive and finished {0}".format(arg)

class mpo():
    def __init__(self):
        cpu = mp.cpu_count()
        self.alive = True
        self.pool = mp.Pool(processes=2)

    def sub(self, arguments):
        self.results = self.pool.map_async(worker, arguments)
        return self.results

def run_calculation(q):
    s = mpo()
    results = s.sub(range(10))
    q.put(results.get())

queue = mp.Queue()
proc = mp.Process(target=run_calculation, args=(queue,))
proc.start()
proc.join()
print queue.get()

Process containing object method doesn't recognize edit to object

I have the following situation: process = Process(target=sample_object.run). I then would like to edit a property of the sample_object: sample_object.edit_property(some_other_object).
class sample_object:
    def __init__(self):
        self.storage = []

    def edit_property(self, some_other_object):
        self.storage.append(some_other_object)

    def run(self):
        while True:
            if len(self.storage) != 0:
                print "1"
                # I know it's an infinite loop. It's just an example.
_______________________________________________________
from multiprocessing import Process
from sample import sample_object
from sample2 import some_other_object

class driver:
    if __name__ == "__main__":
        samp = sample_object()
        proc = Process(target=samp.run)
        proc.start()
        while True:
            some = some_other_object()
            samp.edit_property(some)
            # I know it's an infinite loop
The previous code never prints "1". How would I connect the Process to the sample_object so that an edit made to the object whose method the Process is running is recognized by the process? In other words, is there a way to get run to see the change in sample_object?
Thank you.
You can use multiprocessing.Manager to share Python data structures between processes. (Your code never prints "1" because the child process gets its own copy of sample_object when it starts, so appends made in the parent afterwards never reach it. A manager list is a proxy object whose changes are forwarded through the manager process, so both sides see them.)
from multiprocessing import Process, Manager

class A(object):
    def __init__(self, storage):
        self.storage = storage

    def add(self, item):
        self.storage.append(item)

    def run(self):
        while True:
            if self.storage:
                print 1

if __name__ == '__main__':
    manager = Manager()
    storage = manager.list()
    a = A(storage)
    p = Process(target=a.run)
    p.start()
    for i in range(10):
        a.add({'id': i})
    p.join()

How to pause a multiprocessing Pool during execution

While using:
from multiprocessing import Pool

def myFunction(arg):
    for i in range(10000):
        pass

pool = Pool(processes=3)
pool.map_async(myFunction, ['first', 'second', 'third'])
I want the user to be able to pause execution of the multiprocessing Pool at any given time after the Pool has started, and then to be able to unpause (continue) with the rest of the items in the Pool. How do I achieve this?
EDIT:
Here is the working implementation of suggestions posted by Blckknght. Thanks Blckknght!
import sys
import multiprocessing
from PyQt4 import QtGui, QtCore

def setup(event):
    global unpaused
    unpaused = event

def myFunction(arg=None):
    unpaused.wait()
    print "Task started...", arg
    for i in range(15000000):
        pass
    print '...task completed.', arg

class MyApp(object):
    def __init__(self):
        super(MyApp, self).__init__()
        app = QtGui.QApplication(sys.argv)
        self.mainWidget = QtGui.QWidget()
        self.mainLayout = QtGui.QVBoxLayout()
        self.mainWidget.setLayout(self.mainLayout)
        self.groupbox = QtGui.QGroupBox()
        self.layout = QtGui.QVBoxLayout()
        self.groupbox.setLayout(self.layout)
        self.pauseButton = QtGui.QPushButton('Pause')
        self.pauseButton.clicked.connect(self.pauseButtonClicked)
        self.okButton = QtGui.QPushButton('Start Pool')
        self.okButton.clicked.connect(self.startPool)
        self.layout.addWidget(self.okButton)
        self.layout.addWidget(self.pauseButton)
        self.mainLayout.addWidget(self.groupbox)
        self.mainWidget.show()
        sys.exit(app.exec_())

    def startPool(self):
        self.event = multiprocessing.Event()
        self.pool = multiprocessing.Pool(1, setup, (self.event,))
        self.result = self.pool.map_async(myFunction, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        self.event.set()
        # self.result.wait()

    def pauseJob(self):
        self.event.clear()

    def continueJob(self):
        self.event.set()

    def pauseButtonClicked(self):
        if self.pauseButton.text() == 'Pause':
            print '\n\t\t ...pausing job...', '\n'
            self.pauseButton.setText('Resume')
            self.pauseJob()
        else:
            print '\n\t\t ...resuming job...', '\n'
            self.pauseButton.setText('Pause')
            self.continueJob()

if __name__ == '__main__':
    MyApp()
It sounds like you want to use a multiprocessing.Event to control the running of your worker function. You can create one, then pass it to an initializer of the pool, then wait on it in myFunction.
Here's an example that runs workers that print their argument every second. The workers can be paused by clearing the event, and restarted by setting it again.
from time import sleep
import multiprocessing

def setup(event):
    global unpaused
    unpaused = event

def myFunction(arg):
    for i in range(10):
        unpaused.wait()
        print(arg)
        sleep(1)

if __name__ == "__main__":
    event = multiprocessing.Event()  # initially unset, so workers will be paused at first
    pool = multiprocessing.Pool(3, setup, (event,))
    result = pool.map_async(myFunction, ["foo", "bar", "baz"])
    event.set()    # unpause workers
    sleep(5)
    event.clear()  # pause after five seconds
    sleep(5)
    event.set()    # unpause again after five more seconds
    result.wait()  # wait for the rest of the work to be completed
The worker processes should print "foo", "bar" and "baz" ten times each, with a one second delay between each repetition. The workers will be paused after the first five seconds, though, and restarted after five more seconds. There are probably various ways to improve this code, depending on your actual use case, but hopefully it is enough to get you headed in the right direction.

How to communicate with worker thread

I'm using a library which makes heavy use of I/O. Because of that, calls into the library can take very long (possibly more than 5 seconds).
Using it directly inside a UI is not a good idea, because the UI will freeze.
For that reason I outsourced the library calls to a thread with command queues, as shown in this example: Python threads: communication and stopping
Nevertheless, I'm not very happy with that solution, since it has a major drawback:
I cannot really communicate back to the UI.
Every library command returns a message, which can be either an error message or some computational result.
How would I get that back to the UI?
Consider a library call do_test(foo):
def do_test(foo):
    time.sleep(10)
    return random.random() * foo

def ui_btn_click():
    threaded_queue.put((do_test, 42))
    # Now how to display the result without freezing the UI?
Can someone give me advice how to realize such a pattern?
Edit:
Here is a minimal example:
import os, time, random
import threading, queue

CMD_FOO = 1
CMD_BAR = 2

class ThreadedQueue(threading.Thread):
    def __init__(self):
        super().__init__()
        self.in_queue = queue.Queue()
        self.out_queue = queue.Queue()
        self.__stoprequest = threading.Event()

    def run(self):
        while not self.__stoprequest.isSet():
            (cmd, arg) = self.in_queue.get(True)
            if cmd == CMD_FOO:
                ret = self.handle_foo(arg)
            elif cmd == CMD_BAR:
                ret = self.handle_bar(arg)
            else:
                print("Unsupported cmd {0}".format(cmd))
                self.in_queue.task_done()
                continue  # avoid pushing an undefined result
            self.out_queue.put(ret)
            self.in_queue.task_done()

    def handle_foo(self, arg):
        print("start handle foo")
        time.sleep(10)
        return random.random() * arg

    def handle_bar(self, arg):
        print("start handle bar")
        time.sleep(2)
        return (random.random() * arg, 2 * arg)

if __name__ == "__main__":
    print("START")
    t = ThreadedQueue()
    t.start()
    t.in_queue.put((CMD_FOO, 10))
    t.in_queue.put((CMD_BAR, 10))
    print("Waiting")
    while True:
        x = t.out_queue.get(True)
        t.out_queue.task_done()
        print(x)
I personally use PySide, but I don't want this library to depend on PySide or any other UI-related library.
I thought a bit about my implementation. The conclusion is that I start another thread that picks the results off the queue:
class ReceiveThread(threading.Thread):
    """
    Processes the output queue and calls a callback for each message
    """
    def __init__(self, queue, callback):
        super().__init__()
        self.__queue = queue
        self.__callback = callback
        self.__stoprequest = threading.Event()
        self.start()

    def run(self):
        while not self.__stoprequest.isSet():
            ret = self.__queue.get(True)
            self.__callback(ret)
            self.__queue.task_done()
The given callback, from a UI or elsewhere, is then called with every result from the queue.
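To illustrate, a minimal sketch wiring the two classes from this post together; on_result is a stand-in for whatever should happen with each message (a real GUI must hand the data over to its UI thread, e.g. via a PySide signal, rather than touch widgets here):

def on_result(msg):
    print("Got result:", msg)

t = ThreadedQueue()
t.start()
ReceiveThread(t.out_queue, on_result)  # starts itself in __init__
t.in_queue.put((CMD_FOO, 10))
t.in_queue.put((CMD_BAR, 10))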
