Thread is not closing even though terminated - Python

For some reason, when the timeout is reached and the except block therefore executes, thread 2 is still "working": it is still waiting for input from the user, even though the closing_threads function is entered.
Why can't I terminate the thread? Why is it still waiting for keyboard input?
If I add t2.join(), execution hangs indefinitely.
import queue
import threading

def main():
    q2 = queue.Queue()
    q1 = queue.Queue()
    t1 = threading.Thread(target=nothing, name='t1', args=(q1,))
    t2 = threading.Thread(target=get_interrupt_from_user, name='t2', args=(q2,))
    t1.start()
    t2.start()
    try:
        q2.get(timeout=4)
    except:
        ...
    closing_threads(t1, t2)

def closing_threads(t1, t2):
    print('closing the threads')
    t1.join()
    t2.join()
    print(t1.is_alive())
    print(t2.is_alive())

def get_interrupt_from_user(q) -> None:
    print('############ Thread 2 is starting! ############')
    interrupt = False
    while not interrupt:
        print('use KeyboardInterrupt to stop the execution')
        try:
            input()
        except KeyboardInterrupt:
            print('KeyboardInterrupt exception took place')
        else:
            print('exit by KeyboardInterrupt!!!')
            interrupt = True
    print(f'interrupt took place = {interrupt}')
    q.put(interrupt)

def nothing(q) -> None:
    print('############ Thread 1 is starting! ############')

The second thread is technically neither working nor terminated: it is in a blocked state managed by the operating system, and it will only finish when it returns from that state.
When you call input(), the operating system blocks the thread and waits for input; when input is available, it wakes the thread and hands it the user's text. The problem is that during that time the thread cannot handle interrupts, because it is not executing any Python code.
One way to solve this is to declare the thread as a daemon and not join it, so it will be killed when all non-daemon threads have exited.
import queue
import threading

def main():
    q2 = queue.Queue()
    q1 = queue.Queue()
    t1 = threading.Thread(target=nothing, name='t1', args=(q1,))
    t2 = threading.Thread(target=get_interrupt_from_user, name='t2', args=(q2,), daemon=True)
    t1.start()
    t2.start()
    try:
        q2.get(timeout=4)
    except:
        ...
    closing_threads(t1, t2)

def closing_threads(t1, t2):
    print('closing the threads')
    t1.join()
    print(t1.is_alive())
    print(t2.is_alive())

def get_interrupt_from_user(q) -> None:
    print('############ Thread 2 is starting! ############')
    interrupt = False
    while not interrupt:
        print('use KeyboardInterrupt to stop the execution')
        try:
            input()
        except KeyboardInterrupt:
            print('KeyboardInterrupt exception took place')
        else:
            print('exit by KeyboardInterrupt!!!')
            interrupt = True
    print(f'interrupt took place = {interrupt}')
    q.put(interrupt)

def nothing(q) -> None:
    print('############ Thread 1 is starting! ############')

if __name__ == "__main__":
    main()
Other ways are somewhat platform dependent and would involve spawning another process that asks the operating system to wake your thread up or terminate it, and that gets messy really quickly.
One last method is to use the select module on stdin and build your own event loop in the child thread, but this only works on Unix-like systems.
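A minimal sketch of that select-based approach (the stop_event shared with the main thread is an illustrative assumption, not from the original post):

import select
import sys

def get_interrupt_from_user(q, stop_event):
    # Poll stdin with a timeout instead of blocking in input(),
    # so the loop can notice stop_event between polls.
    # select on sys.stdin only works on Unix-like systems.
    while not stop_event.is_set():
        ready, _, _ = select.select([sys.stdin], [], [], 0.5)
        if ready:
            sys.stdin.readline()  # consume the line the user typed
            q.put(True)
            return

Because the worker wakes up at least every half second, the main thread can set stop_event and then join the thread normally.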

Related

What is the correct way to control threads?

I need to run roughly 3 to 5 threads that monitor some activities in the OS. Because of this, the main program must run in the background. I've read many examples and explanations, but it's still not clear to me how to launch the threads and the main program in the background and, after that, how to control them.
I start the threads in daemon mode from the main program:
import threading
import time

def fun1():
    while True:
        print("Thread 1")
        time.sleep(1)

def fun2():
    while True:
        print("Thread 2")
        time.sleep(1)

def fun3():
    while True:
        print("Thread 3")
        time.sleep(1)

def main():
    thread1 = threading.Thread(target=fun1)
    thread1.daemon = True
    thread1.start()
    thread2 = threading.Thread(target=fun2)
    thread2.daemon = True
    thread2.start()
    thread3 = threading.Thread(target=fun3)
    thread3.daemon = True
    thread3.start()

if __name__ == '__main__':
    try:
        main()
        while True:
            print("------------")
            print("Main program")
            print("------------")
            time.sleep(3)
    except (KeyboardInterrupt, SystemExit):
        print("Terminated")
and after that I run the main program in the background with (I'm not sure this is the best way to do it for what I want to achieve):
python daemon_thread.py &
How do I control the threads after the main program is initialized, if I need to stop a specific thread, change its state, or whatever? How do I access a specific thread or the main program?
I understand now what to do. To summarize the problem: I have a main program running in the background, and this main program has some threads. But I want to safely stop the main program and its threads from another script or program, and in some cases pause and resume threads.
I didn't have the right mental model of how to use threads. I can stop or signal these threads from the main program. How? With a database or a config file.
I updated my project with these changes:
import threading
import time
import sqlite3

def fun1(stop_event1):
    while not stop_event1.is_set():
        print("Thread 1")
        time.sleep(1)

def fun2(stop_event2):
    while not stop_event2.is_set():
        print("Thread 2")
        time.sleep(1)

def fun3(stop_event3):
    while not stop_event3.is_set():
        print("Thread 3")
        time.sleep(1)

def main():
    stop_event1 = threading.Event()
    thread1 = threading.Thread(target=fun1, args=(stop_event1,))
    thread1.daemon = True
    thread1.start()
    stop_event2 = threading.Event()
    thread2 = threading.Thread(target=fun2, args=(stop_event2,))
    thread2.daemon = True
    thread2.start()
    stop_event3 = threading.Event()
    thread3 = threading.Thread(target=fun3, args=(stop_event3,))
    thread3.daemon = True
    thread3.start()
    while True:
        print("------------")
        print("Main program")
        print("------------")
        time.sleep(3)
        if alive_main():
            print("Finish Threads")
            stop_event1.set()
            stop_event2.set()
            stop_event3.set()
            print("Bye")
            break

def alive_main():
    # Poll the flag in the config table; close the connection each time
    # so a file handle is not leaked on every iteration.
    conn = sqlite3.connect('example.db')
    c = conn.cursor()
    c.execute('SELECT alive_main FROM config')
    row = c.fetchone()
    conn.close()
    return row[0] == 1

if __name__ == '__main__':
    try:
        main()
    except (KeyboardInterrupt, SystemExit):
        print("Terminated")
If I want to change the state of my threads from another class or script, I just update the config table in my database, and the change takes effect in the threads via the main function. In this example, to stop my threads and program correctly, I just update the table, that's it:
sqlite> UPDATE config SET alive_main = 1;
I still need to read about signals and Condition objects to use threads properly.
Thanks everyone!
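For the pause-and-resume part mentioned above, one common pattern is a second Event that the worker waits on each iteration. A minimal sketch, with hypothetical names not taken from the project:

import threading
import time

def worker(stop_event, run_event):
    while not stop_event.is_set():
        run_event.wait()  # block here while paused; wakes when run_event is set
        print("working")
        time.sleep(1)

stop_event = threading.Event()
run_event = threading.Event()
run_event.set()  # start unpaused

t = threading.Thread(target=worker, args=(stop_event, run_event))
t.daemon = True
t.start()

time.sleep(3)
run_event.clear()  # pause
time.sleep(3)
run_event.set()    # resume (also needed before stopping, or wait() keeps blocking)
stop_event.set()   # stop for good
t.join()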

Timeouts for multiprocessing?

I've searched StackOverflow and although I've found many questions on this, I haven't found an answer that fits my situation, and I'm not a strong enough Python programmer to adapt the answers to fit my need.
I've looked here to no avail:
kill a function after a certain time in windows
Python: kill or terminate subprocess when timeout
signal.alarm replacement in Windows [Python]
I am using multiprocessing to run multiple SAP windows at once to pull reports. The script is set up to run on a schedule every 5 minutes. Every once in a while, one of the reports stalls due to the GUI interface and never ends. I don't get an error or exception; it just stalls forever. What I would like is a timeout so that, during the part of the code executed in SAP, if it takes longer than 4 minutes it times out, closes SAP, skips the rest of the code, and waits for the next scheduled report time.
I am using Python 2.7 on Windows.
import multiprocessing
from multiprocessing import Manager, Process
import time
import datetime

### OPEN SAP ###
def start_SAP():
    print 'opening SAP program'

### REPORTS IN SAP ###
def report_1(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report 1'

def report_2(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report 2'

def report_3(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    time.sleep(60000)  # mimicking the stall for report 3 that takes longer than allotted time
    print 'running report 3'

def report_N(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report N'

### CLOSES SAP ###
def close_SAP():
    print 'closes SAP'

def format_file():
    print 'formatting files'

def multi_daily_pull():
    lock = multiprocessing.Lock()  # creating a lock in multiprocessing
    shared_list = range(6)  # creating a shared list for all functions to use
    q = multiprocessing.Queue()  # creating an empty queue in multiprocessing
    for n in shared_list:  # putting list into the queue
        q.put(n)
    print 'Starting process at ', time.strftime('%m/%d/%Y %H:%M:%S')
    print 'Starting SAP Pulls at ', time.strftime('%m/%d/%Y %H:%M:%S')
    StartSAP = Process(target=start_SAP)
    StartSAP.start()
    StartSAP.join()
    report1 = Process(target=report_1, args=(q, lock))
    report2 = Process(target=report_2, args=(q, lock))
    report3 = Process(target=report_3, args=(q, lock))
    reportN = Process(target=report_N, args=(q, lock))
    report1.start()
    report2.start()
    report3.start()
    reportN.start()
    report1.join()
    report2.join()
    report3.join()
    reportN.join()
    EndSAP = Process(target=close_SAP)
    EndSAP.start()
    EndSAP.join()
    formatfile = Process(target=format_file)
    formatfile.start()
    formatfile.join()

if __name__ == '__main__':
    multi_daily_pull()
One way to do what you want would be to use the optional timeout argument that the Process.join() method accepts. This makes it block the calling thread for at most that length of time.
I also set the daemon attribute of each Process instance so your main thread will be able to terminate even if one of the processes it started is still "running" (or has hung).
One final point: you don't need a multiprocessing.Lock to control access to a multiprocessing.Queue, because queues handle that aspect of things automatically, so I removed it. You may still want one for some other reason, such as controlling access to stdout so printing from the various processes doesn't overlap and garble what is output to the screen.
import multiprocessing
from multiprocessing import Process
import time
import datetime

def start_SAP():
    print 'opening SAP program'

### REPORTS IN SAP ###
def report_1(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report 1 finished'

def report_2(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report 2 finished'

def report_3(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(60000)  # Take longer than allotted time
            break
    print 'report 3 finished'

def report_N(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report N finished'

def close_SAP():
    print 'closing SAP'

def format_file():
    print 'formatting files'

def multi_daily_pull():
    shared_list = range(6)  # creating a shared list for all functions to use
    q = multiprocessing.Queue()  # creating an empty queue in multiprocessing
    for n in shared_list:  # putting list into the queue
        q.put(n)
    print 'Starting process at ', time.strftime('%m/%d/%Y %H:%M:%S')
    print 'Starting SAP Pulls at ', time.strftime('%m/%d/%Y %H:%M:%S')
    StartSAP = Process(target=start_SAP)
    StartSAP.start()
    StartSAP.join()
    report1 = Process(target=report_1, args=(q,))
    report1.daemon = True
    report2 = Process(target=report_2, args=(q,))
    report2.daemon = True
    report3 = Process(target=report_3, args=(q,))
    report3.daemon = True
    reportN = Process(target=report_N, args=(q,))
    reportN.daemon = True
    report1.start()
    report2.start()
    report3.start()
    reportN.start()
    report1.join(30)  # block at most 30 seconds per process
    report2.join(30)
    report3.join(30)
    reportN.join(30)
    EndSAP = Process(target=close_SAP)
    EndSAP.start()
    EndSAP.join()
    formatfile = Process(target=format_file)
    formatfile.start()
    formatfile.join()

if __name__ == '__main__':
    multi_daily_pull()
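Note that join() with a timeout only stops waiting; it does not kill a hung process. If you want the stalled report actually shut down before close_SAP runs, one way is to check is_alive() after the timed join and call terminate(), both standard multiprocessing.Process methods:

for proc in (report1, report2, report3, reportN):
    proc.join(30)          # wait at most 30 seconds
    if proc.is_alive():    # still running, so it has presumably stalled
        proc.terminate()   # forcibly end the process
        proc.join()        # reap it so no zombie is left behind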

Python multiprocessing - check the status of each process

I wonder if it is possible to check how long each process takes.
For example, there are four workers and the job should take no more than 10 seconds, but one of the workers takes more than 10 seconds. Is there a way to raise an alert after 10 seconds, before the process finishes the job?
My initial thought was to use a manager, but it seems I have to wait until the process has finished.
Many thanks.
You can check whether a process is alive after you have tried to join it. Don't forget to set a timeout, otherwise join will wait until the job is finished.
Here is a simple example for you:
from multiprocessing import Process
import time

def task():
    time.sleep(5)

procs = []
for x in range(2):
    proc = Process(target=task)
    procs.append(proc)
    proc.start()

time.sleep(2)
for proc in procs:
    proc.join(timeout=0)
    if proc.is_alive():
        print "Job is not finished!"
I found this solution some time ago (somewhere here on StackOverflow) and I am very happy with it.
Basically, it uses signal to raise an exception if a process takes more than expected. Note that signal.SIGALRM is only available on Unix-like systems, and the alarm must be set from the process's main thread.
All you need to do is to add this class to your code:
import signal

class Timeout:
    def __init__(self, seconds=1, error_message='TimeoutError'):
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, type, value, traceback):
        signal.alarm(0)
Here is a general example of how it works:

import time

with Timeout(seconds=3, error_message='JobX took too much time'):
    try:
        time.sleep(10)  # your job
    except TimeoutError as e:
        print(e)
In your case, I would add the with statement to the job that your worker needs to perform. Then you catch the exception and do what you think is best.
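A minimal sketch of that, reusing the Timeout class above on a Unix-like system (run_report is a stand-in for the real job, not part of the original question):

import time
from multiprocessing import Process

def run_report():
    time.sleep(60)  # stand-in for the real, possibly stalling job

def worker():
    # Each worker process has its own main thread, so signal.alarm works here.
    with Timeout(seconds=10, error_message='worker took too much time'):
        try:
            run_report()
        except TimeoutError as e:
            print(e)  # raise an alert, clean up, etc.

if __name__ == '__main__':
    p = Process(target=worker)
    p.start()
    p.join()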
Alternatively, you can periodically check if a process is alive:
timeout = 3  # seconds
start = time.time()
while time.time() - start < timeout:
    if any(proc.is_alive() for proc in processes):
        time.sleep(1)
    else:
        print('All processes done')
        break
else:
    # while/else: this branch runs only if the loop ended without a break
    print("Timeout!")
    # do something
Use Pipe and messages
from multiprocessing import Process, Pipe
import numpy as np

caller, worker = Pipe()
val1 = ['der', 'die', 'das']

def worker_function(info):
    print(info.recv())  # blocks until the caller sends the input data
    for i in range(10):
        print(val1[np.random.choice(3, 1)[0]])
    info.send(['job finished'])
    info.close()

def request(data):
    task = Process(target=worker_function, args=(worker,))
    print("task is requested")
    task.start()
    caller.send(data)  # send the input before waiting, or both sides deadlock
    if caller.recv() == ['job finished']:
        task.join()
        print("finished")

if __name__ == '__main__':
    data = {'input': 'here'}
    request(data)

Python Thread getting items from Queue - how do you determine that a thread has not called task_done()?

import Queue
import threading

dispQ = Queue.Queue()
stop_thr_event = threading.Event()

def worker(stop_event):
    while not stop_event.wait(0):
        try:
            job = dispQ.get(timeout=1)
            job.do_work()  # does some work and may put more jobs in the dispQ
            dispQ.task_done()
        except Queue.Empty, msg:
            continue

t1 = threading.Thread(target=worker, args=(stop_thr_event,))
t1.daemon = True
t1.start()

t2 = threading.Thread(target=worker, args=(stop_thr_event,))
t2.daemon = True
t2.start()

# put some initial jobs in the dispQ here

while True:
    if dispQ.qsize() == 0:
        break
    else:
        pass  # do something important

stop_thr_event.set()
t1.join()
t2.join()
The Problem:
dispQ.qsize() is 0 while, inside the worker func, a job is creating more items, so the main loop breaks out even though more jobs land in dispQ right afterwards.
I need to do something like:
if dispQ.qsize() == 0 and no item is being worked on inside the worker func, then break out of the while loop
i.e. get() has been called but task_done() hasn't been called yet.
It would be nice if dispQ.join() accepted a timeout.
Here is the source for Queue.join; it looks like you can just check queue.unfinished_tasks > 0 to see whether any tasks are running or queued. You may want to acquire (then release) the lock to ensure that the queue doesn't change while you're checking its status. (I believe) you don't need to do this if you can guarantee that no items will be added to the queue after the last task finishes.
def join(self):
    # ...
    self.all_tasks_done.acquire()
    try:
        while self.unfinished_tasks:
            self.all_tasks_done.wait()
    finally:
        self.all_tasks_done.release()
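A minimal sketch of that check (queue_is_idle is an illustrative helper; unfinished_tasks is an internal attribute, not public API, so this relies on the implementation staying as above):

import time

def queue_is_idle(q):
    # True only when every get() has been matched by a task_done()
    # and nothing is left in the queue.
    q.all_tasks_done.acquire()
    try:
        return q.unfinished_tasks == 0
    finally:
        q.all_tasks_done.release()

while not queue_is_idle(dispQ):
    time.sleep(0.1)  # poll until the queue is drained and no job is in flight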

Controlling a python thread with a function

Thanks to those who helped me figure out I needed to use threading to run a loop in a control script I have. I now have an issue trying to control the thread, by starting or stopping it based on a function:
I want to start a process that makes a motor cycle through a movement when a 'start' parameter is sent to the controlling function, and I also want to send a 'stop' parameter to stop the thread. Here's where I got to:
def looper():
    while True:
        print 'forward loop'
        bck.ChangeDutyCycle(10)
        fwd.ChangeDutyCycle(0)
        time.sleep(5)
        print 'backwards loop'
        bck.ChangeDutyCycle(0)
        fwd.ChangeDutyCycle(20)
        time.sleep(5)

def looper_control(state):
    t = threading.Thread(target=looper)
    if state == 'start':
        t.start()
    elif state == 'stop':
        t.join()
        print 'looper stopped!!'
This starts the thread okay when I call looper_control('start'), but throws an error when I call looper_control('stop'):

File "/usr/lib/python2.7/threading.py", line 657, in join
    raise RuntimeError("cannot join thread before it is started")
RuntimeError: cannot join thread before it is started
EDIT: looper_control is called from here:

if "motor" in tmp:
    if tmp[-1:] == '0':
        # stop both pins
        MotorControl('fwd', 0, 0)
        print 'stop motors'
        looper_control('stop')
    elif tmp[-1:] == '2':
        # loop the motor
        print 'loop motors'
        looper_control('start')
UPDATE: I've not been able to stop the thread using the method suggested - I thought I had it!
Here's where I am:
class sliderControl(threading.Thread):
    def __init__(self, stop_event):
        super(sliderControl, self).__init__()
        self.stop_event = stop_event

    def run(self):
        while self.stop_event:
            print 'forward loop'
            bck.ChangeDutyCycle(10)
            fwd.ChangeDutyCycle(0)
            time.sleep(5)
            print 'backwards loop'
            bck.ChangeDutyCycle(0)
            fwd.ChangeDutyCycle(20)
            time.sleep(5)

def looper_control(state, stop_event):
    if state == 'start':
        t = sliderControl(stop_event=stop_event)
        t.start()
    elif state == 'stop':
        #time.sleep(3)
        stop_event.set()
        #t.join()
        print 'looper stopped!!'
called via:
if tmp[-1:] == '0':
    # stop both pins
    MotorControl('fwd', 0, 0)
    print 'stop motors'
    #stop_thread_event = threading.Event()
    print 'stopping thread'
    print stop_thread_event
    looper_control('stop', stop_thread_event)
elif tmp[-1:] == '2':
    # loop the motor
    print 'loop motors'
    global stop_thread_event
    stop_thread_event = threading.Event()
    print stop_thread_event
    looper_control('start', stop_thread_event)
It looked like separate Event objects were being created by the loop and stop branches, so I thought a global would sort it out, but it's just not playing ball. When I start the loop, it runs; but when I try to stop it, I get looper stopped!!, yet the process just keeps running.
Your top-level thread routine will need to become an event handler that listens to a Queue object (as in from Queue import Queue) for messages, then handles them based on state. One of those messages can be a shutdown command, in which case the worker thread function simply exits, allowing the main thread to join it; a sketch follows below.
Instead of time.sleep, use threading.Timer with the body of the timer sending a message into your event queue.
This is a substantial refactoring, but especially if you plan on adding more conditions, you'll need it. One alternative is to use a package that handles this kind of thing for you, maybe pykka.
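A minimal sketch of that message loop (Python 2 to match the question; the message names are illustrative):

from Queue import Queue, Empty
import threading

events = Queue()

def looper():
    while True:
        try:
            msg = events.get(timeout=5)  # wake up periodically even with no messages
        except Empty:
            continue
        if msg == 'shutdown':
            return                       # the run loop exits, so join() succeeds
        # handle other messages ('forward', 'backward', ...) based on state here

t = threading.Thread(target=looper)
t.start()
events.put('shutdown')
t.join()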
To stop a Python thread you can use threading.Event().
Try this:
class YourClass(threading.Thread):
    def __init__(self, stop_event):
        super(YourClass, self).__init__()
        self.stop_event = stop_event

    def run(self):
        while not self.stop_event.is_set():
            pass  # do what you need here (what you had in looper)

def looper_control(state, stop_event):
    if state == 'start':
        t = YourClass(stop_event=stop_event)
        t.start()
    elif state == 'stop':
        stop_event.set()

and the call to looper_control:

stop_thread_event = threading.Event()
looper_control(state, stop_thread_event)
You can only start a thread once,
but you can lock and unlock the thread.
The best way to stop and start a thread is with a mutex, for example:
#!/usr/bin/python
import threading
from time import sleep

mutex2 = threading.Lock()

# This thread adds values to d[]
class Hilo(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        while True:
            mutex2.acquire()
            # Add values to d[]
            d.append("hi from Peru")
            mutex2.release()
            sleep(1)

d = []
hilos = [Hilo()]

# Stop thread
# If you have more threads you need to make a mutex for every thread
mutex2.acquire()
# Start the threads; each one immediately blocks on the locked mutex
for h in hilos:
    h.start()

# So to unlock (start) the thread you do:
mutex2.release()

# Sleep for 4 seconds
sleep(4)
# And print d[]
print d
print "------------------------------------------"

# WAIT 5 SECONDS AND STOP THE THREAD
sleep(5)
try:
    mutex2.acquire()
except Exception, e:
    mutex2.release()
    mutex2.acquire()

# AND PRINT d[]
print d

# AND NOW YOUR THREAD IS STOPPED
# When the thread is locked (stopped), you only need to call mutex2.release() to unlock (start) it
# When your thread is unlocked (started) and you want to lock (stop) it:
# try:
#     mutex2.acquire()
# except Exception, e:
#     mutex2.release()
#     mutex2.acquire()
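For comparison, a small sketch wrapping the same idea behind pause/resume helpers (hypothetical names, not from the answer above):

import threading
from time import sleep

pause_lock = threading.Lock()

def pause():
    pause_lock.acquire()  # the worker blocks at its next acquire

def resume():
    pause_lock.release()  # the worker may proceed again

def worker():
    while True:
        with pause_lock:  # stalls here while pause() holds the lock
            print "working"
        sleep(1)

t = threading.Thread(target=worker)
t.daemon = True
t.start()

sleep(3)
pause()   # worker freezes at the top of its loop
sleep(3)
resume()  # worker continues

Note the trade-off against the Event-based answer: the lock stalls the worker mid-loop rather than letting it test a flag, so the worker gets no chance to clean up while paused.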
