How can I run my own python service at a given time? - python

I have created a python service
(based on this video: https://www.youtube.com/watch?v=_lmFArB6OI8&ab_channel=MATRIX)
and I noticed that after a while it stopped by itself
here is the code:
# these methods are working well
def job(t):
    # Run one batch of the scheduled file operations in sequence.
    # `t` is only a label passed by the scheduler; it is not used here.
    # NOTE(review): `del` is a reserved keyword — `del()` cannot be a
    # function call and is a SyntaxError; the helper must really have a
    # different name. Confirm against the original script.
    del()
    create()
    time.sleep(5)  # give create() time to settle before copying
    copy()
    time.sleep(2)
    cut()
    correct()
class aservice(win32serviceutil.ServiceFramework):
    """Windows service wrapper that runs job() on a daily schedule."""
    _svc_name_ = "SERVICE_NAME"          # internal service name
    _svc_display_name_ = "DISPLAY_NAME"  # name shown in the Services console
    _svc_description_ = "XYZ."           # description shown in the Services console

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        # Event signalled by the SCM when the service is asked to stop.
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        # Called by the SCM: acknowledge the stop, then wake SvcDoRun's wait.
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        # Main service loop: wait on the stop event with a timeout, and run
        # the scheduler between waits.
        import servicemanager
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED,
                              (self._svc_name_, ''))
        self.timeout = 10000  # wait timeout in milliseconds (10 seconds, not 2 minutes)
        while 1:
            # Wait for the stop signal; on timeout, loop again.
            rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
            if rc == win32event.WAIT_OBJECT_0:
                # Stop signal encountered
                servicemanager.LogInfoMsg("SomeShortNameVersion - STOPPED!")  # For Event Log
                break
            else:
                # NOTE(review): this inner `while True` never checks
                # hWaitStop, so once entered the service can no longer react
                # to a stop request — likely why the SCM eventually kills it.
                # Also, schedule.every(...) registers a NEW job each time this
                # branch is reached, and the bare `except: pass` hides every
                # error raised by job() or the scheduler.
                try:
                    schedule.every().day.at("10:20").do(job, 'It is 01:00')
                    while True:
                        schedule.run_pending()
                        time.sleep(60)
                except:
                    pass
def ctrlHandler(ctrlType):
    """Console control handler: report every console event as handled."""
    return True
if __name__ == '__main__':
    if len(sys.argv) == 1:
        # Started by the SCM with no arguments: host the service directly.
        servicemanager.Initialize()
        servicemanager.PrepareToHostSingle(aservice)
        servicemanager.StartServiceCtrlDispatcher()
        # NOTE(review): StartServiceCtrlDispatcher blocks until the service
        # stops, so this handler is only installed afterwards — confirm that
        # is intended.
        win32api.SetConsoleCtrlHandler(ctrlHandler, True)
    else:
        # Started from the command line: install/start/stop/remove, etc.
        win32serviceutil.HandleCommandLine(aservice)
If I don't do it as a service, it works, as soon as I put it in a service it runs and stops after a while
I tried to simply run the methods in the task scheduler, I got a "0x1" error there, I tried to solve that too, but it didn't want to succeed.

Related

Python - Mouse+Keyboard Activity Monitor (Windows)

I'm creating a script to monitor my mouse/keyboard activity. The intent is to update a timestamp whenever I move the mouse or press a button on the keyboard. I've threaded the mouse and keyboard check methods, and the main thread checks whether we have passed the timeout/inactive duration, clicking at a specified location if we have.
Unfortunately, the keyboard monitoring is not working. The pynput.keyboard.Listener object seems to never join().
I'm not particularly comfortable with multithreading but I think I need it for this script. Please share a better way if there is one. I want to be able to run this script/class as a thread in another script later.
from pynput.keyboard import Listener
import pyautogui as gui
import threading, time
from datetime import datetime, timedelta
class activity(threading.Thread):
    """Monitor mouse/keyboard activity and click a fixed spot when idle.

    A timestamp is refreshed whenever the mouse moves or a key is
    released. run() checks the timestamp and, once it is older than
    `timeout`, performs a click and restores the cursor position.
    """

    def __init__(self, timeout: int = 60):
        # Bug fix: threading.Thread.__init__ must run, otherwise using this
        # object as a thread (start()/join()) raises RuntimeError.
        super().__init__()
        self.stop_flag = False
        self.timeout = timedelta(seconds=timeout)
        self.last_timestamp = datetime.now()

    def update_timestamp(self):
        self.last_timestamp = datetime.now()
        print('timestamp updated')

    # For monitoring if the keyboard is active
    def keybd_monitoring(self, lock: threading.Lock) -> None:
        # Bug fix: the original joined a Listener whose on_release always
        # returned True, so join() never returned and the timestamp was
        # never updated. Update the timestamp from the callback instead,
        # and stop the listener once stop_flag is set (pynput stops the
        # listener when the callback returns False).
        def on_release(key):
            with lock:
                self.update_timestamp()
            print('Keyboard pressed')
            return not self.stop_flag

        with Listener(on_release=on_release) as listener:
            listener.join()

    # For monitoring if the mouse is active
    def mouse_monitoring(self, lock: threading.Lock) -> None:
        # Poll the cursor position every 3 seconds; any movement counts
        # as activity.
        last_position = gui.position()
        while not self.stop_flag:
            time.sleep(3)
            curr_position = gui.position()
            if last_position != curr_position:
                last_position = curr_position
                with lock:
                    self.update_timestamp()
                print('Mouse Moved')

    def stop(self):
        # Ask both monitor loops to finish.
        self.stop_flag = True

    # For monitoring if the mouse/keyboard have been used in the last TIMEOUT seconds
    def run(self):
        try:
            width, height = gui.size()
            lock = threading.Lock()
            mouse = threading.Thread(target=self.mouse_monitoring, args=(lock,))
            keybd = threading.Thread(target=self.keybd_monitoring, args=(lock,))
            mouse.start()
            keybd.start()
            while not self.stop_flag:
                time.sleep(.1)
                if datetime.now() > self.last_timestamp + self.timeout:
                    curr_position = gui.position()
                    gui.click(int(width * .6), height)
                    gui.moveTo(curr_position)
        finally:
            self.stop()
            if mouse.is_alive():
                mouse.join()
            if keybd.is_alive():
                keybd.join()
if __name__ == '__main__':
    act = activity()
    # NOTE(review): run() executes the monitor in the current thread;
    # call act.start() instead to run it as a background thread.
    act.run()
I've made it work without the monitoring functions being in a class. I'm still curious if it could work within a class.
from pynput.keyboard import Listener
import pyautogui as gui
import threading, time
from datetime import datetime, timedelta
# Shared module-level state for the non-class version of the monitor.
stop_flag = False                # set to True to stop all loops
timeout = timedelta(seconds=60)  # allowed idle time
last_timestamp = datetime.now()  # time of the last observed activity
lock = threading.Lock()          # guards last_timestamp

def update_timestamp(key=None):
    # Also used directly as the pynput on_release callback (hence `key`).
    lock.acquire()
    global last_timestamp
    last_timestamp = datetime.now()
    lock.release()
    # NOTE(review): pynput stops the listener when the callback returns
    # False; returning stop_flag (normally False) may stop listening after
    # the first key — `not stop_flag` was probably intended. Confirm.
    return stop_flag

# For monitoring if the keyboard is active
def keybd_monitoring(lock: threading.Lock) -> None:
    # Each key release calls update_timestamp via the listener.
    with Listener(on_release=update_timestamp) as listener:
        listener.join()

# For monitoring if the mouse is active
def mouse_monitoring(lock: threading.Lock) -> None:
    # Poll the cursor every 3 seconds; movement counts as activity.
    last_position = gui.position()
    while not stop_flag:
        time.sleep(3)
        curr_position = gui.position()
        if last_position != curr_position:
            last_position = curr_position
            update_timestamp()

def stop():
    # Signal all loops to finish.
    global stop_flag
    stop_flag = True

# For monitoring if the mouse/keyboard have been used in the last TIMEOUT seconds
def activity():
    try:
        width, height = gui.size()
        mouse = threading.Thread(target=mouse_monitoring, args=(lock,))
        keybd = threading.Thread(target=keybd_monitoring, args=(lock,))
        mouse.start()
        keybd.start()
        while not stop_flag:
            time.sleep(1)
            if datetime.now() > last_timestamp + timeout:
                # Idle too long: click, then restore the cursor position.
                curr_position = gui.position()
                gui.click(int(width*.6), height)
                gui.moveTo(curr_position)
                update_timestamp()
    finally:
        stop()
        if mouse.is_alive():
            mouse.join()
        if keybd.is_alive():
            keybd.join()

if __name__ == '__main__':
    activity()

"When" loop in asynchronous Python

This isn't as much of a question as something I'm interested in.
I do quite a bit of asynchronous coding in Python, and there's a bit of code that I'm frequently writing over and over while I'm waiting for threads to stop (if I'm trying to exit cleanly).
while not class_containing_threads.stopped:
pass
else:
do_something()
do_something_else()
do_some_other_thing()
Although I'm sure there's a nice decorator that one can write to make this happen, I'm not too sure how I would go about writing it without ultimately making my code more complicated than it needs to be.
Basically, I wish there were something along the lines of:
when condition:
do_something()
where the thread is effectively halted while we wait for some event to occur.
To demonstrate what I mean, here's some working code that shows how much I actually end up writing the same thing over and over
import threading
import random
import time
class ClassContainingThreads:
    """Demo: a coordinator thread that starts three busy-waiting workers.

    The busy-wait loops are deliberate — they are what the question is
    about — so they are kept as-is.
    """

    def __init__(self):
        # Just stating what stuff can be found here
        self._coordinating_thread = None
        self._worker_thread_1 = None
        self._worker_thread_2 = None
        self._worker_thread_3 = None
        self._stopping = False
        self._stopped = False

    def run(self):
        # Main method to get everything running
        self._coordinating_thread = threading.Thread(target=self._run)
        self._coordinating_thread.start()

    def stop(self):
        # Used to stop everything
        self._stopping = True

    @property  # bug fix: was '#property' — the decorator had been turned
    # into a comment, yet `stopped` is read without parentheses by callers.
    def stopped(self):
        # Lets you know when things have stopped
        return self._stopped

    @property  # bug fix: was '#property' (same mangling as above)
    def all_workers_running(self):
        # Lets you know whether all the workers are running
        return self._all_workers_are_alive()

    def _run(self):
        # Coordinating thread getting worker threads to start
        self._worker_thread_1 = threading.Thread(
            target=self._important_function_1)
        self._worker_thread_2 = threading.Thread(
            target=self._important_function_2)
        self._worker_thread_3 = threading.Thread(
            target=self._important_function_3)
        self._worker_thread_1.start()
        self._worker_thread_2.start()
        self._worker_thread_3.start()
        # Busy-wait until stop() is called; `while ... else` runs the else
        # block when the loop exits without break.
        while not self._stopping:
            pass
        else:
            # Busy-wait for every worker to die, then mark as stopped.
            while self._any_workers_are_alive():
                pass
            else:
                self._stopping = False
                self._stopped = True

    def _important_function_1(self):
        print(f'Thread 1 started')
        # Busy-wait until asked to stop
        while not self._stopping:
            pass
        else:
            print('Thread 1 received stop signal')
        # Emulating some process that takes some unknown time to stop
        delay_long = random.random() * 5
        delay_start = time.time()
        while not (time.time() - delay_start) > delay_long:
            pass
        else:
            print(f'Thread 1 stopped')

    def _important_function_2(self):
        print(f'Thread 2 started')
        # Busy-wait until asked to stop
        while not self._stopping:
            pass
        else:
            print('Thread 2 received stop signal')
        # Emulating some process that takes some unknown time to stop
        delay = random.random() * 5
        delay_start = time.time()
        while not (time.time() - delay_start) > delay:
            pass
        else:
            print(f'Thread 2 stopped')

    def _important_function_3(self):
        print(f'Thread 3 started')
        # Busy-wait until asked to stop
        while not self._stopping:
            pass
        else:
            print('Thread 3 received stop signal')
        # Emulating some process that takes some unknown time to stop
        delay = random.random() * 5
        delay_start = time.time()
        while not (time.time() - delay_start) > delay:
            pass
        else:
            print(f'Thread 3 stopped')

    def _any_workers_are_alive(self):
        # Check whether any workers are alive
        if (self._worker_thread_1.is_alive() or
                self._worker_thread_2.is_alive() or
                self._worker_thread_3.is_alive()):
            return True
        else:
            return False

    def _all_workers_are_alive(self):
        # Check whether all workers are alive
        if (self._worker_thread_1.is_alive() and
                self._worker_thread_2.is_alive() and
                self._worker_thread_3.is_alive()):
            return True
        else:
            return False
if __name__ == '__main__':
    # Just booting everything up
    print('Program started')
    class_containing_threads = ClassContainingThreads()
    class_containing_threads.run()
    # Busy-wait until all workers are up — the repeated pattern the
    # question is about.
    while not class_containing_threads.all_workers_running:
        pass
    else:
        # and here
        while not input("Type 'exit' to exit > ") == "exit":
            pass
        else:
            class_containing_threads.stop()
            # and here: busy-wait for shutdown to complete
            while not class_containing_threads.stopped:
                pass
            else:
                print('Program stopped')
                exit()  # I know this is pointless here
Also, critiques are welcome.
The pattern of repeatedly checking a flag is a form of busy wait. This is an extremely wasteful pattern, as the task checking the flag will do so very, very often.
Concrete alternatives depend on the concurrency pattern used, but usually come in the form of signals, events or locks – these are generally known as "synchronisation primitives".
For example, threading provides a threading.Event that can be "waited for" and "triggered". The desired operation when condition: is simply event.wait() – this automatically pauses the current thread until the event is triggered. Another thread can trigger this condition via event.set().
Thanks to the feedback, I've rewritten the code snippet into something that uses the threading.Thread.join() method and threading.Event object. It's much simpler now, and hopefully doesn't involve any unintentional busy waiting.
import threading
import random
import time
class ClassContainingThreads:
    """Reworked demo: workers are join()ed and the main thread is released
    via a shared threading.Event instead of busy-waiting on flags.

    (The workers' own busy-wait loops are kept — they merely emulate work.)
    """

    def __init__(self, blocking_event):
        # Just stating what stuff can be found here
        self._blocking_event = blocking_event  # set() releases the main thread
        self._coordinating_thread = None
        self._worker_thread_1 = None
        self._worker_thread_2 = None
        self._worker_thread_3 = None
        self._stopping = False
        self._stopped = False

    def run(self):
        # Main method to get everything running
        self._coordinating_thread = threading.Thread(target=self._run)
        self._coordinating_thread.start()

    def stop(self):
        # Used to stop everything
        self._stopping = True

    @property  # bug fix: was '#property' — `stopped` is meant to be read
    # as an attribute, not called.
    def stopped(self):
        return self._stopped

    def _run(self):
        # Coordinating thread getting worker threads to start
        self._worker_thread_1 = threading.Thread(
            target=self._important_function_1)
        self._worker_thread_2 = threading.Thread(
            target=self._important_function_2)
        self._worker_thread_3 = threading.Thread(
            target=self._important_function_3)
        # Start the workers
        self._worker_thread_1.start()
        self._worker_thread_2.start()
        self._worker_thread_3.start()
        # Let main_thread continue when workers have started
        self._blocking_event.set()
        # Wait for workers to complete (no busy-wait)
        self._worker_thread_1.join()
        self._worker_thread_2.join()
        self._worker_thread_3.join()
        # Once all threads are dead
        self._stopping = False
        self._stopped = True
        self._blocking_event.set()

    def _important_function_1(self):
        print(f'Thread 1 started')
        # Emulating some job being done (intentional busy-wait)
        while not self._stopping:
            pass
        else:
            print('Thread 1 received stop signal')
        # Emulating some process that takes some unknown time to stop
        delay_long = random.random() * 5
        delay_start = time.time()
        while not (time.time() - delay_start) > delay_long:
            pass
        else:
            print(f'Thread 1 stopped')

    def _important_function_2(self):
        print('Thread 2 started')
        # Emulating some job being done (intentional busy-wait)
        while not self._stopping:
            pass
        else:
            print('Thread 2 received stop signal')
        # Emulating some process that takes some unknown time to stop
        delay = random.random() * 5
        delay_start = time.time()
        while not (time.time() - delay_start) > delay:
            pass
        else:
            print(f'Thread 2 stopped')

    def _important_function_3(self):
        print('Thread 3 started')
        # Emulating some job being done (intentional busy-wait)
        while not self._stopping:
            pass
        else:
            print('Thread 3 received stop signal')
        # Emulating some process that takes some unknown time to stop
        delay = random.random() * 5
        delay_start = time.time()
        while not (time.time() - delay_start) > delay:
            pass
        else:
            print(f'Thread 3 stopped')
if __name__ == '__main__':
    # Just booting everything up
    print('Program started')
    blocking_event = threading.Event()
    class_containing_threads = ClassContainingThreads(blocking_event)
    class_containing_threads.run()
    # Block until the workers have started (no busy-wait)
    blocking_event.wait()
    while not input("Type 'exit' to exit > ") == "exit":
        pass
    else:
        class_containing_threads.stop()
        # NOTE(review): the event is never clear()ed after the first wait,
        # so this second wait() returns immediately instead of waiting for
        # shutdown — confirm a blocking_event.clear() is missing above.
        blocking_event.wait()
        print('Program stopped')
        exit()  # I know this is pointless here

Python exit from all running threads on truthy condition

I am using threads to check a header status code from an API URL. How can I break the loop and stop all other threads if the condition is true? Please check the following code:
import logging, time, threading, requests
#: Log items
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s', level=logging.INFO)
class EppThread(threading.Thread):
    """Thread that polls a URL up to `limit` times; stops on a non-200."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None):
        # NOTE(review): the `verbose` argument was removed from
        # threading.Thread in Python 3 — this only runs on Python 2.
        threading.Thread.__init__(self, group=group, target=target, name=name, verbose=verbose)
        self.args = args

    def run(self):
        startTime = time.time()  # NOTE(review): never used afterwards
        url = self.args[0]
        limit = self.args[1]
        for i in range(limit):
            response = requests.get(url)
            if response.status_code != 200:
                break
                # Exit other similar threads (with same url)
            else:
                # NOTE(review): `thread` is not defined in this scope —
                # this raises NameError; should be self.getName().
                print('Thread {0} - success'.format(thread.getName()))
        print('process completed')
        # Send Email
# Send Email
number_of_threads = 5    # workers per URL
number_of_requests = 100 # polls per worker
urls = ['https://v1.api.com/example', 'https://v2.api.com/example']

if __name__ == '__main__':
    startTime = time.time()
    for url in urls:
        threads = []  # NOTE(review): rebinding inside the loop drops the
        # handles of the previous URL's threads.
        for i in range(number_of_threads):
            # NOTE(review): `name` is undefined here — NameError.
            et = EppThread(name="{0}-Thread-{1}".format(name, i + 1), args=(url, number_of_requests))
            threads.append(et)
            et.start()
    # Check if execution time is not greater than 1 minute
    while len(threads) > 0 and (time.time() - startTime) < 60:
        time.sleep(0.5)
        # NOTE(review): removing from `threads` while iterating it skips
        # elements.
        for thread in threads:
            if not thread.isAlive():
                threads.remove(thread)
                print('Thread {0} terminated'.format(thread.getName()))
    # NOTE(review): `os` is never imported, and os._exit skips all cleanup.
    os._exit(1)
Please suggest some better ways that stops code execution if condition gets true in any running thread.
Thanks for your help.
An important thing to note here is that when the run method of a Thread is complete, the Thread is set to dead and garbage collected. So all we really need is a boolean class variable that breaks that loop. Class variables are the same for all objects instantiated from that class and subclasses; so once we set it, all of the objects in our class will act the same way:
import logging, time, threading, requests
#: Log items
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s', level=logging.INFO)
class EppThread(threading.Thread):
    """Polling thread; a class-level `kill` flag lets any instance stop all."""
    kill = False  # new Boolean class variable, shared by all instances
    url = 'https://v1.api.com/example'  # keep this in mind for later

    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None):
        # NOTE(review): `verbose` was removed from threading.Thread in
        # Python 3 — this constructor only works on Python 2.
        threading.Thread.__init__(self, group=group, target=target, name=name, verbose=verbose)
        self.args = args

    def run(self):
        limit = self.args[0]
        for i in range(limit):
            response = requests.get(self.url)
            if response.status_code != 200:
                # NOTE(review): `self.kill = True` creates an INSTANCE
                # attribute that shadows the class variable, so other
                # threads are NOT affected; `EppThread.kill = True` (or
                # type(self).kill = True) is needed for the described
                # behaviour.
                self.kill = True  # ends this loop on all Threads since it's changing a class variable
            else:
                print('Thread {0} - success'.format(self.getName()))  # changed to self.getName()
            if self.kill:  # if kill is True, break the loop, send the email, and finish the Thread
                break
        print('process completed')
        # Send Email
# Send Email
number_of_threads = 5    # concurrent polling threads
number_of_requests = 100 # polls per thread

if __name__ == '__main__':
    startTime = time.time()
    threads = []
    for i in range(number_of_threads):
        # Bug fixes: the original referenced an undefined `name`
        # (NameError), and passed args=(number_of_requests) — without the
        # trailing comma that is a plain int, not a tuple, so
        # self.args[0] in run() would raise TypeError.
        et = EppThread(name="Epp-Thread-{0}".format(i + 1), args=(number_of_requests,))
        threads.append(et)
        et.start()
    # Check if execution time is not greater than 1 minute
    while threads and time.time() - startTime < 60:  # empty list is falsy
        time.sleep(0.5)
        # NOTE(review): removing from `threads` while iterating it can skip
        # elements; the missed ones are simply caught on a later pass.
        for thread in threads:
            if not thread.isAlive():
                threads.remove(thread)
                print('Thread {0} terminated'.format(thread.getName()))
    # Past the 1-minute budget: tell any remaining threads to stop.
    EppThread.kill = True
Now when any of the EppThreads has a bad connection it sets the class variable to True, which makes all of the other EppThreads break the loop as well. I also added EppThread.kill = True at the end so it'll break the request loops more cleanly if you exceed 1 minute run time.
Lastly, I added the url class variable. This is because you expressed interest in running different urls simultaneously and only kill the ones that specifically have a bad connection. All you have to do at this point is subclass EppThread and overwrite kill and url.
class EppThread2(EppThread):
    # Subclass with its own kill flag and url, so instances of this class
    # stop independently of plain EppThread instances.
    kill = False
    url = 'https://v2.example.com/api?$awesome=True'
Then you can instantiate EppThread2 and add it to the threads list and everything should work as you want it to.
You could create an event object that's shared between all your threads that share the same url. When you run into an error in a thread, set the event. Then, in your run loop, check for the event. If it has happened, kill the thread by breaking the loop.
Here's a version of your example modified to use the Event.
import logging, time, threading, requests
#: Log items
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s', level=logging.INFO)
class EppThread(threading.Thread):
    """Polling thread that aborts early when the shared `bad_status`
    threading.Event is set by any sibling polling the same URL."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None, bad_status=None):
        # NOTE(review): `verbose` was removed from threading.Thread in
        # Python 3 — this constructor only works on Python 2.
        threading.Thread.__init__(self, group=group, target=target, name=name, verbose=verbose)
        self.args = args
        self.bad_status = bad_status  # Event shared by threads on one URL

    def run(self):
        startTime = time.time()  # NOTE(review): never used afterwards
        url = self.args[0]
        limit = self.args[1]
        for i in range(limit):
            if self.bad_status.is_set():
                # break the loop on errors in any thread.
                break
            response = requests.get(url)
            if response.status_code != 200:
                # Set the event when an error occurs
                self.bad_status.set()
                break
                # Exit other similar threads (with same url)
            else:
                # NOTE(review): `thread` is undefined in this scope —
                # should be self.getName().
                print('Thread {0} - success'.format(thread.getName()))
        print('process completed')
        # Send Email
number_of_threads = 5    # workers per URL
number_of_requests = 100 # polls per worker
urls = ['https://v1.api.com/example', 'https://v2.api.com/example']

if __name__ == '__main__':
    startTime = time.time()
    threads = []
    for url in urls:
        # Create an event for each URL, shared by that URL's workers only.
        bad_status = threading.Event()
        for i in range(number_of_threads):
            # NOTE(review): `name` is undefined here — NameError.
            et = EppThread(name="{0}-Thread-{1}".format(name, i + 1), args=(url, number_of_requests), bad_status=bad_status)
            threads.append(et)
            et.start()
    # Check if execution time is not greater than 1 minute
    while len(threads) > 0 and (time.time() - startTime) < 60:
        time.sleep(0.5)
        # NOTE(review): removing from `threads` while iterating it skips
        # elements.
        for thread in threads:
            if not thread.isAlive():
                threads.remove(thread)
                print('Thread {0} terminated'.format(thread.getName()))
    # NOTE(review): `os` is never imported, and os._exit skips all cleanup.
    os._exit(1)
The multiprocessing module provides an equivalent Event class for use across processes. So, if at some point you wanted to switch to using Process, it would "just work".
Import sys
Here is an example:
import sys

# Exit the script with an explanatory message when there is nothing to
# process. (Renamed the variable from `list`, which shadowed the built-in
# type, and used truthiness instead of len() < 1.)
items = []
if not items:
    sys.exit("You don\'t have any items in your list")

python multiprocessing/threading cleanup

I have a python tool, that has basically this kind of setup:
main process (P1) -> spawns a process (P2) that starts a tcp connection
-> spawns a thread (T1) that starts a loop to receive
messages that are sent from P2 to P1 via a Queue (Q1)
server process (P2) -> spawns two threads (T2 and T3) that start loops to
receive messages that are sent from P1 to P2 via Queues (Q2 and Q3)
The problem I'm having is that when I stop my program (with Ctrl+C), it doesn't quit. The server process is ended, but the main process just hangs there and I have to kill it.
The thread loop functions all look the same:
def _loop(self):
    # Consume messages from Q1 until running is cleared or a None sentinel
    # arrives.
    while self.running:
        res = self.Q1.get()  # blocks until a message is available
        if res is None:
            break  # sentinel: shut this consumer down
        self._handle_msg(res)
All threads are started as daemon:
# Daemon threads do not keep the process alive at interpreter exit.
t = Thread(target=self._loop)
t.setDaemon(True)  # NOTE(review): modern spelling is t.daemon = True
t.start()
In my main process, I use atexit, to perform clean-up tasks:
atexit.register(self.on_exit)  # run the clean-up tasks when P1 exits
Those clean-up tasks are essentially the following:
1) set self.running in P1 to False and sent None to Q1, so that the Thread T1 should finish
self.running = False
self.Q1.put(None)  # sentinel so T1's blocking get() returns
2) send a message to P2 via Q2 to inform this process that it is ending
self.Q2.put("stop")  # tell P2 (via Q2) that P1 is shutting down
3) In P2, react to the "stop" message and do what we did in P1
self.running = False
self.Q2.put(None)  # sentinels unblock P2's consumer threads (T2, T3)
self.Q3.put(None)
That is it and in my understanding, that should make everything shut down nicely, but it doesn't.
The main code of P1 also contains the following endless loop, because otherwise the program would end prematurely:
while running:
    sleep(1)  # keep P1's main thread alive until shutdown is requested
Maybe that has something to do with the problem, but I cannot see why it should.
So what did I do wrong? Does my setup have major design flaws? Did I forget to shut down something?
EDIT
Ok, I modified my code and managed to make it shut down correctly most of the time. Unfortunately, every now and then, it still gets stuck.
I managed to write a small working example of my code. To demonstrate what happens, you need to simply start the script and then use Ctrl + C to stop it. The issue usually appears if you press Ctrl + C as soon as possible after starting the tool.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import signal
import sys
import logging
from multiprocessing import Process, Queue
from threading import Thread
from time import sleep
logger = logging.getLogger("mepy-client")
class SocketClientProtocol(object):
    """Server-process (P2) side: two consumer threads drain q_in and
    q_binary; outgoing messages go to q_out. (Python 2 code.)"""

    def __init__(self, q_in, q_out, q_binary):
        self.q_in = q_in            # P1 -> P2 messages
        self.q_out = q_out          # P2 -> P1 messages
        self.q_binary = q_binary    # P1 -> P2 binary chunks
        self.running = True
        t = Thread(target=self._loop)
        #t.setDaemon(True)
        t.start()
        t = Thread(target=self._loop_binary)
        #t.setDaemon(True)
        t.start()

    def _loop(self):
        # Message consumer: exits on a None sentinel or when running clears.
        print "start of loop 2"
        while self.running:
            res = self.q_in.get()
            if res is None:
                break
            self._handle_msg(res)
        print "end of loop 2"

    def _loop_binary(self):
        # Binary-chunk consumer; same shutdown protocol as _loop.
        print "start of loop 3"
        while self.running:
            res = self.q_binary.get()
            if res is None:
                break
            self._handle_binary(res)
        print "end of loop 3"

    def _handle_msg(self, msg):
        # A "stop2" message shuts down both consumer loops via the flag
        # plus sentinels.
        msg_type = msg[0]
        if msg_type == "stop2":
            print "STOP RECEIVED"
            self.running = False
            self.q_in.put(None)
            self.q_binary.put(None)

    def _put_msg(self, msg):
        self.q_out.put(msg)

    def _handle_binary(self, data):
        pass

    def handle_element(self):
        self._put_msg(["something"])
def run_twisted(q_in, q_out, q_binary):
    # Entry point of the server process (P2): emit an element every 2 s
    # until the protocol object is told to stop.
    s = SocketClientProtocol(q_in, q_out, q_binary)
    while s.running:
        sleep(2)
        s.handle_element()
class MediatorSender(object):
    """Main-process (P1) side: owns the queues, the server process (P2)
    and the q_out consumer thread. (Python 2 code.)"""

    def __init__(self):
        self.q_in = None        # P1 -> P2 messages
        self.q_out = None       # P2 -> P1 messages
        self.q_binary = None    # P1 -> P2 binary chunks
        self.p = None           # server process (P2)
        self.running = False

    def start(self):
        # Idempotent: a second call while already running is a no-op.
        if self.running:
            return
        self.running = True
        self.q_in = Queue()
        self.q_out = Queue()
        self.q_binary = Queue()
        print "!!!!START"
        self.p = Process(target=run_twisted, args=(self.q_in, self.q_out, self.q_binary))
        self.p.start()
        t = Thread(target=self._loop)
        #t.setDaemon(True)
        t.start()

    def stop(self):
        print "!!!!STOP"
        if not self.running:
            return
        print "STOP2"
        self.running = False
        self.q_out.put(None)        # sentinel: unblock our consumer thread
        self.q_in.put(["stop2"])    # ask P2 to shut itself down
        #self.q_in.put(None)
        #self.q_binary.put(None)
        # NOTE(review): terminate()-ing a child that shares queues can
        # corrupt them (see the multiprocessing warning quoted below in the
        # text) — a likely cause of the intermittent hang.
        try:
            if self.p and self.p.is_alive():
                self.p.terminate()
        except:
            pass

    def _loop(self):
        # Consumer for P2 -> P1 messages; None sentinel ends it.
        print "start of loop 1"
        while self.running:
            res = self.q_out.get()
            if res is None:
                break
            self._handle_msg(res)
        print "end of loop 1"

    def _handle_msg(self, msg):
        # Echo every received message back to P2.
        self._put_msg(msg)

    def _put_msg(self, msg):
        self.q_in.put(msg)

    def _put_binary(self, msg):
        self.q_binary.put(msg)

    def send_chunk(self, chunk):
        self._put_binary(chunk)
running = True  # cleared by the first Ctrl+C

def signal_handler(signal, frame):
    # First Ctrl+C: request a clean shutdown; second Ctrl+C: hard exit.
    # NOTE(review): with multiprocessing, SIGINT is delivered to BOTH
    # processes, so this handler (and ms.stop) can run in the child too.
    global running
    if running:
        running = False
        ms.stop()
    else:
        sys.exit(0)

if __name__ == "__main__":
    signal.signal(signal.SIGINT, signal_handler)
    ms = MediatorSender()
    ms.start()
    for i in range(100):
        ms.send_chunk("some chunk of data")
    while running:
        sleep(1)  # keep the main thread alive until shutdown
I think you're corrupting your multiprocessing.Queue by calling p.terminate() on on the child process. The docs have a warning about this:
Warning: If this method is used when the associated process is using a
pipe or queue then the pipe or queue is liable to become corrupted and
may become unusable by other process. Similarly, if the process has
acquired a lock or semaphore etc. then terminating it is liable to
cause other processes to deadlock.
In some cases, it looks like p is terminating before your MediatorSender._loop method can consume the sentinel you loaded into it to let it know that it should exit.
Also, you're installing a signal handler that expects to work in the main process only, but the SIGINT is actually received by both the parent and the child processes, which means signal_handler gets called in both processes, could result in ms.stop getting called twice, due to a race condition in the way you handle setting ms.running to False
I would recommend just exploiting that both processes receive the SIGINT, and have both the parent and child handle KeyboardInterrupt directly. That way, each then have each shut themselves down cleanly, rather than have the parent terminate the child. The following code demonstrates that, and in my testing never hung. I've simplified your code in a few places, but functionally it's exactly the same:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from multiprocessing import Process, Queue
from threading import Thread
from time import sleep
logger = logging.getLogger("mepy-client")
class SocketClientProtocol(object):
    """Server-process (P2) side. Consumer loops use iter(get, None), so a
    None sentinel on the queue terminates them."""

    def __init__(self, q_in, q_out, q_binary):
        self.q_in = q_in            # P1 -> P2 messages
        self.q_out = q_out          # P2 -> P1 messages
        self.q_binary = q_binary    # P1 -> P2 binary chunks
        t = Thread(target=self._loop)
        t.start()
        t = Thread(target=self._loop_binary)
        t.start()

    def _loop(self):
        # Message consumer; ends when a None sentinel is dequeued.
        print("start of loop 2")
        for res in iter(self.q_in.get, None):
            self._handle_msg(res)
        print("end of loop 2")

    def _loop_binary(self):
        # Binary-chunk consumer; same sentinel protocol.
        print("start of loop 3")
        for res in iter(self.q_binary.get, None):
            self._handle_binary(res)
        print("end of loop 3")

    def _handle_msg(self, msg):
        # A "stop2" message propagates sentinels to both consumer loops.
        msg_type = msg[0]
        if msg_type == "stop2":
            self.q_in.put(None)
            self.q_binary.put(None)

    def _put_msg(self, msg):
        self.q_out.put(msg)

    def stop(self):
        # Clean shutdown: sentinel both consumer loops.
        print("STOP RECEIVED")
        self.q_in.put(None)
        self.q_binary.put(None)

    def _handle_binary(self, data):
        pass

    def handle_element(self):
        self._put_msg(["something"])
def run_twisted(q_in, q_out, q_binary):
    # P2 entry point. The child receives SIGINT too, so KeyboardInterrupt
    # here IS the shutdown signal: the child stops itself cleanly instead
    # of being terminate()d by the parent.
    s = SocketClientProtocol(q_in, q_out, q_binary)
    try:
        while True:
            sleep(2)
            s.handle_element()
    except KeyboardInterrupt:
        s.stop()
class MediatorSender(object):
    """Main-process (P1) side: owns the queues, the server process (P2)
    and the q_out consumer thread. No terminate() — the child shuts itself
    down on its own KeyboardInterrupt."""

    def __init__(self):
        self.q_in = None        # P1 -> P2 messages
        self.q_out = None       # P2 -> P1 messages
        self.q_binary = None    # P1 -> P2 binary chunks
        self.p = None           # server process (P2)
        self.running = False

    def start(self):
        # Idempotent: a second call while already running is a no-op.
        if self.running:
            return
        self.running = True
        self.q_in = Queue()
        self.q_out = Queue()
        self.q_binary = Queue()
        print("!!!!START")
        self.p = Process(target=run_twisted,
                         args=(self.q_in, self.q_out, self.q_binary))
        self.p.start()
        self.loop = Thread(target=self._loop)
        self.loop.start()

    def stop(self):
        print("!!!!STOP")
        if not self.running:
            return
        print("STOP2")
        self.running = False
        # Sentinel ends our own consumer loop; the child handles its own
        # KeyboardInterrupt, so nothing else is needed here.
        self.q_out.put(None)

    def _loop(self):
        # Consumer for P2 -> P1 messages; None sentinel ends it.
        print("start of loop 1")
        for res in iter(self.q_out.get, None):
            self._handle_msg(res)
        print("end of loop 1")

    def _handle_msg(self, msg):
        # Echo every received message back to P2.
        self._put_msg(msg)

    def _put_msg(self, msg):
        self.q_in.put(msg)

    def _put_binary(self, msg):
        self.q_binary.put(msg)

    def send_chunk(self, chunk):
        self._put_binary(chunk)
if __name__ == "__main__":
    ms = MediatorSender()
    try:
        ms.start()
        for i in range(100):
            ms.send_chunk("some chunk of data")
        # You actually have to join w/ a timeout in a loop on Python 2.7 —
        # a plain join() blocks SIGINT delivery to the main process and the
        # program hangs (fixed in Python 3.x).
        # NOTE(review): despite the comment, no timeout is passed below;
        # confirm ms.loop.join(<timeout>) was intended.
        while True:
            ms.loop.join()
    except KeyboardInterrupt:
        ms.stop()
Edit:
If you prefer to use a signal handler rather than catching KeyboardInterrupt, you just need to make sure the child process uses its own signal handler, rather than inheriting the parent's:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import signal
import logging
from functools import partial
from multiprocessing import Process, Queue
from threading import Thread
from time import sleep
logger = logging.getLogger("mepy-client")
class SocketClientProtocol(object):
    """Server-process (P2) side, signal-handler variant: `running` is
    cleared by the child's own SIGINT handler."""

    def __init__(self, q_in, q_out, q_binary):
        self.q_in = q_in            # P1 -> P2 messages
        self.q_out = q_out          # P2 -> P1 messages
        self.q_binary = q_binary    # P1 -> P2 binary chunks
        self.running = True
        t = Thread(target=self._loop)
        t.start()
        t = Thread(target=self._loop_binary)
        t.start()

    def _loop(self):
        # Message consumer; ends when a None sentinel is dequeued.
        print("start of loop 2")
        for res in iter(self.q_in.get, None):
            self._handle_msg(res)
        print("end of loop 2")

    def _loop_binary(self):
        # Binary-chunk consumer; same sentinel protocol.
        print("start of loop 3")
        for res in iter(self.q_binary.get, None):
            self._handle_binary(res)
        print("end of loop 3")

    def _handle_msg(self, msg):
        # A "stop2" message propagates sentinels to both consumer loops.
        msg_type = msg[0]
        if msg_type == "stop2":
            self.q_in.put(None)
            self.q_binary.put(None)

    def _put_msg(self, msg):
        self.q_out.put(msg)

    def stop(self):
        # Called from the child's SIGINT handler: stop the main loop and
        # sentinel both consumers.
        print("STOP RECEIVED")
        self.running = False
        self.q_in.put(None)
        self.q_binary.put(None)

    def _handle_binary(self, data):
        pass

    def handle_element(self):
        self._put_msg(["something"])
def run_twisted(q_in, q_out, q_binary):
    # P2 entry point: install the child's OWN SIGINT handler (otherwise it
    # would inherit the parent's handler, which closes over parent state).
    s = SocketClientProtocol(q_in, q_out, q_binary)
    signal.signal(signal.SIGINT, partial(signal_handler_child, s))
    while s.running:
        sleep(2)
        s.handle_element()
class MediatorSender(object):
    """Main-process (P1) side for the signal-handler variant: owns the
    queues, the server process (P2) and the q_out consumer thread."""

    def __init__(self):
        self.q_in = None        # P1 -> P2 messages
        self.q_out = None       # P2 -> P1 messages
        self.q_binary = None    # P1 -> P2 binary chunks
        self.p = None           # server process (P2)
        self.running = False

    def start(self):
        # Idempotent: a second call while already running is a no-op.
        if self.running:
            return
        self.running = True
        self.q_in = Queue()
        self.q_out = Queue()
        self.q_binary = Queue()
        print("!!!!START")
        self.p = Process(target=run_twisted,
                         args=(self.q_in, self.q_out, self.q_binary))
        self.p.start()
        self.loop = Thread(target=self._loop)
        self.loop.start()

    def stop(self):
        print("!!!!STOP")
        if not self.running:
            return
        print("STOP2")
        self.running = False
        # Sentinel ends our own consumer loop; the child stops itself via
        # its own SIGINT handler.
        self.q_out.put(None)

    def _loop(self):
        # Consumer for P2 -> P1 messages; None sentinel ends it.
        print("start of loop 1")
        for res in iter(self.q_out.get, None):
            self._handle_msg(res)
        print("end of loop 1")

    def _handle_msg(self, msg):
        # Echo every received message back to P2.
        self._put_msg(msg)

    def _put_msg(self, msg):
        self.q_in.put(msg)

    def _put_binary(self, msg):
        self.q_binary.put(msg)

    def send_chunk(self, chunk):
        self._put_binary(chunk)
def signal_handler_main(ms, *args):
    """SIGINT handler for the parent process: delegate to ms.stop()."""
    ms.stop()
def signal_handler_child(s, *args):
    """SIGINT handler installed in the child process: delegate to s.stop()."""
    s.stop()
if __name__ == "__main__":
    ms = MediatorSender()
    # Bind ms into the parent's SIGINT handler via partial.
    signal.signal(signal.SIGINT, partial(signal_handler_main, ms))
    ms.start()
    for i in range(100):
        ms.send_chunk("some chunk of data")
    # Join with a huge timeout in a loop: on Python 2.7 a plain join()
    # blocks SIGINT delivery to the main thread.
    while ms.loop.is_alive():
        ms.loop.join(9999999)
    print('done main')
Maybe you should try to capture SIGINT signal, which is generated by Ctrl + C using signal.signal like this:
#!/usr/bin/env python
import signal
import sys


def signal_handler(signum, frame):
    """Exit cleanly when Ctrl+C (SIGINT) arrives."""
    print('You pressed Ctrl+C!')
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C')
signal.pause()  # sleep until a signal is delivered
Code stolen from here
This usually works for me if I am using the threading module; it will not work with the multiprocessing module, though. If you are running the script from the terminal, try running it in the background, like this.
python scriptFoo.py &
After you run the process it will output the PID like this
[1] 23107
Whenever you need to quit the script, just type kill followed by the script's PID, like this.
kill 23107
Hit enter again and it should kill all the subprocesses and output this.
[1]+ Terminated python scriptFoo.py
As far as I know, you cannot kill all the subprocesses with 'Ctrl+C'.

Timeout function in Python

I want to have a function, in Python (3.x), which force to the script itself to terminate, like :
i_time_value = 10
mytimeout(i_time_value ) # Terminate the script if not in i_time_value seconds
for i in range(10):
print("go")
time.sleep(2)
Where "mytimeout" is the function I need: it terminates the script after "arg" seconds if the script has not finished by then.
I have seen good solutions for put a timeout to a function here or here, but I don't want a timeout for a function but for the script.
Also :
I know that I can put my script in a function or using something like subprocess and use it with a
timeout, I tried it and it works, but I want something more simple.
It must be Unix & Windows compatible.
The function must be universal, i.e. it may be added to any script in
one line (except the import).
I need a function not a 'how to put a timeout in a script'.
signal is not Windows compatible.
You can send some signals on Windows e.g.:
os.kill(os.getpid(), signal.CTRL_C_EVENT) # send Ctrl+C to itself
You could use threading.Timer to call a function at a later time:
from threading import Timer


def kill_yourself(delay):
    """Schedule kill_yourself_now() to fire after *delay* seconds."""
    timer = Timer(delay, kill_yourself_now)
    # Daemon thread: no need to kill yourself if we're already dead.
    timer.daemon = True
    timer.start()
where kill_yourself_now() is defined as:
import os
import signal
import sys


def kill_yourself_now():
    """Signal our own process, raising KeyboardInterrupt in the main thread."""
    if sys.platform == 'win32':
        sig = signal.CTRL_C_EVENT
    else:
        sig = signal.SIGINT
    os.kill(os.getpid(), sig)
If your scripts starts other processes then see: how to kill child process(es) when parent dies? See also, How to terminate a python subprocess launched with shell=True -- it demonstrates how to kill a process tree.
I would use something like this.
import sys
import time
import threading


def set_timeout(event):
    """Timer callback: flag that the deadline has passed."""
    event.set()


event = threading.Event()
i_time_value = 2

# Fire the flag after i_time_value seconds; the loop below polls it.
timer = threading.Timer(i_time_value, set_timeout, [event])
timer.start()

for _ in range(10):
    print("go")
    if event.is_set():
        print('Timed Out!')
        sys.exit()
    time.sleep(2)
A little bit of googling turned this answer up:
import multiprocessing as MP
from sys import exc_info
# FIX: time.clock() was removed in Python 3.8; monotonic() is the
# correct deadline clock (never goes backwards).
from time import monotonic as clock

DEFAULT_TIMEOUT = 60

################################################################################


def timeout(limit=None):
    """Decorator factory: run the wrapped function in a worker process
    that is terminated once *limit* seconds elapse.

    limit defaults to DEFAULT_TIMEOUT; raises ValueError if not positive.
    """
    if limit is None:
        limit = DEFAULT_TIMEOUT
    if limit <= 0:
        raise ValueError()

    def wrapper(function):
        return _Timeout(function, limit)
    return wrapper


class TimeoutError(Exception):
    """Raised when the wrapped function did not finish within the limit."""


################################################################################


def _target(queue, function, *args, **kwargs):
    """Worker-process entry: run *function*, report (ok, payload) via queue."""
    try:
        queue.put((True, function(*args, **kwargs)))
    # FIX: was a bare ``except:``; don't trap SystemExit/KeyboardInterrupt.
    except Exception:
        queue.put((False, exc_info()[1]))


class _Timeout:
    """Callable wrapper that executes its function in a worker process
    and exposes the result via the ``ready`` / ``value`` properties."""

    def __init__(self, function, limit):
        self.__limit = limit            # seconds allowed per call
        self.__function = function
        self.__timeout = clock()        # deadline, reset on each call
        self.__process = MP.Process()   # placeholder until first call
        self.__queue = MP.Queue()

    def __call__(self, *args, **kwargs):
        """Start a fresh worker running the function; returns immediately."""
        self.cancel()
        self.__queue = MP.Queue(1)
        args = (self.__queue, self.__function) + args
        self.__process = MP.Process(target=_target, args=args, kwargs=kwargs)
        self.__process.daemon = True
        self.__process.start()
        self.__timeout = self.__limit + clock()

    def cancel(self):
        """Terminate the worker if it is still running."""
        if self.__process.is_alive():
            self.__process.terminate()

    # FIX: the original had ``#property`` (a comment, not a decorator),
    # leaving ready/value as plain methods, so ``self.ready is True``
    # compared a bound method and value ALWAYS raised TimeoutError.
    @property
    def ready(self):
        """True once a result is available; False otherwise.

        Terminates the worker when the deadline has passed.
        """
        if self.__queue.full():
            return True
        elif not self.__queue.empty():
            return True
        elif self.__timeout < clock():
            self.cancel()
            return False   # FIX: original fell through, returning None
        else:
            return False

    @property
    def value(self):
        """The call's result; re-raises the worker's exception, or raises
        TimeoutError if no result is ready."""
        if self.ready is True:
            flag, load = self.__queue.get()
            if flag:
                return load
            raise load
        raise TimeoutError()

    def __get_limit(self):
        return self.__limit

    def __set_limit(self, value):
        if value <= 0:
            raise ValueError()
        self.__limit = value

    # Seconds the wrapped call may run before being terminated.
    limit = property(__get_limit, __set_limit)
It might be Python 2.x, but it shouldn't be terribly hard to convert.

Categories