How to Loop in threaded script on Python - python

Right now I have code in my script
def run(self):
while True:
try:
self.one()
self.two()
self.three()
self.four()
except (IndexError, ProxyError, SSLError, ConnectionError, errno.ECONNREFUSED):
print statusTime(self.index) + 'Bad Proxy'
break
def jobWorker(username,password,proxy):
bot = Bot(username,password,proxy)
bot.start()
bot.join()
return
How can I tell python to continuously run def run(self) so basically keep looping through self.one(), self.two(), self.three(), self.four() so it doesn't stop. Right now each Bot stops at self.four() and doesn't start again at self.one()

you can put the function call within
def __init__(self):
self.run()

Related

python how to do integration test of watchdog

I am having an application that listens to changes in a certain folder with watchdog
inside an infinite loop
class MyObserver:
def __init__(self, observer, handler):
self._event_handler = handler
self._event_observer = observer
def watch(self, path):
self.start(path)
try:
while True:
time.sleep(60)
except KeyboardInterrupt:
self.stop()
except Exception as err:
# log
def start(self, path):
self._schedule(path)
self._event_observer.start()
def stop(self):
self._event_observer.stop()
self._event_observer.join()
def _schedule(self, path):
self._event_observer.schedule(self._event_handler, path, recursive=True)
this is my eventHandler
class ImagesEventHandler(RegexMatchingEventHandler):
IMAGES_REGEX = [r"(.*).(jpe?g|bmp)$"]
def __init__(self):
super().__init__(self.IMAGES_REGEX)
def on_created(self, event):
return self._process(event)
@staticmethod
def _process(event):
# log
return event.src_path # i want to test this!
I want to be able to start the application, insert an image manually to the folder
and be able to test the results of capturing the event of creating the image
and also finishing the infinite loop,
I tried using interruptingcow to cut the loop but it's not working on Windows,
and also tried to mock the time.sleep but it didn't work as well
@pytest.mark.integration_test
class TestLoop(TestCase):
@patch("time.sleep", side_effect=InterruptedError)
def test_collect_images(self, mocked_sleep):
try:
main.run()
except InterruptedError:
print("Test passed")
I see this test separated into 3 parts:
starting the loop
inserting images to my folder
interrupting the loop
validating the log messages or the return value of _process somehow
how would you test it?

kivy threading not working with me (app not responding)

Hello guys, I have a problem with my kivy app: when I start to run some functions the app stops responding. I tried to solve it by using threading but it's not working for me.
so this is widget class :
class PyWidget(Widget):
stop = threading.Event()
def start_thread(self):
self.ids.mic.source = 'assets/open2.png'
self.ids.mic.reload()
time.sleep(1)
threading.Thread(target=self.start_listening).start();
@mainthread
def start_listening(self):
while True:
try:
time.sleep(1)
print('Listening.......')
voiceText = RecognizeVoice()
# time.sleep(1)
if 'hello' in voiceText and Talking(App, respone, RecognizeVoice):
return
# else: Talking(App, respone, RecognizeVoice)
time.sleep(1)
except Exception as e:
print(f'start_listening: {e}')
RecognizeVoice function to start the mic and get user voice to text
def RecognizeVoice():
try:
with speechRec.Microphone() as sound:
voice = recognizer.listen(sound)
voiceText = recognizer.recognize_google(voice, language="en-US") #online
voiceText = voiceText.lower()
print(f'Input : {voiceText}')
return voiceText
except speechRec.UnknownValueError as e:
print(f'RecognizeVoice: {e}')
except speechRec.RequestError as e:
print(f'RecognizeVoice: {e}')
respone('Sorry, something went wrong.')
# Text to speech
def respone(message):
AI.say(message)
AI.runAndWait()
in my GUI I have a button that when I click the start_thread function starts and all others follow it. I hope that I explained everything. Thanks for helping.
The @mainthread decorator on the start_listening() method defeats the threading. Just remove that decorator.

Broken pipe error when trying to send anything over pipe between processes with sending process running Tkinter

I am playing around with Pipe and Process from the multiprocessing module (Python 3.8). My initial program looks like this:
from multiprocessing import Process, Pipe
class Process1(object):
def __init__(self, pipe_out):
self.pipe_out = pipe_out
self.run()
def run(self):
try:
while True:
print("Sending message to process 2")
self.pipe_out.send(["hello"])
except KeyboardInterrupt:
pass
class Process2(object):
def __init__(self, pipe_in):
self.pipe_in = pipe_in
self.run()
def run(self):
try:
while self.pipe_in.poll():
request = self.pipe_in.recv()
method = request[0]
args = request[1:]
try:
getattr(self, method + "_callback")(*args)
except AttributeError as ae:
print("Unknown callback received from pipe", str(ae))
print("Process 2 done with receiving")
except KeyboardInterrupt:
pass
def hello_callback(self):
print("Process 1 said hello")
class Controller(object):
def __init__(self):
pipe_proc1_out, pipe_proc2_in = Pipe()
self.proc1 = Process(
target=Process1,
args=(pipe_proc1_out, )
)
self.proc2 = Process(
target=Process2,
args=(pipe_proc2_in, )
)
def run(self):
try:
self.proc1.start()
self.proc2.start()
while True:
continue
except KeyboardInterrupt:
print("Quitting processes...")
self.proc1.join(1)
if self.proc1.is_alive():
self.proc1.terminate()
self.proc2.join(1)
if self.proc2.is_alive():
self.proc2.terminate()
print("Finished")
def pipes():
c = Controller()
c.run()
if __name__ == "__main__":
pipes()
I have a Controller instance that runs until a keyboard interruption is received. It also handles two processes Process1 and Process2 with the former constantly sending and the latter constantly receiving.
The code above is a skeleton for a larger undertaking that involves a complex GUI (PySide), image processing (OpenCV) and a game engine (Panda3D). So I tried to add Tkinter as a GUI example:
from multiprocessing import Process, Pipe
import tkinter as tk
class Process1(tk.Frame):
def __init__(self, pipe_out):
self.pipe_out = pipe_out
self.setup_gui()
self.run()
def setup_gui(self):
self.app = tk.Tk()
lb1 = tk.Label(self.app, text="Message:")
lb1.pack()
self.ent1 = tk.Entry(self.app)
self.ent1.pack()
btn1 = tk.Button(self.app, text="Say hello to other process",
command=self.btn1_clicked)
btn1.pack()
def btn1_clicked(self):
msg = self.ent1.get()
self.pipe_out.send(["hello", msg])
def run(self):
try:
self.app.mainloop()
except KeyboardInterrupt:
pass
class Process2(object):
def __init__(self, pipe_in):
self.pipe_in = pipe_in
self.run()
def run(self):
try:
while self.pipe_in.poll():
request = self.pipe_in.recv()
method = request[0]
args = request[1:]
try:
getattr(self, method + "_callback")(*args)
except AttributeError as ae:
print("Unknown callback received from pipe", str(ae))
print("Process 2 done with receiving")
except KeyboardInterrupt:
pass
def hello_callback(self, msg):
print("Process 1 say\"" + msg + "\"")
class Controller(object):
def __init__(self):
pipe_proc1_out, pipe_proc2_in = Pipe()
self.proc1 = Process(
target=Process1,
args=(pipe_proc1_out, )
)
self.proc2 = Process(
target=Process2,
args=(pipe_proc2_in, )
)
def run(self):
try:
self.proc1.start()
self.proc2.start()
while True:
continue
except KeyboardInterrupt:
print("Quitting processes...")
self.proc1.join(1)
if self.proc1.is_alive():
self.proc1.terminate()
self.proc2.join(1)
if self.proc2.is_alive():
self.proc2.terminate()
print("Finished")
def pipes():
c = Controller()
c.run()
if __name__ == "__main__":
pipes()
Notice that currently the Tkinter window can only be closed if the "parent" process is interrupted via keyboard.
Whenever I click the button and invoke the button's command, my program goes into an error state with the following message:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\USER\Anaconda3\envs\THS\lib\tkinter\__init__.py", line 1705, in __call__
return self.func(*args)
File "C:\Users\USER\PycharmProjects\PythonPlayground\pipes_advanced.py", line 26, in btn1_clicked
self.pipe_out.send(["hello", 1, 2])
File "C:\Users\USER\Anaconda3\envs\THS\lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Users\USER\Anaconda3\envs\THS\lib\multiprocessing\connection.py", line 280, in _send_bytes
ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
BrokenPipeError: [WinError 232] The pipe is being closed
At first I thought that the problem is with the value I'm receiving from the Entry.get() call (my Tkinter skills are rusty). I printed msg and got the text from the widget.
Next thing I tried was to put a constant string as the value of the argument that I sent over the pipe:
def btn1_clicked(self):
self.pipe_out.send(["hello", "world"])
The same error appeared. Catching the exception BrokenPipeError doesn't really do me any good (except if I want to handle the case when the pipe is broken I guess).
If I do the same for the first version of the program (without Tkinter), it works. This leads me to believe that my problem comes from the way I have integrated Tkinter.
The issue you have is that you poll the pipe, but the documentation says:
poll([timeout])
Return whether there is any data available to be read.
If timeout is not specified then it will return immediately.
In the first example it works because when starting Process1 you send data to the pipe immediately:
def run(self):
try:
while True:
print("Sending message to process 2")
self.pipe_out.send(["hello"])
except KeyboardInterrupt:
pass
And you do this continuously so the .poll will return True and the loop in Process2 will continue.
As with tkinter nothing gets sent to the pipe immediately it waits for user to click a button, by the time any of that can happen the Process2 already has called poll and it immediately returned False and it didn't even start that loop. If you notice then it also almost immediately prints in the terminal that
"Process 2 done with receiving"
To solve this issue the easiest seems to use
while self.pipe_in.poll(None):
which per the docs means
"If timeout is None then an infinite timeout is used."
and for something like user interface this seems to be the best fit (from user's perspective at least (or so I think)) so basically your run method in Process2 should look like this:
def run(self):
try:
while self.pipe_in.poll(None):
request = self.pipe_in.recv()
method = request[0]
args = request[1:]
try:
getattr(self, method + "_callback")(*args)
except AttributeError as ae:
print("Unknown callback received from pipe", str(ae))
print("Process 2 done with receiving")
except (KeyboardInterrupt, EOFError):
pass
Also not related to the problem but there seems to be no need to inherit from tk.Frame in Process1 (or object in Process2 (unless you really need to make it compatible with Python2)), you almost could inherit from tk.Tk, that should make it easier to actually use it as the main window since self would be the Tk instance

Asyncio.sleep causes script to End Immediately

In my simple asyncio Python program below, bar_loop is supposed to run continuously with a 1 second delay between loops.
Things run as expected when we have simply
async def bar_loop(self):
while True:
print('bar')
However, when we add a asyncio.sleep(1), the loop will end instead of looping.
async def bar_loop(self):
while True:
print('bar')
await asyncio.sleep(1)
Why does asyncio.sleep() cause bar_loop to exit immediately? How can we let it loop with a 1 sec delay?
Full Example:
import asyncio
from typing import Optional
class Foo:
def __init__(self):
self.bar_loop_task: Optional[asyncio.Task] = None
async def start(self):
self.bar_loop_task = asyncio.create_task(self.bar_loop())
async def stop(self):
if self.bar_loop_task is not None:
self.bar_loop_task.cancel()
async def bar_loop(self):
while True:
print('bar')
await asyncio.sleep(1)
if __name__ == '__main__':
try:
foo = Foo()
asyncio.run(foo.start())
except KeyboardInterrupt:
asyncio.run(foo.stop())
Using Python 3.9.5 on Ubuntu 20.04.
This behavior has nothing to do with calling asyncio.sleep, but with the expected behavior of creating a task and doing nothing else.
Tasks will run in parallel in the asyncio loop, while other code that uses just coroutine and await expressions can be thought of as if run in a linear pattern - however, as they are "out of the way" of the - let's call it "visible path of execution" - they also won't prevent that flow.
In this case, your program simply reaches the end of the start method, with nothing left being "awaited", the asyncio loop simply finishes its execution.
If you have no explicit code to run in parallel to bar_loop, just await for the task. Change your start method to read:
async def start(self):
self.bar_loop_task = asyncio.create_task(self.bar_loop())
try:
await self.bar_loop_task
except XXX:
# handle exceptions that might have taken place inside the task

Close asyncio loop on KeyboardInterrupt - Run stop routine

I'm using python to create a script which runs and interacts with some processes simultaneously. For that I'm using asyncio to implement this parallelism. The main problem is how to run another cleanup routine when a KeyboardInterrupt or a SIGINT occurs.
Here's an example code I wrote to show the problem:
import asyncio
import logging
import signal
from time import sleep
class Process:
async def start(self, arguments):
self._process = await asyncio.create_subprocess_exec("/bin/bash", *arguments)
return await self._process.wait()
async def stop(self):
self._process.terminate()
class BackgroundTask:
async def start(self):
# Very important process which needs to run while process 2 is running
self._process1 = Process()
self._process1_task = asyncio.create_task(self._process1.start(["-c", "sleep 100"]))
self._process2 = Process()
self._process2_task = asyncio.create_task(self._process2.start(["-c", "sleep 50"]))
await asyncio.wait([self._process1_task, self._process2_task], return_when=asyncio.ALL_COMPLETED)
async def stop(self):
# Stop process
await self._process1.stop()
# Call a cleanup process which cleans up process 1
cleanup_process = Process()
await cleanup_process.start(["-c", "sleep 10"])
# After that we can stop our second process
await self._process2.stop()
backgroundTask = BackgroundTask()
async def main():
await asyncio.create_task(backgroundTask.start())
logging.basicConfig(level=logging.DEBUG)
asyncio.run(main(), debug=True)
This code creates a background task which starts two processes (in this example two bash sleep commands) and waits for them to finish. This works fine and both command are running in parallel.
The main problem is the stop routine. I'd like to run the stop method when the program receives a SIGINT or KeyboardInterrupt, which first stops the process1, then starts a cleanup method and stops process2 afterwards. This is necessary because the cleanup command depends on process2.
What I've tried (instead of the asyncio.run() and the async main):
def main():
try:
asyncio.get_event_loop().run_until_complete(backgroundTask.start())
except KeyboardInterrupt:
asyncio.get_event_loop().run_until_complete(backgroundTask.stop())
main()
This of course doesn't work as expected, because as soon as a KeyboardInterrupt exception occurs the backgroundTask.start Task is canceled and the backgroundTask.stop is started in the main loop, so my processes are canceled and can't be stopped properly.
So is there a way to detect the KeyboardInterrupt without canceling the current main loop and run my backgroundTask.stop method instead?
You want to add a signal handler as shown in this example in the docs:
import asyncio
import functools
import os
import signal
def ask_exit(signame, loop):
print("got signal %s: exit" % signame)
loop.stop()
async def main():
loop = asyncio.get_running_loop()
for signame in {'SIGINT', 'SIGTERM'}:
loop.add_signal_handler(
getattr(signal, signame),
functools.partial(ask_exit, signame, loop))
await asyncio.sleep(3600)
print("Event loop running for 1 hour, press Ctrl+C to interrupt.")
print(f"pid {os.getpid()}: send SIGINT or SIGTERM to exit.")
asyncio.run(main())
That's a bit of an overcomplicated/outdated example though, consider it more like this (your coroutine code goes where the asyncio.sleep call is):
import asyncio
from signal import SIGINT, SIGTERM
async def main():
loop = asyncio.get_running_loop()
for signal_enum in [SIGINT, SIGTERM]:
loop.add_signal_handler(signal_enum, loop.stop)
await asyncio.sleep(3600) # Your code here
asyncio.run(main())
At this point a Ctrl + C will break the loop and raise a RuntimeError, which you can catch by putting the asyncio.run call in a try/except block like so:
try:
asyncio.run(main())
except RuntimeError as exc:
expected_msg = "Event loop stopped before Future completed."
if exc.args and exc.args[0] == expected_msg:
print("Bye")
else:
raise
That's not very satisfying though (what if something else caused the same error?), so I'd prefer to raise a distinct error. Also, if you're exiting on the command line, the proper thing to do is to return the proper exit code (in fact, the code in the example just uses the name, but it's actually an IntEnum with that numeric exit code in it!)
import asyncio
from functools import partial
from signal import SIGINT, SIGTERM
from sys import stderr
class SignalHaltError(SystemExit):
def __init__(self, signal_enum):
self.signal_enum = signal_enum
print(repr(self), file=stderr)
super().__init__(self.exit_code)
@property
def exit_code(self):
return self.signal_enum.value
def __repr__(self):
return f"\nExitted due to {self.signal_enum.name}"
def immediate_exit(signal_enum, loop):
loop.stop()
raise SignalHaltError(signal_enum=signal_enum)
async def main():
loop = asyncio.get_running_loop()
for signal_enum in [SIGINT, SIGTERM]:
exit_func = partial(immediate_exit, signal_enum=signal_enum, loop=loop)
loop.add_signal_handler(signal_enum, exit_func)
await asyncio.sleep(3600)
print("Event loop running for 1 hour, press Ctrl+C to interrupt.")
asyncio.run(main())
Which when Ctrl + C'd out of gives:
python cancelling_original.py
⇣
Event loop running for 1 hour, press Ctrl+C to interrupt.
^C
Exitted due to SIGINT
echo $?
⇣
2
Now there's some code I'd be happy to serve! :^)
P.S. here it is with type annotations:
from __future__ import annotations
import asyncio
from asyncio.events import AbstractEventLoop
from functools import partial
from signal import Signals, SIGINT, SIGTERM
from sys import stderr
from typing import Coroutine
class SignalHaltError(SystemExit):
def __init__(self, signal_enum: Signals):
self.signal_enum = signal_enum
print(repr(self), file=stderr)
super().__init__(self.exit_code)
@property
def exit_code(self) -> int:
return self.signal_enum.value
def __repr__(self) -> str:
return f"\nExitted due to {self.signal_enum.name}"
def immediate_exit(signal_enum: Signals, loop: AbstractEventLoop) -> None:
loop.stop()
raise SignalHaltError(signal_enum=signal_enum)
async def main() -> Coroutine:
loop = asyncio.get_running_loop()
for signal_enum in [SIGINT, SIGTERM]:
exit_func = partial(immediate_exit, signal_enum=signal_enum, loop=loop)
loop.add_signal_handler(signal_enum, exit_func)
return await asyncio.sleep(3600)
print("Event loop running for 1 hour, press Ctrl+C to interrupt.")
asyncio.run(main())
The advantage of a custom exception here is that you can then catch it specifically, and avoid the traceback being dumped to the screen
try:
asyncio.run(main())
except SignalHaltError as exc:
# log.debug(exc)
pass
else:
raise

Categories