Hello guys, I have a problem with my Kivy app: when I start to run some functions, the app stops responding. I tried to solve it by using threading, but it's not working.
So this is the widget class:
class PyWidget(Widget):
    """Kivy widget that starts a background thread to listen for voice input."""

    # Shared event that could be used to signal the listener thread to stop.
    stop = threading.Event()

    def start_thread(self):
        # Swap the mic icon to the "open" image and force a reload so the
        # change shows before the listener starts.
        self.ids.mic.source = 'assets/open2.png'
        self.ids.mic.reload()
        # NOTE(review): this sleep runs on the UI thread and freezes the GUI
        # for one second — confirm it is really needed here.
        time.sleep(1)
        threading.Thread(target=self.start_listening).start();

    # NOTE(review): in the original post this was the @mainthread decorator;
    # it forces start_listening back onto Kivy's main thread, defeating the
    # Thread started above.
    #mainthread
    def start_listening(self):
        # Loop forever: capture a phrase, react to "hello", listen again.
        while True:
            try:
                time.sleep(1)
                print('Listening.......')
                voiceText = RecognizeVoice()
                # time.sleep(1)
                # RecognizeVoice may return None on recognition failure; the
                # `in` test then raises TypeError, caught by the except below.
                if 'hello' in voiceText and Talking(App, respone, RecognizeVoice):
                    return
                # else: Talking(App, respone, RecognizeVoice)
                time.sleep(1)
            except Exception as e:
                print(f'start_listening: {e}')
The RecognizeVoice function starts the mic and converts the user's voice to text:
def RecognizeVoice():
    """Record one phrase from the microphone and return it as lowercase text.

    Returns None (implicitly) when recognition fails.
    """
    try:
        with speechRec.Microphone() as sound:
            voice = recognizer.listen(sound)
            voiceText = recognizer.recognize_google(voice, language="en-US") #online
            voiceText = voiceText.lower()
            print(f'Input : {voiceText}')
            return voiceText
    except speechRec.UnknownValueError as e:
        # Audio was captured but could not be understood.
        print(f'RecognizeVoice: {e}')
    except speechRec.RequestError as e:
        # The online recognition service could not be reached.
        print(f'RecognizeVoice: {e}')
        respone('Sorry, something went wrong.')
# Text to speech
def respone(message):
    """Speak *message* aloud through the AI engine; runAndWait presumably
    blocks until playback finishes — confirm against the engine's docs."""
    AI.say(message)
    AI.runAndWait()
In my GUI I have a button; when I click it, the start_thread function starts and all the others follow it. I hope that I explained everything — thanks for helping.
The @mainthread decorator on the start_listening() method defeats the threading: it forces the method back onto Kivy's main thread. Just remove that decorator.
Related
I am playing around with Pipe and Process from the multiprocessing module (Python 3.8). My initial program looks like this:
from multiprocessing import Process, Pipe
class Process1(object):
    """Sender side: floods the pipe with "hello" messages until interrupted."""

    def __init__(self, pipe_out):
        self.pipe_out = pipe_out
        # run() is entered from the constructor, so constructing this object
        # (as the child process target) immediately starts the send loop.
        self.run()

    def run(self):
        try:
            # Send continuously; the receiver's poll() therefore always
            # finds data available.
            while True:
                print("Sending message to process 2")
                self.pipe_out.send(["hello"])
        except KeyboardInterrupt:
            pass
class Process2(object):
    """Receiver side: dispatches incoming messages to *_callback methods."""

    def __init__(self, pipe_in):
        self.pipe_in = pipe_in
        self.run()

    def run(self):
        try:
            # poll() with no timeout returns immediately; this only keeps
            # looping because the sender floods the pipe without pause.
            while self.pipe_in.poll():
                # A message is a list of the form [callback_name, *args].
                request = self.pipe_in.recv()
                method = request[0]
                args = request[1:]
                try:
                    getattr(self, method + "_callback")(*args)
                except AttributeError as ae:
                    print("Unknown callback received from pipe", str(ae))
            print("Process 2 done with receiving")
        except KeyboardInterrupt:
            pass

    def hello_callback(self):
        print("Process 1 said hello")
class Controller(object):
    """Owns the pipe and the two worker processes; runs until Ctrl+C."""

    def __init__(self):
        # One duplex pipe: proc1 writes on one end, proc2 reads the other.
        pipe_proc1_out, pipe_proc2_in = Pipe()
        self.proc1 = Process(
            target=Process1,
            args=(pipe_proc1_out, )
        )
        self.proc2 = Process(
            target=Process2,
            args=(pipe_proc2_in, )
        )

    def run(self):
        try:
            self.proc1.start()
            self.proc2.start()
            # NOTE(review): this busy-wait burns a full CPU core; sleeping
            # inside the loop (or joining the processes) would be kinder.
            while True:
                continue
        except KeyboardInterrupt:
            print("Quitting processes...")
            # Give each child a second to exit cleanly, then force it.
            self.proc1.join(1)
            if self.proc1.is_alive():
                self.proc1.terminate()
            self.proc2.join(1)
            if self.proc2.is_alive():
                self.proc2.terminate()
            print("Finished")
def pipes():
    """Build the Controller and run it until interrupted."""
    Controller().run()


if __name__ == "__main__":
    pipes()
I have a Controller instance that runs until a keyboard interruption is received. It also handles two processes Process1 and Process2 with the former constantly sending and the latter constantly receiving.
The code above is a skeleton for a larger undertaking that involves a complex GUI (PySide), image processing (OpenCV) and a game engine (Panda3D). So I tried to add Tkinter as a GUI example:
from multiprocessing import Process, Pipe
import tkinter as tk
class Process1(tk.Frame):
    """GUI side: a small Tk window whose button sends a message down the pipe.

    NOTE(review): inherits tk.Frame but never calls tk.Frame.__init__, so the
    base class is effectively unused.
    """

    def __init__(self, pipe_out):
        self.pipe_out = pipe_out
        self.setup_gui()
        self.run()

    def setup_gui(self):
        # Build the root window with a label, an entry and a send button.
        self.app = tk.Tk()
        lb1 = tk.Label(self.app, text="Message:")
        lb1.pack()
        self.ent1 = tk.Entry(self.app)
        self.ent1.pack()
        btn1 = tk.Button(self.app, text="Say hello to other process",
                         command=self.btn1_clicked)
        btn1.pack()

    def btn1_clicked(self):
        # Send whatever is typed in the entry as ["hello", msg].
        msg = self.ent1.get()
        self.pipe_out.send(["hello", msg])

    def run(self):
        # Blocks in the Tk event loop for the lifetime of this process.
        try:
            self.app.mainloop()
        except KeyboardInterrupt:
            pass
class Process2(object):
    """Receiver side: dispatches incoming messages to *_callback methods."""

    def __init__(self, pipe_in):
        self.pipe_in = pipe_in
        self.run()

    def run(self):
        try:
            # NOTE(review): poll() without a timeout returns immediately.
            # The GUI sends nothing until the user clicks, so this is False
            # right away and the loop never starts — which is the defect
            # this question is about (poll(None) would block instead).
            while self.pipe_in.poll():
                request = self.pipe_in.recv()
                method = request[0]
                args = request[1:]
                try:
                    getattr(self, method + "_callback")(*args)
                except AttributeError as ae:
                    print("Unknown callback received from pipe", str(ae))
            print("Process 2 done with receiving")
        except KeyboardInterrupt:
            pass

    def hello_callback(self, msg):
        print("Process 1 say\"" + msg + "\"")
class Controller(object):
    """Owns the pipe and the two worker processes; runs until Ctrl+C."""

    def __init__(self):
        # One duplex pipe: proc1 (GUI) writes, proc2 reads.
        pipe_proc1_out, pipe_proc2_in = Pipe()
        self.proc1 = Process(
            target=Process1,
            args=(pipe_proc1_out, )
        )
        self.proc2 = Process(
            target=Process2,
            args=(pipe_proc2_in, )
        )

    def run(self):
        try:
            self.proc1.start()
            self.proc2.start()
            # NOTE(review): this busy-wait burns a full CPU core; sleeping
            # inside the loop (or joining the processes) would be kinder.
            while True:
                continue
        except KeyboardInterrupt:
            print("Quitting processes...")
            # Give each child a second to exit cleanly, then force it.
            self.proc1.join(1)
            if self.proc1.is_alive():
                self.proc1.terminate()
            self.proc2.join(1)
            if self.proc2.is_alive():
                self.proc2.terminate()
            print("Finished")
def pipes():
    """Build the Controller and run it until interrupted."""
    Controller().run()


if __name__ == "__main__":
    pipes()
Notice that currently the Tkinter window can only be closed if the "parent" process is interrupted via keyboard.
Whenever I click the button and invoke the button's command, my program goes into an error state with the following message:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\USER\Anaconda3\envs\THS\lib\tkinter\__init__.py", line 1705, in __call__
return self.func(*args)
File "C:\Users\USER\PycharmProjects\PythonPlayground\pipes_advanced.py", line 26, in btn1_clicked
self.pipe_out.send(["hello", 1, 2])
File "C:\Users\USER\Anaconda3\envs\THS\lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Users\USER\Anaconda3\envs\THS\lib\multiprocessing\connection.py", line 280, in _send_bytes
ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
BrokenPipeError: [WinError 232] The pipe is being closed
At first I thought that the problem is with the value I'm receiving from the Entry.get() call (my Tkinter skills are rusty). I printed msg and got the text from the widget.
Next thing I tried was to put a constant string as the value of the argument that I sent over the pipe:
def btn1_clicked(self):
    # Same handler, but with a constant payload to rule out Entry.get()
    # as the cause of the error.
    self.pipe_out.send(["hello", "world"])
The same error appeared. Catching the exception BrokenPipeError doesn't really do me any good (except if I want to handle the case when the pipe is broken I guess).
If I do the same for the first version of the program (without Tkinter), it works. This leads me to believe that my problem comes from the way I have integrated Tkinter.
The issue you have is that you poll the pipe, but the documentation says:
poll([timeout])
Return whether there is any data available to be read.
If timeout is not specified then it will return immediately.
In the first example it works because when starting Process1 you send data to the pipe immediately:
def run(self):
    # The sender loops without pause, so data is always available and the
    # receiver's no-timeout poll() keeps returning True.
    try:
        while True:
            print("Sending message to process 2")
            self.pipe_out.send(["hello"])
    except KeyboardInterrupt:
        pass
And you do this continuously so the .poll will return True and the loop in Process2 will continue.
As with tkinter nothing gets sent to the pipe immediately — it waits for the user to click a button. By the time any of that can happen, Process2 has already called poll, which immediately returned False, so it didn't even start that loop. If you notice, it also almost immediately prints in the terminal that
"Process 2 done with receiving"
To solve this issue the easiest seems to use
while self.pipe_in.poll(None):
which per the docs means
"If timeout is None then an infinite timeout is used."
and for something like user interface this seems to be the best fit (from user's perspective at least (or so I think)) so basically your run method in Process2 should look like this:
def run(self):
    """Dispatch pipe messages forever; poll(None) blocks until data arrives
    or the sending end is closed (which makes recv raise EOFError)."""
    try:
        while self.pipe_in.poll(None):
            # A message is a list of the form [callback_name, *arguments].
            message = self.pipe_in.recv()
            name = message[0]
            payload = message[1:]
            try:
                getattr(self, name + "_callback")(*payload)
            except AttributeError as ae:
                print("Unknown callback received from pipe", str(ae))
        print("Process 2 done with receiving")
    except (KeyboardInterrupt, EOFError):
        pass
Also, not related to the problem, but there seems to be no need to inherit from tk.Frame in Process1 (or from object in Process2, unless you really need compatibility with Python 2). You could almost inherit from tk.Tk instead; that would make it easier to actually use it as the main window, since self would be the Tk instance.
I have the following code:
import click
# Bug fix: the decorator was pasted as "#click.command()", turning it into a
# comment; it must be applied with "@" for click to wrap main() at all.
@click.command()
def main():
    """Spin forever; exists only to demonstrate click's Ctrl+C handling."""
    while True:
        pass


try:
    main()
except KeyboardInterrupt:
    print('My own message!')
When I press Ctrl+C to exit the program, I want to print my own message. However, click intercepts the error and this is the output:
^C
Aborted!
How can I stop click from handling the errors?
I think I've solved my problem with this code! Hopefully, it's the correct way to handle my problem.
import click
# Bug fix: the decorator was pasted as "#click.command()", turning it into a
# comment; it must be applied with "@" for click to wrap main() at all.
@click.command()
def main():
    """Spin forever until interrupted."""
    while True:
        pass


try:
    # standalone_mode=False stops click from handling errors itself, so the
    # Abort raised on Ctrl+C propagates to this handler.
    main(standalone_mode=False)
except click.exceptions.Abort:
    print('My own message!')
I'm trying to understand how to use error handling correctly in Python.
I'm using Watchdog to watch folders on a network-connected disc. Sometimes the disc disconnects briefly and then connects again, and an error pops up: "Exception in thread Thread-2:"
I have an error handler but I'm not sure I'm doing it correctly.
Should I put another try at the observer.schedule step?
Python 3.6, Windows 10
if __name__ == '__main__':
    # Watch this network share recursively; MyHandler reacts to events.
    path = "P:\\03_auto\\Indata"
    observer = Observer()
    observer.schedule(MyHandler(), path, recursive=True)
    observer.start()
    try:
        # Idle in the main thread; the observer works in its own thread.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Well, the sys.excepthook approach I mentioned in my comment is not feasible at the moment. There's a more than decade-old bug that makes threads derived from threading.Thread ignore sys.excepthook. There are some workarounds outlined in the message thread of that bug, but I am hesitant to post an answer employing a workaround, especially since this bug finally seems to be getting a fix for Python 3.8.
The other option would be to derive a custom Observer from Watchdog's Observer.
The most basic way I can think of is a wrapper around the parent's run() method:
class CustomObserver(Observer):
    """Observer whose run() survives transient file-system errors."""

    def run(self):
        # Keep restarting the inherited run loop until the observer is told
        # to stop; a transient disconnect only causes a short pause.
        while self.should_keep_running():
            try:
                # Tweak the super call if you require compatibility with Python 2
                super().run()
            except OSError:
                # You did not mention the exception class in your post.
                # Be specific about what you want to handle here.
                # give the file system some time to recover
                time.sleep(.5)
Judging by a quick glance at the source, all Observers seem to be inheriting their run from EventDispatcher.run(), so you probably could even omit the wrapper and reimplement that method directly
class CustomObserver(Observer):
    """Variant that reimplements EventDispatcher.run() directly."""

    def run(self):
        while self.should_keep_running():
            try:
                # Same body as EventDispatcher.run(): pull events off the
                # queue and dispatch them.
                self.dispatch_events(self.event_queue, self.timeout)
            except queue.Empty:
                # Nothing queued within the timeout; poll again.
                continue
            except OSError:
                # Transient file-system error (e.g. network drive dropped);
                # give it a moment to recover.
                time.sleep(.5)
However, I don't have that package installed on my box, so these things are untested; you might have to fiddle around a bit to get this going.
Oh, and make sure to replace the OSError* by whatever exception class actually raises in your case :)
Edit:
* As per #HenryYik's comment below, a network drive disconnecting, appears to be raising an OSError (WinError 64: ERROR_NETNAME_DELETED) on Windows systems. I find it likely that UNIX style OSs raise the same type of exception in that situation, hence I updated the code snippets to now use OSError instead of the FileNotFoundError I used originally.
I think super().run() is not a good answer.
I've tried many things based on shmee's answer, but it was not concrete and didn't work for me.
Below is my answer.
I've finished the test.
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class Watcher :
    """Wraps a watchdog Observer and tracks its state in the global osv_status."""

    def __init__(self):
        self.observer = Observer()
        print('observer init...')

    def run(self):
        # osv_status: 0 = ready, 1 = started, 2 = halted after an OSError.
        global osv_status
        try :
            event_handler = MyHandler(patterns=["*.txt"])
            self.observer.schedule(event_handler, Directory, recursive=True)
            self.observer.start()
            osv_status = 1
        except OSError as ex:
            # The watched directory vanished (e.g. network drive dropped):
            # shut the observer down and mark the watcher as halted.
            print("OSError")
            time.sleep(.5)
            if self.observer.is_alive() is True:
                self.observer.stop()
                self.observer.join()
            print("Observer is removed [ ",osv_status," ]")
            osv_status = 2
        except Exception as ex:
            # NOTE(review): self.logger is never assigned in this class —
            # confirm it exists before relying on this branch.
            self.logger.exception("Exception ex :{0} ".format(ex))
class MyHandler(PatternMatchingEventHandler):
    """Pattern-filtered event handler that logs file creation/modification."""

    def __init__(self, *args, **kwargs):
        super(MyHandler, self).__init__(*args, **kwargs)
        print("MyHandler Init")

    def on_created(self, event):
        # Bug fix: the pasted code had an `except` with no matching `try`
        # here, which is a syntax error; wrap the body like on_modified does.
        try:
            print('New file is created ', event.src_path)
        except Exception as ex:
            # NOTE(review): self.logger is not set anywhere visible — confirm
            # it exists before relying on this branch.
            self.logger.exception("Exception ex :{0} ".format(ex))

    def on_modified(self, event):
        print('File is modified ', event.src_path)
        try:
            # doing() performs the actual work; any failure is logged.
            doing()
        except Exception as ex:
            self.logger.exception("Exception ex :{0} ".format(ex))
if __name__ == "__main__":
    # (".....": setup code elided in the original post.)
    .....
    osv_status = 0 # 0: ready, 1:start 2: halt
    wch =Watcher()
    wch.run()
    # NOTE(review): the paste appears truncated — this `try` has no matching
    # `except`/`finally` in the visible code.
    try :
        while(1):
            # (Re)establish the socket connection when it is down.
            if is_Connect == False:
                sock_connect()
                print('sock_connect')
            time.sleep(1)
            # Watched directory disappeared while running: tear down observer.
            if os.path.exists(Directory) is False and osv_status == 1:
                wch.observer.stop()
                wch.observer.join()
                print("Observer is removed [ ",osv_status," ]")
                osv_status = 0
            # Directory is back (or we halted earlier): build a new watcher.
            elif os.path.exists(Directory) is True and (osv_status == 0 or osv_status == 2):
                if wch.observer.is_alive() is False:
                    wch =Watcher()
                    wch.run()
I've also posted this in a related thread here
This is how I solved this problem:
from watchdog import observers
from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT, BaseObserver
class MyEmitter(observers.read_directory_changes.WindowsApiEmitter):
    """WindowsApiEmitter that reconnects when the watched share drops."""

    def queue_events(self, timeout):
        try:
            super().queue_events(timeout)
        except OSError as e:
            # The shared drive went away; retry every 10s until it is back.
            print(e)
            connected = False
            while not connected:
                try:
                    self.on_thread_start() # need to re-set the directory handle.
                    connected = True
                    print('reconnected')
                except OSError:
                    print('attempting to reconnect...')
                    time.sleep(10)


# Use the custom emitter through a BaseObserver instead of the stock Observer.
observer = BaseObserver(emitter_class=MyEmitter, timeout=DEFAULT_OBSERVER_TIMEOUT)
...
Subclassing WindowsApiEmitter to catch the exception in queue_events. In order to continue after reconnecting, watchdog needs to re-set the directory handle, which we can do with self.on_thread_start().
Then use MyEmitter with BaseObserver, we can now handle losing and regaining connection to shared drives.
Right now I have code in my script
def run(self):
    """Run the bot's four steps repeatedly until a proxy-related error occurs.

    NOTE(review): the `break` below leaves the loop on the first error —
    likely why each Bot stops after self.four() and never restarts at one().
    """
    while True:
        try:
            self.one()
            self.two()
            self.three()
            self.four()
        # NOTE(review): errno.ECONNREFUSED is an int, not an exception class;
        # having it in this tuple raises TypeError when an exception is
        # matched against it. Also `print x` is Python 2 syntax.
        except (IndexError, ProxyError, SSLError, ConnectionError, errno.ECONNREFUSED):
            print statusTime(self.index) + 'Bad Proxy'
            break
def jobWorker(username, password, proxy):
    """Create a Bot for the given credentials/proxy and wait for it to finish."""
    worker = Bot(username, password, proxy)
    worker.start()
    worker.join()
How can I tell Python to continuously run def run(self) — basically keep looping through self.one(), self.two(), self.three(), self.four() so it doesn't stop? Right now each Bot stops at self.four() and doesn't start again at self.one().
You can put the call to the function within:
def __init__(self):
    # Calling run() from the constructor makes the loop begin as soon as
    # the object is created.
    self.run()
I am weak in the multi-threading area of Python.
Now I have a request; here is the pseudo code:
def main():
    """Desired behavior: an exception raised in the worker thread is caught here."""
    try:
        print("A")
        # start_non_block_thread is the function the asker wants implemented.
        start_non_block_thread(func=test_func)
        time.sleep(10)
        print("B")
    except Exception:
        # Desired: fires when test_func raises in the sub-thread, so "B" is
        # never printed.
        print("catch it")

def test_func():
    """Worker body: announce itself, then raise to abort the whole program."""
    print("in sub-threading")
    raise Exception("quit")
The output i want is:
A
in sub-threading
catch it
Please note that "B" is not in the output.
How can I implement start_non_block_thread to make sure the behavior is as I expect?
Thanks in advance~