Background task with asyncio - python

I have a class produceMessages() that keeps appending messages to its attribute message_logs.
I want to process these messages in another class, processMessages(). However, the code in processMessages() won't execute until produceMessages() is done, which never happens, since I want the messages to keep coming forever.
Looking at the documentation, I found that the asyncio library might help, but I'm struggling to get the example below to work:
This is first_file.py
import asyncio
import random

class produceMessages():
    def __init__(self, timeout=10):
        self.timeout = timeout
        self.message_logs = []

    async def run(self):
        while True:
            self.message_logs.append(random.uniform(0, 1))
            await asyncio.sleep(self.timeout)
This is second_file.py
import first_file
import asyncio
import time

class processMessages():
    def __init__(self):
        self.producer = first_file.produceMessages()
        asyncio.run(self.producer.run())

    def print_logs(self):
        print(self.producer.message_logs)
        time.sleep(1)

x = processMessages()
x.print_logs()
How can I make this work? Thanks

I would recommend you try the threading library. This is how I would approach it (note that the producer's run() is a coroutine, so the background thread has to drive it with asyncio.run()):
import first_file
import asyncio
import threading
import time

class processMessages():
    def __init__(self):
        self.test = first_file.produceMessages()
        # run the async producer in a background thread with its own event loop
        t = threading.Thread(target=lambda: asyncio.run(self.test.run()), daemon=True)
        t.start()
        # a second thread that prints the collected logs
        self.t2 = threading.Thread(target=self.print_logs)

    def print_logs(self):
        print(self.test.message_logs)
        time.sleep(1)

x = processMessages()
x.t2.start()

Related

Nothing happened when scheduling a function to start at a certain time

I have a class that starts a thread running a while loop. I tried scheduling the thread class to start at a certain time, but it doesn't work:
def test():
    if __name__ == "__main__":
        main()

schedule.every().day.at("17:25:50").do(test)
The function does not do anything even when the time reaches "17:25:50".
My full code:
import discord
import random
import time
import asyncio
import schedule
from facebook_scraper import get_posts, _scraper, exceptions
from discord.ext import commands, tasks
import threading
import time
import re

class LEDManager(threading.Thread):
    def __init__(self, id_manager):
        threading.Thread.__init__(self)
        self.id_manager = int(id_manager)

    def run(self):
        while True:
            try:
                wanted = "Pecahan setiap negeri (Kumulatif):"  # wanted post
                for post in get_posts("myhealthkkm", pages=5):
                    if post.get("post_text") is not None and wanted in post.get("post_text"):
                        # print("Found", t)
                        listposts.append(post.get("post_text"))
                # append until 3 page finish then go here
                time.sleep(1)
                print(listposts)
                global listView
                if listposts != 0:
                    listView = listposts.copy()
                    print(listView)
                    listposts.clear()
            except exceptions.TemporarilyBanned:
                print("Temporarily banned, sleeping for 10m")
                time.sleep(600)

def main():
    thread_id = ("0")
    led_index = 0
    thread_list = list()
    for objs in thread_id:
        thread = LEDManager(led_index)
        thread_list.append(thread)
        led_index += 1
    for thread in thread_list:
        thread.start()
        time.sleep(1)

def test():
    if __name__ == "__main__":
        main()

schedule.every().day.at("17:25:50").do(test)
You forgot to add these lines:
while True:
    schedule.run_pending()
    time.sleep(1)
You should add them at the end of the file, so the program keeps checking forever whether the job needs to be run (i.e., whether the time has reached "17:25:50").
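For reference, here is a minimal self-contained sketch of that pattern (my own illustration; the job body is just a placeholder print):

import time
import schedule

def test():
    print("job ran")                 # placeholder for the real job

schedule.every().day.at("17:25:50").do(test)

# keep checking forever whether any scheduled job is due
while True:
    schedule.run_pending()
    time.sleep(1)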
And here is the full documentation to see how to use the schedule module:
https://schedule.readthedocs.io/en/stable/

python Thread.name is printing last thread created name

I'm a newbie to Python and learning about threads. I have created a sample Producer-Consumer program in which I add a movie to a list in the Producer thread and pop the front element from the same list in the Consumer thread. The problem is that when printing the items of the movie list along with the thread name, I get the wrong thread name in the Producer thread. This is my code:
Producer.py
from threading import Thread
from threading import RLock
import time

class Producer(Thread):
    def __init__(self):
        Thread.__init__(self)
        Thread.name = 'Producer'
        self.movieList = list()
        self.movieListLock = RLock()

    def printMovieList(self):
        self.movieListLock.acquire()
        if len(self.movieList) > 0:
            for movie in self.movieList:
                print(Thread.name, movie)
            print('\n')
        self.movieListLock.release()

    def pushMovieToList(self, movie):
        self.movieListLock.acquire()
        self.movieList.append(movie)
        self.printMovieList()
        self.movieListLock.release()

    def run(self):
        for i in range(6):
            self.pushMovieToList('Avengers' + str(i + 1))
            time.sleep(1)
Consumer.py
from threading import Thread
import time

class Consumer(Thread):
    def __init__(self):
        Thread.__init__(self)
        Thread.name = 'Consumer'
        self.objProducer = None

    def popMovieFromList(self):
        self.objProducer.movieListLock.acquire()
        if len(self.objProducer.movieList) > 0:
            movie = self.objProducer.movieList.pop(0)
            print(Thread.name, ':', movie)
            print('\n')
        self.objProducer.movieListLock.release()

    def run(self):
        while True:
            time.sleep(1)
            self.popMovieFromList()
Main.py
from Producer import *
from Consumer import *

def main():
    objProducer = Producer()
    objConsumer = Consumer()
    objConsumer.objProducer = objProducer
    objProducer.start()
    objConsumer.start()
    objProducer.join()
    objConsumer.join()

main()
I am not sure whether you have solved this problem yet; I hope my answer helps.
You can check the threading documentation. It says that multiple threads may be given the same name:
name
A string used for identification purposes only. It has no semantics. Multiple threads may be given the same name. The initial name is set by the constructor.
Assigning to Thread.name (on the class itself) sets a class-level attribute, so it is shared by every thread, and the last assignment wins.
If you want to set the name of a thread, set it on the thread instance like this:
class Producer(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.name = 'Producer'
and read it back with threading.current_thread().name:
if len(self.movieList) > 0:
    for movie in self.movieList:
        print(threading.current_thread().name, movie)
Hope you enjoy it!
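For completeness, here is a minimal standalone sketch of the fix (my own illustration; the Worker class name is made up): each thread sets self.name on its own instance, and threading.current_thread().name then reports the right name inside run().

import threading
import time

class Worker(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name                      # per-instance name, not a class attribute

    def run(self):
        for i in range(3):
            # current_thread() returns the thread object this code is running in
            print(threading.current_thread().name, 'tick', i)
            time.sleep(0.1)

producer = Worker('Producer')
consumer = Worker('Consumer')
producer.start()
consumer.start()
producer.join()
consumer.join()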

Share ctype memory across python processes using the address

I am trying to send a dynamic array across multiple processes in Python. My first solution was sending the data directly through the Queue/Pipe of the multiprocessing module. The problem is that it is limited by the bandwidth of the ethernet connection. Therefore I am trying to use a ctypes array and pass just the address of the object. When I try to access the array from the second process (either A.raw or A.value), the process exits without any exception. Does somebody have an idea what is going on? Maybe some problem with locks, etc.?
from multiprocessing import Process, Queue
from ctypes import c_char, addressof
from time import sleep
import os

class ProcessIn(Process):
    def __init__(self, QueueI):
        super().__init__(daemon=True)
        self.QueueI = QueueI

    def run(self):
        Array = []
        while True:
            N = 100000
            A = (c_char * N)()
            A.value = b'\x01'
            Address = addressof(A)
            Array.append(A)
            print(os.getpid(), 'putted', Address)
            self.QueueI.put((Address, N))
            sleep(2)

class ProcessOut(Process):
    def __init__(self, QueueI):
        super().__init__(daemon=True)
        self.QueueI = QueueI

    def run(self):
        while True:
            print(os.getpid(), 'step 1')
            Address, N = self.QueueI.get()
            print(os.getpid(), 'step 2', Address)
            A = (c_char * N).from_address(Address)
            print(os.getpid(), 'step 3')
            Value = A.raw  # This will fail
            print(os.getpid(), 'step 4', Value)
            sleep(1)

if __name__ == '__main__':
    QueueI = Queue()
    In = ProcessIn(QueueI)
    Out = ProcessOut(QueueI)
    print(os.getpid(), 'main')
    In.start()
    Out.start()
    input('press key to finish\n')
OK, I got it: using mmap with a tag name, so that both processes map the same shared-memory block:
from multiprocessing import Process, Queue
from ctypes import c_char, addressof
import pyarrow as pa
import numpy as np
from time import sleep
import os
import datetime
import mmap
from sys import getsizeof

class ProcessIn(Process):
    def __init__(self, QueueI):
        super().__init__(daemon=True)
        self.QueueI = QueueI

    def run(self):
        i = 0
        while True:
            N = np.random.randint(10, 14) * 100000
            data = b'abcdefghijklmnopqrstuvwxyz'
            Tag = 'Data_' + str(i)
            buf = mmap.mmap(-1, N * len(data), tagname=Tag)
            buf[0] = i
            NN = N * len(data)
            # print(buf[0:10])
            print(os.getpid(), 'putted', Tag, NN)
            if self.QueueI.qsize() == 0:
                self.QueueI.put((Tag, NN, datetime.datetime.now()))
            i += 1
            sleep(1)

class ProcessOut(Process):
    def __init__(self, QueueI):
        super().__init__(daemon=True)
        self.QueueI = QueueI

    def run(self):
        while True:
            # print(os.getpid(), 'step 1')
            Tag, N, start = self.QueueI.get()
            buf = mmap.mmap(-1, N, tagname=Tag)
            print('got', buf[0:10], Tag)
            # data = buf.read()
            dt = (datetime.datetime.now() - start).total_seconds()
            if dt != 0:
                print(os.getpid(), N / dt / 1024**2, 'MBs', dt * 1000, 'ms', N / 1024**2, 'MB', N)
            else:
                print(os.getpid(), np.nan, 'MBs', dt * 1000, 'ms', N / 1024**2, 'MB', N)
            buf = None

if __name__ == '__main__':
    QueueI = Queue()
    In = ProcessIn(QueueI)
    Out = ProcessOut(QueueI)
    print(os.getpid(), 'main')
    In.start()
    Out.start()
    input('press key to finish\n')
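Note that the tagname argument to mmap.mmap is Windows-only. As a rough cross-platform sketch of the same idea (my own addition, not part of the original answer), multiprocessing.shared_memory on Python 3.8+ also lets you pass just a block name through the Queue instead of the data itself:

from multiprocessing import Process, Queue
from multiprocessing import shared_memory

def consumer(q):
    name, size = q.get()                             # receive only the block name and size
    shm = shared_memory.SharedMemory(name=name)      # attach to the existing block
    print('consumer read:', bytes(shm.buf[:4]), 'of', size, 'bytes')
    shm.close()

if __name__ == '__main__':
    q = Queue()
    # the parent creates and fills the block; only its name travels through the Queue
    shm = shared_memory.SharedMemory(create=True, size=100000)
    shm.buf[:4] = b'\x01\x02\x03\x04'
    p = Process(target=consumer, args=(q,))
    p.start()
    q.put((shm.name, shm.size))
    p.join()
    shm.close()
    shm.unlink()                                     # release the block once done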

Python3 weakref WeakMethod and thread safety

I am trying to create a simple callback that can be registered to an object from another thread. The initial object that calls the callback is running on its own thread in this case.
This is best illustrated through the following example:
from pprint import pprint
import sys
import weakref
import threading
import time

class DummyController(object):
    def __init__(self):
        self.name = "fortytwo"

    def callback(self):
        print("I am number : " + self.name)

class SomeThread(threading.Thread):
    def __init__(self, listener):
        threading.Thread.__init__(self)
        self.listener = listener

    def run(self):
        time.sleep(1)
        dummy = DummyController()
        self.listener.register_callback(dummy.callback)
        time.sleep(5)
        del dummy

class Listener(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.runner = weakref.WeakMethod(self.default_callback)
        self.counter = 20

    def default_callback(self):
        print("Not implemented")

    def register_callback(self, function):
        self.runner = weakref.WeakMethod(function)

    def run(self):
        while self.counter:
            try:
                self.runner()()
            except Exception as e:
                pprint(e)
            self.counter -= 1
            time.sleep(1)

listen = Listener()
some = SomeThread(listen)
listen.start()
some.start()
Now the above code works just fine. But I am concerned about thread-safety here. Reading through weakref docs, it isn't very clear if weakref is really thread safe or not, except for the line:
Changed in version 3.2: Added support for thread.lock, threading.Lock, and code objects.
I might be simply not reading that right. Do I need to add locking, or is everything actually fine and pretty thread safe?
Many thanks
OK, I understand. This is not a thread-safety problem, just a weak-reference problem.
Here is a runnable example:
from pprint import pprint
import sys
import weakref
import threading
import time
import gc

class SomeThread(threading.Thread):
    def __init__(self, listener):
        threading.Thread.__init__(self)
        self.listener = listener

    def run(self):
        class test:  # simplify this example.
            def callback(self, count):
                print(count)
        time.sleep(1)
        dummy = test()
        self.listener.register_callback(dummy.callback)
        time.sleep(5)
        del dummy
        gc.collect()  # add this line to do garbage collecting.

class Listener(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.runner = weakref.WeakMethod(self.default_callback)
        self.counter = 20

    def default_callback(self):
        print("Not implemented")

    def register_callback(self, function):
        self.runner = weakref.WeakMethod(function)

    def run(self):
        while self.counter:
            try:
                self.runner()(self.counter)
            except Exception as e:
                pprint(e)
            self.counter -= 1
            time.sleep(1)

listen = Listener()
some = SomeThread(listen)
listen.start()
some.start()
output:
TypeError('default_callback() takes 1 positional argument but 2 were given',)
TypeError('default_callback() takes 1 positional argument but 2 were given',)
18
17
16
15
TypeError("'NoneType' object is not callable",)
TypeError("'NoneType' object is not callable",)
TypeError("'NoneType' object is not callable",)
If you explicitly call gc.collect(), the callback's target loses its last strong reference and the WeakMethod starts returning None. Since you never know when the garbage collector will run, this is a potential issue.
It does not matter whether you use threads or not; this is just the normal behaviour of weak references.
By the way, be careful: returning from SomeThread.run also implicitly drops dummy. You can test this by removing del dummy and moving gc.collect() into the try block.
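If you want the Listener to cope with the target disappearing, a minimal guard (my own sketch, not part of the original answer) is to dereference the WeakMethod first and check for None before calling; the Listener.run loop could then look like this:

    def run(self):
        while self.counter:
            method = self.runner()              # dereference the WeakMethod
            if method is not None:
                method(self.counter)            # target is still alive, call it
            else:
                print("callback target has been garbage collected")
            self.counter -= 1
            time.sleep(1)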

Python pyttsx, how to use external loop

In my program I need a class (which can be its own thread) to watch a list like say_list, and whenever other classes add some text to it, pyttsx should say that text.
I searched the pyttsx docs and found an external-loop feature, but I could not find an example that works correctly.
I want something like this:
import pyttsx
import threading

class VoiceAssistant(threading.Thread):
    def __init__(self):
        super(VoiceAssistant, self).__init__()
        self.engine = pyttsx.init()
        self.say_list = []

    def add_say(self, msg):
        self.say_list.append(msg)

    def run(self):
        while True:
            if len(self.say_list) > 0:
                self.engine.say(self.say_list[0])
                self.say_list.remove(self.say_list[0])

if __name__ == '__main__':
    va = VoiceAssistant()
    va.start()
Thanks.
I can get the proper results by using Python's built-in Queue class:
import pyttsx
from Queue import Queue
from threading import Thread

q = Queue()

def say_loop():
    engine = pyttsx.init()
    while True:
        engine.say(q.get())
        engine.runAndWait()
        q.task_done()

def another_method():
    t = Thread(target=say_loop)
    t.daemon = True
    t.start()
    for i in range(0, 3):
        q.put('Sally sells seashells by the seashore.')
    print "end of another method..."

def third_method():
    q.put('Something Something Something')

if __name__ == "__main__":
    another_method()
    third_method()
    q.join()  # ends the loop when queue is empty
Above is a simple example I whipped up. It uses the queue/consumer model: separate functions/classes put items onto the same queue, and a worker executes whenever the queue has items. It should be pretty easy to adapt to your needs.
Further reading about Queues: https://docs.python.org/2/library/queue.html
There appears to be an interface for this in the docs you linked to, but it seemed like you were already on the separate thread track so this seemed closer to what you wanted.
And here's the modified version of your code:
import pyttsx
from Queue import Queue
import threading

class VoiceAssistant(threading.Thread):
    def __init__(self):
        super(VoiceAssistant, self).__init__()
        self.engine = pyttsx.init()
        self.q = Queue()
        self.daemon = True

    def add_say(self, msg):
        self.q.put(msg)

    def run(self):
        while True:
            self.engine.say(self.q.get())
            self.engine.runAndWait()
            self.q.task_done()

if __name__ == '__main__':
    va = VoiceAssistant()
    va.start()
    for i in range(0, 3):
        va.add_say('Sally sells seashells by the seashore.')
    print "now we want to exit..."
    va.q.join()  # ends the loop when queue is empty
pyttsx3 loop pronunciation
import pyttsx3

voice_text = 'test'

def loopSound():
    def onEnd(name, completed):
        print('finishing', name, completed)
        engine.say(voice_text, 'voice_a')

    engine = pyttsx3.init()
    engine.connect('finished-utterance', onEnd)
    engine.say(voice_text, 'voice_e')
    engine.startLoop()

loopSound()
