Producer-Consumer algorithm in Python

I built the following code while studying threads and queues from a tutorial website.
from __future__ import print_function
import queue
import threading
import time


class a3_4(threading.Thread):
    q = queue.Queue()

    def __init__(self, begin, end, waiting_time):
        self.begin = begin
        self.end = end
        self.waiting_time = waiting_time
        threading.Thread.__init__(self)

    def run(self):
        while True:
            if self.begin != self.end:
                self.q.put(self.begin)
                self.begin += 1
                time.sleep(5)
            else:
                break

    def op(self):
        self.start()
        while True:
            if not self.q.empty():
                print("Outputting: ", self.q.get())
                time.sleep(self.waiting_time)


if __name__ == '__main__':
    myThread = a3_4(1, 5, 1)
    myThread.op()
I get the following output:
python3 a3_4.py
Outputting: 1
Outputting: 2
Outputting: 3
Outputting: 4
But the program doesn't stop on its own.
I tried inserting an else: break, but that only gives me Outputting: 1.
Am I missing something very basic here?

You are simulating the producer-consumer problem. Your producer thread stops properly, but there is no termination condition for your consumer loop (the main thread), so you need to add one to your consumer method op(). Maybe something like:
def op(self):
    self.start()
    while True:
        time.sleep(self.waiting_time)
        if not self.q.empty():
            print("Outputting: ", self.q.get())
        else:
            break
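A caveat from me (not part of the original answer): with the producer sleeping 5 seconds between puts and waiting_time=1, this consumer can find the queue empty while the producer is still running and exit early. A more robust pattern is a sentinel value. Below is a minimal sketch (the class name ProducerConsumer and the shorter producer sleep are mine) where the producer puts None when it finishes and the consumer stops when it sees it:

from __future__ import print_function
import queue
import threading
import time


class ProducerConsumer(threading.Thread):
    def __init__(self, begin, end, waiting_time):
        threading.Thread.__init__(self)
        self.q = queue.Queue()
        self.begin = begin
        self.end = end
        self.waiting_time = waiting_time

    def run(self):
        # Producer: put each value, then a sentinel to signal "done".
        for i in range(self.begin, self.end):
            self.q.put(i)
            time.sleep(0.5)
        self.q.put(None)

    def op(self):
        self.start()
        while True:
            item = self.q.get()       # blocks until something is available
            if item is None:          # sentinel: producer has finished
                break
            print("Outputting:", item)
            time.sleep(self.waiting_time)


if __name__ == '__main__':
    ProducerConsumer(1, 5, 1).op()

This prints 1 through 4 and then exits on its own, regardless of how fast the producer and consumer run relative to each other.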

Related

Nothing happened when scheduling a function to start at a certain time

I have a class that starts a thread running a while loop. I tried scheduling the thread class to start at a certain time, but it doesn't work:
def test():
    if __name__ == "__main__":
        main()


schedule.every().day.at("17:25:50").do(test)
The function does not do anything even when the time reaches "17:25:50".
My full code:
import discord
import random
import time
import asyncio
import schedule
from facebook_scraper import get_posts, _scraper, exceptions
from discord.ext import commands, tasks
import threading
import re

listposts = []  # list of matching post texts (assumed defined at module level; not shown in the original snippet)


class LEDManager(threading.Thread):
    def __init__(self, id_manager):
        threading.Thread.__init__(self)
        self.id_manager = int(id_manager)

    def run(self):
        while True:
            try:
                wanted = "Pecahan setiap negeri (Kumulatif):"  # wanted post
                for post in get_posts("myhealthkkm", pages=5):
                    if post.get("post_text") is not None and wanted in post.get("post_text"):
                        # print("Found", t)
                        listposts.append(post.get("post_text"))
                # append until 3 pages finish, then go here
                time.sleep(1)
                print(listposts)
                global listView
                if listposts != 0:
                    listView = listposts.copy()
                    print(listView)
                    listposts.clear()
            except exceptions.TemporarilyBanned:
                print("Temporarily banned, sleeping for 10m")
                time.sleep(600)


def main():
    thread_id = ("0")
    led_index = 0
    thread_list = list()
    for objs in thread_id:
        thread = LEDManager(led_index)
        thread_list.append(thread)
        led_index += 1
    for thread in thread_list:
        thread.start()
        time.sleep(1)


def test():
    if __name__ == "__main__":
        main()


schedule.every().day.at("17:25:50").do(test)
You forgot to add these lines:
while True:
    schedule.run_pending()
    time.sleep(1)
You should add them at the end of the file, so the program keeps checking forever whether "the job" needs to be run (i.e., whether the time has reached "17:25:50").
And here is the full documentation to see how to use the schedule module:
https://schedule.readthedocs.io/en/stable/
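Putting it together, here is a minimal, self-contained sketch of the pattern (the job body and the printed message are placeholders of mine, not from the question):

import time
import schedule


def job():
    print("running scheduled job")


schedule.every().day.at("17:25:50").do(job)

while True:
    schedule.run_pending()  # runs any job whose scheduled time has passed
    time.sleep(1)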

Python: multiprocessing Queue.put() in module won't send anything to parent process

I am trying to make two processes communicate with each other using the multiprocessing package in Python, more precisely the Queue class. From the parent process, I want to get an updated value from the child process every 5 seconds. The child process runs a class method. I have made a toy example where everything works fine.
However, when I try to implement this solution in my project, it seems that the Queue.put() call of the child process in the submodule won't send anything to the parent process: the parent process never prints the desired value and the code never stops running. Actually, the parent process only prints the value sent to the child process, which is True here, but as I said, it never stops.
So my questions are:
Is there any error in my toy example?
How should I modify my project so that it works just like my toy example?
Toy example: works
main module
from multiprocessing import Process, Event, Lock, Queue, Pipe
import time
import test_mod as test


def loop(output):
    stop_event = Event()
    q = Queue()
    child_process = Process(target=test.child.sub, args=(q,))
    child_process.start()
    i = 0
    print("started at {} ".format(time.time()))
    while not stop_event.is_set():
        i += 1
        time.sleep(5)
        q.put(True)
        print(q.get())
        if i == 5:
            child_process.terminate()
            stop_event.set()
    output.put("main process looped")


if __name__ == '__main__':
    stop_event, output = Event(), Queue()
    k = 0
    while k < 5:
        loop_process = Process(target=loop, args=(output,))
        loop_process.start()
        print(output.get())
        loop_process.join()
        k += 1
submodule
from multiprocessing import Process, Event, Lock, Queue, Pipe
import time


class child(object):
    def __init__(self):
        pass

    def sub(q):
        i = 0
        while i < 2000:
            latest_value = time.time()
            accord = q.get()
            if accord == True:
                q.put(latest_value)
                accord = False
            time.sleep(0.0000000005)
            i += 1
Project code: doesn't work
main module
import neat  # package in which the submodule is
import *some other stuff*


def run(config_file):
    config = neat.Config(some configuration)
    p = neat.Population(config)

    # **WHERE MY PROBLEM IS**
    stop_event = Event()
    q = Queue()
    pe = neat.ParallelEvaluator(**args)
    child_process = Process(target=p.run, args=(pe.evaluate, q, other args))
    child_process.start()
    i = 0
    while not stop_event.is_set():
        q.put(True)
        print(q.get())
        time.sleep(5)
        i += 1
        if i == 5:
            child_process.terminate()
            stop_event.set()


if __name__ == '__main__':
    run(config_file)
submodule
class Population(object):
    def __init__():
        *initialization*

    def run(self, q, other args):
        while n is None or k < n:
            *some stuff*
            accord = add_2.get()
            if accord == True:
                add_2.put(self.best_genome.fitness)
                accord = False
        return self.best_genome
NB:
I am not used to multiprocessing
I have tried to give the most relevant parts of my project, given that the entire code would be far too long.
I have also considered using Pipe(), however this option didn't work either.
If I see it correctly, your desired submodule is the class Population. However, you start your process with a parameter of type ParallelEvaluator, and I can't see that you supply your Queue q to the sub-process. This is what I see from the code provided:
stop_event = Event()
q = Queue()
pe = neat.ParallelEvaluator(**args)
child_process = Process(target=p.run, args=(pe.evaluate, **args))
child_process.start()
Moreover, the following lines create a race condition:
q.put(True)
print(q.get())
The get command is like a pop: it takes an element and removes it from the queue. If your sub-process doesn't access the queue between these two lines (because it is busy), the True will never make it to the child process; the parent just gets its own True back. Hence, it is better to use multiple queues, one for each direction. Something like:
stop_event = Event()
q_in = Queue()
q_out = Queue()
pe = neat.ParallelEvaluator(**args)
child_process = Process(target=p.run, args=(pe.evaluate, **args))
child_process.start()
i = 0
while not stop_event.is_set():
    q_in.put(True)
    print(q_out.get())
    time.sleep(5)
    i += 1
    if i == 5:
        child_process.terminate()
        stop_event.set()
And this is your submodule:
class Population(object):
    def __init__():
        *initialization*

    def run(self, **args):
        while n is None or k < n:
            *some stuff*
            accord = add_2.get()  # add_2 = q_in
            if accord == True:
                add_3.put(self.best_genome.fitness)  # add_3 = q_out
                accord = False
        return self.best_genome
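As a sanity check of the two-queue idea outside of neat, here is a minimal, self-contained sketch (function and variable names are mine) where the parent sends requests on q_in and reads replies from q_out:

from multiprocessing import Process, Queue
import time


def worker(q_in, q_out):
    # Child: wait for a request, then answer with the current timestamp.
    while True:
        request = q_in.get()
        if request is None:          # sentinel: parent wants us to stop
            break
        q_out.put(time.time())


if __name__ == '__main__':
    q_in, q_out = Queue(), Queue()
    child = Process(target=worker, args=(q_in, q_out))
    child.start()

    for _ in range(3):
        q_in.put(True)                        # request a value
        print("latest value:", q_out.get())   # reply comes back on the other queue
        time.sleep(1)

    q_in.put(None)                   # tell the child to exit
    child.join()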

Run infinite while loop in Python module

I'm writing a Python module that reads jstest output to make an Xbox gamepad work in Python on Linux. In __init__ I need to start an infinite while loop in the background, on another thread. The code looks like this:
import os
from threading import Thread
import time
import select
import subprocess


class Joystick:
    """Initializes base class and launches jstest and xboxdrv"""

    def __init__(self, refreshRate=2000, deadzone=4000):
        self.proc = subprocess.Popen(['xboxdrv', '-D', '-v', '--detach-kernel-driver', '--dpad-as-button'], stdout=subprocess.PIPE, bufsize=0)
        self.pipe = self.proc.stdout
        self.refresh = refreshRate
        self.refreshDelay = 1.0 / refreshRate
        self.refreshTime = 0  # indicates the next refresh
        self.deadzone = deadzone
        self.start()
        self.xbox = subprocess.Popen(['jstest', '--normal', '/dev/input/js0'], stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
        self.response = self.xbox.stdout.readline()
        a = Thread(target=self.reload2())
        a.start()
        print("working")

    def reload2(self):
        while True:
            self.response = self.xbox.stdout.readline()
            print("read")
            time.sleep(0.5)

    def start(self):
        global leftVibrateAmount, rightVibrateAmount
        leftVibrateAmount = 0
        rightVibrateAmount = 0
        readTime = time.time() + 1  # here we wait a while
        found = False
        while readTime > time.time() and not found:
            readable, writeable, exception = select.select([self.pipe], [], [], 0)
            if readable:
                response = self.pipe.readline()
                # tries to detect if controller is connected
                if response == b'[ERROR] XboxdrvDaemon::run(): fatal exception: DBusSubsystem::request_name(): failed to become primary owner of dbus name\n':
                    raise IOError("Another instance of xboxdrv is running.")
                elif response == b'[INFO] XboxdrvDaemon::connect(): connecting slot to thread\n':
                    found = True
                    self.reading = response
                elif response == b'':
                    raise IOError('Are you running as sudo?')
        if not found:
            self.pipe.close()
            # halt if controller not found
            raise IOError("Xbox controller/receiver isn't connected")
The loop is started in the __init__ function like so:
a = threading.Thread(target=self.reload2) # code hangs here
a.start()
But each time I create the variable "a", the whole program hangs in the while loop, which should be running on another thread.
Thanks for help.
You may be having issues with your __init__: you are calling reload2() and passing its result to target instead of passing the method itself. I put it in a simple class as an example, and it runs as expected.
import time
from threading import Thread


class InfiniteLooper():
    def __init__(self):
        # reload2, not reload2(): otherwise you're executing reload2 and
        # assigning its result to target, but it's an infinite loop, so...
        a = Thread(target=self.reload2)
        print('Added thread')
        a.start()
        print('Thread started')

    def reload2(self):
        while True:
            self.response = input('Enter something')
            print('read')
            time.sleep(0.5)


loop = InfiniteLooper()
Output:
Added thread
Thread started
Enter something
1
read
Enter something
1
read
As you can see, "Enter something" appears after I've added the thread and started it. It also loops fine.
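The key difference between target=self.reload2 and target=self.reload2() can be seen in a throwaway example (names are mine, not from the question):

import time
from threading import Thread


def loop_forever():
    while True:
        time.sleep(0.5)


# Correct: pass the function object; the new thread calls it.
t = Thread(target=loop_forever, daemon=True)  # daemon so this example can exit
t.start()
print("main thread continues")  # printed immediately

# Wrong: loop_forever() runs here, in the main thread, and never returns,
# so the Thread object below would never even be constructed.
# t = Thread(target=loop_forever())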

How to let a Python thread finish gracefully

I'm doing a project involving data collection and logging. I have two threads running, a collection thread and a logging thread, both started in main. I'm trying to allow the program to be terminated gracefully with Ctrl-C.
I'm using a threading.Event to signal to the threads to end their respective loops. It works fine for stopping the sim_collectData method, but it doesn't seem to properly stop the logData thread. The Collection terminated print statement is never executed, and the program just stalls. (It doesn't end, it just sits there.)
The second while loop in logData is to make sure everything in the queue is logged. The goal is for Ctrl-C to stop the collection thread immediately, then allow the logging thread to finish emptying the queue, and only then fully terminate the program. (Right now, the data is just being printed out - eventually it's going to be logged to a database).
I don't understand why the second thread never terminates. I'm basing what I've done on this answer: Stopping a thread after a certain amount of time. What am I missing?
def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
        data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    return


def logData(input_queue, stop_event):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    while not stop_event.is_set():
        d = input_queue.get()
        if d.startswith("DATA:"):
            print d
            input_queue.task_done()
            n += 1
    # if the stop event is received and the previous loop terminates,
    # finish logging the rest of the items in the queue.
    print "Collection terminated. Logging remaining data to database..."
    while not input_queue.empty():
        d = input_queue.get()
        if d.startswith("DATA:"):
            print d
            input_queue.task_done()
            n += 1
    return


def main():
    input_queue = Queue.Queue()
    stop_event = threading.Event()  # used to signal termination to the threads

    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."

    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue, stop_event))
    logging_thread.start()
    print "Done."

    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging everything in the queue
        stop_event.set()


main()
The problem is that your logger is waiting on d = input_queue.get() and will not check the event. One solution is to skip the event completely and invent a unique message that tells the logger to stop. When you get a signal, send that message to the queue.
import threading
import Queue
import random
import time


def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
        data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    input_queue.put(None)
    return


def logData(input_queue):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    while True:
        d = input_queue.get()
        if d is None:
            input_queue.task_done()
            return
        if d.startswith("DATA:"):
            print d
            input_queue.task_done()
            n += 1


def main():
    input_queue = Queue.Queue()
    stop_event = threading.Event()  # used to signal termination to the threads

    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."

    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue,))
    logging_thread.start()
    print "Done."

    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging everything in the queue
        stop_event.set()


main()
I'm not an expert in threading, but in your logData function the first d = input_queue.get() is blocking, i.e., if the queue is empty it will sit and wait forever until a queue message is received. This is likely why the logData thread never terminates: it's waiting forever for a queue message.
Refer to the Python docs to change this to a non-blocking queue read: use .get(False) or .get_nowait() - but either will require some exception handling for the cases when the queue is empty.
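For illustration, a minimal sketch of that non-blocking pattern (Python 3 naming; in Python 2 the module and the Empty exception live in Queue instead):

import queue  # in Python 2 this module is named Queue
import time


def log_data(input_queue, stop_event):
    # Poll the queue without blocking so stop_event is checked regularly.
    while not stop_event.is_set():
        try:
            d = input_queue.get_nowait()   # raises queue.Empty instead of blocking
        except queue.Empty:
            time.sleep(1)                  # nothing queued yet; back off briefly
            continue
        print(d)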
You are calling a blocking get on your input_queue with no timeout. In either section of logData, if you call input_queue.get() and the queue is empty, it will block indefinitely, preventing the logging_thread from reaching completion.
To fix, you will want to call input_queue.get_nowait() or pass a timeout to input_queue.get().
Here is my suggestion:
def logData(input_queue, stop_event):
    n = 0
    while not stop_event.is_set():
        try:
            d = input_queue.get_nowait()
            if d.startswith("DATA:"):
                print "LOG: " + d
            n += 1
        except Queue.Empty:
            time.sleep(1)
    return
You are also signalling the threads to terminate, but not waiting for them to do so. Consider doing this in your main function.
try:
    while True:
        time.sleep(10)
except (KeyboardInterrupt, SystemExit):
    stop_event.set()
    collection_thread.join()
    logging_thread.join()
Based on the answer of tdelaney I created an iterator-based approach. The iterator exits when the termination message is encountered. I also added a counter of how many get calls are currently blocking, and a stop method, which sends just as many termination messages. To prevent a race condition between incrementing and reading the counter, I set a stopped flag there. Furthermore, I don't use None as the termination message, because it cannot necessarily be compared to other data types when using a PriorityQueue.
There are two restrictions that I had no need to eliminate. First, the stop method waits until the queue is empty before shutting down the threads. Second, I did not add any code to make the queue reusable after stop. The latter can probably be added quite easily, while the former requires being careful about concurrency and the context in which the code is used.
You have to decide whether you want stop to also wait for all the termination messages to be consumed. I chose to put the necessary join there, but you may just remove it.
So this is the code:
import threading, queue
from functools import total_ordering


@total_ordering
class Final:
    def __repr__(self):
        return "∞"

    def __lt__(self, other):
        return False

    def __eq__(self, other):
        return isinstance(other, Final)


Infty = Final()


class IterQueue(queue.Queue):
    def __init__(self):
        self.lock = threading.Lock()
        self.stopped = False
        self.getters = 0
        super().__init__()

    def __iter__(self):
        return self

    def get(self):
        raise NotImplementedError("This queue may only be used as an iterator.")

    def __next__(self):
        with self.lock:
            if self.stopped:
                raise StopIteration
            self.getters += 1
        data = super().get()
        if data == Infty:
            self.task_done()
            raise StopIteration
        with self.lock:
            self.getters -= 1
        return data

    def stop(self):
        self.join()
        self.stopped = True
        with self.lock:
            for i in range(self.getters):
                self.put(Infty)
        self.join()


class IterPriorityQueue(IterQueue, queue.PriorityQueue):
    pass
Oh, and I wrote this in Python 3.2, so here is a backport to Python 2:
import threading, Queue
from functools import total_ordering


@total_ordering
class Final:
    def __repr__(self):
        return "Infinity"

    def __lt__(self, other):
        return False

    def __eq__(self, other):
        return isinstance(other, Final)


Infty = Final()


class IterQueue(Queue.Queue, object):
    def __init__(self):
        self.lock = threading.Lock()
        self.stopped = False
        self.getters = 0
        super(IterQueue, self).__init__()

    def __iter__(self):
        return self

    def get(self):
        raise NotImplementedError("This queue may only be used as an iterator.")

    def next(self):
        with self.lock:
            if self.stopped:
                raise StopIteration
            self.getters += 1
        data = super(IterQueue, self).get()
        if data == Infty:
            self.task_done()
            raise StopIteration
        with self.lock:
            self.getters -= 1
        return data

    def stop(self):
        self.join()
        self.stopped = True
        with self.lock:
            for i in range(self.getters):
                self.put(Infty)
        self.join()


class IterPriorityQueue(IterQueue, Queue.PriorityQueue):
    pass
You would use it as:
import random
import time


def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
        data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    return


def logData(input_queue):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    for d in input_queue:
        if d.startswith("DATA:"):
            print d
            input_queue.task_done()
            n += 1


def main():
    input_queue = IterQueue()
    stop_event = threading.Event()  # used to signal termination to the threads

    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."

    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue,))
    logging_thread.start()
    print "Done."

    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging everything in the queue
        stop_event.set()
        input_queue.stop()


main()

Cancellable threading.Timer in Python

I am trying to write a method that counts down to a given time and, unless a restart command is given, executes the task. But I don't think Python's threading.Timer class allows the timer to be cancelled.
import threading


def countdown(action):
    def printText():
        print 'hello!'

    t = threading.Timer(5.0, printText)
    if (action == 'reset'):
        t.cancel()
    t.start()
I know the above code is wrong somehow. I would appreciate some guidance here.
You would call the cancel method after you start the timer:
import time
import threading


def hello():
    print "hello, world"
    time.sleep(2)


t = threading.Timer(3.0, hello)
t.start()

var = 'something'
if var == 'something':
    t.cancel()
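Since a started Timer cannot be restarted, a common follow-up pattern (my Python 3 sketch, not part of the original answer) is to wrap cancel-and-recreate in a small helper class:

import threading


class ResettableTimer:
    """Hypothetical helper: a countdown that can be cancelled and restarted.

    threading.Timer objects cannot be restarted, so reset() cancels the
    current one and creates a fresh Timer with the same interval/callback.
    """

    def __init__(self, interval, callback):
        self.interval = interval
        self.callback = callback
        self._timer = threading.Timer(interval, callback)

    def start(self):
        self._timer.start()

    def reset(self):
        self._timer.cancel()
        self._timer = threading.Timer(self.interval, self.callback)
        self._timer.start()

    def cancel(self):
        self._timer.cancel()


# Usage: restart the countdown instead of letting it fire.
t = ResettableTimer(5.0, lambda: print('hello!'))
t.start()
t.reset()   # the original 5-second countdown is cancelled and begins again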
You might consider using a while-loop on a Thread, instead of using a Timer.
Here is an example appropriated from Nikolaus Gradwohl's answer to another question:
import threading
import time


class TimerClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.event = threading.Event()
        self.count = 10

    def run(self):
        while self.count > 0 and not self.event.is_set():
            print self.count
            self.count -= 1
            self.event.wait(1)

    def stop(self):
        self.event.set()


tmr = TimerClass()
tmr.start()
time.sleep(3)
tmr.stop()
I'm not sure I understand correctly. Do you want to write something like this example?
>>> import threading
>>> t = None
>>>
>>> def sayHello():
...     global t
...     print "Hello!"
...     t = threading.Timer(0.5, sayHello)
...     t.start()
...
>>> sayHello()
Hello!
Hello!
Hello!
Hello!
Hello!
>>> t.cancel()
>>>
The threading.Timer class does have a cancel method, and although it won't cancel the thread, it will stop the timer from actually firing. What happens is that the cancel method sets a threading.Event, and the thread executing the threading.Timer checks that event after it is done waiting, before it actually executes the callback.
That said, timers are usually implemented without using a separate thread for each one. The best way to do it depends on what your program is doing while waiting for this timer, but anything with an event loop, like GUI and network frameworks, has a way to request a timer that is hooked into the event loop.
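For example (my illustration, not part of the original answer), asyncio's event loop offers cancellable timers through call_later, with no dedicated thread per timer:

import asyncio


async def main():
    loop = asyncio.get_running_loop()

    # Schedule a callback 5 seconds from now; call_later returns a TimerHandle.
    handle = loop.call_later(5.0, print, "hello!")

    await asyncio.sleep(1)
    handle.cancel()          # cancelled before it fires, so nothing is printed

    await asyncio.sleep(5)   # give it time to prove the callback never runs


asyncio.run(main())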
I'm not sure if this is the best option, but for me it works like this: create each timer with t = timer_mgr(.....), append it to a list with timers.append(t), and then, after all of them are created, you can call:

for tm in timers:  # threading.enumerate():
    print "********", tm.cancel()

My timer_mgr() class is this:
class timer_mgr():
    def __init__(self, st, t, hFunction, id, name):
        self.is_list = (type(st) is list)
        self.st = st
        self.t = t
        self.id = id
        self.hFunction = hFunction
        self.thread = threading.Timer(t, self.handle_function, [id])
        self.thread.name = name

    def handle_function(self, id):
        if self.is_list:
            print "run_at_time:", datetime.now()
            self.hFunction(id)
            dt = schedule_fixed_times(datetime.now(), self.st)
            print "next:", dt
            self.t = (dt - datetime.now()).total_seconds()
        else:
            self.t = self.st
            print "run_every", self.t, datetime.now()
            self.hFunction(id)
        self.thread = threading.Timer(self.t, self.handle_function, [id])
        self.thread.start()

    def start(self):
        self.thread.start()

    def cancel(self):
        self.thread.cancel()
Inspired by the above post: a cancellable and resettable timer in Python. It uses a thread.
Features: start, stop, restart, and a callback function.
Input: timeout, sleep_chunk values, and a callback_function.
You can use or inherit this class in any other program, and you can also pass arguments to the callback function.
The timer should also respond in the middle of the interval, not just after the full sleep time has elapsed. So instead of one full sleep, it sleeps in small chunks and keeps checking the event object in a loop.
import threading
import time


class TimerThread(threading.Thread):
    def __init__(self, timeout=3, sleep_chunk=0.25, callback=None, *args):
        threading.Thread.__init__(self)
        self.timeout = timeout
        self.sleep_chunk = sleep_chunk
        if callback == None:
            self.callback = None
        else:
            self.callback = callback
        self.callback_args = args
        self.terminate_event = threading.Event()
        self.start_event = threading.Event()
        self.reset_event = threading.Event()
        self.count = self.timeout/self.sleep_chunk

    def run(self):
        while not self.terminate_event.is_set():
            while self.count > 0 and self.start_event.is_set():
                # print self.count
                # time.sleep(self.sleep_chunk)
                # if self.reset_event.is_set():
                if self.reset_event.wait(self.sleep_chunk):  # wait for a small chunk of timeout
                    self.reset_event.clear()
                    self.count = self.timeout/self.sleep_chunk  # reset
                self.count -= 1
            if self.count <= 0:
                self.start_event.clear()
                # print 'timeout. calling function...'
                self.callback(*self.callback_args)
                self.count = self.timeout/self.sleep_chunk  # reset

    def start_timer(self):
        self.start_event.set()

    def stop_timer(self):
        self.start_event.clear()
        self.count = self.timeout / self.sleep_chunk  # reset

    def restart_timer(self):
        # reset only if timer is running. otherwise start timer afresh
        if self.start_event.is_set():
            self.reset_event.set()
        else:
            self.start_event.set()

    def terminate(self):
        self.terminate_event.set()


#=================================================================
def my_callback_function():
    print 'timeout, do this...'


timeout = 6  # sec
sleep_chunk = .25  # sec

tmr = TimerThread(timeout, sleep_chunk, my_callback_function)
tmr.start()

quit = '0'
while True:
    quit = raw_input("Proceed or quit: ")
    if quit == 'q':
        tmr.terminate()
        tmr.join()
        break

    tmr.start_timer()
    if raw_input("Stop ? : ") == 's':
        tmr.stop_timer()
    if raw_input("Restart ? : ") == 'r':
        tmr.restart_timer()
