Cannot move an item from one queue to another - Python

I'm working with two Python queues.
A short description of my issue:
Clients pass through the waiting queue (q1) and are served afterwards. The size of the waiting queue can't be greater than N (10 in my program). If the waiting queue becomes full, clients go to the outside queue (q2, size 20). If the outside queue becomes full, clients are rejected and not served.
Every client that leaves the waiting queue frees a slot, allowing another client from the outside queue to join it.
Access to the queues must be thread-safe.
Below I implemented approximately what I want, but I ran into a problem: enqueuing a client from the outside queue (q2) into the waiting queue (q1) while the serve function is executing. I guess I lost or forgot something important. I think the statement q1.put(client) blocks forever, but I don't know why.
import time
import threading
from random import randrange
from Queue import Queue, Full as FullQueue


class Client(object):
    def __repr__(self):
        return '<{0}: {1}>'.format(self.__class__.__name__, id(self))


def serve(q1, q2):
    while True:
        if not q2.empty():
            client = q2.get()
            print '%s left the outside queue' % client
            q1.put(client)
            print '%s is in the waiting queue' % client
            q2.task_done()
        client = q1.get()
        print '%s left the waiting queue for serving' % client
        time.sleep(2)  # Do something with the client
        q1.task_done()


def main():
    waiting_queue = Queue(10)
    outside_queue = Queue(20)
    for _ in range(2):
        worker = threading.Thread(target=serve, args=(waiting_queue, outside_queue))
        worker.setDaemon(True)
        worker.start()

    delays = [randrange(1, 5) for _ in range(100)]
    # Every d seconds 10 clients enter the waiting queue
    for d in delays:
        time.sleep(d)
        for _ in range(10):
            client = Client()
            try:
                waiting_queue.put_nowait(client)
            except FullQueue:
                print 'Waiting queue is full. Please line up in the outside queue.'
                try:
                    outside_queue.put_nowait(client)
                except FullQueue:
                    print 'Outside queue is full. Please go out.'
    waiting_queue.join()
    outside_queue.join()
    print 'Done'


if __name__ == '__main__':
    main()
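The hang is easy to reproduce in isolation: put() on a full Queue blocks until a consumer frees a slot, while put_nowait() raises Full instead. A minimal standalone sketch (Python 2, same Queue module as above):

from Queue import Queue, Full

q = Queue(1)
q.put('a')             # fills the queue completely
try:
    q.put_nowait('b')  # the non-blocking put raises Full instead of hanging
except Full:
    print 'put_nowait raised Full'
# q.put('b') here would block forever, because nothing ever calls q.get()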

Finally I found the solution; I should have checked the docs more attentively:
"If full() returns True it doesn't guarantee that a subsequent call to get() will not block." https://docs.python.org/2/library/queue.html#Queue.Queue.full
That's why q1.full() is not reliable across multiple threads. I added a mutex around checking whether a queue is full and inserting the item:
import time
import threading
from random import randrange
from Queue import Queue, Full as FullQueue


class Client(object):
    def __init__(self, ident):
        self.ident = ident

    def __repr__(self):
        return '<{0}: {1}>'.format(self.__class__.__name__, self.ident)


def serve(q1, q2, mutex):
    while True:
        client = q1.get()
        print '%s left the waiting queue for serving' % client
        time.sleep(2)  # Do something with the client
        q1.task_done()
        with mutex:
            if not q2.empty() and not q1.full():
                client = q2.get()
                print '%s left the outside queue' % client
                q1.put(client)
                print '%s is in the waiting queue' % client
                q2.task_done()


def main():
    waiting_queue = Queue(10)
    outside_queue = Queue(20)
    lock = threading.RLock()
    for _ in range(2):
        worker = threading.Thread(target=serve, args=(waiting_queue, outside_queue, lock))
        worker.setDaemon(True)
        worker.start()

    # Every 1-5 seconds 10 clients enter the waiting room
    i = 1  # Used as a unique integer client id
    while True:
        delay = randrange(1, 5)
        time.sleep(delay)
        for _ in range(10):
            client = Client(i)
            try:
                lock.acquire()
                if not waiting_queue.full():
                    waiting_queue.put(client)
                else:
                    outside_queue.put_nowait(client)
            except FullQueue:
                # print 'Outside queue is full. Please go out.'
                pass
            finally:
                lock.release()
            i += 1
    waiting_queue.join()
    outside_queue.join()
    print 'Done'


if __name__ == '__main__':
    main()
Now it works well.
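The essence of the fix is that the emptiness/fullness check and the following get/put must happen atomically. As a minimal sketch, the same check-then-act step can be factored into a helper (try_move is a name introduced here for illustration, not part of the code above):

def try_move(src, dst, mutex):
    # Atomically move one item from src to dst if dst has room.
    # Holding the lock across the check *and* the put closes the
    # race that the docs warn about.
    with mutex:
        if not src.empty() and not dst.full():
            item = src.get()
            dst.put(item)
            src.task_done()
            return item
    return None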

Related

How can I ACK a message after doing analysis

I am using RabbitMQ to get messages from a queue.
These messages are processed and then sent to a different queue in RabbitMQ.
Here is how my program works:
I have a consuming thread that puts each received message into a local queue.
Another thread listens on that queue, and when a message arrives a nested thread is created for the analysis.
When the analysis is done, the message is sent to RabbitMQ.
I am trying to ACK after this operation is done, but since the consuming thread works faster, my channel is closed. How can I ACK after finishing my analysis?
Here is my Python code:
import json
import os
import sys
import time
from queue import Queue
from threading import Thread

import pandas as pd
import pickle
import pika


class MyAnalysisThread(Thread):
    def __init__(self, comingQuery, deliverTag):
        Thread.__init__(self)
        self.comingQuery = comingQuery
        self.deliverTag = deliverTag

    def run(self):
        analysis(self.comingQuery, self.deliverTag)


def sendToRabbit(linkFeatures):
    # ... send ...
    print(" [x] send to rabbitMQ ")


def analysis(comingQuery, deliverTag):
    # ... do analysis ...
    sendToRabbit(message)  # `message` comes from the elided analysis step
    global ResiveChannel
    # I want to ACK HERE, after the analysis is finished and the result sent
    ResiveChannel.basic_ack(delivery_tag=deliverTag, multiple=False)


def analysisCall(messageQueue, consumerClose):
    print('//////////////////////////// analysis started')
    global sendChannel
    sendChannel.queue_declare(queue='send')
    while True:
        if not messageQueue.empty():
            # get the message
            message = messageQueue.get()
            comingQuery = message['comingQuery']
            deliverTag = message['deliverTag']
            messageQueue.task_done()
            # each message gets its own thread for analysis
            analysisThread = MyAnalysisThread(comingQuery, deliverTag)
            analysisThread.start()
            analysisThread.join()
        elif not consumerClose.empty():
            # when the consumer is stopped, leave the loop
            if consumerClose.get() == True:
                print('consumer stopped')
                break
        else:
            print('sleeping for .....1')
            # wait a second before the next iteration
            time.sleep(1)


def consumeFromRabbit(messageQueue, consumerClose):
    def callback(ch, method, properties, body):
        comingQuery = json.loads(body)
        message = {'comingQuery': comingQuery,
                   'deliverTag': method.delivery_tag,
                   }
        messageQueue.put(message)

    global ResiveChannel
    ResiveChannel.basic_consume(queue='link_raw', on_message_callback=callback, auto_ack=False)
    print(' [*] Waiting for messages. To exit press CTRL+C')
    try:
        ResiveChannel.start_consuming()
    except KeyboardInterrupt:
        consumerClose.put(True)
        print("consumer stopped")


def main():
    print('this is main')
    # create shared queues for passing messages
    messageQueue = Queue()
    consumerClose = Queue()
    consumerThread = Thread(target=consumeFromRabbit, args=(messageQueue, consumerClose))
    analysisThreadCall = Thread(target=analysisCall, args=(messageQueue, consumerClose))
    consumerThread.start()
    analysisThreadCall.start()
    consumerThread.join()
    analysisThreadCall.join()


if __name__ == '__main__':
    # create connections; host, port and credentials were elided in the original
    Sendconnection = pika.BlockingConnection(pika.ConnectionParameters('host', port, '/', credentials))
    sendChannel = Sendconnection.channel()
    ResiveConnection = pika.BlockingConnection(pika.ConnectionParameters('host', port, '/', credentials))
    ResiveChannel = ResiveConnection.channel()
    try:
        main()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
The error I got after a few threads is:
Exception in thread Thread-38:
pika.exceptions.ChannelWrongStateError: Channel is closed.
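For context: pika's BlockingConnection and its channels are not thread-safe, so calling basic_ack from the analysis thread races with the consumer loop, which is the usual cause of ChannelWrongStateError. A minimal sketch of the thread-safe pattern (assuming pika >= 1.0; ack_threadsafe is an illustrative name, not from the code above):

def ack_threadsafe(connection, channel, delivery_tag):
    # Schedule the ack on the thread that owns the connection instead
    # of calling basic_ack directly from the analysis thread.
    def do_ack():
        if channel.is_open:
            channel.basic_ack(delivery_tag=delivery_tag)
    # add_callback_threadsafe may be called from any thread; the
    # callback itself runs on the connection's own I/O loop.
    connection.add_callback_threadsafe(do_ack)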

RabbitMQ Unack'ed messages not getting requeued

I'm facing an issue in running RabbitMQ consumers for a long time. Several of my messages end up in an unack'ed state.
My RabbitMQ version: 3.6.15
Pika version: 0.11.0b
import pika
import time
import sys
import threading
from Queue import Queue

rabbitmq_server = "<SERVER>"
queue = "<QUEUE>"
connection = None


def check_acknowledge(channel, connection, ack_queue):
    delivery_tag = None
    while True:
        try:
            delivery_tag = ack_queue.get_nowait()
            channel.basic_nack(delivery_tag=delivery_tag)
            break
        except:
            connection.process_data_events()
            time.sleep(1)


def process_message(body, delivery_tag, ack_queue):
    print "Received %s" % (body)
    print "Waiting for 600 seconds before receiving next ID\n"
    start = time.time()
    elapsed = 0
    while elapsed < 10:
        elapsed = time.time() - start
        print "loop cycle time: %f, seconds count: %02d" % (time.clock(), elapsed)
        time.sleep(1)
    ack_queue.put(delivery_tag)


def callback(ch, method, properties, body):
    global connection
    ack_queue = Queue()
    t = threading.Thread(target=process_message, args=(body, method.delivery_tag, ack_queue))
    t.start()
    check_acknowledge(ch, connection, ack_queue)


while True:
    try:
        connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_server))
        channel = connection.channel()
        print ' [*] Waiting for messages. To exit press CTRL+C'
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(callback, queue=queue)
        channel.start_consuming()
    except KeyboardInterrupt:
        break

channel.close()
connection.close()
exit(0)
Am I missing something here?
I used the following multi-threaded consumer to solve this problem.
import pika
import time
import sys
import threading
from Queue import Queue

rabbitmq_server = "<RABBITMQ_SERVER_IP>"
queue = "hello1"
connection = None


def check_acknowledge(channel, connection, ack_queue):
    delivery_tag = None
    while True:
        try:
            delivery_tag = ack_queue.get_nowait()
            channel.basic_ack(delivery_tag=delivery_tag)
            break
        except:
            connection.process_data_events()
            time.sleep(1)


def process_message(body, delivery_tag, ack_queue):
    print "Received %s" % (body)
    print "Waiting for 600 seconds before receiving next ID\n"
    start = time.time()
    elapsed = 0
    while elapsed < 300:
        elapsed = time.time() - start
        print "loop cycle time: %f, seconds count: %02d" % (time.clock(), elapsed)
        time.sleep(1)
    ack_queue.put(delivery_tag)


def callback(ch, method, properties, body):
    global connection
    ack_queue = Queue()
    t = threading.Thread(target=process_message, args=(body, method.delivery_tag, ack_queue))
    t.start()
    check_acknowledge(ch, connection, ack_queue)


while True:
    try:
        connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_server))
        channel = connection.channel()
        print ' [*] Waiting for messages. To exit press CTRL+C'
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(callback, queue=queue)
        channel.start_consuming()
    except KeyboardInterrupt:
        break

channel.close()
connection.close()
exit(0)
The consumer callback triggers a separate function, check_acknowledge, in the main thread itself. Because of this, the connection and channel objects are retained in the same thread. Note that Pika is not thread-safe, so we need to keep these objects in a single thread.
The actual processing happens in a new thread spawned off the main one.
Once process_message is done with its processing, it puts the delivery_tag in the queue.
check_acknowledge loops indefinitely until it finds the delivery_tag that process_message put in the queue; the connection.process_data_events() call in its wait loop keeps servicing the connection in the meantime. Once it finds the tag, it acks the message and returns.
I have tested this implementation by running the consumer with sleeps of 5 minutes, 10 minutes, 30 minutes and an hour. It is working very well for me.

Timeouts for multiprocessing?

I've searched Stack Overflow, and although I've found many questions on this, I haven't found an answer that fits my situation; I'm not a strong enough Python programmer to adapt the answers to fit my need.
I've looked here to no avail:
kill a function after a certain time in windows
Python: kill or terminate subprocess when timeout
signal.alarm replacement in Windows [Python]
I am using multiprocessing to run multiple SAP windows at once to pull reports. The script is set up to run on a schedule every 5 minutes. Every once in a while, one of the reports stalls in the GUI interface and never ends; I get no error or exception, it just hangs forever. What I would like is a timeout around the part of the code executed in SAP: if it takes longer than 4 minutes, it times out, closes SAP, skips the rest of the code, and waits for the next scheduled report time.
I am using Python 2.7 on Windows.
import multiprocessing
from multiprocessing import Manager, Process
import time
import datetime


### OPEN SAP ###
def start_SAP():
    print 'opening SAP program'


### REPORTS IN SAP ###
def report_1(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report 1'


def report_2(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report 2'


def report_3(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    time.sleep(60000)  # mimicking the stall for report 3, which takes longer than the allotted time
    print 'running report 3'


def report_N(q, lock):
    while True:  # logic to get shared queue
        if not q.empty():
            lock.acquire()
            k = q.get()
            time.sleep(1)
            lock.release()
            break
        else:
            time.sleep(1)
    print 'running report N'


### CLOSES SAP ###
def close_SAP():
    print 'closes SAP'


def format_file():
    print 'formatting files'


def multi_daily_pull():
    lock = multiprocessing.Lock()  # creating a lock in multiprocessing
    shared_list = range(6)  # creating a shared list for all functions to use
    q = multiprocessing.Queue()  # creating an empty queue in multiprocessing
    for n in shared_list:  # putting list into the queue
        q.put(n)
    print 'Starting process at ', time.strftime('%m/%d/%Y %H:%M:%S')
    print 'Starting SAP Pulls at ', time.strftime('%m/%d/%Y %H:%M:%S')
    StartSAP = Process(target=start_SAP)
    StartSAP.start()
    StartSAP.join()
    report1 = Process(target=report_1, args=(q, lock))
    report2 = Process(target=report_2, args=(q, lock))
    report3 = Process(target=report_3, args=(q, lock))
    reportN = Process(target=report_N, args=(q, lock))
    report1.start()
    report2.start()
    report3.start()
    reportN.start()
    report1.join()
    report2.join()
    report3.join()
    reportN.join()
    EndSAP = Process(target=close_SAP)
    EndSAP.start()
    EndSAP.join()
    formatfile = Process(target=format_file)
    formatfile.start()
    formatfile.join()


if __name__ == '__main__':
    multi_daily_pull()
One way to do what you want would be to use the optional timeout argument that the Process.join() method accepts. This makes it block the calling thread for at most that length of time.
I also set the daemon attribute of each Process instance so your main thread will be able to terminate even if one of the processes it started is still "running" (or has hung).
One final point: you don't need a multiprocessing.Lock to control access to a multiprocessing.Queue, because queues handle that aspect of things automatically, so I removed it. You may still want one for some other reason, such as controlling access to stdout so printing from the various processes doesn't overlap and garble what is output to the screen.
import multiprocessing
from multiprocessing import Process
import time
import datetime


def start_SAP():
    print 'opening SAP program'


### REPORTS IN SAP ###
def report_1(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report 1 finished'


def report_2(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report 2 finished'


def report_3(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(60000)  # Take longer than the allotted time
            break
    print 'report 3 finished'


def report_N(q):
    while True:  # logic to get shared queue
        if q.empty():
            time.sleep(1)
        else:
            k = q.get()
            time.sleep(1)
            break
    print 'report N finished'


def close_SAP():
    print 'closing SAP'


def format_file():
    print 'formatting files'


def multi_daily_pull():
    shared_list = range(6)  # creating a shared list for all functions to use
    q = multiprocessing.Queue()  # creating an empty queue in multiprocessing
    for n in shared_list:  # putting list into the queue
        q.put(n)
    print 'Starting process at ', time.strftime('%m/%d/%Y %H:%M:%S')
    print 'Starting SAP Pulls at ', time.strftime('%m/%d/%Y %H:%M:%S')
    StartSAP = Process(target=start_SAP)
    StartSAP.start()
    StartSAP.join()
    report1 = Process(target=report_1, args=(q,))
    report1.daemon = True
    report2 = Process(target=report_2, args=(q,))
    report2.daemon = True
    report3 = Process(target=report_3, args=(q,))
    report3.daemon = True
    reportN = Process(target=report_N, args=(q,))
    reportN.daemon = True
    report1.start()
    report2.start()
    report3.start()
    reportN.start()
    report1.join(30)
    report2.join(30)
    report3.join(30)
    reportN.join(30)
    EndSAP = Process(target=close_SAP)
    EndSAP.start()
    EndSAP.join()
    formatfile = Process(target=format_file)
    formatfile.start()
    formatfile.join()


if __name__ == '__main__':
    multi_daily_pull()
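One caveat (my addition, not part of the answer above): join(30) only bounds how long the main process waits; a report that has hung keeps running as a daemon until the interpreter exits. If the stalled SAP pull should actually be killed after the timeout, the timed join can be followed by an is_alive() check and terminate(), for example in place of the four join(30) calls:

reports = [report1, report2, report3, reportN]
for p in reports:
    p.join(30)           # wait at most 30 seconds for this process
for p in reports:
    if p.is_alive():     # it exceeded the timeout and is presumed hung
        print '%s timed out; terminating' % p.name
        p.terminate()    # TerminateProcess() on Windows, SIGTERM elsewhere
        p.join()         # reap the terminated process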

Why are threads not released after all work is consumed from a Python Queue

I use a Queue to provide tasks for threads to work on. After all the work in the Queue is done, I see that the threads are still alive, while I expected them to be released. Here is my code. You can see from the console that the number of active threads grows with each batch of tasks (each batch gets its own queue). How can I release the threads after a batch of work gets done?
import threading
import time
from Queue import Queue


class ThreadWorker(threading.Thread):
    def __init__(self, task_queue):
        threading.Thread.__init__(self)
        self.task_queue = task_queue

    def run(self):
        while True:
            work = self.task_queue.get()
            # do some work
            # do_work(work)
            time.sleep(0.1)
            self.task_queue.task_done()


def get_batch_work_done(works):
    task_queue = Queue()
    for _ in range(5):
        t = ThreadWorker(task_queue)
        t.setDaemon(True)
        t.start()
    for work in range(works):
        task_queue.put(work)
    task_queue.join()
    print 'get batch work done'
    print 'active threads count is {}'.format(threading.activeCount())


if __name__ == '__main__':
    for work_number in range(3):
        print 'start with {}'.format(work_number)
        get_batch_work_done(work_number)
Do a non-blocking read in a loop and use exception handling to terminate:

from Queue import Empty  # the exception lives in the Queue module, not on the Queue class

def run(self):
    try:
        while True:
            work = self.task_queue.get(True, 0.1)  # wait at most 0.1 s for work
            # do some work
            # do_work(work)
            self.task_queue.task_done()  # still needed so task_queue.join() returns
    except Empty:
        print "goodbye"
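Alternatively (my suggestion, not part of the answer above), a sentinel avoids guessing a timeout: enqueue one None per worker after the real work, and let each worker exit when it sees one.

def run(self):
    while True:
        work = self.task_queue.get()
        if work is None:                 # sentinel: this worker is done
            self.task_queue.task_done()
            return
        # do some work
        # do_work(work)
        time.sleep(0.1)
        self.task_queue.task_done()

# in get_batch_work_done(), after putting the real work:
#     for _ in range(5):                 # one sentinel per worker thread
#         task_queue.put(None)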

How to let a Python thread finish gracefully

I'm doing a project involving data collection and logging. I have two threads running, a collection thread and a logging thread, both started in main. I'm trying to allow the program to be terminated gracefully with Ctrl-C.
I'm using a threading.Event to signal the threads to end their respective loops. It works fine for stopping the sim_collectData method, but it doesn't seem to stop the logData thread properly. The "Collection terminated" print statement is never executed, and the program just stalls. (It doesn't end, it just sits there.)
The second while loop in logData is there to make sure everything in the queue is logged. The goal is for Ctrl-C to stop the collection thread immediately, then let the logging thread finish emptying the queue, and only then fully terminate the program. (Right now the data is just being printed out; eventually it's going to be logged to a database.)
I don't understand why the second thread never terminates. I'm basing what I've done on this answer: Stopping a thread after a certain amount of time. What am I missing?
import random
import threading
import time
import Queue


def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
    data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    return


def logData(input_queue, stop_event):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    while not stop_event.is_set():
        d = input_queue.get()
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1
    # if the stop event is received and the previous loop terminates,
    # finish logging the rest of the items in the queue.
    print "Collection terminated. Logging remaining data to database..."
    while not input_queue.empty():
        d = input_queue.get()
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1
    return


def main():
    input_queue = Queue.Queue()
    stop_event = threading.Event()  # used to signal termination to the threads
    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."
    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue, stop_event))
    logging_thread.start()
    print "Done."
    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging
        # everything in the queue
        stop_event.set()


main()
The problem is that your logger is waiting on d = input_queue.get() and will not check the event. One solution is to skip the event completely and invent a unique message that tells the logger to stop. When you get a signal, send that message to the queue.
import threading
import Queue
import random
import time


def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
    data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    input_queue.put(None)
    return


def logData(input_queue):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    while True:
        d = input_queue.get()
        if d is None:
            input_queue.task_done()
            return
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1


def main():
    input_queue = Queue.Queue()
    stop_event = threading.Event()  # used to signal termination to the threads
    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."
    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue,))
    logging_thread.start()
    print "Done."
    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging
        # everything in the queue
        stop_event.set()


main()
I'm not an expert in threading, but in your logData function the first d = input_queue.get() is blocking, i.e., if the queue is empty it will sit and wait forever until a queue message is received. This is likely why the logData thread never terminates: it's sitting there waiting forever for a queue message.
Refer to the Python docs to change this to a non-blocking queue read: use .get(False) or .get_nowait(), but either will require some exception handling for the case when the queue is empty.
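A minimal sketch of such a non-blocking read (assuming import Queue, as in the question's code):

try:
    d = input_queue.get_nowait()   # or input_queue.get(False)
except Queue.Empty:
    d = None                       # the queue was empty this time around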
You are calling a blocking get on your input_queue with no timeout. In either section of logData, if you call input_queue.get() and the queue is empty, it will block indefinitely, preventing the logging_thread from reaching completion.
To fix, you will want to call input_queue.get_nowait() or pass a timeout to input_queue.get().
Here is my suggestion:
def logData(input_queue, stop_event):
    n = 0
    while not stop_event.is_set():
        try:
            d = input_queue.get_nowait()
            if d.startswith("DATA:"):
                print "LOG: " + d
            n += 1
        except Queue.Empty:
            time.sleep(1)
    return
You are also signalling the threads to terminate, but not waiting for them to do so. Consider doing this in your main function.
try:
    while True:
        time.sleep(10)
except (KeyboardInterrupt, SystemExit):
    stop_event.set()
    collection_thread.join()
    logging_thread.join()
Based on tdelaney's answer, I created an iterator-based approach. The iterator exits when the termination message is encountered. I also added a counter of how many get calls are currently blocking, and a stop method that sends just as many termination messages. To prevent a race condition between incrementing and reading the counter, I set a stopping bit there. Furthermore, I don't use None as the termination message, because it cannot necessarily be compared to other data types when using a PriorityQueue.
There are two restrictions that I had no need to eliminate. First, the stop method waits until the queue is empty before shutting down the threads. Second, I did not add any code to make the queue reusable after stop. The latter can probably be added quite easily, while the former requires being careful about concurrency and the context in which the code is used.
You have to decide whether you want stop to also wait for all the termination messages to be consumed. I chose to put the necessary join there, but you may just remove it.
So this is the code:
import threading, queue
from functools import total_ordering


@total_ordering
class Final:
    def __repr__(self):
        return "∞"

    def __lt__(self, other):
        return False

    def __eq__(self, other):
        return isinstance(other, Final)


Infty = Final()


class IterQueue(queue.Queue):
    def __init__(self):
        self.lock = threading.Lock()
        self.stopped = False
        self.getters = 0
        super().__init__()

    def __iter__(self):
        return self

    def get(self):
        raise NotImplementedError("This queue may only be used as an iterator.")

    def __next__(self):
        with self.lock:
            if self.stopped:
                raise StopIteration
            self.getters += 1
        data = super().get()
        if data == Infty:
            self.task_done()
            raise StopIteration
        with self.lock:
            self.getters -= 1
        return data

    def stop(self):
        self.join()
        self.stopped = True
        with self.lock:
            for i in range(self.getters):
                self.put(Infty)
        self.join()


class IterPriorityQueue(IterQueue, queue.PriorityQueue):
    pass
Oh, and I wrote this in Python 3.2, so after backporting to Python 2:
import threading, Queue
from functools import total_ordering


@total_ordering
class Final:
    def __repr__(self):
        return "Infinity"

    def __lt__(self, other):
        return False

    def __eq__(self, other):
        return isinstance(other, Final)


Infty = Final()


class IterQueue(Queue.Queue, object):
    def __init__(self):
        self.lock = threading.Lock()
        self.stopped = False
        self.getters = 0
        super(IterQueue, self).__init__()

    def __iter__(self):
        return self

    def get(self):
        raise NotImplementedError("This queue may only be used as an iterator.")

    def next(self):
        with self.lock:
            if self.stopped:
                raise StopIteration
            self.getters += 1
        data = super(IterQueue, self).get()
        if data == Infty:
            self.task_done()
            raise StopIteration
        with self.lock:
            self.getters -= 1
        return data

    def stop(self):
        self.join()
        self.stopped = True
        with self.lock:
            for i in range(self.getters):
                self.put(Infty)
        self.join()


class IterPriorityQueue(IterQueue, Queue.PriorityQueue):
    pass
you would use it as
import random
import time


def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
    data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    return


def logData(input_queue):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    for d in input_queue:
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1


def main():
    input_queue = IterQueue()
    stop_event = threading.Event()  # used to signal termination to the threads
    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."
    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue,))
    logging_thread.start()
    print "Done."
    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging
        # everything in the queue
        stop_event.set()
        input_queue.stop()


main()
