How does Python's thread-safe queue work after calling get()?

In the example from the documentation:
import threading, queue

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        print(f'Working on {item}')
        print(f'Finished {item}')
        q.task_done()

# turn-on the worker thread
threading.Thread(target=worker, daemon=True).start()

# send thirty task requests to the worker
for item in range(30):
    q.put(item)
print('All task requests sent\n', end='')

# block until all tasks are done
q.join()
print('All work completed')
After the worker thread gets an item from the queue (which I assume is protected by a lock, so that checking and modifying the queue is atomic), it prints. How are the prints also atomic across all the worker threads, so that we won't see intermingled output?

Your threads print to stdout, which is a shared global object. One possible solution is to use a threading.Lock or threading.Semaphore to guard stdout. For example:
import threading, queue

print_semaphore = threading.Semaphore()
q = queue.Queue()

def worker():
    while True:
        item = q.get()
        with print_semaphore:
            print(f'Working on {item}')
            print(f'Finished {item}')
        q.task_done()

# turn-on the worker thread
threading.Thread(target=worker, daemon=True).start()

# send ninety-nine task requests to the worker
for item in range(99):
    q.put(item)
with print_semaphore:
    print('All task requests sent\n', end='')

# block until all tasks are done
q.join()
with print_semaphore:
    print('All work completed')
Another solution would be to introduce another queue, and have your threads put messages to that queue, instead of printing to stdout directly.
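For example, here is a minimal sketch of that second approach (the log_q and printer names are mine, not from the question): a single printer thread owns stdout, and the workers only put messages on a message queue, so output can never interleave no matter how many workers there are.

import threading, queue

log_q = queue.Queue()  # hypothetical message queue for all print output
q = queue.Queue()

def printer():
    # the only thread that ever touches stdout
    while True:
        msg = log_q.get()
        print(msg)
        log_q.task_done()

def worker():
    while True:
        item = q.get()
        log_q.put(f'Working on {item}')
        log_q.put(f'Finished {item}')
        q.task_done()

threading.Thread(target=printer, daemon=True).start()
threading.Thread(target=worker, daemon=True).start()

for item in range(30):
    q.put(item)

q.join()      # wait until every work item is processed...
log_q.join()  # ...and then until every message has been printed
print('All work completed')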

Related

python threads and Queue messages between them

I have a program that contains two parts; they run at the same time using threads and communicate using a Queue.
import kafka
import time
from queue import Queue
from threading import Thread

# A thread that consumes data from a Kafka consumer; it will stop after 40s if there are no new messages.
def consumer(in_q):
    consumer = kafka.KafkaConsumer('mytopic', bootstrap_servers=['myserver'], enable_auto_commit=True,
                                   group_id='30', auto_offset_reset='earliest', consumer_timeout_ms=40000)
    for message in consumer:
        messageStr = message.value.decode("utf-8")
        in_q.put(messageStr)
        print(messageStr)
        print(message.offset)
    print("consumer is closed")

# A thread that modifies data
def modifier(out_q):
    while True:
        if out_q.empty() == False:
            data = out_q.get()
            print('data after some modification', data)

# Create the shared queue and launch both threads
message = Queue()
consumeMessgae = Thread(target=consumer, args=(message,))
modifyMessage = Thread(target=modifier, args=(message,))
consumeMessgae.start()
modifyMessage.start()
I want to update my modifier function to be able to:
change the while loop, because it is CPU-consuming, and instead keep listening on the Queue
close the modifier function when the consumer thread is closed (the consumer function automatically closes after 40s if there are no new messages)
How can I achieve this?
You can achieve that using a threading.Event to notify the modifier thread that it should abort execution. The final code would be as follows:
import kafka
import time
from queue import Queue, Empty
from threading import Thread, Event

# A thread that consumes data from a Kafka consumer; it will stop after 40s if there are no new messages.
def consumer(in_q, event):
    consumer = kafka.KafkaConsumer('mytopic', bootstrap_servers=['myserver'], enable_auto_commit=True,
                                   group_id='30', auto_offset_reset='earliest', consumer_timeout_ms=40000)
    for message in consumer:
        messageStr = message.value.decode("utf-8")
        in_q.put(messageStr)
        print(messageStr)
        print(message.offset)
    # The kafka.KafkaConsumer iterator stops after waiting 40000 ms without any new message
    # (its StopIteration is consumed by the for loop itself, so the loop simply ends).
    # Notify the modifier thread that it should abort execution:
    event.set()
    print("consumer is closed")

# A thread that modifies data
def modifier(out_q, event):
    while True:
        try:
            # Block on the queue for at most 1 second waiting for a pending item,
            # so that the event can be checked at least once per second:
            data = out_q.get(timeout=1)
        except Empty:
            print("Queue 'out_q' is empty")
        else:
            # Executed only if there was no exception.
            # Add your additional logic here, but take care to handle any exception
            # that your data-processing logic could raise:
            print('data after some modification', data)
        # Wait for up to 1 second before the next iteration; this short-circuits
        # as soon as the event is signaled by the consumer thread:
        if event.wait(1):
            # The event was signaled by the consumer thread, so abort execution:
            break
    print("modifier is closed")

# Create the shared queue and launch both threads
message = Queue()
# Create an Event; the consumer thread uses it to notify the modifier thread that it must abort execution:
alert_event = Event()
consumeMessgae = Thread(target=consumer, args=(message, alert_event))
modifyMessage = Thread(target=modifier, args=(message, alert_event))
consumeMessgae.start()
modifyMessage.start()

pop unique items from queue and use in multiple threads

I don't understand how to have each thread pop a unique item off the queue and run the function concurrently. The way this is currently written, it runs linearly: two threads run, popping the same item from the queue.
How could I have the loop pass unique items from the queue to each thread?
import sys
import subprocess
import threading
import Queue

def pinger(host):
    subprocess.Popen(['ping', '-c 1', '-W 1', host])

ping_hosts = ['google.com', 'yahoo.com', 'disney.com', 'myspace.com', 'www.pingler.com', 'www.pingmylink.com',
              'www.pingoat.net', 'www.blogsearch.google.com', 'pingmyblog.com', 'www.twingly.com', 'www.weblogs.com', 'auto-ping.com']
ping_hosts = ['google.com', 'yahoo.com', 'disney.com']

def Main():
    q = Queue.Queue()
    for item in ping_hosts:
        q.put(item)
    while q.qsize() != 0:
        host = q.get()
        t1 = threading.Thread(target=pinger, args=(host,))
        t2 = threading.Thread(target=pinger, args=(host,))
        t1.start()
        t2.start()
        t1.join()
        t2.join()
    print "Main Completed"

if __name__ == '__main__':
    Main()
Your worker threads ping the same host because you give them the same host value. But the main problem with your code is that you consume the queue in the main (single) thread: you put tasks in the queue (main thread), then get them (main thread), start two worker threads that do the ping (thread 1, thread 2), then join them (back in the main thread again), and repeat while there are tasks in the queue.
Also, Popen actually starts a new process to do the ping, so you might not even need the threads if it's only the ping you're interested in; just:
for host in ping_hosts:
    subprocess.Popen(['ping', '-c 1', '-W 1', host])
But assuming your question is about threading, not doing the ping...
When using a queue, usually, you put tasks (data) in it in one thread, called the "producer", and get and process tasks (data) in another thread, called the "consumer", running simultaneously. Of course there may be several producers and several consumers.
So, in your case what you want is: put the hosts in the queue (main thread), start worker threads, that will get the data from the queue (thread 1, thread 2) and process it (same thread 1, thread 2), running simultaneously.
Try like this:
import subprocess
import threading
import Queue

def ping(host):
    """Pings a host."""
    print("Pinging '%s'" % host)
    subprocess.Popen(['ping', '-c 1', '-W 1', host])

def pinger_thread(thread_name, task_queue):
    """Gets the data from the queue and processes it."""
    while True:
        try:
            # get_nowait() avoids the race between checking qsize() and a blocking get():
            # if the other thread empties the queue in between, we exit instead of blocking forever
            host = task_queue.get_nowait()
        except Queue.Empty:
            return
        print("%s: pinging '%s'" % (thread_name, host))
        ping(host)

def main():
    ping_hosts = ['google.com', 'yahoo.com', 'disney.com']

    # Fill the queue
    q = Queue.Queue()
    for item in ping_hosts:
        q.put(item)

    # Start the threads that will consume data from the queue
    t1 = threading.Thread(target=pinger_thread, args=("Thread 1", q))
    t2 = threading.Thread(target=pinger_thread, args=("Thread 2", q))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print("Main Completed")

if __name__ == '__main__':
    main()
The output:
python2 test.py
Thread 1: pinging 'google.com'
Pinging 'google.com'
Thread 2: pinging 'yahoo.com'
Pinging 'yahoo.com'
Thread 1: pinging 'disney.com'
Pinging 'disney.com'
Main Completed
PING google.com (74.125.232.231) 56(84) bytes of data.
PING yahoo.com (206.190.36.45) 56(84) bytes of data.
PING disney.com (199.181.131.249) 56(84) bytes of data.
...
--- google.com ping statistics ---
...
--- disney.com ping statistics ---
...
--- yahoo.com ping statistics ---
...
I removed some of the ping command output as irrelevant. But what's important is that you can see the hosts being taken from the queue and processed by the two threads in queue order.

Python threading - with limited threads, iterate over n number of Items

Here is an example taken from the IBM Python threading tutorial I was going through (http://www.ibm.com/developerworks/aix/library/au-threadingpython/):
#!/usr/bin/env python
import Queue
import threading
import urllib2
import time

hosts = ["http://yahoo.com", "http://google.com", "http://amazon.com",
         "http://ibm.com", "http://apple.com"]

queue = Queue.Queue()

class ThreadUrl(threading.Thread):
    """Threaded Url Grab"""
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # grabs host from queue
            host = self.queue.get()
            # grabs urls of hosts and prints first 1024 bytes of page
            url = urllib2.urlopen(host)
            print url.read(1024)
            # signals to queue job is done
            self.queue.task_done()

start = time.time()

def main():
    # spawn a pool of threads, and pass them queue instance
    for i in range(5):
        t = ThreadUrl(queue)
        t.setDaemon(True)
        t.start()
    # populate queue with data
    for host in hosts:
        queue.put(host)
    # wait on the queue until everything has been processed
    queue.join()

main()
print "Elapsed Time: %s" % (time.time() - start)
The example here works perfectly. I have been looking for a slightly different modification. Here there is a known number of URLs, for example 5; range(5) is used in the for loop to iterate over them and process each one.
What if I want to use only 5 threads to process 1000 URLs? So when a thread completes, the completed URL should be removed from the queue and a new URL added, but all of this should happen using the same thread.
I can check:
if self.queue.task_done():
    return host
This is the only way I can check whether the URL was processed successfully or not. Once it has returned, I should remove the URL from the queue and add a new URL. How can I implement this using a queue?
Thanks,
That code will already do what you describe. If you put 1000 items into the queue instead of 5, they will be processed by those same 5 threads - each one will take an item from the queue, process it, then take a new one as long as there are items left in the queue.
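In other words, only the producer loop changes. A minimal Python 3 sketch of the same pattern with 5 workers and 1000 URLs (the URLs and the process() helper here are made up for illustration):

import queue
import threading

q = queue.Queue()

def process(url):
    pass  # placeholder for the real urlopen/read work above

def worker():
    # each of the 5 threads loops: take a URL, process it, mark it done,
    # then immediately take the next one while items remain
    while True:
        url = q.get()
        process(url)
        q.task_done()

for _ in range(5):             # 5 threads, no matter how many URLs
    threading.Thread(target=worker, daemon=True).start()

for i in range(1000):          # 1000 made-up URLs for illustration
    q.put('http://example.com/page/%d' % i)

q.join()                       # returns once task_done() was called 1000 times
print('all 1000 URLs processed by 5 threads')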

Threading in python using queue

I wanted to use threading in Python to download a lot of webpages, and came across the following code, which uses queues, on one of the websites.
It runs an infinite while loop. Does each thread run continuously, without ending, until all of the work is complete? Am I missing something?
#!/usr/bin/env python
import Queue
import threading
import urllib2
import time

hosts = ["http://yahoo.com", "http://google.com", "http://amazon.com",
         "http://ibm.com", "http://apple.com"]

queue = Queue.Queue()

class ThreadUrl(threading.Thread):
    """Threaded Url Grab"""
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # grabs host from queue
            host = self.queue.get()
            # grabs urls of hosts and prints first 1024 bytes of page
            url = urllib2.urlopen(host)
            print url.read(1024)
            # signals to queue job is done
            self.queue.task_done()

start = time.time()

def main():
    # spawn a pool of threads, and pass them queue instance
    for i in range(5):
        t = ThreadUrl(queue)
        t.setDaemon(True)
        t.start()
    # populate queue with data
    for host in hosts:
        queue.put(host)
    # wait on the queue until everything has been processed
    queue.join()

main()
print "Elapsed Time: %s" % (time.time() - start)
Setting the threads to be daemon threads causes them to exit when the main is done. But yes, you are correct in that your threads will run continuously for as long as there is something in the queue; otherwise they will block.
The Queue documentation explains this detail.
The Python threading documentation explains the daemon part as well:
The entire Python program exits when no alive non-daemon threads are left.
So, when the queue is emptied and queue.join() resumes, the interpreter exits and the threads then die.
EDIT: Correction on default behavior for Queue
Your script works fine for me, so I assume you are asking what is going on so you can understand it better. Yes, your subclass puts each thread in an infinite loop, waiting on something to be put in the queue. When something is found, it grabs it and does its thing. Then, the critical part, it notifies the queue that it's done with queue.task_done, and resumes waiting for another item in the queue.
While all this is going on with the worker threads, the main thread is waiting (join) until all the tasks in the queue are done, which is when the threads have called queue.task_done the same number of times as there were messages in the queue. At that point the main thread finishes and exits. Since these are daemon threads, they close down too.
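A minimal Python 3 sketch of that counting behaviour (the queue keeps an internal unfinished-task counter: put() increments it, task_done() decrements it, and join() returns once it hits zero):

import queue
import threading

q = queue.Queue()
for i in range(3):
    q.put(i)               # each put() bumps the unfinished-task counter (now 3)

def worker():
    while True:
        item = q.get()
        q.task_done()      # each task_done() decrements the counter

threading.Thread(target=worker, daemon=True).start()
q.join()                   # unblocks only once task_done() was called 3 times
print('join() returned: all tasks accounted for')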
This is cool stuff, threads and queues. It's one of the really good parts of Python. You will hear all kinds of stuff about how threading in Python is screwed up with the GIL and such. But if you know where to use them (like in this case with network I/O), they will really speed things up for you. The general rule is if you are I/O bound, try and test threads; if you are cpu bound, threads are probably not a good idea, maybe try processes instead.
good luck,
Mike
I don't think Queue is necessary in this case. Using only Thread:
import threading, urllib2, time

hosts = ["http://yahoo.com", "http://google.com", "http://amazon.com",
         "http://ibm.com", "http://apple.com"]

class ThreadUrl(threading.Thread):
    """Threaded Url Grab"""
    def __init__(self, host):
        threading.Thread.__init__(self)
        self.host = host

    def run(self):
        # grabs urls of hosts and prints first 1024 bytes of page
        url = urllib2.urlopen(self.host)
        print url.read(1024)

start = time.time()

def main():
    # spawn a pool of threads
    threads = []
    for i in range(len(hosts)):
        t = ThreadUrl(hosts[i])
        t.start()
        threads.append(t)
    # join them, so the elapsed time below actually covers the downloads
    for t in threads:
        t.join()

main()
print "Elapsed Time: %s" % (time.time() - start)

How does this while loop exit?

So, how does this code exit the while statement when the thread is started? (Please do not consider indentation)
class ThreadUrl(threading.Thread):
    """Threaded Url Grab"""
    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue

    def run(self):
        while True:
            # grabs host from queue
            host = self.queue.get()
            # grabs urls of hosts and then grabs chunk of webpage
            url = urllib2.urlopen(host)
            chunk = url.read()
            # place chunk into out queue
            self.out_queue.put(chunk)
            # signals to queue job is done
            self.queue.task_done()
EDIT:
The code that starts the thread:
def main():
    # spawn a pool of threads, and pass them queue instance
    for i in range(5):
        t = ThreadUrl(queue, out_queue)
        t.setDaemon(True)
        t.start()
    queue.join()
It doesn't have to exit the while statement for the code to terminate. All that is happening here is that the thread consumes everything in the queue, at which point queue.join() returns.
As soon as the call to queue.join() in the main code returns, the main code exits, and because you marked the thread as a daemon, the entire application exits and your background thread is killed.
The quick answer: it doesn't, unless an exception is raised anywhere, which depends on the functions/methods called in run.
Of course, there is the possibility that your thread is suspended/stopped from another thread, which effectively terminates your while loop.
Your code will only break if an exception occurs during the execution of the body of the while True loop... not the best way to exit from a thread, but it could work.
If you want to exit properly from your thread, try replacing the while True with something like while self.continue_loop:
class ThreadUrl(threading.Thread):
    """Threaded Url Grab"""
    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue
        self.continue_loop = True

    def run(self):
        while self.continue_loop:
            try:
                # use a timeout so the loop wakes up to re-check continue_loop
                # instead of blocking forever on an empty queue
                host = self.queue.get(timeout=1)
            except Queue.Empty:
                continue
            # grabs urls of hosts and then grabs chunk of webpage
            url = urllib2.urlopen(host)
            chunk = url.read()
            # place chunk into out queue
            self.out_queue.put(chunk)
            # signals to queue job is done
            self.queue.task_done()
And to start/stop the threads:

def main():
    # spawn a pool of threads, and pass them queue instance
    threads = []
    for i in range(5):
        t = ThreadUrl(queue, out_queue)
        t.setDaemon(True)
        t.start()
        threads.append(t)
    # let the workers drain the queue first...
    queue.join()
    # ...then ask them to stop and wait for them to exit
    for t in threads:
        t.continue_loop = False
    for t in threads:
        t.join()
You could pass block=False or timeout=5 to your self.queue.get() call. This raises a Queue.Empty exception if no items remain in the queue. Otherwise, AFAIK, self.queue.get() blocks the whole loop, so any additional break attempts further on would never be reached.
def run(self):
    while True:
        # grabs host from queue
        try:
            host = self.queue.get(block=False)
        except Queue.Empty, ex:
            break
        # grabs urls of hosts and then grabs chunk of webpage
        url = urllib2.urlopen(host)
        chunk = url.read()
        # place chunk into out queue
        self.out_queue.put(chunk)
        # signals to queue job is done
        self.queue.task_done()
Another approach would be to put a "Stop" flag in the queue after all your other items have been added. Then, in the thread, check for this stop flag and break if it is found, as in the fragment below (a complete sketch of the pattern follows the fragment).
E.g.
host = self.queue.get()
if host == 'STOP':
    # Still need to signal that the task is done, else your queue.join() will wait forever
    self.queue.task_done()
    break
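For completeness, a minimal Python 3 sketch of this sentinel pattern (the STOP value, worker names, and item counts here are mine, not from the answer). One sentinel is enqueued per worker, after the real items, so every thread gets its own stop signal:

import queue
import threading

STOP = 'STOP'      # sentinel; assumes real work items never equal it
NUM_WORKERS = 2

q = queue.Queue()

def worker(name):
    while True:
        item = q.get()
        if item == STOP:
            q.task_done()   # still count the sentinel, or q.join() waits forever
            break
        print('%s processed %s' % (name, item))
        q.task_done()

threads = [threading.Thread(target=worker, args=('worker-%d' % i,))
           for i in range(NUM_WORKERS)]
for t in threads:
    t.start()

for item in range(10):
    q.put(item)
for _ in range(NUM_WORKERS):   # one STOP per worker so every thread exits
    q.put(STOP)

q.join()
for t in threads:
    t.join()
print('all workers stopped')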
