Why is my queue showing incorrect data? - python

I am using a queue to pass URLs to download; however, the data appears corrupted when it is received in the thread:
class ThreadedFetch(threading.Thread):
    """docstring for ThreadedFetch"""

    def __init__(self, queue, out_queue):
        super(ThreadedFetch, self).__init__()
        self.queue = queue
        self.outQueue = out_queue

    def run(self):
        items = self.queue.get()
        print items
def main():
    for i in xrange(len(args.urls)):
        t = ThreadedFetch(queue, out_queue)
        t.daemon = True
        t.start()

    # populate queue with data
    for url, saveTo in urls_saveTo.iteritems():
        queue.put([url, saveTo, split])

    # wait on the queue until everything has been processed
    queue.join()
The output produced by run() when I execute main() is:
['http://www.nasa.gov/images/content/607800main_kepler1200_1600-1200.jpg', ['http://broadcast.lds.org/churchmusic/MP3/1/2/nowords/271.mp3', None, 3None, 3]
]
while the expected output is:
['http://www.nasa.gov/images/content/607800main_kepler1200_1600-1200.jpg', None, 3]
['http://broadcast.lds.org/churchmusic/MP3/1/2/nowords/271.mp3', None, 3]

All of the threads print their data at once, so the results are interleaved. If you want threads to display data in production code, you need some way for them to cooperate when writing. One option is a global lock that all screen-writing threads share; another is the logging module.
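For example, a minimal sketch of the logging option, keeping run() in its current shape (the format string here is just an illustration): logging handlers emit each record under an internal lock, so records from different threads are not interleaved the way bare print output can be.

import logging

logging.basicConfig(format="%(threadName)s: %(message)s", level=logging.INFO)

def run(self):
    items = self.queue.get()
    # logging.info is safe to call from many threads at once
    logging.info("%s", items)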

Populate your queue before you start your threads. Add a lock for I/O (for the reason @tdelaney gives -- the threads are interleaving writes to stdout, so the results only appear broken). And modify your run method to this:
lock = threading.Lock()

def run(self):
    while True:
        try:
            items = self.queue.get_nowait()
            with lock:
                print items
        except Queue.Empty:
            break
        except Exception as err:
            pass
        self.queue.task_done()
You might also find that this is easier to do with concurrent.futures. The documentation has a solid example of a method that returns a value being called in a thread pool.
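As a rough sketch of that approach (assuming the futures backport on Python 2, installed with pip install futures; download_one is a hypothetical helper standing in for the real fetch logic, and urls_saveTo is the dict from the question):

import concurrent.futures
import urllib2

def download_one(url, save_to, split):
    # hypothetical stand-in for the real download/split logic
    data = urllib2.urlopen(url).read()
    return url, len(data)

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(download_one, url, save_to, 3)  # 3 = the 'split' value
               for url, save_to in urls_saveTo.iteritems()]
    for future in concurrent.futures.as_completed(futures):
        print future.result()

Each submit() returns a Future, and as_completed() yields them as they finish, so results come back without any hand-rolled queue management.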

Related

Ensuring a python queue that can be populated by multiple threads will always be cleared without polling

I have the code below, which shows how a queue can always be cleared even with multiple threads adding to it. It uses recursion, but a while loop could work as well. Is this bad practice, or is there a scenario where the queue might hold an object that won't get pulled until something else is added to the queue?
The primary purpose of this is to have a queue that ensures order of execution without the need to continually poll or to block with q.get().
import queue
import threading

lock = threading.RLock()
q = queue.Queue()

def execute():
    with lock:
        if not q.empty():
            text = q.get()
            print(text)
            execute()

def add_to_queue(text):
    q.put(text)
    execute()

# Assume multiple threads can call add_to_queue
add_to_queue("Hello")
Here is one solution that uses a timeout on the .get call. One thread pushes to the queue and one reads from it; you could have multiple readers and writers.
import queue
import threading

q = queue.Queue()

def read():
    try:
        while True:
            text = q.get(timeout=1)
            print(text)
    except queue.Empty:
        print("exiting")

def write():
    q.put("Hello")
    q.put("There")
    q.put("My")
    q.put("Friend")

writer = threading.Thread(target=write)
reader = threading.Thread(target=read)
writer.start()
reader.start()
reader.join()
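If the fixed one-second timeout feels fragile, a common alternative (a sketch assuming a single reader, which the question doesn't mandate) is to have the writer push a sentinel value that tells the reader to stop:

import queue
import threading

q = queue.Queue()
SENTINEL = None  # assumes None never appears as real data

def read():
    while True:
        text = q.get()  # blocks without burning CPU
        if text is SENTINEL:
            print("exiting")
            break
        print(text)

def write():
    for word in ("Hello", "There", "My", "Friend"):
        q.put(word)
    q.put(SENTINEL)  # signal the reader that no more data is coming

writer = threading.Thread(target=write)
reader = threading.Thread(target=read)
writer.start()
reader.start()
reader.join()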

multiprocessing.Process doesn't terminate after putting requests response.content to queue

I'm trying to run multiple API requests in parallel with multiprocessing.Process and requests. I put the URLs to parse into a JoinableQueue instance and put the content back into a Queue instance. I've noticed that putting response.content into the Queue somehow prevents the process from terminating.
Here's a simplified example with just 1 process (Python 3.5):
import multiprocessing as mp
import queue
import requests
import time

class ChildProcess(mp.Process):
    def __init__(self, qin, qout):
        super().__init__()
        self.qin = qin
        self.qout = qout
        self.daemon = True

    def run(self):
        while True:
            try:
                url = self.qin.get(block=False)
                r = requests.get(url, verify=False)
                self.qout.put(r.content)
                self.qin.task_done()
            except queue.Empty:
                break
            except requests.exceptions.RequestException as e:
                print(self.name, e)
                self.qin.task_done()
        print("Infinite loop terminates")

if __name__ == '__main__':
    qin = mp.JoinableQueue()
    qout = mp.Queue()
    for _ in range(5):
        qin.put('http://en.wikipedia.org')
    w = ChildProcess(qin, qout)
    w.start()
    qin.join()
    time.sleep(1)
    print(w.name, w.is_alive())
After running the code I get:
Infinite loop terminates
ChildProcess-1 True
Please help me understand why the process doesn't terminate after the run function exits.
Update: added a print statement to show that the loop terminates.
As noted in the Pipes and Queues documentation
if a child process has put items on a queue (and it has not used
JoinableQueue.cancel_join_thread), then that process will not
terminate until all buffered items have been flushed to the pipe.
This means that if you try joining that process you may get a deadlock
unless you are sure that all items which have been put on the queue
have been consumed.
...
Note that a queue created using a manager does not have this issue.
If you switch over to a manager queue, then the process terminates successfully:
import multiprocessing as mp
import queue
import requests
import time

class ChildProcess(mp.Process):
    def __init__(self, qin, qout):
        super().__init__()
        self.qin = qin
        self.qout = qout
        self.daemon = True

    def run(self):
        while True:
            try:
                url = self.qin.get(block=False)
                r = requests.get(url, verify=False)
                self.qout.put(r.content)
                self.qin.task_done()
            except queue.Empty:
                break
            except requests.exceptions.RequestException as e:
                print(self.name, e)
                self.qin.task_done()
        print("Infinite loop terminates")

if __name__ == '__main__':
    manager = mp.Manager()
    qin = mp.JoinableQueue()
    qout = manager.Queue()
    for _ in range(5):
        qin.put('http://en.wikipedia.org')
    w = ChildProcess(qin, qout)
    w.start()
    qin.join()
    time.sleep(1)
    print(w.name, w.is_alive())
It's a bit hard to figure this out from the Queue documentation - I struggled with the same problem.
The key concept here is that before a producer process terminates, it joins any queues it has put data into; that join then blocks until the queue's background feeder thread terminates, which only happens once the queue is empty. So basically, before your ChildProcess can exit, someone has to consume all the stuff it put into the queue!
There is some documentation of the Queue.cancel_join_thread function, which is supposed to circumvent this problem, but I couldn't get it to have any effect - maybe I'm not using it correctly.
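For reference, the documented usage is a single call in the producer before run() returns; given the caveat above, treat this as an untested sketch rather than a confirmed fix:

def run(self):
    while True:
        try:
            url = self.qin.get(block=False)
            self.qout.put(requests.get(url, verify=False).content)
            self.qin.task_done()
        except queue.Empty:
            break
    # documented escape hatch: allow this process to exit without
    # flushing qout to the pipe, at the risk of losing buffered items
    self.qout.cancel_join_thread()
    print("Infinite loop terminates")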
Here's an example modification you can make that should fix the issue:
if __name__ == '__main__':
    qin = mp.JoinableQueue()
    qout = mp.Queue()
    for _ in range(5):
        qin.put('http://en.wikipedia.org')
    w = ChildProcess(qin, qout)
    w.start()
    qin.join()

    while True:
        try:
            # Throw away remaining stuff in qout (or process it, or whatever;
            # just get it out of the queue so the queue's background thread
            # can terminate, so your ChildProcess can terminate).
            qout.get(True, 0.1)
        except queue.Empty:
            break

    w.join()  # Wait for your ChildProcess to finish up.
    # time.sleep(1)  # Not necessary since we've joined the ChildProcess
    print(w.name, w.is_alive())
Add a call to w.terminate() above the print statement.
As for why the process doesn't appear to terminate by itself: even though run() returns (the loop breaks on queue.Empty, as your update shows), the process is kept alive by the queue's feeder thread, as described above. Calling terminate signals the process to kill itself regardless.

Why am I unable to join this thread in python?

I am writing a multithreading class. The class has a parallel_process() function that is overridden with the parallel task. The data to be processed is put in the queue. The worker() function in each thread keeps calling parallel_process() until the queue is empty. Results are put in the results Queue object. The class definition is:
import threading
try:
    from Queue import Queue
except ImportError:
    from queue import Queue

class Parallel:
    def __init__(self, pkgs, common=None, nthreads=1):
        self.nthreads = nthreads
        self.threads = []
        self.queue = Queue()
        self.results = Queue()
        self.common = common
        for pkg in pkgs:
            self.queue.put(pkg)

    def parallel_process(self, pkg, common):
        pass

    def worker(self):
        while not self.queue.empty():
            pkg = self.queue.get()
            self.results.put(self.parallel_process(pkg, self.common))
            self.queue.task_done()
        return

    def start(self):
        for i in range(self.nthreads):
            t = threading.Thread(target=self.worker)
            t.daemon = False
            t.start()
            self.threads.append(t)

    def wait_for_threads(self):
        print('Waiting on queue to empty...')
        self.queue.join()
        print('Queue processed. Joining threads...')
        for t in self.threads:
            t.join()
            print('...Thread joined.')

    def get_results(self):
        results = []
        print('Obtaining results...')
        while not self.results.empty():
            results.append(self.results.get())
        return results
I use it to create a parallel task:
class myParallel(Parallel):  # return square of numbers in a list
    def parallel_process(self, pkg, common):
        return pkg**2

p = myParallel(range(50), nthreads=4)
p.start()
p.wait_for_threads()
r = p.get_results()
print('FINISHED')
However, not all threads join every time the code is run. Sometimes only 2 join, sometimes no thread joins. I don't think I am blocking the threads from finishing. What reason could there be for join() not to work here?
This statement may lead to errors:
while not self.queue.empty():
    pkg = self.queue.get()
With multiple threads pulling items from the queue, there's no guarantee that self.queue.get() will return a valid item, even if you check whether the queue is empty beforehand. Here is a possible scenario:

1. Thread 1 checks the queue. It is not empty, so control proceeds into the while loop.
2. Control passes to Thread 2, which also checks the queue, finds it is not empty, and enters the while loop. Thread 2 gets the item from the queue. The queue is now empty.
3. Control passes back to Thread 1. It calls self.queue.get(), but the queue is now empty, so the blocking get() hangs forever and the thread never finishes (a non-blocking get would raise an Empty exception instead).
You should just use a try/except with a non-blocking get to pull an item from the queue:
from queue import Empty  # 'from Queue import Empty' on Python 2

try:
    pkg = self.queue.get_nowait()
except Empty:
    pass
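Put together, a sketch of a corrected worker() might look like this (the Empty import is assumed to match whichever Queue module was imported earlier):

def worker(self):
    while True:
        try:
            pkg = self.queue.get_nowait()
        except Empty:
            # another thread drained the queue between our check and the get
            break
        self.results.put(self.parallel_process(pkg, self.common))
        self.queue.task_done()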
@Brendan Abel identified the cause. I'd like to suggest a different solution: queue.join() is usually a Bad Idea too. Instead, create a unique value to use as a sentinel:
class Parallel:
    _sentinel = object()
At the end of __init__(), add one sentinel to the queue for each thread:
for i in range(nthreads):
    self.queue.put(self._sentinel)
Change the start of worker() like so:
while True:
    pkg = self.queue.get()
    if pkg is self._sentinel:
        break
By the construction of the queue, it won't be empty until each thread has seen its sentinel value, so there's no need to mess with the unpredictable queue.empty().
Also remove the queue.join() and queue.task_done() cruft.
This will give you reliable code that's easy to modify for fancier scenarios. For example, if you want to add more work items while the threads are running, fine - just write another method to say "I'm done adding work items now", and move the loop adding sentinels into that.
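Assembled into the original class, those changes might look roughly like this (a sketch built from the fragments above, not the answerer's exact code):

import threading
try:
    from Queue import Queue  # Python 2
except ImportError:
    from queue import Queue  # Python 3

class Parallel:
    _sentinel = object()  # unique value no caller can ever put in the queue

    def __init__(self, pkgs, common=None, nthreads=1):
        self.nthreads = nthreads
        self.threads = []
        self.queue = Queue()
        self.results = Queue()
        self.common = common
        for pkg in pkgs:
            self.queue.put(pkg)
        # one sentinel per thread: each worker consumes exactly one
        for i in range(nthreads):
            self.queue.put(self._sentinel)

    def parallel_process(self, pkg, common):
        pass

    def worker(self):
        while True:
            pkg = self.queue.get()
            if pkg is self._sentinel:
                break
            self.results.put(self.parallel_process(pkg, self.common))

    def start(self):
        for i in range(self.nthreads):
            t = threading.Thread(target=self.worker)
            t.start()
            self.threads.append(t)

    def wait_for_threads(self):
        for t in self.threads:
            t.join()

    def get_results(self):
        # safe to drain with .empty() here: all workers have already joined
        results = []
        while not self.results.empty():
            results.append(self.results.get())
        return results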

Why does this http thread pool die (join), but keep functioning?

The following code takes an initial string ('a', 'b', or 'c'), and the two thread types pass it back and forth, appending 'W' and 'H' to it repeatedly, marking that the Worker thread or the Http thread last handled the string.
The code is a simple test to try and eventually accomplish the following: the http thread pool will pull web pages, and the worker thread will add info to a db and then give the http thread more URLs to pull. They just go back and forth. I want both thread pools and queues to stay alive unless BOTH are empty simultaneously. (There are cases where one pool will temporarily run out of things to do, and I don't want it to join, because its companion thread pool will probably be adding more work to its queue soon.)
In the following code, the http thread pool runs out of things to do almost immediately, and then joins. But you'll notice that the threads keep functioning.
Why does it do this?
And how do I make it so that neither queue can be joined until BOTH are simultaneously empty?
from queue import Queue
import threading
import time

class http(threading.Thread):
    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue

    def run(self):
        while True:
            row = self.queue.get()
            print(row)
            self.out_queue.put(row + 'H')
            self.queue.task_done()

class worker(threading.Thread):
    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue

    def run(self):
        while True:
            time.sleep(1)
            row = self.out_queue.get()
            self.queue.put(row + 'W')
            self.out_queue.task_done()

URL_THREAD_COUNT = 3
rows = [chr(x) for x in range(97, 100)]

def main():
    queue = Queue()
    out_queue = Queue()

    # spawn a pool of threads, and pass them queue instances
    for i in range(URL_THREAD_COUNT):
        t = http(queue, out_queue)
        t.daemon = True
        t.start()

    # populate queue with data
    for row in rows:
        queue.put(row)

    # spawn worker thread
    dt = worker(queue, out_queue)
    dt.daemon = True
    dt.start()

    # time.sleep(5)
    # wait for queues
    queue.join()
    print('EXIT http')
    out_queue.join()
    print('EXIT worker')

start = time.time()
main()
print("Elapsed Time: %s" % (time.time() - start))
"joining" a queue waits until the queue is empty. If worker finishes processing some out_queue messages before the other threads can add more messages, the outer out_queue.join thinks you are done. You may want to add a control message that tells the threads when their work is done so that they can exit, and call thread.join() for them all instead. That will mean keeping a list of threads created in the for loop instead of just abandoning them.

Checking on a thread / remove from list

I have a class which extends Thread. The code looks a little like this:
class MyThread(Thread):
    def run(self):
        pass  # Do stuff

my_threads = []
while has_jobs() and len(my_threads) < 5:
    new_thread = MyThread(next_job_details())
    new_thread.start()
    my_threads.append(new_thread)

for my_thread in my_threads:
    my_thread.join()

# Do stuff
So here in my pseudocode I check to see if there are any jobs (from a db etc.), and if there are some jobs and fewer than 5 threads running, I create new threads.
From here I then check over my threads, and this is where I get stuck. I can use .join(), but my understanding is that join() waits until the thread is finished; so if the first thread it checks is still in progress, it waits until that one is done, even if the other threads are already finished.
So is there a way to check if a thread is done, and remove it from the list if so?
e.g.
for my_thread in my_threads:
    if my_thread.done():
        # process results
        del my_threads[my_thread]  # ?? will that work...
As @TokenMacGuy says, you should use thread.is_alive() to check whether a thread is still running. To remove threads that are no longer running from your list, you can use a list comprehension:
for t in my_threads:
    if not t.is_alive():
        # get results from thread
        t.handled = True  # assumes each thread starts with handled = False

my_threads = [t for t in my_threads if not t.handled]
This avoids the problem of removing items from a list while iterating over it.
mythreads = threading.enumerate()

threading.enumerate() returns a list of all Thread objects that are still alive.
https://docs.python.org/3.6/library/threading.html
You need to call thread.is_alive() (isAlive() in older Python 2 code) to find out whether the thread is still running.
The answer has been covered, but for simplicity...
# To filter out finished threads
threads = [t for t in threads if t.is_alive()]

# Same thing but for QThreads (if you are using PyQt)
threads = [t for t in threads if t.isRunning()]
A better way is to use the Queue class:
http://docs.python.org/library/queue.html
Look at the good example code at the bottom of the documentation page:
def worker():
    while True:
        item = q.get()
        do_work(item)
        q.task_done()

q = Queue()
for i in range(num_worker_threads):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

for item in source():
    q.put(item)

q.join()  # block until all tasks are done
An easy, thread-safe way to check whether a thread has finished:
Install pyrvsignal:
pip install pyrvsignal
Example:
import time
from threading import Thread
from pyrvsignal import Signal

class MyThread(Thread):
    started = Signal()
    finished = Signal()

    def __init__(self, target, args):
        self.target = target
        self.args = args
        Thread.__init__(self)

    def run(self) -> None:
        self.started.emit()
        self.target(*self.args)
        self.finished.emit()

def do_my_work(details):
    print(f"Doing work: {details}")
    time.sleep(10)

def started_work():
    print("Started work")

def finished_work():
    print("Work finished")

thread = MyThread(target=do_my_work, args=("testing",))
thread.started.connect(started_work)
thread.finished.connect(finished_work)
thread.start()
