python threadpool problem (wait for something)

I wrote a simple web site crawler with a thread pool. The problem is: when the crawler has covered the whole site it should finish, but in reality it waits for something at the end and the script never finishes. Why does this happen?
from Queue import Queue
from threading import Thread
from urllib import urlopen
from BeautifulSoup import BeautifulSoup, SoupStrainer

visited = set()
queue = Queue()

class Worker(Thread):
    """Thread executing tasks from a given tasks queue"""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            print "startcall in thread", self
            print args
            try:
                func(*args, **kargs)
            except Exception, e:
                print e
            print "stopcall in thread", self
            self.tasks.task_done()

class ThreadPool:
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

def process(pool, host, url):
    try:
        print "get url", url
        #content = urlopen(url).read().decode(charset)
        content = urlopen(url).read()
    except UnicodeDecodeError:
        return
    for link in BeautifulSoup(content, parseOnlyThese=SoupStrainer('a')):
        #print "link", link
        try:
            href = link['href']
        except KeyError:
            continue
        if not href.startswith('http://'):
            href = 'http://%s%s' % (host, href)
        if not href.startswith('http://%s%s' % (host, '/')):
            continue
        if href not in visited:
            visited.add(href)
            pool.add_task(process, pool, host, href)
            print href

def start(host, charset):
    pool = ThreadPool(7)
    pool.add_task(process, pool, host, 'http://%s/' % (host))
    pool.wait_completion()

start('simplesite.com', 'utf8')

The problem I see is that you never quit the while loop in run(), so the workers block forever. You need to break out of that loop when the jobs are done.

You could try to:

1) insert

if not func: break

after self.tasks.get(...) in run().

2) append

pool.add_task(None, None, None)

at the end of process.

This is a way for process to notify the pool that it has no more tasks to process.
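Putting the two steps together, here is a minimal self-contained sketch of the sentinel idea (in Python 3 syntax; note that with several workers you need one sentinel per worker, so every thread receives the stop signal):

import queue
import threading

def worker(tasks):
    while True:
        func, args = tasks.get()
        if func is None:        # sentinel received: this worker stops
            tasks.task_done()
            break
        try:
            func(*args)
        finally:
            tasks.task_done()

tasks = queue.Queue()
threads = [threading.Thread(target=worker, args=(tasks,)) for _ in range(4)]
for t in threads:
    t.start()

for n in range(10):
    tasks.put((print, (n,)))    # some real work

for _ in threads:
    tasks.put((None, None))     # one sentinel per worker

tasks.join()                    # returns once all work and sentinels are done
for t in threads:
    t.join()                    # every worker has exited its loop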


RuntimeError: reentrant call inside <_io.BufferedWriter name='<stdout>'>

I'm writing a program which starts one thread to generate "work" and add it to a queue every N seconds. Then, I have a thread pool which processes items in the queue.
The program below works perfectly fine until I comment out/delete line #97 (time.sleep(0.5) in the main function). Once I do that, it generates a RuntimeError when attempting to gracefully stop the program (by sending a SIGINT or SIGTERM to the main process). It even works fine with an extremely small sleep like 0.1s, but has an issue with none at all.
I tried researching "reentrancy" but it went a bit over my head unfortunately.
Can anyone help me to understand this?
Code:
import random
import signal
import threading
import time
from concurrent.futures import Future, ThreadPoolExecutor
from datetime import datetime
from queue import Empty, Queue, SimpleQueue
from typing import Any

class UniqueQueue:
    """
    A thread safe queue which can only ever contain unique items.
    """
    def __init__(self) -> None:
        self._q = Queue()
        self._items = []
        self._l = threading.Lock()

    def get(self, block: bool = False, timeout: float | None = None) -> Any:
        with self._l:
            try:
                item = self._q.get(block=block, timeout=timeout)
            except Empty:
                raise
            else:
                self._items.pop(0)
                return item

    def put(self, item: Any, block: bool = False, timeout: float | None = None) -> None:
        with self._l:
            if item in self._items:
                return None
            self._items.append(item)
            self._q.put(item, block=block, timeout=timeout)

    def size(self) -> int:
        return self._q.qsize()

    def empty(self) -> bool:
        return self._q.empty()

def stop_app(sig_num, sig_frame) -> None:
    # global stop_app_event
    print("Signal received to stop the app")
    stop_app_event.set()

def work_generator(q: UniqueQueue) -> None:
    last_execution = time.time()
    is_first_execution = True
    while not stop_app_event.is_set():
        elapsed_seconds = int(time.time() - last_execution)
        if elapsed_seconds <= 10 and not is_first_execution:
            time.sleep(0.5)
            continue
        last_execution = time.time()
        is_first_execution = False
        print("Generating work...")
        for _ in range(100):
            q.put({"n": random.randint(0, 500)})

def print_work(w) -> None:
    print(f"{datetime.now()}: {w}")

def main():
    # Create a work queue
    work_queue = UniqueQueue()
    # Create a thread to generate the work and add to the queue
    t = threading.Thread(target=work_generator, args=(work_queue,))
    t.start()
    # Create a thread pool, get work from the queue, and submit to the pool for processing
    pool = ThreadPoolExecutor(max_workers=20)
    futures: list[Future] = []
    while True:
        print("Processing work...")
        if stop_app_event.is_set():
            print("stop_app_event is set:", stop_app_event.is_set())
            for future in futures:
                future.cancel()
            break
        print("Queue Size:", work_queue.size())
        try:
            while not work_queue.empty():
                work = work_queue.get()
                future = pool.submit(print_work, work)
                futures.append(future)
        except Empty:
            pass
        time.sleep(0.5)
    print("Stopping the work generator thread...")
    t.join(timeout=10)
    print("Work generator stopped")
    print("Stopping the thread pool...")
    pool.shutdown(wait=True)
    print("Thread pool stopped")

if __name__ == "__main__":
    stop_app_event = threading.Event()
    signal.signal(signalnum=signal.SIGINT, handler=stop_app)
    signal.signal(signalnum=signal.SIGTERM, handler=stop_app)
    main()
It's because you called print() in the signal handler, stop_app().
A signal handler is executed in a background thread in C, but in Python it is executed in the main thread (see the reference). In your case, while one print() call was executing, another print() was called, so the term 'reentrant' fits perfectly. And the current IO stack prohibits reentrant calls (see the implementation if you are interested).
You can remedy this by using os.write() and sys.stdout like the following.
import sys
import os

...

def stop_app(sig_num, sig_frame):
    os.write(sys.stdout.fileno(), b"Signal received to stop the app\n")
    stop_app_event.set()

How can you use threading in Python so that it changes the value of i in a loop outside the class, in a function? [duplicate]

Is there a Pool class for worker threads, similar to the multiprocessing module's Pool class?
I like for example the easy way to parallelize a map function
import multiprocessing

def long_running_func(p):
    c_func_no_gil(p)

p = multiprocessing.Pool(4)
xs = p.map(long_running_func, range(100))
However, I would like to do it without the overhead of creating new processes.
I know about the GIL. However, in my use case, the function will be an IO-bound C function for which the Python wrapper will release the GIL before the actual function call.
Do I have to write my own threading pool?
I just found out that there actually is a thread-based Pool interface in the multiprocessing module, however it is hidden somewhat and not properly documented.
It can be imported via
from multiprocessing.pool import ThreadPool
It is implemented using a dummy Process class wrapping a python thread. This thread-based Process class can be found in multiprocessing.dummy which is mentioned briefly in the docs. This dummy module supposedly provides the whole multiprocessing interface based on threads.
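A minimal usage sketch (the squaring task is only a stand-in for real, ideally IO-bound, work):

from multiprocessing.pool import ThreadPool

def task(x):
    return x * x  # stand-in for real (ideally IO-bound) work

pool = ThreadPool(processes=4)       # four worker threads
results = pool.map(task, range(10))  # blocks until all results are in
pool.close()
pool.join()
print(results)                       # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]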
In Python 3 you can use concurrent.futures.ThreadPoolExecutor, i.e.:
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=10)
a = executor.submit(my_function)
See the docs for more info and examples.
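For a self-contained variant with result retrieval (my_function here is just a trivial placeholder):

from concurrent.futures import ThreadPoolExecutor

def my_function():
    return 42  # placeholder task

executor = ThreadPoolExecutor(max_workers=10)
a = executor.submit(my_function)
print(a.result())   # blocks until the future completes, prints 42
executor.shutdown()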
Yes, and it seems to have (more or less) the same API.
import multiprocessing
import multiprocessing.pool

def worker(lnk):
    ....

def start_process():
    .....

....

if PROCESS:
    pool = multiprocessing.Pool(processes=POOL_SIZE, initializer=start_process)
else:
    pool = multiprocessing.pool.ThreadPool(processes=POOL_SIZE,
                                           initializer=start_process)

pool.map(worker, inputs)
....
For something very simple and lightweight (slightly modified from here):
from Queue import Queue
from threading import Thread

class Worker(Thread):
    """Thread executing tasks from a given tasks queue"""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception, e:
                print e
            finally:
                self.tasks.task_done()

class ThreadPool:
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

if __name__ == '__main__':
    from random import randrange
    from time import sleep

    delays = [randrange(1, 10) for i in range(100)]

    def wait_delay(d):
        print 'sleeping for (%d)sec' % d
        sleep(d)

    pool = ThreadPool(20)
    for i, d in enumerate(delays):
        pool.add_task(wait_delay, d)
    pool.wait_completion()
To support callbacks on task completion you can just add the callback to the task tuple.
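For instance, a minimal sketch of that idea in Python 3 syntax (CallbackWorker and the four-slot task tuple are illustrative, not part of the original recipe):

from queue import Queue
from threading import Thread

class CallbackWorker(Thread):
    """Worker that invokes an optional callback with the task's result."""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        while True:
            func, args, kargs, callback = self.tasks.get()
            try:
                result = func(*args, **kargs)
                if callback is not None:
                    callback(result)   # fires on task completion
            except Exception as e:
                print(e)
            finally:
                self.tasks.task_done()

tasks = Queue()
CallbackWorker(tasks)
tasks.put((pow, (2, 10), {}, lambda r: print("done:", r)))
tasks.join()   # prints "done: 1024" before returning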
To use the thread pool in Python you can use this library:

from multiprocessing.dummy import Pool as ThreadPool

and then use it like this:

pool = ThreadPool(threads)
results = pool.map(service, tasks)
pool.close()
pool.join()
return results

Here threads is the number of threads that you want and tasks is the list of tasks that will be mapped to the service.
Yes, there is a threading pool similar to the multiprocessing Pool; however, it is hidden somewhat and not properly documented. You can import it the following way:

from multiprocessing.pool import ThreadPool

Here is a simple example:
def test_multithread_stringio_read_csv(self):
    # see gh-11786
    max_row_range = 10000
    num_files = 100
    bytes_to_df = [
        '\n'.join(
            ['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
        ).encode() for j in range(num_files)]
    files = [BytesIO(b) for b in bytes_to_df]

    # read all files in many threads
    pool = ThreadPool(8)
    results = pool.map(self.read_csv, files)
    first_result = results[0]

    for result in results:
        tm.assert_frame_equal(first_result, result)
Here's the result I finally ended up using. It's a modified version of the classes by dgorissen above.
File: threadpool.py
from queue import Queue, Empty
import threading
from threading import Thread

class Worker(Thread):
    """ Thread executing tasks from a given tasks queue. Thread is signalable,
    to exit
    """
    _TIMEOUT = 2

    def __init__(self, tasks, th_num):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon, self.th_num = True, th_num
        self.done = threading.Event()
        self.start()

    def run(self):
        while not self.done.is_set():
            try:
                func, args, kwargs = self.tasks.get(block=True,
                                                    timeout=self._TIMEOUT)
                try:
                    func(*args, **kwargs)
                except Exception as e:
                    print(e)
                finally:
                    self.tasks.task_done()
            except Empty as e:
                pass
        return

    def signal_exit(self):
        """ Signal to thread to exit """
        self.done.set()

class ThreadPool:
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads, tasks=[]):
        self.tasks = Queue(num_threads)
        self.workers = []
        self.done = False
        self._init_workers(num_threads)
        for task in tasks:
            self.tasks.put(task)

    def _init_workers(self, num_threads):
        for i in range(num_threads):
            self.workers.append(Worker(self.tasks, i))

    def add_task(self, func, *args, **kwargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kwargs))

    def _close_all_threads(self):
        """ Signal all threads to exit and lose the references to them """
        for worker in self.workers:
            worker.signal_exit()
        self.workers = []

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

    def __del__(self):
        self._close_all_threads()

def create_task(func, *args, **kwargs):
    return (func, args, kwargs)
To use the pool:

from random import randrange
from time import sleep

delays = [randrange(1, 10) for i in range(30)]

def wait_delay(d):
    print('sleeping for (%d)sec' % d)
    sleep(d)

pool = ThreadPool(20)
for i, d in enumerate(delays):
    pool.add_task(wait_delay, d)
pool.wait_completion()
Another way is to submit the work to a thread pool via concurrent.futures:

import concurrent.futures

with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
    for i in range(10):
        a = executor.submit(fn, arg1, arg2, ....)  # fn is the callable to run, followed by its arguments
The overhead of creating the new processes is minimal, especially when it's just 4 of them. I doubt this is a performance hot spot of your application. Keep it simple, optimize where you have to and where profiling results point to.
There is no built-in thread-based pool. However, it can be very quick to implement a producer/consumer queue with the Queue class.
From:
https://docs.python.org/2/library/queue.html
from threading import Thread
from Queue import Queue

def worker():
    while True:
        item = q.get()
        do_work(item)
        q.task_done()

q = Queue()
for i in range(num_worker_threads):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

for item in source():
    q.put(item)

q.join()  # block until all tasks are done
If you don't mind executing others' code, here's mine:

Note: There is a lot of extra code you may want to remove [added for better clarification and to demonstrate how it works]

Note: Python naming conventions were used for method names and variable names instead of camelCase.

Working procedure:

The MultiThread class creates the given number of SingleThread instances, sharing a lock, work queue, exit flag and results list.
Each SingleThread is started by MultiThread as soon as all instances are created.
We can add work using MultiThread (it will take care of locking).
SingleThreads process the work queue, using a lock in the middle.
Once your work is done, you can destroy all threads with the shared boolean value.
Here, work can be anything. It can automatically import (uncomment the import line) and process a module using the given arguments.
Results are appended to results and we can retrieve them using get_results.
Code:
import threading
import queue

class SingleThread(threading.Thread):
    def __init__(self, name, work_queue, lock, exit_flag, results):
        threading.Thread.__init__(self)
        self.name = name
        self.work_queue = work_queue
        self.lock = lock
        self.exit_flag = exit_flag
        self.results = results

    def run(self):
        # print("Coming %s with parameters %s", self.name, self.exit_flag)
        while not self.exit_flag:
            # print(self.exit_flag)
            self.lock.acquire()
            if not self.work_queue.empty():
                work = self.work_queue.get()
                module, operation, args, kwargs = work.module, work.operation, work.args, work.kwargs
                self.lock.release()
                print("Processing : " + operation + " with parameters " + str(args) + " and " + str(kwargs) + " by " + self.name + "\n")
                # module = __import__(module_name)
                result = str(getattr(module, operation)(*args, **kwargs))
                print("Result : " + result + " for operation " + operation + " and input " + str(args) + " " + str(kwargs))
                self.results.append(result)
            else:
                self.lock.release()
        # process_work_queue(self.work_queue)

class MultiThread:
    def __init__(self, no_of_threads):
        self.exit_flag = bool_instance()
        self.queue_lock = threading.Lock()
        self.threads = []
        self.work_queue = queue.Queue()
        self.results = []
        for index in range(0, no_of_threads):
            thread = SingleThread("Thread" + str(index+1), self.work_queue, self.queue_lock, self.exit_flag, self.results)
            thread.start()
            self.threads.append(thread)

    def add_work(self, work):
        self.queue_lock.acquire()
        self.work_queue._put(work)
        self.queue_lock.release()

    def destroy(self):
        self.exit_flag.value = True
        for thread in self.threads:
            thread.join()

    def get_results(self):
        return self.results

class Work:
    def __init__(self, module, operation, args, kwargs={}):
        self.module = module
        self.operation = operation
        self.args = args
        self.kwargs = kwargs

class SimpleOperations:
    def sum(self, *args):
        return sum([int(arg) for arg in args])

    @staticmethod
    def mul(a, b, c=0):
        return int(a) * int(b) + int(c)

class bool_instance:
    def __init__(self, value=False):
        self.value = value

    def __setattr__(self, key, value):
        if key != "value":
            raise AttributeError("Only value can be set!")
        if not isinstance(value, bool):
            raise AttributeError("Only True/False can be set!")
        self.__dict__[key] = value
        # super.__setattr__(key, bool(value))

    def __bool__(self):
        return self.value

if __name__ == "__main__":
    multi_thread = MultiThread(5)
    multi_thread.add_work(Work(SimpleOperations(), "mul", [2, 3], {"c": 4}))
    while True:
        data_input = input()
        if data_input == "":
            pass
        elif data_input == "break":
            break
        else:
            work = data_input.split()
            multi_thread.add_work(Work(SimpleOperations(), work[0], work[1:], {}))
    multi_thread.destroy()
    print(multi_thread.get_results())

Python Multithreading with requests

I have a scraper which initiates a requests session and fetches some data using IPv6. I now have a list of 10000 IPs, which I have prepared for processing using threading, but it's giving an error.
Need support to find out the issue.
import requests, queue, threading, urllib3, json, pandas as pd, os, time, datetime, inspect

num_threads = 2
root = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
with open(root + "/ip_list.txt") as ips:
    device_ip = list(ips)

class Writer_Worker(threading.Thread):
    def __init__(self, queue, df, *args, **kwargs):
        if not queue:
            print("Device Queue not specified")
            exit(1)
        self.out_q = queue
        self.df = df
        super().__init__(*args, **kwargs)

    def run(self):
        while True:
            try:
                device_details = self.out_q.get(timeout=3)
            except queue.Empty:
                return
            self.df[device_details[0]] = device_details
            self.out_q.task_done()

class Worker(threading.Thread):
    def __init__(self, queue, out_queue, device_password, *args, **kwargs):
        if not queue:
            print("Device Queue not specified")
            exit(1)
        self.queue = queue
        self.pas = device_password
        self.out_q = out_queue
        super().__init__(*args, **kwargs)

    def run(self):
        while True:
            try:
                device_ip = self.queue.get(timeout=3)
            except queue.Empty:
                return
            self.connect_to_device_and_process(device_ip)
            self.queue.task_done()

    def connect_to_device_and_process(self, device_ip):
        st = str("Online")
        try:
            r = requests.post("https://[" + device_ip + "]/?q=index.login&mimosa_ajax=1", {"username": "configure", "password": self.pas}, verify=False)
        except requests.exceptions.ConnectionError:
            st = str("Offline")
            self.out_q.put([device_ip, st, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""])
            return
        finally:
            if 'Online' in st:
                r = requests.get("https://[" + device_ip + "]/cgi/dashboard.php", cookies=r.cookies, verify=False)
                if "Response [401]" in str(r):
                    st2 = str("Password Error")
                    self.out_q.put([device_ip, st2, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""])
                else:
                    data = json.loads(r.content.decode())
                    output5 = data['config']['Spectrum_Power']
                    self.out_q.put([device_ip, st, output5['Auto_Power'].replace('2', 'Max Power').replace('1', 'Min Power').replace('0', 'off'), output5['AutoConfig']])

def main():
    start = time.time()
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    pas = input("Enter Device Password:")
    df = pd.DataFrame(columns=["IP", "Status", "Auto_Power", "AutoChannel"])
    q = queue.Queue(len(device_ip))
    for ip in device_ip:
        q.put_nowait(ip)
    out_q = queue.Queue(len(device_ip))
    Writer_Worker(out_q, df).start()
    for _ in range(num_threads):
        Worker(q, out_q, pas).start()
    q.join()
    print(df)
    df.to_excel('iBridge_C5x_Audit_Report.xlsx', sheet_name='Detail', index=False)

if __name__ == "__main__":
    main()
Below is the error while running the script; it seems I am unable to log in to this device.
Any help is appreciated.
You should use a thread pool that distributes the work between a fixed number of threads. This is a core feature of Python since version 3.2.
from concurrent.futures import ThreadPoolExecutor
Define a function perform(ip) that performs the request for one ip (a sketch is given at the end of this answer)
Set variable numThreads to the number of desired threads
Run the thread-pool executor:
print(f'Using {numThreads} threads')
with ThreadPoolExecutor(max_workers=numThreads) as pool:
    success = all(pool.map(perform, ips))
Source: https://docs.python.org/3/library/concurrent.futures.html
On that page you find an example even better tailored to your application: https://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor-example
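A sketch of what perform(ip) might look like for this scraper; the URL, timeout, and return convention are assumptions, not the asker's exact code:

from concurrent.futures import ThreadPoolExecutor
import requests

def perform(ip):
    # hypothetical single-IP request; adapt URL and payload to the real device API
    try:
        r = requests.get("https://[" + ip + "]/", verify=False, timeout=5)
        return r.ok
    except requests.exceptions.RequestException:
        return False

ips = ["2001:db8::1", "2001:db8::2"]   # stand-in IPv6 addresses
numThreads = 2
with ThreadPoolExecutor(max_workers=numThreads) as pool:
    success = all(pool.map(perform, ips))
print(success)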
from threading import Thread
th = Thread(target=self.fill_imdb, args=(movies_info_part, "thread " + str(count)))
th.start()
fill_imdb is my method

Why is the queue empty for the file to be downloaded?

Below is code that downloads various URLs, each in a separate thread. I was attempting to make some changes before implementing the thread pool, but with this change the queue ends up empty and the download does not begin.
import Queue
import urllib2
import os
import utils as _fdUtils
import signal
import sys
import time
import threading

class ThreadedFetch(threading.Thread):
    """ docstring for ThreadedFetch
    """
    def __init__(self, queue, out_queue):
        super(ThreadedFetch, self).__init__()
        self.queueItems = queue.get()
        self.__url = self.queueItems[0]
        self.__saveTo = self.queueItems[1]
        self.outQueue = out_queue

    def run(self):
        fileName = self.__url.split('/')[-1]
        path = os.path.join(DESKTOP_PATH, fileName)
        file_size = int(_fdUtils.getUrlSizeInBytes(self.__url))
        while not STOP_REQUEST.isSet():
            urlFh = urllib2.urlopen(self.__url)
            _log.info("Download: %s", fileName)
            with open(path, 'wb') as fh:
                file_size_dl = 0
                block_sz = 8192
                while True:
                    buffer = urlFh.read(block_sz)
                    if not buffer:
                        break
                    file_size_dl += len(buffer)
                    fh.write(buffer)
                    status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
                    status = status + chr(8)*(len(status)+1)
                    sys.stdout.write('%s\r' % status)
                    time.sleep(.05)
                    sys.stdout.flush()
                    if file_size_dl == file_size:
                        _log.info("Download Completed %s%% for file %s, saved to %s",
                                  file_size_dl * 100. / file_size, fileName, DESKTOP_PATH)
Below is the main function that does the call and initiation.
def main(appName):
    args = _fdUtils.getParser()
    urls_saveTo = {}
    # spawn a pool of threads, and pass them queue instance
    # each url will be downloaded concurrently
    for i in range(len(args.urls)):
        t = ThreadedFetch(queue, out_queue)
        t.daemon = True
        t.start()
    try:
        for url in args.urls:
            urls_saveTo[url] = args.saveTo
        # urls_saveTo = {urls[0]: args.saveTo, urls[1]: args.saveTo, urls[2]: args.saveTo}
        # populate queue with data
        for item, value in urls_saveTo.iteritems():
            queue.put([item, value])
        # wait on the queue until everything has been processed
        queue.join()
        print '*** Done'
    except (KeyboardInterrupt, SystemExit):
        lgr.critical('! Received keyboard interrupt, quitting threads.')
You create the queue and then the first thread, which immediately tries to fetch an item from the still empty queue. The ThreadedFetch.__init__() method isn't run asynchronously; only the run() method is, when you call start() on a thread object.
Store the queue in __init__() and move the get() into the run() method. That way you can create all the threads and they block in their own thread, giving you the chance to put items into the queue in the main thread.
class ThreadedFetch(threading.Thread):
    def __init__(self, queue, out_queue):
        super(ThreadedFetch, self).__init__()
        self.queue = queue
        self.outQueue = out_queue

    def run(self):
        url, save_to = self.queue.get()
        # ...
For this example the queue is unnecessary, by the way, as every thread gets exactly one item from it. You could pass that item directly to the thread when creating the thread object:
class ThreadedFetch(threading.Thread):
    def __init__(self, url, save_to, out_queue):
        super(ThreadedFetch, self).__init__()
        self.url = url
        self.save_to = save_to
        self.outQueue = out_queue

    def run(self):
        # ...
And when the ThreadedFetch class really just consists of the __init__() and run() methods, you may consider moving the run() method into a function and starting that asynchronously.
def fetch(url, save_to, out_queue):
    # ...

# ...

def main():
    # ...
    thread = Thread(target=fetch, args=(url, save_to, out_queue))
    thread.daemon = True
    thread.start()

Limiting Threads within Python Threading, Queue

I'm using the following code to multithread urllib2. However, what is the best way to limit the number of threads that it consumes?
class ApiMultiThreadHelper:
    def __init__(self, api_calls):
        self.q = Queue.Queue()
        self.api_datastore = {}
        self.api_calls = api_calls
        self.userpass = '#####'

    def query_api(self, q, api_query):
        self.q.put(self.issue_request(api_query))

    def issue_request(self, api_query):
        self.api_datastore.update({api_query: {}})
        for lookup in ["call1", "call2"]:
            query = api_query + lookup
            request = urllib2.Request(query)
            request.add_header("Authorization", "Basic %s" % self.userpass)
            f = urllib2.urlopen(request)
            response = f.read()
            f.close()
            self.api_datastore[api_query].update({lookup: response})
        return True

    def go(self):
        threads = []
        for i in self.api_calls:
            t = threading.Thread(target=self.query_api, args=(self.q, i))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
You should use a thread pool. Here's an implementation I made years ago (Python 3.x friendly):
import traceback
from threading import Thread
try:
    import queue as Queue  # Python 3.x
except ImportError:
    import Queue  # Python 2.x

class ThreadPool(object):
    def __init__(self, no=10):
        self.alive = True
        self.tasks = Queue.Queue()
        self.threads = []
        for _ in range(no):
            t = Thread(target=self.worker)
            t.start()
            self.threads.append(t)

    def worker(self):
        while self.alive:
            try:
                fn, args, kwargs = self.tasks.get(timeout=0.5)
            except Queue.Empty:
                continue
            except ValueError:
                self.tasks.task_done()
                continue
            try:
                fn(*args, **kwargs)
            except Exception:
                # might wanna add some better error handling
                traceback.print_exc()
            self.tasks.task_done()

    def add_job(self, fn, args=[], kwargs={}):
        self.tasks.put((fn, args, kwargs))

    def join(self):
        self.tasks.join()

    def deactivate(self):
        self.alive = False
        for t in self.threads:
            t.join()
You can also find a similar class in the multiprocessing.pool module (don't ask me why it is there). You can then refactor your code like this:
def go(self):
    tp = ThreadPool(20)  # <-- 20 thread workers
    for i in self.api_calls:
        tp.add_job(self.query_api, args=(self.q, i))
    tp.join()
    tp.deactivate()
Number of threads is now defined a priori.
