I am trying to use this example as a template for a queuing system in my CherryPy app.
I was able to convert it from Python 2 to Python 3 (changing from Queue import Empty to from queue import Empty) and run it on Ubuntu. But when I run it on Windows I get the following error:
F:\workspace\test>python test.py
Traceback (most recent call last):
File "test.py", line 112, in <module>
broker.start()
File "C:\Anaconda3\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\Anaconda3\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 59, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot serialize '_io.TextIOWrapper' object
F:\workspace\test>Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Anaconda3\lib\multiprocessing\spawn.py", line 100, in spawn_main
new_handle = steal_handle(parent_pid, pipe_handle)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 81, in steal_handle
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
OSError: [WinError 87] The parameter is incorrect
Here is the full code:
# from http://www.defuze.org/archives/198-managing-your-process-with-the-cherrypy-bus.html
import sys
import logging
from logging import handlers
from cherrypy.process import wspbus
class MyBus(wspbus.Bus):
def __init__(self, name=""):
wspbus.Bus.__init__(self)
self.open_logger(name)
self.subscribe("log", self._log)
def exit(self):
wspbus.Bus.exit(self)
self.close_logger()
def open_logger(self, name=""):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
h = logging.StreamHandler(sys.stdout)
h.setLevel(logging.INFO)
h.setFormatter(logging.Formatter("[%(asctime)s] %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(h)
self.logger = logger
def close_logger(self):
for handler in self.logger.handlers:
handler.flush()
handler.close()
def _log(self, msg="", level=logging.INFO):
self.logger.log(level, msg)
import random
import string
from multiprocessing import Process
class Bank(object):
def __init__(self, queue):
self.bus = MyBus(Bank.__name__)
self.queue = queue
self.bus.subscribe("main", self.randomly_place_order)
self.bus.subscribe("exit", self.terminate)
def randomly_place_order(self):
order = random.sample(['BUY', 'SELL'], 1)[0]
code = random.sample(string.ascii_uppercase, 4)
amount = random.randint(0, 100)
message = "%s %s %d" % (order, ''.join(code), amount)
self.bus.log("Placing order: %s" % message)
self.queue.put(message)
def run(self):
self.bus.start()
self.bus.block(interval=0.01)
def terminate(self):
self.bus.unsubscribe("main", self.randomly_place_order)
self.bus.unsubscribe("exit", self.terminate)
from queue import Empty
class Broker(Process):
def __init__(self, queue):
Process.__init__(self)
self.queue = queue
self.bus = MyBus(Broker.__name__)
self.bus.subscribe("main", self.check)
def check(self):
try:
message = self.queue.get_nowait()
except Empty:
return
if message == "stop":
self.bus.unsubscribe("main", self.check)
self.bus.exit()
elif message.startswith("BUY"):
self.buy(*message.split(' ', 2)[1:])
elif message.startswith("SELL"):
self.sell(*message.split(' ', 2)[1:])
def run(self):
self.bus.start()
self.bus.block(interval=0.01)
def stop(self):
self.queue.put("stop")
def buy(self, code, amount):
self.bus.log("BUY order placed for %s %s" % (amount, code))
def sell(self, code, amount):
self.bus.log("SELL order placed for %s %s" % (amount, code))
if __name__ == '__main__':
from multiprocessing import Queue
queue = Queue()
broker = Broker(queue)
broker.start()
bank = Bank(queue)
bank.run()
The problem is that parts of the MyBus object are not picklable, and you're saving an instance of MyBus to your Broker instance. Because Windows lacks fork() support, when you call broker.start(), the entire state of broker must be pickled and recreated in the child process that multiprocessing spawns to execute broker.run. It works on Linux because Linux supports fork; it doesn't need to pickle anything in this case - the child process contains the complete state of the parent as soon as it is forked.
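You can reproduce the same failure on Linux by forcing the spawn start method, which is what Windows uses by default. A minimal sketch, using a made-up Holder class rather than your code:
import multiprocessing

class Holder(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        # An open file is an _io.TextIOWrapper, which cannot be pickled
        self.fh = open(__file__)

    def run(self):
        pass

if __name__ == '__main__':
    # 'spawn' is the only start method on Windows; forcing it elsewhere
    # reproduces the same pickling of the Process object at start()
    multiprocessing.set_start_method('spawn')
    Holder().start()  # TypeError: cannot serialize '_io.TextIOWrapper' object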
There are two ways to solve this problem. The first, more difficult way is to make your broker instance picklable. To do that, you need to make MyBus picklable. The error you're getting right now refers to the logger attribute on MyBus, which is not picklable. That one is easy to fix: just add __getstate__/__setstate__ methods to MyBus, which control how the object is pickled/unpickled. If we remove the logger when we pickle and recreate it when we unpickle, we work around the issue:
class MyBus(wspbus.Bus):
...
    def __getstate__(self):
        self_dict = self.__dict__.copy()  # copy, so pickling doesn't mutate the live instance
        del self_dict['logger']
        return self_dict
def __setstate__(self, d):
self.__dict__.update(d)
self.open_logger()
This works, but then we hit another pickling error:
Traceback (most recent call last):
File "async2.py", line 121, in <module>
broker.start()
File "C:\python34\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\python34\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\python34\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\python34\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\python34\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <class 'cherrypy.process.wspbus._StateEnum.State'>: attribute lookup State on cherrypy.process.wspbus failed
It turns out cherrypy.process.wspbus._StateEnum.State, which is an attribute on the wspbus.Bus class inherited by MyBus, is a nested class, and nested classes can't be pickled:
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return "states.%s" % self.name
The State object (surprise) is used to track the Bus instance's state. Since we're doing the pickling before we start up the bus, we can just remove the state attribute from the object when we pickle, and set it to wspbus.states.STOPPED when we unpickle.
class MyBus(wspbus.Bus):
def __init__(self, name=""):
wspbus.Bus.__init__(self)
self.open_logger(name)
self.subscribe("log", self._log)
    def __getstate__(self):
        self_dict = self.__dict__.copy()  # copy, so pickling doesn't mutate the live instance
        del self_dict['logger']
        del self_dict['state']
        return self_dict
def __setstate__(self, d):
self.__dict__.update(d)
self.open_logger()
self.state = wspbus.states.STOPPED # Initialize to STOPPED
With these changes, the code works as expected! The only limitation is that it's only safe to pickle MyBus if the bus hasn't started yet, which is fine for your use case.
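A quick sanity check, assuming the MyBus defined above (illustrative only):
import pickle

bus = MyBus("test")
data = pickle.dumps(bus)    # works only while the bus is still stopped
clone = pickle.loads(data)  # __setstate__ rebuilds the logger and state
print(clone.state)          # states.STOPPED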
Again, this is the hard way. The easy way is to remove the need to pickle the MyBus instance altogether, by creating it in the child process rather than the parent:
class Broker(Process):
def __init__(self, queue):
Process.__init__(self)
self.queue = queue
...
def run(self):
self.bus = MyBus(Broker.__name__) # Create the instance here, in the child
self.bus.subscribe("main", self.check)
self.bus.start()
self.bus.block(interval=0.01)
As long as you don't need to access broker.bus in the parent, this is the simpler option.
Related
This is an old issue whose suggested workaround does not work.
Below is a complete example showing how the suggested approach fails; uncomment the marked line in get() to trigger the error.
import multiprocessing
import os
import time
from multiprocessing import get_context
from multiprocessing.queues import Queue
class SharedCounter(object):
def __init__(self, n=0):
self.count = multiprocessing.Value('i', n)
def increment(self, n=1):
with self.count.get_lock():
self.count.value += n
    @property
def value(self):
return self.count.value
class MyQueue(Queue):
def __init__(self, *args, **kwargs):
super(MyQueue, self).__init__(*args, ctx=get_context(), **kwargs)
self.size = SharedCounter(0)
def put(self, *args, **kwargs):
self.size.increment(1)
super(MyQueue, self).put(*args, **kwargs)
def get(self, *args, **kwargs):
# self.size.increment(-1) # uncomment this for error
return super(MyQueue, self).get(*args, **kwargs)
def qsize(self):
return self.size.value
def empty(self):
return not self.qsize()
def clear(self):
while not self.empty():
self.get()
def worker(queue):
while True:
item = queue.get()
if item is None:
break
print(f'[{os.getpid()}]: got {item}')
time.sleep(1)
if __name__ == '__main__':
num_processes = 4
q = MyQueue()
pool = multiprocessing.Pool(num_processes, worker, (q,))
for i in range(10):
q.put("hello")
q.put("world")
for i in range(num_processes):
q.put(None)
q.close()
q.join_thread()
pool.close()
pool.join()
For some reason, the newly defined MyQueue forgets about the size attribute.
Process SpawnPoolWorker-1:
Traceback (most recent call last):
File "/usr/local/Cellar/python#3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 314, in _bootstrap
self.run()
File "/usr/local/Cellar/python#3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/Cellar/python#3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/pool.py", line 109, in worker
initializer(*initargs)
File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 47, in worker
item = queue.get()
^^^^^^^^^^^
File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 31, in get
self.size.increment(-1) # uncomment this for error
^^^^^^^^^
AttributeError: 'MyQueue' object has no attribute 'size'
(The same traceback then repeats for SpawnPoolWorker-2, SpawnPoolWorker-3, and SpawnPoolWorker-4, with the last two interleaved.)
Well, you didn't override __getstate__ and __setstate__ to include your attribute. Pickle uses these two methods to control serialization (see Handling Stateful Objects in the pickle docs), so you should override them to add your attribute to what gets serialized.
import multiprocessing
import os
import time
from multiprocessing import get_context
from multiprocessing.queues import Queue
class SharedCounter(object):
def __init__(self, n=0):
self.count = multiprocessing.Value('i', n)
def increment(self, n=1):
with self.count.get_lock():
self.count.value += n
    @property
def value(self):
return self.count.value
class MyQueue(Queue):
def __init__(self, *args, **kwargs):
super(MyQueue, self).__init__(*args, ctx=get_context(), **kwargs)
self.size = SharedCounter(0)
def __getstate__(self):
        return (super(MyQueue, self).__getstate__(), self.size)
def __setstate__(self, state):
super(MyQueue, self).__setstate__(state[0])
self.size = state[1]
def put(self, *args, **kwargs):
self.size.increment(1)
super(MyQueue, self).put(*args, **kwargs)
def get(self, *args, **kwargs):
        self.size.increment(-1)  # safe now that size survives pickling
return super(MyQueue, self).get(*args, **kwargs)
def qsize(self):
return self.size.value
def empty(self):
return not self.qsize()
def clear(self):
while not self.empty():
self.get()
def worker(queue):
while True:
item = queue.get()
if item is None:
break
print(f'[{os.getpid()}]: got {item}')
time.sleep(1)
if __name__ == '__main__':
num_processes = 4
q = MyQueue()
pool = multiprocessing.Pool(num_processes, initializer=worker, initargs=(q,))
for i in range(10):
q.put("hello")
q.put("world")
for i in range(num_processes):
q.put(None)
q.close()
q.join_thread()
pool.close()
pool.join()
Note that in Python 3 you don't need super(MyQueue, self); a bare super() suffices, makes it easier to rename the class later, and has other portability and refactoring benefits, so consider swapping every super(x, y) for plain super().
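For example, the overrides above become:
class MyQueue(Queue):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, ctx=get_context(), **kwargs)
        self.size = SharedCounter(0)

    def __getstate__(self):
        return (super().__getstate__(), self.size)

    def __setstate__(self, state):
        super().__setstate__(state[0])
        self.size = state[1]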
I would like to make a program that runs two threads which can be simultaneously interrupted using Ctrl+C. The following script is a simplified version of this:
import time
import threading
class Controller(object):
def __init__(self, name=None):
self.name = name
def run_once(self):
print("Controller {} is running once...".format(self.name))
def run_forever(self):
while True:
self.run_once()
time.sleep(1)
if __name__ == "__main__":
controller1 = Controller(name="1")
controller2 = Controller(name="2")
thread1 = threading.Thread(target=controller1.run_forever)
thread2 = threading.Thread(target=controller2.run_forever)
thread1.daemon = True
thread2.daemon = True
thread1.start()
thread2.start()
try:
while True:
thread1.join(1)
thread2.join(1)
if not thread1.isAlive() or not thread2.isAlive():
break
except KeyboardInterrupt:
pass
I'm trying to make the code a bit more DRY by doing the following:
import time
import threading
class Controller(object):
def __init__(self, name=None):
self.name = name
def run_once(self):
print("Controller {} is running once...".format(self.name))
def run_forever(self):
while True:
self.run_once()
time.sleep(1)
class ThreadController(Controller, threading.Thread):
def __init__(self, *args, **kwargs):
Controller.__init__(self, *args, **kwargs)
threading.Thread.__init__(self, target=self.run_forever)
self.daemon = True
self.start()
if __name__ == "__main__":
thread1 = ThreadController(name="1")
thread2 = ThreadController(name="2")
try:
while True:
thread1.join(1)
thread2.join(1)
if not thread1.isAlive() or not thread2.isAlive():
break
except KeyboardInterrupt:
pass
However, when I try to run the latter script, I get the following error:
Traceback (most recent call last):
File "threading_test3.py", line 34, in <module>
thread1 = ThreadController(name="1")
File "threading_test3.py", line 18, in __init__
Controller.__init__(self, *args, **kwargs)
File "threading_test3.py", line 6, in __init__
self.name = name
File "/usr/lib/python2.7/threading.py", line 971, in name
assert self.__initialized, "Thread.__init__() not called"
AssertionError: Thread.__init__() not called
I don't understand why Thread.__init__() is not called, because it seems like it is called in the __init__ of ThreadController. What is causing this error?
Call Thread's __init__ first. threading.Thread defines name as a property whose setter asserts that Thread.__init__ has already run; since Controller.__init__ assigns self.name, the Thread must be initialized before the Controller:
class ThreadController(Controller, threading.Thread):
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self, target=self.run_forever)
Controller.__init__(self, *args, **kwargs)
self.daemon = True
self.start()
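A minimal sketch of the failure mode (Broken is a made-up example class; Python 2.7 raises the AssertionError from your traceback, while modern Python 3 raises a RuntimeError instead):
import threading

class Broken(threading.Thread):
    def __init__(self):
        # Assigning to self.name triggers Thread's name property setter
        # before Thread.__init__ has run
        self.name = "x"
        threading.Thread.__init__(self)

Broken()  # AssertionError: Thread.__init__() not called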
I have some pretty simple worker code; I might be making a silly mistake.
class BingWorker(object):
def __init__(self):
self.gm_worker = gearman.GearmanWorker(['localhost:4730'])
completed_job_request = self.gm_worker.register_task('bingmedia', callBing)
def callBing(self, gearman_worker, gearman_job):
print "In worker ", gearman_job.data
return "Kools"
def run(self):
self.gm_worker.work()
if __name__ == '__main__':
BingWorker().run()
gives
Traceback (most recent call last):
File "worker.py", line 16, in <module>
BingWorker().run()
File "worker.py", line 6, in __init__
completed_job_request = self.gm_worker.register_task('bingmedia', callBing)
NameError: global name 'callBing' is not defined
Any hints? The example is very similar to the Python example at http://gearman.org/examples/reverse/, just put into a class structure.
Improved code:
class BingWorker(object):
def __init__(self):
self.gm_worker = gearman.GearmanWorker(['localhost:4730'])
def callBing(self, gearman_worker, gearman_job):
print "In worker ", gearman_job.data
return "Kools"
def run(self):
completed_job_request = self.gm_worker.register_task('bingmedia', self.callBing)
self.gm_worker.work()
if __name__ == '__main__':
BingWorker().run()
You need to change callBing to self.callBing, and it might be better to move the registration to the first line of the run method rather than __init__.
The first is because callBing is a reference to a missing global, while self.callBing is a reference to the class's method. The second is because you could potentially get a call to self.callBing before __init__ has finished, which would be bad news.
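A quick illustration of the difference (made-up names):
class Example(object):
    def greet(self, who):
        return "hello %s" % who

e = Example()
callback = e.greet  # a bound method: carries e along with it
assert callback("world") == "hello world"
# plain `greet` by itself would raise NameError: it is not a global name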
I'm using a thread pool with Tornado to do some work. This is the code:
common/thread_pool.py
import threading
import logging
import Queue

import tornado.ioloop
class Worker(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self._queue = queue
def run(self):
logging.info('Worker start')
while True:
content = self._queue.get()
if isinstance(content, str) and content == 'quit':
break
#content: (func, args, on_complete)
func = content[0]
args = content[1]
on_complete = content[2]
resp = func(args)
tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
            # I don't know if it's correct to call this
            # self._queue.task_done()
logging.info('Worker stop')
class WorkerPool(object):
_workers = []
def __init__(self, num):
self._queue = Queue.Queue()
self._size = num
def start(self):
logging.info('WorkerPool start %d' % self._size)
for _ in range(self._size):
worker = Worker(self._queue)
worker.start()
self._workers.append(worker)
def stop(self):
for worker in self._workers:
self._queue.put('quit')
for worker in self._workers:
worker.join()
        logging.info('WorkerPool stopped')
def append(self, content):
self._queue.put(content)
gateway.py
import sys
import time
import logging

import tornado.ioloop
import tornado.web
from common import thread_pool
workers = None
class MainServerHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
def get(self):
start_time = time.time()
method = 'get'
content = (self.handle, (method, self.request, start_time), self.on_complete)
workers.append(content)
    @tornado.web.asynchronous
def post(self):
start_time = time.time()
method = 'post'
content = (self.handle, (method, self.request, start_time), self.on_complete)
workers.append(content)
def handle(self, args):
method, request, start_time = args
#for test, just return
return 'test test'
def on_complete(self, res):
logging.debug('on_complete')
self.write(res)
self.finish()
return
def main(argv):
global workers
workers = thread_pool.WorkerPool(conf_mgr.thread_num)
workers.start()
application = tornado.web.Application([(r"/", MainServerHandler)])
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main(sys.argv[1:])
When I make many concurrent requests, I get this error:
ERROR: 2014-09-15 18:04:03: ioloop.py:435 * 140500107065056 Exception in callback <tornado.stack_context._StackContextWrapper object at 0x7fc8b4d6b9f0>
Traceback (most recent call last):
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/ioloop.py", line 421, in _run_callback
callback()
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../common/thread_pool.py", line 39, in <lambda>
tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/gateway.py", line 92, in on_complete
self.write(res)
File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/web.py", line 489, in write
raise RuntimeError("Cannot write() after finish(). May be caused "
RuntimeError: Cannot write() after finish(). May be caused by using async operations without the @asynchronous decorator.
But I didn't call write after finish. I'm also using the @asynchronous decorator. At the same time, the logs show that write/finish are called by the same thread.
The issue is with the way you're adding the callback to the I/O loop. Add it like this:
tornado.ioloop.IOLoop.instance().add_callback(on_complete, resp)
And the errors will go away.
You're seeing this strange behavior because when you use a lambda, you're creating a closure over the enclosing function's local scope, and the variables used in that closure are looked up when the lambda is executed, not when it's created. Consider this example:
funcs = []
def func(a):
print a
for i in range(5):
funcs.append(lambda: func(i))
for f in funcs:
f()
Output:
4
4
4
4
4
Because your worker method is running in a while loop, on_complete ends up getting rebound several times, which also changes the value of on_complete inside the lambda. That means if one worker thread sets on_complete for a handler A, but then gets another task and sets on_complete for handler B before the callback set for handler A runs, both callbacks end up running handler B's on_complete.
If you really wanted to use a lambda, you could also avoid this by binding on_complete in the local scope of the lambda:
tornado.ioloop.IOLoop.instance().add_callback(lambda on_complete=on_complete: on_complete(resp))
But just adding the function and its argument directly is much nicer.
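For completeness, the same default-argument trick applied to the earlier loop example prints 0 through 4 as intended:
funcs = []
def func(a):
    print a

for i in range(5):
    funcs.append(lambda i=i: func(i))  # bind the current value of i at definition time

for f in funcs:
    f()  # prints 0, 1, 2, 3, 4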
#!/usr/bin/env python
#coding=utf-8
import sys,os,threading
import Queue
keyword = sys.argv[1]
path = sys.argv[2]
class keywordMatch(threading.Thread):
def __init__(self,queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
while True:
line = self.queue.get()
if keyword in line:
print line
            self.queue.task_done()
def main():
concurrent = 100 # Number of threads
queue = Queue.Queue()
for i in range(concurrent):
t = keywordMatch(True)
t.setDaemon(True)
t.start()
allfiles = os.listdir(path)
for files in allfiles:
pathfile = os.path.join(path,files)
fp = open(pathfile)
lines = fp.readlines()
for line in lines:
queue.put(line.strip())
queue.join()
if __name__ == '__main__':
main()
This program searches for the keyword in all the files in a directory, but it raises an error:
Exception in thread Thread-100:
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 551, in __bootstrap_inner
self.run()
File "du.py", line 17, in run
line = self.queue.get()
AttributeError: 'bool' object has no attribute 'get'
How can I get rid of the error?
You're instantiating the thread with t = keywordMatch(True), and then in __init__ you're taking this argument and saving it as self.queue - so naturally self.queue is going to be a bool. If you want a Queue instance there, you should pass the queue in:
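for i in range(concurrent):
    t = keywordMatch(queue)  # pass the Queue instance, not True
    t.setDaemon(True)
    t.start()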
In main() you wrote:
t = keywordMatch(True)
The keywordMatch class's __init__ does this:
def __init__(self,queue):
self.queue = queue
So now self.queue is True! Later, trying to do self.queue.get fails because it isn't a queue at all.