Logging seems to have a memory leak for multi-threaded usage - Python

I'm hitting a memory leak in Python. I suspect it is related to using the logging module from multiple threads, but I can't find out why.
Version 1 (with memory leak, multi-threaded call)
campaign_id_queue = Queue.Queue()
campaign_worker = {}  # populated elsewhere: key is campaign ID, value is a worker object
for campaign_id, worker in campaign_worker.iteritems():
    campaign_id_queue.put(campaign_id)

thread_list = []
for n in range(THREAD_NUM):  # THREAD_NUM defined already
    thread_list.append(Thread(target=parallel_run, args=(campaign_id_queue, now, n, logger)))
for thread in thread_list:
    thread.daemon = True
    thread.start()
campaign_id_queue.join()
# another file
def parallel_run(campaign_id_queue, now, n, logger):
    while True:
        try:
            campaign_id = campaign_id_queue.get()
        except Queue.Empty:
            logger.warning('Queue empty')
        else:
            try:
                worker = campaign_worker[campaign_id]  # look up the worker for this ID (not shown in the original snippet)
                if worker.open_clients(logger) < 0:
                    logger.error('error here')
                    continue
                worker.run(now, logger)
            except Exception, e:
                logger.exception(e)
            finally:
                campaign_id_queue.task_done()
Version 2 (without memory leak, single-threaded call)
campaign_worker = {}  # populated elsewhere: key is campaign ID, value is a worker object
for campaign_id, worker in campaign_worker.iteritems():
    if worker.open_clients(logger) < 0:
        logger.error('error here')
        continue
    worker.run(now, logger)

Update: it turned out to be caused by threads not being cleaned up after use, not by the logging module. It's solved now, thanks for the attention.
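For reference, here is a minimal sketch of one way to make the worker threads actually terminate once the queue is drained, instead of lingering blocked in get(): put one None sentinel per thread and join the threads afterwards. This is only an illustration of the fix described above, not the poster's actual code.

# after all campaign IDs have been queued, add one sentinel per worker thread
for _ in range(THREAD_NUM):
    campaign_id_queue.put(None)

def parallel_run(campaign_id_queue, now, n, logger):
    while True:
        campaign_id = campaign_id_queue.get()
        try:
            if campaign_id is None:  # sentinel: no more work, leave the loop
                return
            worker = campaign_worker[campaign_id]
            if worker.open_clients(logger) < 0:
                logger.error('error here')
                continue
            worker.run(now, logger)
        except Exception, e:
            logger.exception(e)
        finally:
            campaign_id_queue.task_done()

# after campaign_id_queue.join(), join the threads as well so they are
# really gone before the next cycle creates new ones
for thread in thread_list:
    thread.join()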

Related

Sending KeyboardInterrupt to child process

Is there a way to raise an exception in the child process when the main process gets a KeyboardInterrupt exception (instead of a loop polling an event or a queue value)?
For now I am using a Queue to communicate the KeyboardInterrupt triggered in the main process to the child processes. For the while-loop part it gets noticed in the child process, and so far I can do a proper clean-up for the child process.
However, when the KeyboardInterrupt is triggered during the child's initialization, I have to check after every statement whether the user has aborted the main process. Another option would be to trigger an exception by freeing the connection resource - which is used later - so that a (general or connection-related) exception is raised.
Are there better ways to get a good clean-up (daemon processes will not yield a good clean-up, I think)?
def connect(self):
    self.conn = mysql.connector.connect(
        host="192.168.10.10",
        user="homestead",
        password="xxxx",
        database="xxxx"
    )
    self.cursor = self.conn.cursor()

def dispose(self):
    self.cursor.close()
    self.conn.close()

def init(self):
    # set up root logger
    # ...
    root_logger = logging.getLogger()
    root_logger.addHandler(fh)
    # ...
    try:  # init check 1 for KeyboardInterrupt exception in main process (*1)
        row = self.task_queue.get(timeout=5)  # or something like using an
        # Event.is_set() whenever KeyboardInterrupt is raised
        # in main process could be possible too
        if row is None:  # None is sent when KeyboardInterrupt exception in main process
            self.task_queue.task_done()
            return False
        # for example, calling self.dispose() here generates an exception at
        # self.connect() because connection gets closed / freed (*2)
        # or raise CustomException (*2b)?
    except:
        pass
    # ...
    self.connect()
    # ...
    try:  # init check 2 for KeyboardInterrupt exception in main process (*3)
        row = self.task_queue.get(timeout=5)
        if row is None:
            self.task_queue.task_done()
            self.dispose()
            return False
        # raise CustomException?
    except:
        pass
    return True

def run(self):
    try:
        self.init()
    except KeyboardInterrupt:  # would something like this be possible
        # (or disrupt the code flow to elicit another exception like in *2,
        # or raise a CustomException in *2b, both of which get caught here, as
        # an alternative)?
        ...
        # this would be handy instead of checking after each statement
        # in the init parts (*1, *3)
    except:
        logging.error("Something went wrong during initialization")
        self.task_queue.task_done()
        self.dispose()
        return
    while True:
        if self.conn.is_connected() == False:
            pass  # ....
        row = None
        empty = False
        leave = False
        try:
            row = self.task_queue.get(timeout=5)
            if row is None:
                self.task_queue.task_done()
                leave = True
            else:
                pass  # save item
        except:
            empty = True
        if leave:
            break
    self.dispose()
BTW: I have read some other topics like "Python: while KeyboardInterrupt is forwarded to multiprocessing child process?" and "How to use KeyboardInterrupt from the main process to stop child processes?".
Edit (added from main()):
def manage_ctrlC(*args):
    sqlDataSaver.exit.set()

def main():
    global tasks, sqlDataSaver

    # Manage Ctrl-C keyboard event
    signal.signal(signal.SIGINT, manage_ctrlC)  # dummy, not used yet
    # ...
    tasks = multiprocessing.JoinableQueue()
    sqlDataSaver = sqlExecutor(tasks)  # inherits from multiprocessing.Process
    sqlDataSaver.start()
@Tim Roberts:
You mean something like this? So each process has its own SIGINT handler and a separate clean-up routine that is triggered by the exception raised in each handler?
from multiprocessing import *
import signal
import time
import sys

class SigInt(Exception):
    """SIG INT"""
    pass

class MyProcess(Process):
    def __init__(self, toExecute, sighandler):
        Process.__init__(self)
        self.toExecute = toExecute
        self.sighandler = sighandler

    def run(self):
        # set up custom handler
        signal.signal(signal.SIGINT, self.sighandler)
        while True:
            try:
                self.toExecute()
            except SigInt:
                # clean up
                break
        print(current_process().name, " process exited")

def manage_ctrlC_main(*args):
    print('main ctrl-c')
    sys.exit()

def toExecute1():
    time.sleep(1)
    print("exec1")

def toExecute2():
    time.sleep(1)
    print("exec2")

def sigh1(signal, frame):
    print('sig 1 handler')
    raise SigInt

def sigh2(signal, frame):
    print('sig 2 handler')
    raise SigInt

def main():
    global myProcess1, myProcess2

    signal.signal(signal.SIGINT, manage_ctrlC_main)

    myProcess1 = MyProcess(toExecute1, sigh1)
    myProcess1.start()
    time.sleep(4)
    myProcess2 = MyProcess(toExecute2, sigh2)
    myProcess2.start()

    myProcess1.join()
    myProcess2.join()

if __name__ == '__main__':
    main()

Cannot exit from multithreading for loop

I cannot break out of the following function if an error occurs.
def run(self, max_workers=10):
    outputs = {}
    q = queue.Queue()
    for key, ground_truth in self.ground_truths.items():
        q.put((key, ground_truth))
    count = {}
    count['total_finish'] = 0
    start_time = time.time()

    def worker():
        while True:
            try:
                key, value = self.pred_on_one_image(q.get())
                outputs[key] = value
                count['total_finish'] += 1
            except:
                os._exit(1)  # os._exit() requires an exit status
            finally:
                q.task_done()

    for i in range(max_workers):
        t = Thread(target=worker)
        t.daemon = True
        t.start()
    q.join()
    return outputs
I tried to use return, q.put(None), and sys.exit(), but none of them work; I have to press Ctrl+C manually to break out of it.
quit() and exit() usually work for me.
Use q.get(block=False) to raise the Empty exception if the queue is empty. Otherwise the call will wait until an item is available in the queue. The default value of block is True, so the get was blocking and no exception was raised.
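For illustration, a minimal sketch of the worker with a non-blocking get (still defined inside run() as in the question, so q, outputs, count and self.pred_on_one_image stay in scope; the error handling shown is an assumption, not part of the question):

from queue import Empty

def worker():
    while True:
        try:
            item = q.get(block=False)  # raises queue.Empty instead of blocking forever
        except Empty:
            return  # queue drained: let the thread finish on its own
        try:
            key, value = self.pred_on_one_image(item)
            outputs[key] = value
            count['total_finish'] += 1
        except Exception as e:
            print("worker error:", e)  # record the failure instead of killing the process
        finally:
            q.task_done()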

Break Main Calling Thread If Child Thread Throws An Exception

I'm using threading.Thread and t.start() with a list of callables to do long-running multithreaded processing. My main thread is blocked until all threads have finished. I'd like t.start() to return immediately if one of the callables throws an exception, and to terminate the other threads.
Using t.join() to check that the thread got executed provides no information about failures due to exceptions.
Here is the code:
import json
import threading
import requests

class ThreadServices:
    def __init__(self):
        self.obj = ""

    def execute_services(self, arg1, arg2):
        try:
            result = call_some_process(arg1, arg2)  # some method
            # save results somewhere
        except Exception, e:
            # raise exception
            print e

    def invoke_services(self, stubs):
        """
        Thread Spanning Function
        """
        try:
            p1 = ""  # some value
            p2 = ""  # some value
            # Call service 1
            t1 = threading.Thread(target=self.execute_services, args=(a, b,))
            # Start thread
            t1.start()
            # Block till thread completes execution
            t1.join()

            thread_pool = list()
            for stub in stubs:
                # Start parallel execution of threads
                t = threading.Thread(target=self.execute_services,
                                     args=(p1, p2))
                t.start()
                thread_pool.append(t)
            for thread in thread_pool:
                # Block till all the threads complete execution: wait for all
                # the parallel tasks to complete
                thread.join()

            # Start another process thread
            t2 = threading.Thread(target=self.execute_services,
                                  args=(p1, p2))
            t2.start()
            # Block till this thread completes execution
            t2.join()
            requests.post(url, data=json.dumps({"status_code": 200}))
        except Exception, e:
            print e
            requests.post(url, data=json.dumps({"status_code": 500}))
        # Don't return anything as this function is invoked as a thread from
        # the main calling function

class Service(ThreadServices):
    """
    Service Class
    """
    def main_thread(self, request, context):
        """
        Main Thread: Invokes Task Execution Sequence in ThreadedService
        :param request:
        :param context:
        :return:
        """
        try:
            main_thread = threading.Thread(target=self.invoke_services,
                                           args=(request,))
            main_thread.start()
            return True
        except Exception, e:
            return False
When I call Service().main_thread(request, context) and some exception occurs while executing t1, I need it to be raised in main_thread and to return False. How can I implement that for this structure? Thanks!!
For one thing, you are complicating matters too much. I would do it this way:
from thread import start_new_thread as thread
from time import sleep

class Task:
    """One thread per task.
    This you should do by subclassing threading.Thread().
    This is just a conceptual example.
    """
    def __init__ (self, func, args=(), kwargs={}):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.error = None
        self.done = 0
        self.result = None

    def _run (self):
        self.done = 0
        self.error = None
        self.result = None
        # So this is what you should do in a subclassed Thread():
        try: self.result = self.func(*self.args, **self.kwargs)
        except Exception, e:
            self.error = e
        self.done = 1

    def start (self):
        thread(self._run, ())

    def wait (self, retrexc=1):
        """Used in place of threading.Thread.join(), but it returns the result
        of the function self.func() and manages errors."""
        while not self.done: sleep(0.001)
        if self.error:
            if retrexc: return self.error
            raise self.error
        return self.result

# And this is how you should use your pool:
def do_something (tasknr):
    print tasknr-20
    if tasknr%7==0: raise Exception, "Dummy exception!"
    return tasknr**120/82.0

pool = []
for task in xrange(20, 50):
    t = Task(do_something, (task,))
    t.start()
    pool.append(t)

# And only then wait for each one:
results = []
for task in pool:
    results.append(task.wait())
print results
This way you can make task.wait() raise the error instead. The thread will already have stopped, so all you need to do is remove the task references from the pool (or the whole pool) once you are done. You can even do:
results = []
for task in pool:
    try: results.append(task.wait(0))
    except Exception, e:
        print task.args, "Error:", str(e)
print results
Now, do not use this exact Task() class, as it needs a lot more added before it can be used for real.
Just subclass threading.Thread() and implement a similar concept by overriding run() and join(), or by adding new functions like wait().

Deadlock when calling task_done() as the task has been pulled by a new thread

I am giving Python (3.4) multithreading a try and have a question about the following code.
The code works well when I have a greater amount of work than NUM_WORKER_THREADS; however, once the queue shrinks to fewer items than NUM_WORKER_THREADS, new iterations can take the same item because of the time between items.get() and the task_done() call. This results in a deadlock when calling task_done().
What is the correct way to handle this?
import time
import threading
from queue import Queue

NUM_WORKER_THREADS = 8

def worker():
    try:
        while items.qsize() > 0:
            print("{} items left to process".format(items.qsize()))
            item = items.get()
            print("Processing {}".format(item))
            itemrec = getItemRecord(item)  # external call to webservice, ~3 second response
            items.task_done()
    except Exception as inst:
        print("---------------EXCEPTION OCCURRED----------------")
        print(type(inst))
        print(inst.args)
        print(inst)

# start counter to monitor performance
start = time.perf_counter()

items = Queue()

# get the items we need to work on for allocations
searchResults = getSearchResults()  # external call to webservice

# add results of search to a collection
for itemid in searchResults:
    if itemid['recordtype'] == 'inventoryitem':
        items.put(itemid['id'])

for i in range(NUM_WORKER_THREADS):
    try:
        t = threading.Thread(target=worker)
        t.daemon = True
        t.start()
    except Exception as inst:
        print("---------------EXCEPTION OCCURRED----------------")
        print(type(inst))
        print(inst.args)
        print(inst)

items.join()

# print end of execution performance counter
print('time:', time.perf_counter() - start)
I would use a sentinel to tell the workers to shut down when there are no more work items to process, rather than relying on the Queue size, which is susceptible to race conditions:
import time
import threading
from queue import Queue

NUM_WORKER_THREADS = 8

def worker():
    for item in iter(items.get, None):
        try:
            print("{} items left to process".format(items.qsize()))
            print("Processing {}".format(item))
        except Exception as inst:
            print("---------------EXCEPTION OCCURRED----------------")
            print(type(inst))
            print(inst.args)
            print(inst)
        finally:
            items.task_done()
    print("Got sentinel, shut down")
    items.task_done()

# start counter to monitor performance
start = time.perf_counter()

items = Queue()

# get the items we need to work on for allocations
searchResults = getSearchResults()  # external call to webservice

# add results of search to a collection
for itemid in searchResults:
    if itemid['recordtype'] == 'inventoryitem':
        items.put(itemid['id'])

for _ in range(NUM_WORKER_THREADS):
    items.put(None)  # Load a sentinel for each worker thread

for i in range(NUM_WORKER_THREADS):
    try:
        t = threading.Thread(target=worker)
        t.daemon = True
        t.start()
    except Exception as inst:
        print("---------------EXCEPTION OCCURRED----------------")
        print(type(inst))
        print(inst.args)
        print(inst)

items.join()

# print end of execution performance counter
print('time:', time.perf_counter() - start)
Also note that you can use the built-in thread pool provided by Python (multiprocessing.dummy.Pool) to do this more elegantly:
import time
from multiprocessing.dummy import Pool  # Thread Pool

NUM_WORKER_THREADS = 8

def worker(item):
    try:
        print("Processing {}".format(item))
        itemrec = getItemRecord(item)  # external call to webservice, ~3 second response
    except Exception as inst:
        print("---------------EXCEPTION OCCURRED----------------")
        print(type(inst))
        print(inst.args)
        print(inst)

# start counter to monitor performance
start = time.perf_counter()

# get the items we need to work on for allocations
searchResults = getSearchResults()  # external call to webservice

pool = Pool(NUM_WORKER_THREADS)
pool.map(worker, [item['id'] for item in searchResults
                  if item['recordtype'] == 'inventoryitem'])
pool.close()
pool.join()

# print end of execution performance counter
print('time:', time.perf_counter() - start)

How to let a Python thread finish gracefully

I'm doing a project involving data collection and logging. I have two threads running, a collection thread and a logging thread, both started in main. I'm trying to allow the program to be terminated gracefully with Ctrl-C.
I'm using a threading.Event to signal the threads to end their respective loops. It works fine for stopping the sim_collectData method, but it doesn't seem to properly stop the logData thread. The "Collection terminated" print statement is never executed, and the program just stalls. (It doesn't end, it just sits there.)
The second while loop in logData is there to make sure everything in the queue is logged. The goal is for Ctrl-C to stop the collection thread immediately, then allow the logging thread to finish emptying the queue, and only then fully terminate the program. (Right now the data is just being printed out - eventually it's going to be logged to a database.)
I don't understand why the second thread never terminates. I'm basing what I've done on this answer: Stopping a thread after a certain amount of time. What am I missing?
def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
    data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    return

def logData(input_queue, stop_event):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    while not stop_event.is_set():
        d = input_queue.get()
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1
    # if the stop event is received and the previous loop terminates,
    # finish logging the rest of the items in the queue.
    print "Collection terminated. Logging remaining data to database..."
    while not input_queue.empty():
        d = input_queue.get()
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1
    return

def main():
    input_queue = Queue.Queue()
    stop_event = threading.Event()  # used to signal termination to the threads

    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."

    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue, stop_event))
    logging_thread.start()
    print "Done."

    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging everything in the queue
        stop_event.set()

main()
The problem is that your logger is waiting on d = input_queue.get() and will not check the event. One solution is to skip the event completely and invent a unique message that tells the logger to stop. When you get a signal, send that message to the queue.
import threading
import Queue
import random
import time

def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
    data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    input_queue.put(None)
    return

def logData(input_queue):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    while True:
        d = input_queue.get()
        if d is None:
            input_queue.task_done()
            return
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1

def main():
    input_queue = Queue.Queue()
    stop_event = threading.Event()  # used to signal termination to the threads

    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."

    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue,))
    logging_thread.start()
    print "Done."

    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging everything in the queue
        stop_event.set()

main()
I'm not an expert in threading, but in your logData function the first d = input_queue.get() is blocking, i.e., if the queue is empty it will sit and wait forever until a queue message is received. This is likely why the logData thread never terminates: it is waiting forever for a queue message.
Refer to the Python docs to change this to a non-blocking queue read: use .get(False) or .get_nowait() - but either will require some exception handling for the case where the queue is empty.
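A minimal sketch of what that non-blocking read could look like in the first loop of logData (my illustration, using the same input_queue and stop_event as the question):

import time
import Queue  # the Python 2 queue module used in the question

while not stop_event.is_set():
    try:
        d = input_queue.get(False)  # non-blocking, same as input_queue.get_nowait()
    except Queue.Empty:
        time.sleep(0.1)  # nothing queued yet; loop back and re-check the stop event
        continue
    if d.startswith("DATA:"):
        print d
    input_queue.task_done()
    n += 1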
You are calling a blocking get on your input_queue with no timeout. In either section of logData, if you call input_queue.get() and the queue is empty, it will block indefinitely, preventing the logging_thread from reaching completion.
To fix, you will want to call input_queue.get_nowait() or pass a timeout to input_queue.get().
Here is my suggestion:
def logData(input_queue, stop_event):
    n = 0
    while not stop_event.is_set():
        try:
            d = input_queue.get_nowait()
            if d.startswith("DATA:"):
                print "LOG: " + d
            n += 1
        except Queue.Empty:
            time.sleep(1)
    return
You are also signalling the threads to terminate, but not waiting for them to do so. Consider doing this in your main function.
try:
    while True:
        time.sleep(10)
except (KeyboardInterrupt, SystemExit):
    stop_event.set()
    collection_thread.join()
    logging_thread.join()
Based on the answer of tdelaney I created an iterator-based approach. The iterator exits when the termination message is encountered. I also added a counter of how many get calls are currently blocking, and a stop method which sends just as many termination messages. To prevent a race condition between incrementing and reading the counter, I set a stopping bit there. Furthermore I don't use None as the termination message, because it cannot necessarily be compared to other data types when using a PriorityQueue.
There are two restrictions that I had no need to eliminate. For one, the stop method first waits until the queue is empty before shutting down the threads. The second restriction is that I did not add any code to make the queue reusable after stop. The latter can probably be added quite easily, while the former requires being careful about concurrency and the context in which the code is used.
You have to decide whether you want stop to also wait for all the termination messages to be consumed. I chose to put the necessary join there, but you may just remove it.
So this is the code:
import threading, queue
from functools import total_ordering

@total_ordering
class Final:
    def __repr__(self):
        return "∞"
    def __lt__(self, other):
        return False
    def __eq__(self, other):
        return isinstance(other, Final)

Infty = Final()

class IterQueue(queue.Queue):
    def __init__(self):
        self.lock = threading.Lock()
        self.stopped = False
        self.getters = 0
        super().__init__()
    def __iter__(self):
        return self
    def get(self):
        raise NotImplementedError("This queue may only be used as an iterator.")
    def __next__(self):
        with self.lock:
            if self.stopped:
                raise StopIteration
            self.getters += 1
        data = super().get()
        if data == Infty:
            self.task_done()
            raise StopIteration
        with self.lock:
            self.getters -= 1
        return data
    def stop(self):
        self.join()
        self.stopped = True
        with self.lock:
            for i in range(self.getters):
                self.put(Infty)
        self.join()

class IterPriorityQueue(IterQueue, queue.PriorityQueue):
    pass
Oh, and I wrote this in Python 3.2, so after backporting,
import threading, Queue
from functools import total_ordering

@total_ordering
class Final:
    def __repr__(self):
        return "Infinity"
    def __lt__(self, other):
        return False
    def __eq__(self, other):
        return isinstance(other, Final)

Infty = Final()

class IterQueue(Queue.Queue, object):
    def __init__(self):
        self.lock = threading.Lock()
        self.stopped = False
        self.getters = 0
        super(IterQueue, self).__init__()
    def __iter__(self):
        return self
    def get(self):
        raise NotImplementedError("This queue may only be used as an iterator.")
    def next(self):
        with self.lock:
            if self.stopped:
                raise StopIteration
            self.getters += 1
        data = super(IterQueue, self).get()
        if data == Infty:
            self.task_done()
            raise StopIteration
        with self.lock:
            self.getters -= 1
        return data
    def stop(self):
        self.join()
        self.stopped = True
        with self.lock:
            for i in range(self.getters):
                self.put(Infty)
        self.join()

class IterPriorityQueue(IterQueue, Queue.PriorityQueue):
    pass
you would use it as
import random
import time

def sim_collectData(input_queue, stop_event):
    ''' this provides some output simulating the serial
    data from the data logging hardware.
    '''
    n = 0
    while not stop_event.is_set():
        input_queue.put("DATA: <here are some random data> " + str(n))
        stop_event.wait(random.randint(0, 5))
        n += 1
    print "Terminating data collection..."
    return

def logData(input_queue):
    n = 0
    # we *don't* want to loop based on queue size because the queue could
    # theoretically be empty while waiting on some data.
    for d in input_queue:
        if d.startswith("DATA:"):
            print d
        input_queue.task_done()
        n += 1

def main():
    input_queue = IterQueue()
    stop_event = threading.Event()  # used to signal termination to the threads

    print "Starting data collection thread...",
    collection_thread = threading.Thread(target=sim_collectData, args=(input_queue, stop_event))
    collection_thread.start()
    print "Done."

    print "Starting logging thread...",
    logging_thread = threading.Thread(target=logData, args=(input_queue,))
    logging_thread.start()
    print "Done."

    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        # stop data collection. Let the logging thread finish logging everything in the queue
        stop_event.set()
        input_queue.stop()

main()
