Pass an object to two threads - Python

I have 3 threads. Thread 1 collects data and returns it:

var1 = Thread1.start()

Threads 2 and 3 use this variable var1 for their routines.
I'm not sure I'm doing this correctly. Sometimes var1 is returned and it's not an empty list; I store it in a variable of each thread and use a list comprehension to extract data. In the debug logs I can see elements that should end up in thread 3, but the debug logger of that thread returns nothing.
In threads 2 and 3 the algorithm is:

def __init__(self):
    self.lock = threading.RLock()

def do_smth2(self, var1):
    self.lock.acquire()
    var1_2 = var1
    self.lock.release()

def do_smth3(self, var1):
    self.lock.acquire()
    var1_3 = var1
    self.lock.release()

and in main:

object = thread1.start()
thread2.start(object)
thread3.start(object)
Threads 2 and 3 run at the same time, and I call time.sleep(3) whenever var1_3 or var1_2 is None (it should be a list) or when len(var1_3) < 0.
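To illustrate the pattern I mean, here is a simplified, runnable sketch (with made-up names, not my real code): thread 1 fills a shared list, and threads 2 and 3 poll it under a lock until it is non-empty.

import threading
import time

shared = []                    # plays the role of var1
lock = threading.RLock()

def collector():
    time.sleep(1)              # simulate slow data collection
    with lock:
        shared.extend([1, 2, 3])

def consumer(name):
    while True:
        with lock:
            snapshot = list(shared)     # copy under the lock
        if snapshot:                    # wait until the list is filled
            print(name, [x * 10 for x in snapshot])
            break
        time.sleep(3)

t1 = threading.Thread(target=collector)
t2 = threading.Thread(target=consumer, args=("thread2",))
t3 = threading.Thread(target=consumer, args=("thread3",))
for t in (t1, t2, t3):
    t.start()
for t in (t1, t2, t3):
    t.join()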
EDIT
class Application:
    def __init__(self):
        self.logger = RootLogger()
        self.logger.set_config(__name__, sys_log)
        self.adapter = Adapter()
        self.transit_listener = TransitListener()

    def run(self):
        # start listeners
        transits_list = self.transit_listener.start()
        self.adapter.start(transits_list)
        # start REST service
        RestWebService().run()
Thread 1:

class TransitListener:
    def __init__(self):
        self.interval = session_interval

    def _transits_data(self):
        # while polling is running change interval after 1st cycle
        while datetime.now() >= session_interval:
            result = self.connector.query(self.statement, fetch=True)
            self.logger.debug(result)
            # store result
            self.transits_queue.put(result)
            self.logger.debug(self.interval)
            time.sleep(5)
            # change interval
            self._interval_granularity()
            self.logger.debug(self.interval)

    def start(self):
        self.worker = Thread(target=self._transits_data)
        self._configure()
        self.logger.info("Starting thread 'transits listener'...")
        try:
            self.worker.start()
            if self.worker.is_alive():
                self.logger.info("Thread 'transits listener' started")
                # return result from queue
                return self.transits_queue.get()
Threads 2 and 3:

class Adapter:
    def __init__(self):
        self.logger = RootLogger()
        self.logger.set_config(name=__name__, logfile=epp_log)
        self.lock = RLock()
        self.threads = []

    def _session_start(self, transits):
        while datetime.now() >= session_interval:
            self.lock.acquire()
            transit_list = transits
            self.lock.release()
            self.logger.debug(f"ENTRIES {transit_list}")

    def _session_stop(self, transits):
        while datetime.now() >= session_interval:
            self.lock.acquire()
            transit_list = transits
            self.lock.release()
            self.logger.debug(f"EXITS {transit_list}")

    def start(self, transits):
        # prepare SQL tables
        # define priority of threads
        # 1st
        session_start_thread = Thread(target=self._session_start, args=(transits,))
        self.threads.append(session_start_thread)
        # 2nd
        session_stop_thread = Thread(target=self._session_stop, args=(transits,))
        self.threads.append(session_stop_thread)
        self.threads[0].start()
        self.threads[1].start()

Related

How can I start and stop a thread with my poor thread class?
It is in a loop, and I want to restart it again at the beginning of the code. How can I do start-stop-restart-stop-restart?
My class:

import threading
import time

class Concur(threading.Thread):
    def __init__(self):
        self.stopped = False
        threading.Thread.__init__(self)

    def run(self):
        i = 0
        while not self.stopped:
            time.sleep(1)
            i = i + 1
In the main code, I want:

inst = Concur()
while condition:
    inst.start()
    # After some operation
    inst.stop()
    # Some other operation
You can't actually stop and then restart a thread, since you can't call its start() method again after its run() method has terminated. However, you can make one pause and then later resume its execution by using a threading.Condition variable to avoid concurrency problems when checking or changing its running state.
threading.Condition objects have an associated threading.Lock object and methods to wait for it to be released, and they will notify any waiting threads when that occurs. Here's an example derived from the code in your question which shows this being done. In the example code I've made the Condition variable a part of the Thread subclass instances to better encapsulate the implementation and avoid needing to introduce additional global variables:
from __future__ import print_function
import threading
import time

class Concur(threading.Thread):
    def __init__(self):
        super(Concur, self).__init__()
        self.iterations = 0
        self.daemon = True  # Allow main to exit even if still running.
        self.paused = True  # Start out paused.
        self.state = threading.Condition()

    def run(self):
        self.resume()
        while True:
            with self.state:
                if self.paused:
                    self.state.wait()  # Block execution until notified.
            # Do stuff...
            time.sleep(.1)
            self.iterations += 1

    def pause(self):
        with self.state:
            self.paused = True  # Block self.

    def resume(self):
        with self.state:
            self.paused = False
            self.state.notify()  # Unblock self if waiting.


class Stopwatch(object):
    """ Simple class to measure elapsed times. """
    def start(self):
        """ Establish reference point for elapsed time measurements. """
        self.start_time = time.time()
        return self

    @property
    def elapsed_time(self):
        """ Seconds since started. """
        try:
            return time.time() - self.start_time
        except AttributeError:  # Wasn't explicitly started.
            self.start_time = time.time()
            return 0


MAX_RUN_TIME = 5  # Seconds.
concur = Concur()
stopwatch = Stopwatch()

print('Running for {} seconds...'.format(MAX_RUN_TIME))
concur.start()
while stopwatch.elapsed_time < MAX_RUN_TIME:
    concur.resume()
    # Can also do other concurrent operations here...
    concur.pause()
    # Do some other stuff...

# Show Concur thread executed.
print('concur.iterations: {}'.format(concur.iterations))
This is David Heffernan's idea fleshed out. The example below runs for 1 second, then stops for 1 second, then runs for 1 second, and so on.
import time
import threading
import datetime as DT
import logging

logger = logging.getLogger(__name__)

def worker(cond):
    i = 0
    while True:
        with cond:
            cond.wait()
        logger.info(i)
        time.sleep(0.01)
        i += 1

logging.basicConfig(level=logging.DEBUG,
                    format='[%(asctime)s %(threadName)s] %(message)s',
                    datefmt='%H:%M:%S')

cond = threading.Condition()
t = threading.Thread(target=worker, args=(cond, ))
t.daemon = True
t.start()

start = DT.datetime.now()
while True:
    now = DT.datetime.now()
    if (now - start).total_seconds() > 60:
        break
    if now.second % 2:
        with cond:
            cond.notify()
The implementation of stop() would look like this:
def stop(self):
    self.stopped = True
If you want to restart, then you can just create a new instance and start that.
while condition:
    inst = Concur()
    inst.start()
    # after some operation
    inst.stop()
    # some other operation
The documentation for Thread makes it clear that the start() method can only be called once for each instance of the class.
If you want to pause and resume a thread, then you'll need to use a condition variable.
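As a quick illustration of that first point (a sketch, not code from the question), calling start() a second time on the same Thread instance raises a RuntimeError:

import threading

t = threading.Thread(target=lambda: None)
t.start()
t.join()

try:
    t.start()              # second start() on the same instance
except RuntimeError as err:
    print(err)             # "threads can only be started once"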

Python function call in thread always returns same value

I'm puzzled as to why a function called in a thread always returns the same value. I've confirmed that the parameters are different for each call. If I call the function after acquiring a lock, then the function returns the correct value. This obviously defeats the purpose of using threads, because then the function is just called sequentially, one thread after another. Here is what I have. The function is called get_related_properties, and I've made a note of it in the code:
class ThreadedGetMultipleRelatedProperties():
    def __init__(self, property_values, **kwargs):
        self.property_values = property_values
        self.kwargs = kwargs
        self.timeout = kwargs.get('timeout', 20)
        self.lock = threading.RLock()

    def get_result_dict(self):
        queue = QueueWithTimeout()
        result_dictionary = {}
        num_threads = len(self.property_values)
        threads = []
        for i in range(num_threads):
            t = GetMultipleRelatedPropertiesThread(queue,
                                                   result_dictionary,
                                                   self.lock)
            t.setDaemon(True)
            try:
                threads.append(t)
                t.start()
            except:
                return {"Error": "Unable to process results at this time."}
        for property_value in self.property_values:
            kwargs_copy = dict.copy(self.kwargs)
            kwargs_copy['property_value'] = property_value
            queue.put(kwargs_copy)
        queue.join_with_timeout(self.timeout)
        # cleanup threads
        for i in range(num_threads):
            queue.put(None)
        for t in threads:
            t.join()
        return result_dictionary
class GetMultipleRelatedPropertiesThread(threading.Thread):
    def __init__(self, queue, result_dictionary, lock):
        threading.Thread.__init__(self)
        self.queue = queue
        self.result_dictionary = result_dictionary
        self.lock = lock

    def run(self):
        from mixpanel_helpers import get_related_properties
        while True:
            kwargs = self.queue.get()
            if kwargs == None:
                break
            current_property_value = kwargs.get('property_value')
            self.lock.acquire()
            # The function call below always returns the same value if called before acquire
            result = get_related_properties(**kwargs)
            try:
                self.result_dictionary[current_property_value] = result
            finally:
                self.lock.release()
            # signals to queue that the job is done
            self.queue.task_done()
Here is get_related_properties, although it makes other calls, so I'm not sure the problem lives in here:
def get_related_properties(property_name,
                           property_value,
                           related_properties,
                           properties={},
                           **kwargs):
    kwargs['exclude_detailed_data'] = True
    properties[property_name] = property_value
    result = get_multiple_mixpanel_results(properties=properties,
                                           filter_on_values=related_properties,
                                           **kwargs)
    result_dictionary = {}
    for related_property in related_properties:
        try:
            # grab the last result here, because it'll more likely have the most up to date properties
            current_result = result[related_property][0]['__results'][0]['label']
        except Exception as e:
            current_result = None
        try:
            related_property = int(related_property)
        except:
            pass
        result_dictionary[related_property] = current_result
    return result_dictionary
As an additional note, I've also tried to copy the function using Python's copy module (both a deep and a shallow copy) and call the copy instead, but neither of those worked.

Thread lock issue: Python barrier pattern?

I have this:
# threads
import thread
import threading
import time

class ThreadTask(threading.Thread):
    def __init__(self, name, delay, callback):
        threading.Thread.__init__(self)
        self.name = name
        self.counter = 0
        self.delay = delay
        self.callback = callback
        self.lock = threading.Lock()

    def run(self):
        while True:
            self.counter += 1
            print 'running ', self.name, self.counter
            time.sleep(self.delay)
            if self.counter % 5 == 0:
                self.callback(self)

class Barrier(object):
    def __init__(self):
        self.locks = []

    def wait_task(self, task):
        print 'lock acquire'
        self.locks.append(task.lock)
        task.lock.acquire(True)
        task.lock.acquire(True)

    def notity_task(self, task):
        print 'release lock'
        for i in self.locks:
            try:
                i.release()
            except Exception, e:
                print 'Error', e.message
        print 'Lock released'
        self.locks = []

try:
    barrier = Barrier()
    task1 = ThreadTask('Task_1', 1, barrier.wait_task)
    task4 = ThreadTask('Task_4', 1, barrier.wait_task)
    task3 = ThreadTask('Task_3', 2, barrier.wait_task)
    task2 = ThreadTask('Task_2', 3, barrier.notity_task)
    task2.start()
    task1.start()
    task3.start()
    task4.start()
except Exception as e:
    raise e

while 1:
    pass
These threads run OK, but only if I put 2 consecutive task.lock.acquire(True) calls; otherwise they do not work: they stop every 10 counts when they need to stop every 5. Does anyone know what is happening?
Thanks
You create a regular, non-reentrant lock here:

self.lock = threading.Lock()

And then try to acquire it twice here:

task.lock.acquire(True)
task.lock.acquire(True)

This is illegal; regular locks cannot be acquired twice by the same thread.
Perhaps you meant to use a threading.RLock()?
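As a small illustration of the difference (a sketch, not the question's code): an RLock counts nested acquires by the owning thread and must be released the same number of times, while a plain Lock simply blocks on the second acquire.

import threading

rlock = threading.RLock()
rlock.acquire()
rlock.acquire()        # OK: RLock is reentrant for the owning thread
rlock.release()
rlock.release()        # one release per acquire

lock = threading.Lock()
lock.acquire()
# lock.acquire()       # would block forever: Lock is not reentrant
lock.release()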

Python: sharing a variable between two threads spawned from multiprocessing.Process

Python 3.1.2
I have a problem with sharing a variable between two threads spawned by multiprocessing.Process. It is a simple bool variable which should determine whether the thread should keep running or stop. Below is simplified code (using the same mechanisms as my original code), shown in three cases:

1. The main class is of threading.Thread type and self.is_running is of bool type [works fine].
2. The main class is of multiprocessing.Process type and self.is_running is of bool type [not working: the child threads have local copies of self.is_running instead of sharing it].
3. The main class is of multiprocessing.Process type and self.is_running is of type multiprocessing.Value("b", True) [works fine].

What I'd like is to understand WHY it works this way and not the other (i.e. why point 2 isn't working as I'm assuming it should).
Testing is done from python's interpreter:
from testclass import *
d = TestClass()
d.start()
d.stop()
Below is the example from point 1:
import threading
import time
import queue
import multiprocessing

class TestClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.q = queue.Queue(10)
        self.is_running = True
        self.sema = threading.Semaphore()

    def isRunning(self):
        self.sema.acquire()
        print("Am I running?", self.is_running)
        z = self.is_running
        self.sema.release()
        return z

    def stop(self):
        self.sema.acquire()
        self.is_running = False
        print("STOPPING")
        self.sema.release()

    def reader(self):
        while self.isRunning():
            print("R] Reading!")
            try:
                data = self.q.get(timeout=1)
            except:
                print("R] NO DATA!")
            else:
                print("R] Read: ", data)

    def writer(self):
        while self.isRunning():
            print("W] Writing!")
            self.q.put(time.time())
            time.sleep(2)

    def run(self):
        tr = threading.Thread(target=self.reader)
        tw = threading.Thread(target=self.writer)
        tr.start()
        tw.start()
        tr.join()
        tw.join()
Example from point 2:
import threading
import time
import queue
import multiprocessing

class Test(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.q = queue.Queue(10)
        self.is_running = True
        self.sema = threading.Semaphore()

    def isRunning(self):
        self.sema.acquire()
        print("Am I running?", self.is_running)
        z = self.is_running
        self.sema.release()
        return z

    def stop(self):
        self.sema.acquire()
        self.is_running = False
        print("STOPPING")
        self.sema.release()

    def reader(self):
        while self.isRunning():
            print("R] Reading!")
            try:
                data = self.q.get(timeout=1)
            except:
                print("R] NO DATA!")
            else:
                print("R] Read: ", data)

    def writer(self):
        while self.isRunning():
            print("W] Writing!")
            self.q.put(time.time())
            time.sleep(2)

    def run(self):
        tr = threading.Thread(target=self.reader)
        tw = threading.Thread(target=self.writer)
        tr.start()
        tw.start()
        tr.join()
        tw.join()
Example from point 3:
import threading
import time
import queue
import multiprocessing

class TestClass(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.q = queue.Queue(10)
        self.is_running = multiprocessing.Value("b", True)
        self.sema = threading.Semaphore()

    def isRunning(self):
        self.sema.acquire()
        print("Am I running?", self.is_running)
        z = self.is_running.value
        self.sema.release()
        return z

    def stop(self):
        self.sema.acquire()
        self.is_running.value = False
        print("STOPPING")
        self.sema.release()

    def reader(self):
        while self.isRunning():
            print("R] Reading!")
            try:
                data = self.q.get(timeout=1)
            except:
                print("R] NO DATA!")
            else:
                print("R] Read: ", data)

    def writer(self):
        while self.isRunning():
            print("W] Writing!")
            self.q.put(time.time())
            time.sleep(2)

    def run(self):
        tr = threading.Thread(target=self.reader)
        tw = threading.Thread(target=self.writer)
        tr.start()
        tw.start()
        tr.join()
        tw.join()
Threads are all part of the same process, so they share memory. Another consequence is that threads cannot be executed at exactly the same time by different CPUs, as a process can only be picked up by one CPU at a time.
Processes have separate memory spaces. One CPU can run one process while, at the same time, another CPU runs the other process. Special constructions are needed to let processes cooperate.
In point 2, both the parent process and the child process have their own copy of is_running. When you call stop() in the parent process, it only modifies is_running in the parent process and not in the child process. The reason multiprocessing.Value works is that its memory is shared between both processes.
If you want a process-aware queue, use multiprocessing.Queue.
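A stripped-down illustration of that difference (a sketch with made-up names, not the question's code): a plain attribute is copied into the child process, while a multiprocessing.Value lives in shared memory, so a change made by the parent is visible to the child.

import multiprocessing
import time

def worker(flag):
    # flag is a multiprocessing.Value, so changes made by the parent are visible here
    while flag.value:
        time.sleep(0.1)
    print("child saw the stop request")

if __name__ == '__main__':
    flag = multiprocessing.Value("b", True)
    p = multiprocessing.Process(target=worker, args=(flag,))
    p.start()
    time.sleep(0.5)
    flag.value = False    # shared memory, so the child's loop exits
    p.join()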

Cancellable threading.Timer in Python

I am trying to write a method that counts down to a given time and, unless a restart command is given, executes the task. But I don't think the Python threading.Timer class allows the timer to be cancelled.
import threading

def countdown(action):
    def printText():
        print 'hello!'
    t = threading.Timer(5.0, printText)
    if action == 'reset':
        t.cancel()
    t.start()
I know the above code is wrong somehow. I would appreciate some kind guidance here.
You would call the cancel method after you start the timer:
import time
import threading

def hello():
    print "hello, world"
    time.sleep(2)

t = threading.Timer(3.0, hello)
t.start()
var = 'something'
if var == 'something':
    t.cancel()
You might consider using a while-loop on a Thread, instead of using a Timer.
Here is an example appropriated from Nikolaus Gradwohl's answer to another question:
import threading
import time

class TimerClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.event = threading.Event()
        self.count = 10

    def run(self):
        while self.count > 0 and not self.event.is_set():
            print self.count
            self.count -= 1
            self.event.wait(1)

    def stop(self):
        self.event.set()

tmr = TimerClass()
tmr.start()
time.sleep(3)
tmr.stop()
I'm not sure if I understand correctly. Do you want to write something like in this example?
>>> import threading
>>> t = None
>>>
>>> def sayHello():
...     global t
...     print "Hello!"
...     t = threading.Timer(0.5, sayHello)
...     t.start()
...
>>> sayHello()
Hello!
Hello!
Hello!
Hello!
Hello!
>>> t.cancel()
>>>
The threading.Timer class does have a cancel method, and although it won't cancel the thread, it will stop the timer from actually firing. What actually happens is that the cancel method sets a threading.Event, and the thread executing the threading.Timer checks that event after it's done waiting and before it actually executes the callback.
That said, timers are usually implemented without using a separate thread for each one. The best way to do it depends on what your program is actually doing (while waiting for this timer), but anything with an event loop, like GUI and network frameworks, has a way to request a timer that is hooked into the event loop.
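For example (a minimal sketch of that behaviour, not code from the question): as long as cancel() runs before the interval has elapsed, the callback never fires.

import threading

def fire():
    print("timer fired")

t = threading.Timer(10.0, fire)
t.start()
t.cancel()    # sets the Timer's internal Event before the 10 s wait ends,
              # so fire() is never called
t.join()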
I'm not sure if this is the best option, but for me it works like this: create each timer with t = timer_mgr(.....), append it to a list with timers.append(t), and then, after all of them are created, you can call:

for tm in timers:  # threading.enumerate():
    print "********", tm.cancel()

My timer_mgr() class is this:
class timer_mgr():
    def __init__(self, st, t, hFunction, id, name):
        self.is_list = (type(st) is list)
        self.st = st
        self.t = t
        self.id = id
        self.hFunction = hFunction
        self.thread = threading.Timer(t, self.handle_function, [id])
        self.thread.name = name

    def handle_function(self, id):
        if self.is_list:
            print "run_at_time:", datetime.now()
            self.hFunction(id)
            dt = schedule_fixed_times(datetime.now(), self.st)
            print "next:", dt
            self.t = (dt - datetime.now()).total_seconds()
        else:
            self.t = self.st
            print "run_every", self.t, datetime.now()
            self.hFunction(id)
        self.thread = threading.Timer(self.t, self.handle_function, [id])
        self.thread.start()

    def start(self):
        self.thread.start()

    def cancel(self):
        self.thread.cancel()
Inspired by the post above, here is a cancellable and resettable timer in Python. It uses a thread.
Features: start, stop, restart, and a callback function.
Input: timeout, sleep_chunk values, and a callback_function.
You can use or inherit this class in any other program, and you can also pass arguments to the callback function.
The timer should also respond in the middle of the interval, not just after the full sleep time has elapsed. So instead of one full sleep it uses small chunks of sleep, checking an event object in a loop.
import threading
import time

class TimerThread(threading.Thread):
    def __init__(self, timeout=3, sleep_chunk=0.25, callback=None, *args):
        threading.Thread.__init__(self)
        self.timeout = timeout
        self.sleep_chunk = sleep_chunk
        if callback == None:
            self.callback = None
        else:
            self.callback = callback
        self.callback_args = args
        self.terminate_event = threading.Event()
        self.start_event = threading.Event()
        self.reset_event = threading.Event()
        self.count = self.timeout / self.sleep_chunk

    def run(self):
        while not self.terminate_event.is_set():
            while self.count > 0 and self.start_event.is_set():
                # print self.count
                # time.sleep(self.sleep_chunk)
                # if self.reset_event.is_set():
                if self.reset_event.wait(self.sleep_chunk):  # wait for a small chunk of timeout
                    self.reset_event.clear()
                    self.count = self.timeout / self.sleep_chunk  # reset
                self.count -= 1
            if self.count <= 0:
                self.start_event.clear()
                # print 'timeout. calling function...'
                self.callback(*self.callback_args)
                self.count = self.timeout / self.sleep_chunk  # reset

    def start_timer(self):
        self.start_event.set()

    def stop_timer(self):
        self.start_event.clear()
        self.count = self.timeout / self.sleep_chunk  # reset

    def restart_timer(self):
        # reset only if timer is running. otherwise start timer afresh
        if self.start_event.is_set():
            self.reset_event.set()
        else:
            self.start_event.set()

    def terminate(self):
        self.terminate_event.set()
#=================================================================

def my_callback_function():
    print 'timeout, do this...'

timeout = 6  # sec
sleep_chunk = .25  # sec

tmr = TimerThread(timeout, sleep_chunk, my_callback_function)
tmr.start()

quit = '0'
while True:
    quit = raw_input("Proceed or quit: ")
    if quit == 'q':
        tmr.terminate()
        tmr.join()
        break
    tmr.start_timer()
    if raw_input("Stop ? : ") == 's':
        tmr.stop_timer()
    if raw_input("Restart ? : ") == 'r':
        tmr.restart_timer()
