I have the following Python script:
#! /usr/bin/python
import os
from gps import *
from time import *
import time
import threading
import sys
gpsd = None # setting the global variable

class GpsPoller(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        global gpsd # bring it in scope
        gpsd = gps(mode=WATCH_ENABLE) # starting the stream of info
        self.current_value = None
        self.running = True # setting the thread running to true

    def run(self):
        global gpsd
        while gpsp.running:
            gpsd.next() # this will continue to loop and grab EACH set of gpsd info to clear the buffer

if __name__ == '__main__':
    gpsp = GpsPoller() # create the thread
    try:
        gpsp.start() # start it up
        while True:
            print gpsd.fix.speed
            time.sleep(1) ## <<<< THIS LINE HERE
    except (KeyboardInterrupt, SystemExit): # when you press ctrl+c
        print "\nKilling Thread..."
        gpsp.running = False
        gpsp.join() # wait for the thread to finish what it's doing
        print "Done.\nExiting."
I'm not very good with Python, unfortunately. The script is supposed to be multi-threaded somehow (but that probably doesn't matter in the scope of this question).
What baffles me is the gpsd.next() line. If I understand it correctly, it is supposed to tell the script that new GPS data have been acquired and are ready to be read.
However, I read the data using the infinite while True loop with a one-second pause via time.sleep(1).
What this does, however, is sometimes echo the same data twice (the sensor hasn't updated the data in the last second). I figure it also skips some sensor data somehow.
Can I somehow change the script to print the current speed not every second, but every time the sensor reports new data? According to the data sheet it should be every second (it's a 1 Hz sensor), but obviously it isn't exactly one second; it varies by milliseconds.
As a generic design rule, you should have one thread for each input channel or, more generally, for each "loop over a blocking call". Blocking means that execution stops at that call until data arrives; e.g., gpsd.next() is such a call.
To synchronize multiple input channels, use a Queue and one extra thread. Each input thread should put its "events" on the (same) queue. The extra thread loops over queue.get() and reacts appropriately.
From this point of view, your script need not be multithreaded, since there is only one input channel, namely the gpsd.next() loop.
Example code:
from gps import *

class GpsPoller(object):
    def __init__(self, action):
        self.gpsd = gps(mode=WATCH_ENABLE) # starting the stream of info
        self.action = action

    def run(self):
        while True:
            self.gpsd.next()
            self.action(self.gpsd)

def myaction(gpsd):
    print gpsd.fix.speed

if __name__ == '__main__':
    gpsp = GpsPoller(myaction)
    gpsp.run() # runs until killed by Ctrl-C
Note how the use of the action callback separates the plumbing from the data evaluation.
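For instance, swapping in a different callback changes what gets reported without touching the poller itself. A minimal sketch (position_action is a made-up name; latitude and longitude are standard attributes of the gps fix object, but treat this as an illustration, not the author's code):

def position_action(gpsd):
    # hypothetical alternative callback: report position instead of speed
    print gpsd.fix.latitude, gpsd.fix.longitude

gpsp = GpsPoller(position_action)
gpsp.run()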
To embed the poller into a script doing other stuff (i.e. handling other threads as well), use the queue approach. Example code, building on the GpsPoller class:
from threading import Thread
from Queue import Queue

class GpsThread(object):
    def __init__(self, valuefunc, queue):
        self.valuefunc = valuefunc
        self.queue = queue
        self.poller = GpsPoller(self.on_value)

    def start(self):
        self.t = Thread(target=self.poller.run)
        self.t.daemon = True # kill thread when main thread exits
        self.t.start()

    def on_value(self, gpsd):
        # note that we extract the value right here.
        # Otherwise it could change while the event is in the queue.
        self.queue.put(('gps', self.valuefunc(gpsd)))

def main():
    q = Queue()
    gt = GpsThread(
        valuefunc=lambda gpsd: gpsd.fix.speed,
        queue=q
    )
    print 'press Ctrl-C to stop.'
    gt.start()
    while True:
        # blocks while q is empty.
        source, data = q.get()
        if source == 'gps':
            print data
The "action" we give to the GpsPoller says "calculate a value by valuefunc and put it in the queue". The mainloop sits there until a value pops out, then prints it and continues.
It is also straightforward to put other Thread's events on the queue and add the appropriate handling code.
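For example, a second input thread could put its own tagged events on the same queue, and the main loop would dispatch on the tag. A rough sketch (the TimerThread name and the 'timer' tag are made up for illustration, not part of the original code):

from threading import Thread
import time

class TimerThread(object):
    def __init__(self, queue, interval=5.0):
        self.queue = queue
        self.interval = interval

    def start(self):
        t = Thread(target=self.run)
        t.daemon = True
        t.start()

    def run(self):
        while True:
            time.sleep(self.interval)
            self.queue.put(('timer', None))

# in main(), after gt.start(), the dispatch loop would grow an extra branch:
#     source, data = q.get()
#     if source == 'gps':
#         print data
#     elif source == 'timer':
#         print 'timer tick'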
I see two options here:
GpsPoller will check if the data changed and set a flag.
GpsPoller will check if the data changed and put the new data in the queue.
Option #1:
is_speed_changed = False  # module-level flag

def run(self):
    global gpsd, is_speed_changed
    while gpsp.running:
        prev_speed = gpsd.fix.speed
        gpsd.next()
        if prev_speed != gpsd.fix.speed:
            is_speed_changed = True # raising the flag

while True:
    if is_speed_changed:
        print gpsd.fix.speed
        is_speed_changed = False
Option #2 (I prefer this one since it protects us from race conditions):
import Queue

gpsd_queue = Queue.Queue()

def run(self):
    global gpsd
    while gpsp.running:
        prev_speed = gpsd.fix.speed
        gpsd.next()
        curr_speed = gpsd.fix.speed
        if prev_speed != curr_speed:
            gpsd_queue.put(curr_speed) # putting the new speed on the queue

while True:
    # get will block if the queue is empty
    print gpsd_queue.get()
For example, the first script:
from secondScript import Second
---
""
""
""
while True:
    lastResult = <a list>  # --> I need to send this result to the other script
---
My other script:
class Second:
    def __init__(self):
        ""
        ""
        ""
        self.dum = Thread(target=self.func1)
        self.dum.daemon = True
        self.dum.start()
        self.tis = Thread(target=self.func2, args=<a list>)
        self.tis.daemon = True
        self.tis.start()

    def func1(self):
        while True:
            ""
            ""
            ""

    def func2(self, lastResult):
        while True:
            print(lastResult)
To summarize, I want to send the value found in the first script to an infinitely looping thread function in script 2. I can't import the first script into the second, because I am also getting other values from script 2 into script 1.
Edit:
We can think of it like this: part of my program is already running, say getting real-time images from the camera. While that code runs, it also generates a number value continuously and uninterruptedly. All of these operations happen in the first file. While the first file keeps working, it needs to continuously send this number to the second file. In the second file, two different infinite-loop functions run at the same time. In the first function, data from the Arduino is read continuously and uninterruptedly. The second function should print the number coming from the first file. So there is really nothing I can change in the first file; I generate the number value there and need to send it to the second file somehow. I'm not sure how to edit the code you wrote. I can't use sleep or any interrupt method, because in the first file the camera must work without interruption.
OK, this answers your question, but I changed the structure a bit. I would do it as follows:
EDIT: If you have streams of continuous data like the camera feed you mentioned, you can use a Queue with a pattern like this (you don't really need the Second class in this case; you can implement CameraFeed and CameraDataConsumer as separate classes).
If the dum thread does not need to send data to the tis thread, you can use the send_data method of tis to feed it data from the main function and remove the queue from CameraFeed (see the short usage sketch after the code below).
from threading import Event, Thread
from queue import Queue, Full
import time

class CameraFeed(Thread):
    def __init__(self, queue):
        super().__init__()
        # This event is used to stop the thread.
        # Initially it is unset (False).
        self._stopped_event = Event()
        self._queue = queue
        self._data = []

    def run(self):
        # sample_data is for demo purposes; remove it and
        # fetch data however you do it
        sample_data = iter(range(10**10))
        # Loop as long as the stopped event is not set
        while not self._stopped_event.is_set():
            # Get data from camera; this is mock data
            data = next(sample_data)
            # Put data in the list of data to be sent
            self._data.append(data)
            # Get the next available item from the list
            data = self._data.pop(0)
            try:
                # This tries to put it in the queue;
                # if the queue is at max capacity it raises a Full exception
                self._queue.put_nowait(data)
                print('CameraFeed, sent data:', data)
            except Full:
                # If the exception occurs, put the data back in the list
                self._data.insert(0, data)

    def stop(self):
        # Sets the stopped event, so the thread exits the run loop
        self._stopped_event.set()

class CameraDataConsumer(Thread):
    def __init__(self, queue):
        super().__init__()
        self._stopped_event = Event()
        self._queue = queue

    def run(self):
        while not self._stopped_event.is_set():
            # Waits for data from the queue
            data = self._queue.get(block=True)
            # If data is None then do nothing
            if data is None:
                continue
            print('CameraConsumer, got data:', data)

    def send_data(self, data):
        """Method to send data to this thread, probably from main"""
        self._queue.put(data, block=True)

    def stop(self):
        # Set the stopped event flag
        self._stopped_event.set()
        # Try to put a dummy item on the queue to wake up the thread
        try:
            self._queue.put_nowait(None)
        except Full:
            # If the queue is full, don't do anything; it is probably
            # safe to assume that setting the stop flag is sufficient
            print('Queue is full')

# Create a Queue with capacity 1000
queue = Queue(maxsize=1000)

dum = CameraFeed(queue)
dum.start()

tis = CameraDataConsumer(queue)
tis.start()

# time.sleep is for demo purposes
time.sleep(1)

tis.stop()
dum.stop()

tis.join()
dum.join()
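If the data instead comes from the main thread (the send_data alternative mentioned above), the feed thread can be dropped and the main code pushes values directly. A minimal usage sketch, reusing the Queue import and the CameraDataConsumer class from the code above (the value range is just placeholder data):

# hypothetical: the main thread produces the values itself
queue = Queue(maxsize=1000)
tis = CameraDataConsumer(queue)
tis.start()

for value in range(10):
    tis.send_data(value)   # blocks if the queue is full

tis.stop()
tis.join()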
In the first script:
print(lastResult, end='\n', file=sys.stdout, flush=True)
In the other script, in another thread:
second = Second()
...
for lastResult in sys.stdin:
    lastResult = lastResult[:-1]
    second.func2(lastResult)
...
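This relies on connecting the first script's stdout to the second script's stdin, either with a shell pipe or by letting the second script spawn the first one itself. A sketch, assuming the files are named first.py and second.py (hypothetical names):

# Option A: a shell pipe
#   python first.py | python second.py

# Option B: the second script spawns the first and reads its stdout
import subprocess

proc = subprocess.Popen(['python', 'first.py'],
                        stdout=subprocess.PIPE,
                        universal_newlines=True)
for line in proc.stdout:
    lastResult = line.rstrip('\n')
    # hand lastResult to the consuming thread here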
I am building a watchdog timer that runs another Python program, and if it fails to find a check-in from any of the threads, it shuts down the whole program. This is so it will eventually be able to take control of needed communication ports. The code for the timer is as follows:
from multiprocessing import Process, Queue
from time import sleep
from copy import deepcopy

PATH_TO_FILE = r'.\test_program.py'
WATCHDOG_TIMEOUT = 2

class Watchdog:
    def __init__(self, filepath, timeout):
        self.filepath = filepath
        self.timeout = timeout
        self.threadIdQ = Queue()
        self.knownThreads = {}

    def start(self):
        threadIdQ = self.threadIdQ

        process = Process(target=self._executeFile)
        process.start()

        try:
            while True:
                unaccountedThreads = deepcopy(self.knownThreads)
                # Empty queue since last wake. Add new thread IDs to knownThreads,
                # and account for all known thread IDs in the queue
                while not threadIdQ.empty():
                    threadId = threadIdQ.get()
                    if threadId in self.knownThreads:
                        unaccountedThreads.pop(threadId, None)
                    else:
                        print('New threadId < {} > discovered'.format(threadId))
                        self.knownThreads[threadId] = False

                # If there is a known thread that is unaccounted for, then it has
                # either hung or crashed. Shut everything down.
                if len(unaccountedThreads) > 0:
                    print('The following threads are unaccounted for:\n')
                    for threadId in unaccountedThreads:
                        print(threadId)
                    print('\nShutting down!!!')
                    break
                else:
                    print('No unaccounted threads...')

                sleep(self.timeout)
        # Account for any exceptions thrown in the watchdog timer itself
        except:
            process.terminate()
            raise

        process.terminate()

    def _executeFile(self):
        with open(self.filepath, 'r') as f:
            exec(f.read(), {'wdQueue': self.threadIdQ})

if __name__ == '__main__':
    wd = Watchdog(PATH_TO_FILE, WATCHDOG_TIMEOUT)
    wd.start()
I also have a small program to test the watchdog functionality:
from time import sleep
from threading import Thread
from queue import SimpleQueue

Q_TO_Q_DELAY = 0.013

class QToQ:
    def __init__(self, processQueue, threadQueue):
        self.processQueue = processQueue
        self.threadQueue = threadQueue
        Thread(name='queueToQueue', target=self._run).start()

    def _run(self):
        pQ = self.processQueue
        tQ = self.threadQueue
        while True:
            while not tQ.empty():
                sleep(Q_TO_Q_DELAY)
                pQ.put(tQ.get())

def fastThread(q):
    while True:
        print('Fast thread, checking in!')
        q.put('fastID')
        sleep(0.5)

def slowThread(q):
    while True:
        print('Slow thread, checking in...')
        q.put('slowID')
        sleep(1.5)

def hangThread(q):
    print('Hanging thread, checked in')
    q.put('hangID')
    while True:
        pass

print('Hello! I am a program that spawns threads!\n\n')

threadQ = SimpleQueue()

Thread(name='fastThread', target=fastThread, args=(threadQ,)).start()
Thread(name='slowThread', target=slowThread, args=(threadQ,)).start()
Thread(name='hangThread', target=hangThread, args=(threadQ,)).start()

QToQ(wdQueue, threadQ)
As you can see, I need to have the threads put into a queue.Queue, while a separate object slowly feeds the output of the queue.Queue into the multiprocessing queue. If instead I have the threads put directly into the multiprocessing queue, or do not have the QToQ object sleep in between puts, the multiprocessing queue will lock up, and will appear to always be empty on the watchdog side.
Now, as the multiprocessing queue is supposed to be thread and process safe, I can only assume I have messed something up in the implementation. My solution seems to work, but also feels hacky enough that I feel I should fix it.
I am using Python 3.7.2, if it matters.
I suspect that test_program.py exits.
I changed the last few lines to this:
tq = threadQ
# tq = wdQueue # option to send messages direct to WD
t1 = Thread(name='fastThread', target=fastThread, args=(tq,))
t2 = Thread(name='slowThread', target=slowThread, args=(tq,))
t3 = Thread(name='hangThread', target=hangThread, args=(tq,))
t1.start()
t2.start()
t3.start()
QToQ(wdQueue, threadQ)
print('Joining with threads...')
t1.join()
t2.join()
t3.join()
print('test_program exit')
The calls to join() mean that the test program never exits by itself, since none of the threads ever exit.
So, as is, t3 hangs, the watchdog program detects the unaccounted-for thread, and it stops the test program.
If t3 is removed from the above program, then the other two threads are well behaved and the watchdog program allows the test program to continue indefinitely.
I have a very simple example that prints out names, but the problem is that when I press Ctrl+C, the program doesn't return to the normal command-line prompt:
^CStopping
After that I only see my cursor blinking, and I can't do anything, so I have to close the window and open it up again.
I'm running Ubuntu 12.10.
Here's my code:
import threading
import random
import time
import Queue
import urllib2
import sys

queue = Queue.Queue()
keep_running = True

class MyThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.names = ['Sophia', 'Irina', 'Tanya', 'Cait', 'Jess']

    def run(self):
        while keep_running:
            time.sleep(0.25)
            line = self.names[random.randint(0, len(self.names) - 1)]
            queue.put(line)
            self.queue.task_done()

class Starter():
    def __init__(self):
        self.queue = queue
        t = MyThread(self.queue)
        t.start()
        self.next()

    def next(self):
        while True:
            time.sleep(0.2)
            if not self.queue.empty():
                line = self.queue.get()
                print line, self.queue.qsize()
            else:
                print 'waiting for queue'

def main():
    try:
        Starter()
        queue.join()
    except KeyboardInterrupt, e:
        print 'Stopping'
        keep_running = False
        sys.exit(1)

main()
Your main problem is that you didn't declare keep_running as global, so main is just creating a local variable with the same name.
If you fix that, it will usually exit on some platforms.
If you want it to always exit on all platforms, you need to do two more things:
join the thread that you created.
protect the shared global variable with a Lock or other sync mechanism.
However, a shared global keep_running flag isn't really needed here anyway. You've already got a queue. Just define a special "shutdown" message you can post on the queue, or use closing the queue as the signal to shut down.
While we're at it, unless you're trying to simulate a slow network or something, there is no need for that time.sleep in your code. Just call self.queue.get(timeout=0.2). That way, instead of always taking 0.2 seconds to get each entry, it will take up to 0.2 seconds, but as little as 0 if there's already something there.
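A generic sketch of both ideas combined, a consumer loop that uses a timed get and treats a special sentinel value as the shutdown signal (the SHUTDOWN and consume names are illustrative, not from the original code):

import Queue

SHUTDOWN = object()  # sentinel posted on the queue to request shutdown

def consume(q):
    while True:
        try:
            line = q.get(timeout=0.2)
        except Queue.Empty:
            # nothing arrived within 0.2 s; loop and wait again
            print 'waiting for queue'
            continue
        if line is SHUTDOWN:
            break
        print line, q.qsize()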
Your main thread is stuck in Starter.next. The interrupt is raised there and propagates up to the first line of the try statement, where it is caught, jumping to the except clause before join can be called. Try putting the join call in a finally block (along with the sys.exit), or simply move it into the exception handler.
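A sketch of that rearrangement, keeping the rest of the program unchanged (the global declaration is the fix pointed out in the previous answer; whether join actually returns still depends on the worker honoring keep_running):

def main():
    global keep_running
    try:
        Starter()
    except KeyboardInterrupt:
        print 'Stopping'
        keep_running = False
    finally:
        queue.join()   # now reached even after Ctrl+C
        sys.exit(1)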
I have an FTP function that traces the progress of a running upload, but my understanding of threading is limited and I have been unable to implement a working solution. I'd like to add a GUI progress bar to my current application by using threading. Can someone show me a basic function using asynchronous threads that can be updated from another running thread?
def ftpUploader():
    BLOCKSIZE = 57344 # size 56 kB

    ftp = ftplib.FTP()
    ftp.connect(host)
    ftp.login(login, passwd)
    ftp.voidcmd("TYPE I")

    f = open(zipname, 'rb')
    datasock, esize = ftp.ntransfercmd(
        'STOR %s' % os.path.basename(zipname))
    size = os.stat(zipname)[6]
    bytes_so_far = 0
    print 'started'
    while 1:
        buf = f.read(BLOCKSIZE)
        if not buf:
            break
        datasock.sendall(buf)
        bytes_so_far += len(buf)
        print "\rSent %d of %d bytes %.1f%%\r" % (
            bytes_so_far, size, 100 * bytes_so_far / size)
        sys.stdout.flush()

    datasock.close()
    f.close()
    ftp.voidresp()
    ftp.quit()
    print 'Complete...'
Here's a quick overview of threading, just in case :) I won't go into too much detail on the GUI stuff, other than to say that you should check out wxWidgets. Whenever you do something that takes a long time, like:
from time import sleep

for i in range(5):
    sleep(10)
You'll notice that to the user, the entire block of code seems to take 50 seconds. During those 50 seconds, your application can't do anything like update the interface, so it looks like it's frozen. To solve this problem, we use threading.
Usually there are two parts to this problem: the overall set of things you want to process, and the operation that takes a while that we'd like to chop up. In this case, the overall set is the for loop and the operation we want chopped up is the sleep(10) call.
Here's a quick template for the threading code, based on our previous example. You should be able to work your code into this example.
from threading import Thread
from time import sleep

# Threading.
# The amount of seconds to wait before checking for an unpause condition.
# Sleeping is necessary because if we don't, we'll block the os and make the
# program look like it's frozen.
PAUSE_SLEEP = 5

# The number of iterations we want.
TOTAL_ITERATIONS = 5

class myThread(Thread):
    '''
    A thread used to do some stuff.
    '''

    def __init__(self, gui, otherStuff):
        '''
        Constructor. We pass in a reference to the GUI object we want
        to update here, as well as any other variables we want this
        thread to be aware of.
        '''
        # Construct the parent instance.
        Thread.__init__(self)

        # Store the gui, so that we can update it later.
        self.gui = gui

        # Store any other variables we want this thread to have access to.
        self.myStuff = otherStuff

        # Tracks the paused and stopped states of the thread.
        self.isPaused = False
        self.isStopped = False

    def pause(self):
        '''
        Called to pause the thread.
        '''
        self.isPaused = True

    def unpause(self):
        '''
        Called to unpause the thread.
        '''
        self.isPaused = False

    def stop(self):
        '''
        Called to stop the thread.
        '''
        self.isStopped = True

    def run(self):
        '''
        The main thread code.
        '''
        # The current iteration.
        currentIteration = 0

        # Keep going if the job is active.
        while self.isStopped == False:
            try:
                # Check for a pause.
                if self.isPaused:
                    # Sleep to let the os schedule other tasks.
                    sleep(PAUSE_SLEEP)
                    # Continue with the loop.
                    continue

                # Check to see if we're still processing the set of
                # things we want to do.
                if currentIteration < TOTAL_ITERATIONS:
                    # Do the individual thing we want to do.
                    sleep(10)

                    # Update the count.
                    currentIteration += 1

                    # Update the gui.
                    self.gui.update(currentIteration, TOTAL_ITERATIONS)
                else:
                    # Stop the loop.
                    self.isStopped = True
            except Exception as exception:
                # If anything bad happens, report the error. It won't
                # get written to stderr.
                print exception

                # Stop the loop.
                self.isStopped = True

        # Tell the gui we're done.
        self.gui.stop()
To call this thread, all you have to do is:
aThread = myThread(myGui,myOtherStuff)
aThread.start()
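Pausing, resuming, and stopping the worker then just means calling its methods from the GUI (or any other) thread, for example:

aThread.pause()     # e.g. from a "Pause" button handler
aThread.unpause()   # from a "Resume" button handler
aThread.stop()      # from a "Cancel" button handler
aThread.join()      # optionally, wait for the worker to finish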
I'm writing an application that listens for sound events (using messages passed in with Open Sound Control), and then based on those events pauses or resumes program execution. My structure works most of the time but always bombs out in the main loop, so I'm guessing it's a thread issue. Here's a generic, simplified version of what I'm talking about:
import time, threading

class Loop():
    aborted = False

    def __init__(self):
        message = threading.Thread(target=self.message, args=((0),))
        message.start()

        loop = threading.Thread(target=self.loop)
        loop.start()

    def message(self, val):
        if val > 1:
            if not self.aborted:
                self.aborted = True
                # do some socket communication
            else:
                self.aborted = False
                # do some socket communication

    def loop(self):
        cnt = 0
        while True:
            print cnt
            if self.aborted:
                while self.aborted:
                    print "waiting"
                    time.sleep(.1)
            cnt += 1

class FakeListener():
    def __init__(self, loop):
        self.loop = loop
        listener = threading.Thread(target=self.listener)
        listener.start()

    def listener(self):
        while True:
            loop.message(2)
            time.sleep(1)

if __name__ == '__main__':
    loop = Loop()

    # fake listener standing in for the real OSC event listener
    listener = FakeListener(loop)
Of course, this simple code seems to work great, so it clearly doesn't fully illustrate my real code, but you get the idea. What isn't included here is the fact that each loop pause and resume (by setting aborted=True/False) results in some socket communication, which also involves threads.
In my code, the main loop doesn't always pick up where it left off after a sound event. It will work for a number of events, but then eventually it just doesn't answer.
Any suggestions for how to structure this kind of communication among threads?
UPDATE:
OK, I think I've got it. Here's a modification that seems to work. There is a listener thread that periodically puts a value into a Queue object, and a checker thread that keeps checking the queue for that value; once it sees it, it flips a boolean to the opposite state. That boolean value controls whether the loop thread continues or waits.
I'm not entirely sure what the q.task_done() function is doing here, though.
import time, threading
import Queue

q = Queue.Queue(maxsize=0)

class Loop():
    aborted = False

    def __init__(self):
        checker = threading.Thread(target=self.checker)
        checker.setDaemon(True)
        checker.start()

        loop = threading.Thread(target=self.loop)
        loop.start()

    def checker(self):
        while True:
            if q.get() == 2:
                q.task_done()
                if not self.aborted:
                    self.aborted = True
                else:
                    self.aborted = False

    def loop(self):
        cnt = 0
        while cnt < 40:
            if self.aborted:
                while self.aborted:
                    print "waiting"
                    time.sleep(.1)
            print cnt
            cnt += 1
            time.sleep(.1)

class fakeListener():
    def __init__(self):
        listener = threading.Thread(target=self.listener)
        listener.setDaemon(True)
        listener.start()

    def listener(self):
        while True:
            q.put(2)
            time.sleep(1)

if __name__ == '__main__':
    # fake listener standing in for the real OSC event listener
    listener = fakeListener()
    loop = Loop()
Umm... I don't completely understand your question, but I'll do my best to explain what I think you need to fix your problems.
1) The thread running your Loop.loop function should be set as a daemon thread so that it exits with your main thread (and you don't have to kill the Python process every time you want to shut down your program). To do this, just put loop.setDaemon(True) before you call the thread's start function.
2) The simplest and most fail-proof way to communicate between threads is with a Queue. One thread puts an item in the Queue and another thread takes an item out, does something with it, and then terminates (or gets another job).
In Python, a queue can be anything from a global list to Python's built-in Queue object. I recommend the Python Queue because it is thread-safe and easy to use.
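A minimal producer/consumer sketch with Python 2's Queue, combining both points above (the consumer is a daemon thread, and a None sentinel tells it to stop; all names are illustrative):

import threading
import Queue

q = Queue.Queue()

def consumer():
    while True:
        item = q.get()      # blocks until something is available
        if item is None:    # sentinel: time to stop
            break
        print 'got', item

worker = threading.Thread(target=consumer)
worker.setDaemon(True)      # point 1: exits with the main thread
worker.start()

for i in range(5):
    q.put(i)                # point 2: hand work to the other thread
q.put(None)
worker.join()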