I'm subclassing multiprocessing.Process to create a class that will asynchronously grab images from a camera and push them to some queues for display and saving to disk.
The problem I'm having is that when I issue a stop command using a multiprocessing.Event object that belongs to the Process subclass instance, the process successfully completes the last line of the run function, but then it doesn't die. The process just continues to exist and continues to return True from is_alive(). I don't understand how this could be possible. What would cause a process to complete its run function but not die?
Maddeningly, when I separate this object from the larger context I'm using it in (which includes several other Process subclasses running simultaneously), I can't reproduce the problem. That tends to make me think it has something to do with the rest of the code, but I don't understand how that could be: if it executed the last line of the run function, shouldn't it die regardless of what else is going on? I must be misunderstanding something about how a Process object works.
Here's the code below. When I run it, I see the message "Video acquire process STOPPED" printed out, but the process doesn't die.
import multiprocessing as mp
import time
import PySpin

class VideoAcquirer(mp.Process):
def __init__(self, camSerial, imageQueue, monitorImageQueue, acquireSettings={}, monitorFrameRate=15):
mp.Process.__init__(self, daemon=True)
self.camSerial = camSerial
self.acquireSettings = acquireSettings
self.imageQueue = imageQueue
self.monitorImageQueue = monitorImageQueue
        self.monitorFrameRate = monitorFrameRate
self.stop = mp.Event()
def stopProcess(self):
print('Stopping video acquire process')
self.stop.set()
def run(self):
system = PySpin.System.GetInstance()
camList = system.GetCameras()
cam = camList.GetBySerial(self.camSerial)
cam.Init()
nodemap = cam.GetNodeMap()
setCameraAttributes(nodemap, self.acquireSettings)
cam.BeginAcquisition()
monitorFramePeriod = 1.0/self.monitorFrameRate
print("Video monitor frame period:", monitorFramePeriod)
lastTime = time.time()
k = 0
im = imp = imageResult = None
print("Image acquisition begins now!")
while not self.stop.is_set():
try:
# Retrieve next received image
print(1)
imageResult = cam.GetNextImage(100) # Timeout of 100 ms to allow for stopping process
print(2)
# Ensure image completion
if imageResult.IsIncomplete():
print('Image incomplete with image status %d...' % imageResult.GetImageStatus())
else:
# Print image information; height and width recorded in pixels
width = imageResult.GetWidth()
height = imageResult.GetHeight()
k = k + 1
print('Grabbed Image %d, width = %d, height = %d' % (k, width, height))
im = imageResult.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
imp = PickleableImage(im.GetWidth(), im.GetHeight(), 0, 0, im.GetPixelFormat(), im.GetData())
self.imageQueue.put(imp)
# Put the occasional image in the monitor queue for the UI
thisTime = time.time()
if (thisTime - lastTime) >= monitorFramePeriod:
# print("Sent frame for monitoring")
self.monitorImageQueue.put((self.camSerial, imp))
lastTime = thisTime
imageResult.Release()
print(3)
except PySpin.SpinnakerException as ex:
pass # Hopefully this is just because there was no image in camera buffer
# print('Error: %s' % ex)
# traceback.print_exc()
# return False
# Send stop signal to write process
print(4)
self.imageQueue.put(None)
camList.Clear()
cam.EndAcquisition()
cam.DeInit()
print(5)
del cam
system.ReleaseInstance()
del nodemap
del imageResult
del im
del imp
del camList
del system
print("Video acquire process STOPPED")
I start the process from a tkinter GUI thread roughly like this:
import multiprocessing as mp
camSerial = '2318921'
queue = mp.Queue()
videoMonitorQueue = mp.Queue()
acquireSettings = [('AcquisitionMode', 'Continuous'), ('TriggerMode', 'Off'), ('TriggerSource', 'Line0'), ('TriggerMode', 'On')]
v = VideoAcquirer(camSerial, queue, videoMonitorQueue, acquireSettings=acquireSettings, monitorFrameRate=15)
And here's roughly how I stop the process, also from the tkinter GUI thread:
v.stopProcess()
Thanks for your help.
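For reference, the multiprocessing docs warn that a process which has put items on a multiprocessing.Queue will not terminate until the queue's background feeder thread has flushed all buffered items into the underlying pipe. So if nothing is draining imageQueue, run() can complete while is_alive() stays True. A standalone sketch (my own minimal example, no camera required) that reproduces the symptom:
import multiprocessing as mp
import queue
import time

def fill_queue(q):
    # Put far more data than the pipe buffer can hold, then return from run()
    for _ in range(100000):
        q.put('x' * 100)
    print('run() finished')

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=fill_queue, args=(q,), daemon=True)
    p.start()
    time.sleep(5)
    print('alive after run()?', p.is_alive())   # True: feeder thread still blocked
    while p.is_alive():                         # draining the queue lets it exit
        try:
            q.get(timeout=0.1)
        except queue.Empty:
            pass
    print('alive after draining?', p.is_alive())  # False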
Related
Does anyone have a better idea of how to quickly switch between an array of web cameras controlled by a Python OpenCV (cv2) program? I would like there to be no noticeable pause when switching cameras. I am willing to go to other languages, maybe C++.
What I have so far:
#%% import the opencv library
import cv2
from multiprocessing import Queue, Process, Manager
import time
import keyboard
import glob
import threading
import signal
import sys
#%%
baseVidList = glob.glob("/dev/video*")
baseVidList = baseVidList[:-2]
#%%
manager = Manager()
selectedCam = manager.Value('i', 0)
endSignal = manager.Value('i', 0)
qqq = Queue()
vidDict = {}
# define a video capture object
for device in baseVidList:
    vidDict[device] = lambda device=device: cv2.VideoCapture(device)  # bind device now, not at call time
#%%
ejections = []
for device, capDevice in vidDict.items():
vidDict[device] = capDevice()
if vidDict[device].isOpened():
print(f"{device} Sucess")
vidDict[device].set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
else:
print(f"{device} Failed, Ejecting")
vidDict[device].release()
ejections.append(device)
for eject in ejections:
vidDict.pop(eject)
#%%
font = cv2.FONT_HERSHEY_SIMPLEX
#### Functions ####
# Thread class
class VideoCaptureThread(threading.Thread):
def __init__(self, device, camID, endSignal):
super().__init__()
self.camID = camID
self.device = device
self.stopped = False
self.fps = 10
self.endSignal = endSignal
def run(self):
print("RUNNNN", self.endSignal != 1)
while self.endSignal != 1:
start_time = time.time()
print("S cam is:", selectedCam.value)
ret, frame = self.device.read()
if ret and selectedCam.value == self.camID:
# print(self.camID, xW, yH)
cv2.putText(frame,f"{self.camID}",(100,100),font,1,(255,0,0),1)
qqq.put(frame)
# Control the frame rate
sleep_time = 1/self.fps - (time.time() - start_time)
if sleep_time > 0:
time.sleep(sleep_time)
def stop(self):
self.stopped = True
self.device.release()
# Multiprocessing thread launch
def runCamera(device, camID):
# assert cap.isOpened()
# Usage
    capture_thread = VideoCaptureThread(device, camID, endSignal)  # device is an already-opened cv2.VideoCapture
capture_thread.start()
# # To stop the thread
# capture_thread.stop()
# Display Process
def runViewer():
    while endSignal.value != 1:
# print("Qlen: ", qqq.qsize())
        if not qqq.empty():
frame = qqq.get()
fy, fx, _ = frame.shape
frame = cv2.resize(frame,(int(2*fx),int(2*fy)))
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
while qqq.qsize() > 0: # Empty the Q
qqq.get()
# Signal Handler
def signal_handler(sig, arg2):
print("Exiting...")
endSignal.value = 1
sys.exit(0)
#### MAIN ####
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
processes = []
idx = 0
# Setup the cameras
for device, capDevice in vidDict.items():
if capDevice.isOpened():
processes.append(Process(target=runCamera, args=(capDevice,idx)))
idx = idx + 1
else:
print(f"Ejecting: {device}")
numProcesses = len(processes)
print(f"Processes: {numProcesses}")
processes.append(Process(target=runViewer))
# Launch the processes
for process in processes:
process.start()
# Keyboard control
    while endSignal.value != 1:
        if keyboard.is_pressed('d'):  # if key 'd' is pressed
            print('You Pressed d Key!')
            selectedCam.value = (selectedCam.value + 1) % numProcesses
            time.sleep(250/1000)
        if keyboard.is_pressed('a'):  # if key 'a' is pressed
            print('You Pressed a Key!')
            selectedCam.value = (selectedCam.value - 1) % numProcesses
            time.sleep(250/1000)
# Graceful shutdown?
print("START END")
for process in processes:
process.join()
for device, capDevice in vidDict.items():
capDevice.release()
# Destroy all the windows
cv2.destroyAllWindows()
This code seems to support up to 5 cameras without issue. Extending to 10 sort of works, but some of the cameras seem not to send data. The combination of threading and multiprocessing is an attempt to have the next frame ready for immediate addition to the queue, so that there is no pause when switching cameras, as there would be if we had to go through the whole capture setup and read process.
Any thoughts appreciated.
Run each camera capture in a separate thread using multithreading. That way, while the other threads are capturing their next frames, the main thread can continue to process the currently displayed frame - see the sketch below.
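A minimal sketch of that pattern (my own example, assuming camera indices 0 and 1 are valid on your machine): each camera is pre-warmed in its own thread, so switching only changes which thread's latest frame gets displayed.
import threading
import cv2

class CameraThread(threading.Thread):
    """Continuously reads frames so the newest one is always ready."""
    def __init__(self, index):
        super().__init__(daemon=True)
        self.cap = cv2.VideoCapture(index)
        self.frame = None
        self.lock = threading.Lock()
        self.running = True

    def run(self):
        while self.running:
            ret, frame = self.cap.read()
            if ret:
                with self.lock:
                    self.frame = frame

    def latest(self):
        with self.lock:
            return None if self.frame is None else self.frame.copy()

    def stop(self):
        self.running = False
        self.join()
        self.cap.release()

if __name__ == '__main__':
    cams = [CameraThread(0), CameraThread(1)]  # assumed device indices
    for c in cams:
        c.start()
    selected = 0
    while True:
        frame = cams[selected].latest()
        if frame is not None:
            cv2.imshow('feed', frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('d'):  # next camera
            selected = (selected + 1) % len(cams)
        elif key == ord('q'):
            break
    for c in cams:
        c.stop()
    cv2.destroyAllWindows()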
I'm struggling to understand why I can't get a "LIVE" feed from my IP camera.
It appears that there is a buffer, and it causes the frames to build up if they are not being read - and as each iteration of my code takes some time, there is a backlog, and the feed ends up being almost slow motion compared to what's actually happening.
I found the code below, which triggers a thread to read from the camera in a loop to try and avoid this. But now I'm getting a "LIVE" feed for around 5 frames, and then it stalls and shows the same image for another few.
##camera class - this stops the RTSP feed getting caught in the buffer
import time
import threading
from threading import Lock
import cv2

class Camera:
def __init__(self, rtsp_link):
#init last ready and last frame
self.last_frame = None
self.last_ready = None
self.lock = Lock()
#set capture decive
capture = cv2.VideoCapture(rtsp_link,apiPreference=cv2.CAP_FFMPEG)
#set thread to clear buffer
thread = threading.Thread(target=self.rtsp_cam_buffer, args=(capture,), name="rtsp_read_thread")
thread.daemon = True
thread.start()
#delay start of next step to avoid errors
time.sleep(2)
def rtsp_cam_buffer(self, capture):
#loop forever
while True:
with self.lock:
capture.grab()
self.last_ready, self.last_frame = capture.retrieve()
def getFrame(self):
#get last frame
if (self.last_ready is not None) and (self.last_frame is not None):
            return self.last_frame.copy()
else:
return None
What's the correct thing to do in this situation? Is there a way around this?
OR
Should I use something like GStreamer or FFmpeg to get the camera feed? If so, which is better and why? Any advice or pages with Python examples of getting it working? I couldn't find much that made sense to me.
thanks
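For the GStreamer route, the usual approach is to hand cv2.VideoCapture a pipeline string together with the CAP_GSTREAMER flag; this only works if OpenCV was built with GStreamer support. A hedged sketch, with a placeholder URL of my own:
import cv2

# latency=0 asks rtspsrc to keep its jitter buffer minimal;
# appsink drop=true discards stale buffers instead of queueing them
pipeline = ('rtspsrc location=rtsp://user:pass@192.168.0.40/stream latency=0 '
            '! decodebin ! videoconvert ! video/x-raw,format=BGR ! appsink drop=true')
cap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('live', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()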
After searching online through multiple resources, the suggestion of using threads to remove frames from the buffer came up a lot. And although it seemed to work for a while, it caused issues with duplicate frames being displayed, for a reason I could not work out.
I then tried to build OpenCV from source with GStreamer support, but even once it had compiled correctly, it still didn't seem to interface with GStreamer properly.
Eventually I thought the best bet was to go back down the threading approach, but again I couldn't get it working. So I gave multiprocessing a shot.
I wrote the below class to handle the camera connection:
import cv2
import time
import multiprocessing as mp
class Camera():
def __init__(self,rtsp_url):
        #load pipe for data transmission to the process
self.parent_conn, child_conn = mp.Pipe()
#load process
self.p = mp.Process(target=self.update, args=(child_conn,rtsp_url))
#start process
self.p.daemon = True
self.p.start()
def end(self):
#send closure request to process
self.parent_conn.send(2)
def update(self,conn,rtsp_url):
        #load cam into separate process
print("Cam Loading...")
cap = cv2.VideoCapture(rtsp_url,cv2.CAP_FFMPEG)
print("Cam Loaded...")
run = True
while run:
#grab frames from the buffer
cap.grab()
            #receive input data
rec_dat = conn.recv()
if rec_dat == 1:
#if frame requested
ret,frame = cap.read()
conn.send(frame)
elif rec_dat ==2:
#if close requested
cap.release()
run = False
print("Camera Connection Closed")
conn.close()
def get_frame(self,resize=None):
###used to grab frames from the cam connection process
        ##[resize] param : fraction of original size, e.g. 0.65 for a 35% reduction or 1.5 for a 50% increase
#send request
self.parent_conn.send(1)
frame = self.parent_conn.recv()
#reset request
self.parent_conn.send(0)
#resize if needed
        if resize is None:
return frame
else:
return self.rescale_frame(frame,resize)
    def rescale_frame(self, frame, percent=0.65):
        # percent is a scale factor: 0.65 shrinks, 1.5 enlarges
        return cv2.resize(frame, None, fx=percent, fy=percent)
Displaying the frames can be done as below
cam = Camera("rtsp://admin:[somepassword]#192.168.0.40/h264Preview_01_main")
print(f"Camera is alive?: {cam.p.is_alive()}")
while(1):
frame = cam.get_frame(0.65)
cv2.imshow("Feed",frame)
key = cv2.waitKey(1)
if key == 13: #13 is the Enter Key
break
cv2.destroyAllWindows()
cam.end()
This solution has resolved all my issues of buffer lag and repeated frames.
Hopefully it will help anyone else in the same situation.
Lewis's solution was helpful in reducing the lag, but there was still some in my case, and I found this gist, which is a bit faster:
import os
import sys
import time
import threading
import numpy as np
import cv2 as cv
# also acts (partly) like a cv.VideoCapture
class FreshestFrame(threading.Thread):
def __init__(self, capture, name='FreshestFrame'):
self.capture = capture
assert self.capture.isOpened()
# this lets the read() method block until there's a new frame
self.cond = threading.Condition()
# this allows us to stop the thread gracefully
self.running = False
# keeping the newest frame around
self.frame = None
# passing a sequence number allows read() to NOT block
# if the currently available one is exactly the one you ask for
self.latestnum = 0
# this is just for demo purposes
self.callback = None
super().__init__(name=name)
self.start()
def start(self):
self.running = True
super().start()
def release(self, timeout=None):
self.running = False
self.join(timeout=timeout)
self.capture.release()
def run(self):
counter = 0
while self.running:
# block for fresh frame
(rv, img) = self.capture.read()
assert rv
counter += 1
# publish the frame
with self.cond: # lock the condition for this operation
self.frame = img if rv else None
self.latestnum = counter
self.cond.notify_all()
if self.callback:
self.callback(img)
def read(self, wait=True, seqnumber=None, timeout=None):
# with no arguments (wait=True), it always blocks for a fresh frame
# with wait=False it returns the current frame immediately (polling)
# with a seqnumber, it blocks until that frame is available (or no wait at all)
# with timeout argument, may return an earlier frame;
# may even be (0,None) if nothing received yet
with self.cond:
if wait:
if seqnumber is None:
seqnumber = self.latestnum+1
if seqnumber < 1:
seqnumber = 1
rv = self.cond.wait_for(lambda: self.latestnum >= seqnumber, timeout=timeout)
if not rv:
return (self.latestnum, self.frame)
return (self.latestnum, self.frame)
And then you use it like:
# open some camera
cap = cv.VideoCapture('rtsp://URL')
cap.set(cv.CAP_PROP_FPS, 30)
# wrap it
fresh = FreshestFrame(cap)
Use fresh to deal with the open camera from then on, instead of cap.
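For instance, a display loop that always asks for a frame newer than the last one it drew might look like this (a sketch following the demo in the same gist):
cnt = 0
while True:
    # block until a frame newer than the last one drawn arrives
    cnt, frame = fresh.read(seqnumber=cnt + 1)
    cv.imshow('frame', frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
fresh.release()
cv.destroyAllWindows()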
I'm using Python and OpenCV to get video from an RTSP stream. I'm getting single frames from the stream and saving them to the file system.
I wrote a StreamingWorker which handles frame grabbing and saving. Additionally, there is a StreamPool that holds all the streaming objects. I thought that since a StreamingWorker would always be running, there should only be one per core, in order to use as much of the CPU as possible. The StreamPool then provides the VideoCapture objects to the available StreamingWorkers.
The problem is that most of the time the script is running, it is blocking:
import os
import time
import threading
import cv2 as cv
class StreamingWorker(object):
def __init__(self, stream_pool):
self.stream_pool = stream_pool
self.start_loop()
def start_loop(self):
while True:
try:
# getting a stream from the read_strategy
stream_object = self.stream_pool.next()
# getting an image from the stream
_, frame = stream_object['stream'].read()
# saving image to file system
                cv.imwrite(os.path.join('result', stream_object['feed'], '{}.jpg'.format(time.time())), frame)
except ValueError as e:
print('[error] {}'.format(e))
class StreamPool(object):
def __init__(self, streams):
self.streams = [{'feed': stream, 'stream': cv.VideoCapture(stream)} for stream in streams]
self.current_stream = 0
self.lock = threading.RLock()
def next(self):
self.lock.acquire()
if(self.current_stream + 1 >= len(self.streams)):
self.current_stream = 0
else:
self.current_stream += 1
result = self.streams[self.current_stream]
self.lock.release()
return result
def get_cores():
# This function returns the number of available cores
import multiprocessing
return multiprocessing.cpu_count()
def start(stream_pool):
StreamingWorker(stream_pool)
def divide_list(input_list, amount):
# This function divides the whole list into list of lists
result = [[] for _ in range(amount)]
for i in range(len(input_list)):
result[i % len(result)].append(input_list[i])
return result
if __name__ == '__main__':
stream_list = ['rtsp://some/stream1', 'rtsp://some/stream2', 'rtsp://some/stream3']
num_cores = get_cores()
divided_streams = divide_list(stream_list, num_cores)
for streams in divided_streams:
stream_pool = StreamPool(streams)
        thread = threading.Thread(target=start, args=(stream_pool,))
thread.start()
When I thought of this, I didn't take into account that most of the operations will be blocking operations like:
# Getting a frame blocks
_, frame = stream_object['stream'].read()
# Writing to the file system blocks
cv.imwrite(os.path.join('result', stream_object['feed'], '{}.jpg'.format(time.time())), frame)
The problem with spending so much time blocking is that most of the processing power is wasted. I thought of using futures with a ThreadPoolExecutor, but I can't seem to reach my goal of using the maximum number of cores possible. Maybe I'm not setting enough threads.
Is there a standard way of handling blocking operations, in order to make the best use of the cores' processing power? I'm fine having a language-agnostic answer.
I ended up using the ThreadPoolExecutor using the add_done_callback(fn) function.
from concurrent.futures import ThreadPoolExecutor

class StreamingWorker(object):
def __init__(self, stream_pool):
self.stream_pool = stream_pool
self.thread_pool = ThreadPoolExecutor(10)
self.start_loop()
def start_loop(self):
def done(fn):
print('[info] future done')
def save_image(stream):
# getting an image from the stream
_, frame = stream['stream'].read()
# saving image to file system
            cv.imwrite(os.path.join('result', stream['feed'], '{}.jpg'.format(time.time())), frame)
while True:
try:
# getting a stream from the read_strategy
stream_object = self.stream_pool.next()
# Scheduling the process to the thread pool
                self.thread_pool.submit(save_image, stream_object).add_done_callback(done)
except ValueError as e:
print('[error] {}'.format(e))
I didn't actually want to do anything after the future finished, but if I had used result(), then the while True loop would have stopped, which would also defeat the whole purpose of using the thread pool.
Side note: I had to add a threading.RLock() around the call to self.stream_pool.next(), because apparently OpenCV can't handle calls from multiple threads.
I'm having some trouble with Python threads. I'm writing a software package that plots data received from multiple devices. I have a plot thread that plots the data once it has received a set from all devices, and a data retrieval thread for each device. The application plots data continuously (as fast as it can be retrieved from the devices) until the user hits a button. I have a threading.Event(), self.stop_thread, that is checked frequently to back out of the threaded loops. The threads hit the check and break out of the loop, but are still 'running' according to my debugger and threading.active_count(). Does anyone know why this is happening, and how can I get it to stop? I need to know these threads are gone before I move on to another function of the application. The following three methods are where the issues arise.
# initializes startup settings, starts a thread to carry out
# plotting and a separate thread to carry out data retrieval
def start_plot_threads(self):
if not self.abstraction.connected:
self.connect_to_device()
if not self.abstraction.connected:
return
self.stop_thread.clear()
self.pause_thread.clear()
for device in self.devices:
device.pause_thread.clear()
device.stop_thread.clear()
device.change_units.set()
self.presentation.enable_derivative()
self.presentation.show_average_button.SetValue(False)
self.presentation.show_average_button.Disable()
self.abstraction.multi_plot_data = {}
try:
if self.plot_thread.is_alive():
return
except Exception:
pass
self.plot_thread = Thread(target=self.plot_data)
self.plot_thread.daemon = True
self.plot_thread.start()
for device in self.devices:
thread = Thread(target=self.retrieve_data,
kwargs={'device': device},
name="Data Retrieval Thread %s" % device.instr_id)
thread.daemon = True
thread.start()
# waits for plot data to be thrown on a thread safe queue by the data
# retrieval thread and plots it. data comes in as a tuple of the form
# (y_data, label, x_data)
def plot_data(self):
multiplot = False
if len(self.devices) > 1:
multiplot = True
plot_data = []
while not self.stop_thread.is_set():
        try:
            data = self.plot_data_queue.get(timeout=0.5)  # timeout so the stop event is re-checked
        except Empty:
            pass
else:
if multiplot:
scan = {}
scan['y_data'] = [data[0]]
scan['labels'] = [data[1]]
scan['x_data'] = data[2]
plot_data.append(scan)
if len(plot_data) == len(self.devices):
self.presentation.plot_multiline(plot_data, average=False)
self.abstraction.multi_plot_data = plot_data
plot_data = []
else:
self.presentation.plot_signal(data[0], data[1])
# the intent is that the data retrieval thread stays in this loop while
# taking continuous readings
def retrieve_data(self, device):
while True:
if device.stop_thread.is_set():
return
while device.pause_thread.is_set():
if device.stop_thread.is_set():
return
sleep(0.1)
y = self.get_active_signal_data(device)
if not y:
return
self.plot_data_queue.put(
(y, device.name, device.x_data))
self.abstraction.y_data = [y]
try:
self.update_spectrum(device)
except DeviceCommunicationError, data:
self.presentation.give_connection_error(data)
self.presentation.integ_time = device.prev_integ
I apologize for the extra bulk in the methods. They are straight from my code base.
The reason why your threads continue running is unknown from the code shown - device.stop_thread.is_set(): what is doing the setting?
However, you can guarantee that all your threads have stopped by retaining a handle on each thread (appending each thread object to a list); once you have started all your threads, you can then thread.join() them.
import threading

threads = []
for job in batch:
    thr = threading.Thread(target=do_job, args=(job,))
thr.start()
threads.append(thr)
#join all the threads
for thr in threads:
thr.join()
Join will wait for the thread to complete before moving on.
Python Docs:
https://docs.python.org/2/library/threading.html
I've read a lot of posts about using threads, subprocesses, etc. A lot of it seems overcomplicated for what I'm trying to do...
All I want to do is stop executing a function after X amount of time has elapsed.
def big_loop(bob):
x = bob
start = time.time()
while True:
print time.time()-start
This function is an endless loop that never throws any errors or exceptions, period.
I"m not sure the difference between "commands, shells, subprocesses, threads, etc.." and this function, which is why I'm having trouble manipulating subprocesses.
I found this code here, and tried it but as you can see it keeps printing after 10 seconds have elapsed:
import time
import threading
import subprocess as sub
class RunCmd(threading.Thread):
def __init__(self, cmd, timeout):
threading.Thread.__init__(self)
self.cmd = cmd
self.timeout = timeout
def run(self):
self.p = sub.Popen(self.cmd)
self.p.wait()
def Run(self):
self.start()
self.join(self.timeout)
if self.is_alive():
self.p.terminate()
self.join()
def big_loop(bob):
x = bob
start = time.time()
while True:
print time.time()-start
RunCmd(big_loop('jimijojo'), 10).Run() #supposed to quit after 10 seconds, but doesn't
x = raw_input('DONEEEEEEEEEEEE')
What's a simple way this function can be killed? As you can see in my attempt above, it doesn't terminate after 10 seconds and just keeps on going...
***Oh, also: I've read about using signal, but I'm on Windows so I can't use the alarm feature. (Python 2.7)
**Assume the "infinitely running function" can't be manipulated or changed to be non-infinite. If I could change the function, well, I'd just change it to be non-infinite, wouldn't I?
Here are some similar questions, whose code I haven't been able to port over to work with my simple function:
Perhaps you can?
Python: kill or terminate subprocess when timeout
signal.alarm replacement in Windows [Python]
OK, I tried an answer I received, and it works. But how can I use it if I remove the if __name__ == "__main__": statement? When I remove this statement, the loop never ends as it did before.
import multiprocessing
import Queue
import time
def infinite_loop_function(bob):
var = bob
start = time.time()
while True:
time.sleep(1)
print time.time()-start
print 'this statement will never print'
def wrapper(queue, bob):
result = infinite_loop_function(bob)
queue.put(result)
queue.close()
#if __name__ == "__main__":
queue = multiprocessing.Queue(1) # Maximum size is 1
proc = multiprocessing.Process(target=wrapper, args=(queue, 'var'))
proc.start()
# Wait for TIMEOUT seconds
try:
timeout = 10
result = queue.get(True, timeout)
except Queue.Empty:
# Deal with lack of data somehow
result = None
finally:
proc.terminate()
print 'running other code, now that that infinite loop has been defeated!'
print 'bla bla bla'
x = raw_input('done')
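On the edit above: on Windows, multiprocessing starts each child process by importing your module afresh, so any top-level code - including the Process creation itself - runs again in every child unless it is protected by the if __name__ == "__main__": guard. The guard can't simply be removed; if you need the logic callable from elsewhere, move it into a function and invoke that function from inside the guard. A minimal skeleton of that arrangement:
import multiprocessing

def main():
    # the queue / process / timeout logic from above goes here
    pass

if __name__ == "__main__":
    # Required on Windows: child processes re-import this module,
    # and without the guard they would re-run the Process creation.
    main()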
Use the building blocks in the multiprocessing module:
import multiprocessing
import Queue
TIMEOUT = 5
def big_loop(bob):
import time
time.sleep(4)
return bob*2
def wrapper(queue, bob):
result = big_loop(bob)
queue.put(result)
queue.close()
def run_loop_with_timeout():
bob = 21 # Whatever sensible value you need
queue = multiprocessing.Queue(1) # Maximum size is 1
proc = multiprocessing.Process(target=wrapper, args=(queue, bob))
proc.start()
# Wait for TIMEOUT seconds
try:
result = queue.get(True, TIMEOUT)
except Queue.Empty:
# Deal with lack of data somehow
result = None
finally:
proc.terminate()
# Process data here, not in try block above, otherwise your process keeps running
print result
if __name__ == "__main__":
run_loop_with_timeout()
You could also accomplish this with a Pipe/Connection pair, but I'm not familiar with their API. Change the sleep time or TIMEOUT to check the behaviour for either case.
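For completeness, here is a sketch of the same timeout pattern using a Pipe instead of a Queue; Connection.poll(timeout) plays the role of Queue.get(True, TIMEOUT). This is my own variation on the answer above, not tested against the asker's setup:
import multiprocessing

TIMEOUT = 5

def big_loop(bob):
    import time
    time.sleep(4)
    return bob * 2

def pipe_wrapper(conn, bob):
    conn.send(big_loop(bob))
    conn.close()

def run_loop_with_timeout():
    # Pipe(duplex=False) returns (receive-only end, send-only end)
    recv_end, send_end = multiprocessing.Pipe(duplex=False)
    proc = multiprocessing.Process(target=pipe_wrapper, args=(send_end, 21))
    proc.start()
    if recv_end.poll(TIMEOUT):  # block up to TIMEOUT seconds for data
        result = recv_end.recv()
    else:
        result = None           # timed out before any data arrived
    proc.terminate()
    print result

if __name__ == "__main__":
    run_loop_with_timeout()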
There is no straightforward way to kill a function after a certain amount of time without running the function in a separate process. A better approach would probably be to rewrite the function so that it returns after a specified time:
import time
def big_loop(bob, timeout):
x = bob
start = time.time()
end = start + timeout
while time.time() < end:
print time.time() - start
# Do more stuff here as needed
Can't you just return from the loop?
start = time.time()
endt = start + 30
while True:
now = time.time()
if now > endt:
return
else:
        print now - start
import os,signal,time
cpid = os.fork()
if cpid == 0:
while True:
# do stuff
else:
time.sleep(10)
os.kill(cpid, signal.SIGKILL)
You can also check for an event in the loop of a thread, which is more portable and flexible, as it allows reactions other than brute-force killing. However, this approach fails if # do stuff can take a long time (or even waits forever on some event). (Note, too, that os.fork is Unix-only, so the example above won't work on Windows.)
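A sketch of that event-checking approach (my own example): the worker polls a threading.Event between small chunks of work, and the main thread sets the event after the time limit.
import threading
import time

def worker(stop_event):
    start = time.time()
    while not stop_event.is_set():
        # do one small chunk of work per iteration so the event is re-checked
        print time.time() - start
        time.sleep(0.5)

stop_event = threading.Event()
t = threading.Thread(target=worker, args=(stop_event,))
t.start()
time.sleep(10)    # the time limit
stop_event.set()  # ask the worker to stop
t.join()
print 'worker has exited'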