Can't access parent's attributes using super initialisation in Python

I have a parent class Gauge, which records video from a web camera (OpenCV) in a separate thread in a while loop. All the frames are saved as a class attribute and I can easily see them inside Gauge. But whenever I try to access Gauge's frames from the child class using inheritance, I get the error AttributeError: 'Video' object has no attribute 'frame'. Here are the snippets:
class Gauge(object):
    def __init__(self):
        self.capture = cv2.VideoCapture(2)
        if self.capture.isOpened():
            print("opened camera")
        self.video_thread = Thread(target=self.update, args=())
        self.video_thread.name = 'video thread'
        self.video_thread.daemon = True
        self.video_thread.start()

    def update(self):
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()


class Video(Gauge):
    def __init__(self, master):
        super(Video, self).__init__()
        # skipping unimportant lines...
        while True:
            image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)  # AttributeError!!
            image = cv2.resize(image, (round(self.master.winfo_width()/2 - 20), round(self.master.winfo_height() - 10)), interpolation=cv2.INTER_AREA)
            image = Image.fromarray(image)
            image = ImageTk.PhotoImage(image)
            self.labelVideo.configure(image=image)
            self.labelVideo.image = image

The problem is most likely that Video.__init__ tries to access self.frame before the thread running Gauge.update has had a chance to create it. You also don't want to be running an infinite loop inside Video.__init__. It's also not clear that Video should be a subclass of Gauge at all, but I would try something like:
from queue import Queue
from threading import Thread

import cv2
from PIL import Image, ImageTk


class Gauge(object):
    def __init__(self, q: Queue):
        self.capture = cv2.VideoCapture(2)
        if self.capture.isOpened():
            print("opened camera")
        self.video_thread = Thread(target=self.update, args=(q,))
        self.video_thread.name = 'video thread'
        self.video_thread.daemon = True
        self.video_thread.start()

    # Stores captures in a queue for someone to consume
    def update(self, q):
        while True:
            if self.capture.isOpened():
                q.put(self.capture.read())


class Video(Gauge):
    def __init__(self, master):
        self.q = Queue()
        self.master = master          # Tk parent widget, used for sizing below
        super(Video, self).__init__(self.q)

    def capture_images(self):
        while True:
            # Wait until a capture is available
            status, frame = self.q.get()
            # Then use the capture
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = cv2.resize(image, (round(self.master.winfo_width()/2 - 20), round(self.master.winfo_height() - 10)), interpolation=cv2.INTER_AREA)
            image = Image.fromarray(image)
            image = ImageTk.PhotoImage(image)
            self.labelVideo.configure(image=image)
            self.labelVideo.image = image


# master and labelVideo come from the GUI setup skipped in the question
v = Video(master)
v.capture_images()
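If you would rather keep the inheritance and the shared self.frame attribute, another option (a sketch of mine, not part of the answer above) is to block until the capture thread has produced the first frame, for example with a threading.Event:

from threading import Event, Thread
import cv2


class Gauge(object):
    def __init__(self):
        self.frame = None
        self.frame_ready = Event()              # set once the first frame exists
        self.capture = cv2.VideoCapture(2)
        self.video_thread = Thread(target=self.update, daemon=True)
        self.video_thread.start()

    def update(self):
        while self.capture.isOpened():
            self.status, self.frame = self.capture.read()
            self.frame_ready.set()              # signal consumers that self.frame is valid


class Video(Gauge):
    def __init__(self, master):
        super(Video, self).__init__()
        self.master = master
        self.frame_ready.wait()                 # don't touch self.frame before it exists
        # ... the GUI loop can now read self.frame safely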

Related

multithreading reading frames from opencv reducing latency

Consider the below script:
import threading

import cv2
import numpy as np


class VideoCaptureThreading:
    def __init__(self, src=0, width=640, height=480):
        self.src = src
        self.cap = cv2.VideoCapture(self.src)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.grabbed, self.frame = self.cap.read()
        self.started = False
        self.read_lock = threading.Lock()

    def set(self, var1, var2):
        self.cap.set(var1, var2)

    def start(self):
        if self.started:
            print('[!] Threaded video capturing has already been started.')
            return None
        self.started = True
        self.thread = threading.Thread(target=self.update, args=())
        self.thread.start()
        return self

    def update(self):
        while self.started:
            grabbed, frame = self.cap.read()
            with self.read_lock:
                self.grabbed = grabbed
                self.frame = frame

    def read(self):
        with self.read_lock:
            if isinstance(self.frame, np.ndarray):
                frame = self.frame.copy()
                grabbed = self.grabbed
            else:
                frame = None
                grabbed = False
        return grabbed, frame

    def stop(self):
        self.started = False
        self.thread.join()

    def get(self, prop):
        with self.read_lock:
            prop = self.cap.get(prop)
        return prop

    def __exit__(self, exec_type, exc_value, traceback):
        self.cap.release()
Suppose in the above script I want to reduce latency when reading frames from a VideoCapture object, so multithreading is used to read and update the frames from the buffer. In the read() method, a copy of the frame is created using copy(), which I feel might be causing the lag, but that is only a guess; any helpful explanations are welcome.
This approach makes sure the frame is the latest one in the stream, in case the stream publishes faster than the buffer is read. If there is an observable latency, it is most probably caused by heavy publisher encoder options or an unsuited listener configuration (e.g. the realtime flag not set). The copy has a cost, but it does not exceed a few milliseconds.
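If you want to confirm that the copy itself is not the bottleneck, a minimal sketch (assuming a 640x480 BGR frame, the default size requested by the class above) is to time frame.copy() directly:

import time

import numpy as np

# Simulated 640x480 BGR frame, matching the default size used above
frame = np.zeros((480, 640, 3), dtype=np.uint8)

start = time.perf_counter()
for _ in range(1000):
    _ = frame.copy()          # same call used inside read()
elapsed_ms = (time.perf_counter() - start) / 1000 * 1000

print("average copy time: {:.3f} ms per frame".format(elapsed_ms))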

How to read multi-camera streams with OpenCV seamlessly?

I went through this beautifully explained Q/A and I want something similar in a much stripped-down version of the code I found there, which includes PyQt4 and qdarkstyle.
My code is as follows:
import pafy
import base64
import zmq
from threading import Thread
import cv2
import time


class IPCamera(object):
    def __init__(self, src=0):
        self.frame = None
        self.status = None
        self.capture = cv2.VideoCapture(src)
        self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
        self.FPS = 1/25
        self.FPS_MS = int(self.FPS * 1000)

        # Start frame retrieval thread
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()
                # self.frame = cv2.resize(self.frame, (640, 480))  # resize the frame
            time.sleep(self.FPS)

    def show_frame(self):
        cv2.imshow('frame', self.frame)
        cv2.waitKey(self.FPS_MS)


if __name__ == '__main__':
    # Sample youtube video starts
    youtube_link = 'https://www.youtube.com/watch?v=QgaUKlAuqn8'
    vPafy = pafy.new(youtube_link)
    play = vPafy.getbest(preftype="mp4")
    src = play.url
    # Sample youtube video ends

    # Creating ZMQ context starts
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    footage_socket.connect('tcp://localhost:5555')
    # cv2.namedWindow("Client Started!")
    # Creating ZMQ context ends

    threaded_camera = IPCamera(src)
    while threaded_camera.capture.isOpened():
        try:
            if threaded_camera.status is True:
                encoded, buffer = cv2.imencode('.jpg', threaded_camera.frame)
                jpg_as_text = base64.b64encode(buffer)
                footage_socket.send(jpg_as_text)
            # key = cv2.waitKey(1)
            # threaded_camera.show_frame()
            # if key == 27:  # exit on ESC
            #     break
            # else:
            #     break
        except AttributeError:
            pass
The above code is running fine. I want to implement the code from the Q/A at the aforementioned link with those libraries removed. Can you help me with that?
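As a minimal sketch of reading several streams with the IPCamera class above (no answer is quoted here; the stream links are placeholders and would be replaced with your own camera URLs or device indices):

# Hypothetical stream links; replace with real camera URLs or device indices
stream_links = ['rtsp://camera1/stream', 'rtsp://camera2/stream', 0]

cameras = [IPCamera(link) for link in stream_links]

while True:
    for i, cam in enumerate(cameras):
        # Each IPCamera updates its frame in its own thread; just display the latest one
        if cam.status:
            cv2.imshow('camera {}'.format(i), cam.frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()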

Video streaming getting distorted QImage

I am trying to make a multi-camera video streaming application using OpenCV and have used PyQt for the GUI. The code runs fine in Full HD, but the streams get distorted when I change the resolution of the display. Can anyone tell me why this is happening?
Following are the screenshots for your reference:
Screenshot at 1920x1080 (Full HD)
Screenshot at 1600x900
Screenshot at 1366x768
The code is below:
from PyQt4 import QtCore, QtGui
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils


class CameraWidget(QtGui.QWidget):
    def __init__(self, width, height, stream_link=0, aspect_ratio=False, parent=None, deque_size=1):
        super(CameraWidget, self).__init__(parent)

        # Initialize deque used to store frames read from the stream
        self.deque = deque(maxlen=deque_size)

        self.screen_width = width
        self.screen_height = height
        self.maintain_aspect_ratio = aspect_ratio
        self.camera_stream_link = stream_link

        # Flag to check if camera is valid/working
        self.online = False
        self.capture = None
        self.video_frame = QtGui.QLabel()

        self.load_network_stream()

        # Start background frame grabbing
        self.get_frame_thread = Thread(target=self.get_frame, args=())
        self.get_frame_thread.daemon = True
        self.get_frame_thread.start()

        # Periodically set video frame to display
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.set_frame)
        self.timer.start(.5)

        print('Started camera: {}'.format(self.camera_stream_link))

    def load_network_stream(self):
        """Verifies stream link and open new stream if valid"""
        def load_network_stream_thread():
            if self.verify_network_stream(self.camera_stream_link):
                self.capture = cv2.VideoCapture(self.camera_stream_link)
                self.online = True
        self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
        self.load_stream_thread.daemon = True
        self.load_stream_thread.start()

    def verify_network_stream(self, link):
        """Attempts to receive a frame from given link"""
        cap = cv2.VideoCapture(link)
        if not cap.isOpened():
            return False
        cap.release()
        return True

    def get_frame(self):
        """Reads frame, resizes, and converts image to pixmap"""
        while True:
            try:
                if self.capture.isOpened() and self.online:
                    # Read next frame from stream and insert into deque
                    status, frame = self.capture.read()
                    if status:
                        self.deque.append(frame)
                    else:
                        self.capture.release()
                        self.online = False
                else:
                    # Attempt to reconnect
                    print('attempting to reconnect', self.camera_stream_link)
                    self.load_network_stream()
                    self.spin(2)
                self.spin(.001)
            except AttributeError:
                pass

    def spin(self, seconds):
        """Pause for set amount of seconds, replaces time.sleep so program doesnt stall"""
        time_end = time.time() + seconds
        while time.time() < time_end:
            QtGui.QApplication.processEvents()

    def set_frame(self):
        """Sets pixmap image to video frame"""
        if not self.online:
            self.spin(1)
            return

        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]

            # Keep frame aspect ratio
            if self.maintain_aspect_ratio:
                self.frame = imutils.resize(frame, width=self.screen_width)
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))

            # Convert to pixmap and set to video frame
            self.img = QtGui.QImage(self.frame, self.frame.shape[1], self.frame.shape[0], QtGui.QImage.Format_RGB888).rgbSwapped()
            self.pix = QtGui.QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)

    def get_video_frame(self):
        return self.video_frame
I found the solution: QImage was responsible for the trouble. The fix is to pass the number of bytes per line explicitly when constructing it:
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))

            self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
            h, w, ch = self.frame.shape
            bytesPerLine = ch * w

            # Convert to pixmap and set to video frame
            self.img = QtGui.QImage(self.frame, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
            self.pix = QtGui.QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)
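A likely reason this matters (my reading, not stated in the original answer): without an explicit bytesPerLine, QImage assumes each scanline is 32-bit aligned, so any width where width * 3 is not a multiple of 4 makes the rows drift and the picture shear. Taking the stride straight from the numpy array works just as well, since it already accounts for channel count and any row padding:

            # Equivalent sketch: derive bytesPerLine from the array's own row stride
            h, w, ch = self.frame.shape
            bytesPerLine = self.frame.strides[0]   # bytes between the start of consecutive rows
            self.img = QtGui.QImage(self.frame, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)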

Picamera multiprocess

I'm trying to use a separate process to take pictures. This code was modified by me from a Thread to a multiprocessing.Process (which is why it doesn't work). When I create an instance of this class and then run it with obj.start(), a popup appears saying the program is already running and asking whether I want to stop it. I don't understand what I am doing wrong.
P.S. (the "GO" output is never shown on the screen)
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import multiprocessing


class MultiProcess(multiprocessing.Process):
    def __init__(self):
        print("INIT")
        multiprocessing.Process.__init__(self)
        # initialize the camera and stream
        self.camera = PiCamera()
        self.camera.resolution = (640, 480)
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=(320, 240))
        self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)
        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        p = multiprocessing.Process(target=self.update, args=())
        print("1")
        p.daemon = True
        print("2")
        p.start()
        print("3")
        p.join()
        print("GO")
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            print("NEVER REACH HERE")
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)
            # if the thread indicator variable is set, stop the thread
            # and resource camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
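No answer is reproduced here, but as a hedged sketch of the usual multiprocessing.Process pattern (my assumption, not a tested fix on a Pi): override run() instead of start(), create the camera inside the child process rather than in __init__, and avoid join()-ing inside start(), since join() blocks until update() returns, which would by itself keep "GO" from ever printing. The frame is handed back through a queue:

from picamera.array import PiRGBArray
from picamera import PiCamera
import multiprocessing


class CameraProcess(multiprocessing.Process):
    def __init__(self, frame_queue):
        super().__init__(daemon=True)
        self.frame_queue = frame_queue          # frames are sent back through this queue

    def run(self):
        # runs in the child process; the camera is created here, not in the parent
        camera = PiCamera()
        camera.resolution = (640, 480)
        camera.framerate = 32
        raw_capture = PiRGBArray(camera, size=(640, 480))
        for f in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
            self.frame_queue.put(f.array)
            raw_capture.truncate(0)


if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = CameraProcess(q)
    p.start()                                   # do not join() here, or "GO" is never reached
    frame = q.get()                             # newest frame from the camera process
    print("GO", frame.shape)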

OpenCV-Python not recording with threaded setup for some reason

So the main part of my Bachelor's work is to make an application that records from multiple cameras/devices at once. I started using OpenCV for this reason, and the extremely simple example works great. However, when I tried to implement threading and multiple cameras, for some reason it doesn't save the file. I don't get any error messages; the file simply doesn't appear. I'll post both the script that works (which is basically the same one found in all the online tutorials) and the relevant part of my code that doesn't work. Please let me know if you have any ideas as to why it doesn't work.
Working code:
cap = cv2.VideoCapture(0)

fourcc = cv2.VideoWriter_fourcc(*'XVID')  # Codec
out = cv2.VideoWriter('output.avi', fourcc, 24.0, (640, 480))

while cap.isOpened():
    ret, frame = cap.read()
    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # No idea what this does...
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Problematic code:
class PreviewBox():
    def __init__(self, capture):
        self.selected = False
        self.label = QLabel()
        self.label.mousePressEvent = self.toggleSelected
        self.cap = capture
        self.resizeFactor = 0.5
        self.interactable = True
        self.name = "defaultCam"
        return super().__init__()

    def toggleSelected(self, event):
        if self.interactable == True:
            self.selected = not self.selected
            if self.selected == True:
                self.resizeFactor = 0.25
            else:
                self.resizeFactor = 0.5

#============================================================

class RecordingWindow(QWidget):
    previews = []
    previewThread = None
    recordingThread = None

    def __init__(self, mainWindow):
        super().__init__()
        self.ui = Ui_recordingTutorialWidget()
        self.mainWindow = mainWindow
        self.initUI()
        self.show()

    def initUI(self):
        self.ui.setupUi(self)
        self.ui.gridLayout.setAlignment(Qt.AlignTop)
        self.ui.startRecordingButton.clicked.connect(self.startRecording)
        self.ui.stopRecordingButton.clicked.connect(self.stopRecording)

    def refreshPreviews(self):
        if self.previewThread is not None:
            self.previewThread.kill()
        for previewB in self.previews:
            previewB.interactable = False
            previewB.resizeFactor = 0.5
            self.ui.previewGrid.addWidget(previewB.label)
        self.previewThread = PreviewThread(self.previews)
        self.previewThread.daemon = True
        self.previewThread.start()

    def startRecording(self):
        self.recordingThread = RecordingThread(self.previews)
        self.recordingThread.daemon = True
        self.recordingThread.start()

    def stopRecording(self):
        self.recordingThread.kill()

#------------------------------------------------------------

class RecordingThread(threading.Thread):
    outputs = []
    doRun = True

    def __init__(self, previews):
        super().__init__()
        for cam in previews:
            fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # Codec
            self.outputs.append((cam.cap, cv2.VideoWriter(cam.name, fourcc, 24.0, (int(cam.cap.get(3)), int(cam.cap.get(4))))))  # Output format and name

    def run(self):
        while self.doRun:
            for cap, output in self.outputs:
                if cap.isOpened():
                    ret, frame = cap.read()  # Capture frame (ret is a boolean, True if the frame was read successfully)
                    output.write(frame)
                    print(ret)
        for cap, output in self.outputs:
            output.release()
        cv2.destroyAllWindows()

    def kill(self):
        self.doRun = False
(I know the indentation may be off from pasting; it is correct in the actual editor. I'm just lazy.. :D )
Okay, so surprise surprise, I was stupid. I forgot to add the file extension to the default name, and because of that it wasn't saving anything. Change the line where the VideoWriter is created to the following:
self.outputs.append((cam.cap, cv2.VideoWriter(cam.name+".avi", fourcc, 24.0, (int(cam.cap.get(3)),int(cam.cap.get(4))))))
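As a side note (my addition, not part of the original answer), cv2.VideoWriter fails silently when it cannot create the output file, so checking isOpened() right after constructing it makes this kind of mistake visible immediately:

fourcc = cv2.VideoWriter_fourcc(*'DIVX')
writer = cv2.VideoWriter(cam.name + ".avi", fourcc, 24.0, (int(cam.cap.get(3)), int(cam.cap.get(4))))
if not writer.isOpened():
    # isOpened() is False when the container/codec could not be set up
    print("Could not open output file for", cam.name)
self.outputs.append((cam.cap, writer))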
