I have made an OpenCV project which processes the input stream from a video and displays the processed output. I have used PyQt buttons to switch from one output to another. My PyQt window covers almost the entire screen, and when I click on my buttons, the OpenCV window remains behind the PyQt window. Also, I have made the main window of PyQt my parent window. How can I bring the OpenCV window on top of the PyQt window? I searched for cvGetWindowHandle(), but didn't find its implementation for Python.
I have used PyQt4 and opencv2, and the PyQt window has not been designed using Qt Designer.
You can always wrap the OpenCV output in a Qt widget...
import cv2
from PyQt4 import QtCore, QtGui


class QtCapture(QtGui.QWidget):
    def __init__(self, *args):
        super(QtGui.QWidget, self).__init__()

        self.fps = 24
        self.cap = cv2.VideoCapture(*args)

        self.video_frame = QtGui.QLabel()
        lay = QtGui.QVBoxLayout()
        lay.setMargin(0)
        lay.addWidget(self.video_frame)
        self.setLayout(lay)

    def setFPS(self, fps):
        self.fps = fps

    def nextFrameSlot(self):
        ret, frame = self.cap.read()
        # OpenCV yields frames in BGR format; convert to RGB for QImage
        frame = cv2.cvtColor(frame, cv2.cv.CV_BGR2RGB)
        img = QtGui.QImage(frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)
        pix = QtGui.QPixmap.fromImage(img)
        self.video_frame.setPixmap(pix)

    def start(self):
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.nextFrameSlot)
        self.timer.start(1000. / self.fps)

    def stop(self):
        self.timer.stop()

    def deleteLater(self):
        self.cap.release()
        super(QtGui.QWidget, self).deleteLater()
...and do with it whatever you will:
class ControlWindow(QtGui.QWidget):
    def __init__(self):
        QtGui.QWidget.__init__(self)

        self.capture = None

        self.start_button = QtGui.QPushButton('Start')
        self.start_button.clicked.connect(self.startCapture)
        self.quit_button = QtGui.QPushButton('End')
        self.quit_button.clicked.connect(self.endCapture)
        self.end_button = QtGui.QPushButton('Stop')

        vbox = QtGui.QVBoxLayout(self)
        vbox.addWidget(self.start_button)
        vbox.addWidget(self.end_button)
        vbox.addWidget(self.quit_button)
        self.setLayout(vbox)
        self.setWindowTitle('Control Panel')
        self.setGeometry(100, 100, 200, 200)
        self.show()

    def startCapture(self):
        if not self.capture:
            self.capture = QtCapture(0)
            self.end_button.clicked.connect(self.capture.stop)
            self.capture.setFPS(30)
            self.capture.setParent(self)
            self.capture.setWindowFlags(QtCore.Qt.Tool)
        self.capture.start()
        self.capture.show()

    def endCapture(self):
        self.capture.deleteLater()
        self.capture = None


if __name__ == '__main__':
    import sys

    app = QtGui.QApplication(sys.argv)
    window = ControlWindow()
    sys.exit(app.exec_())
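As a side note, if upgrading OpenCV is an option, builds from roughly 4.5 onward expose a topmost window property, so the native HighGUI window can simply be pinned above the PyQt window instead of being wrapped. A rough sketch under that assumption; the window name "processed" is only a placeholder and must match whatever you pass to cv2.imshow:

import cv2

WINDOW_NAME = "processed"   # placeholder name; must match the one used in cv2.imshow

cap = cv2.VideoCapture(0)
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
# WND_PROP_TOPMOST is only available in newer OpenCV releases (4.5+)
cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_TOPMOST, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow(WINDOW_NAME, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()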
Related
Although I chose the same camera frame size and widget size, white spaces appear around the window.
How can I remove these white areas?
I never understood why these white parts were appearing.
import sys
import time
import cv2
import numpy as np
from PyQt5 import QtGui
from PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout


class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def __init__(self):
        super().__init__()
        self._run_flag = True

    def run(self):
        # capture from analog camera
        cap = cv2.VideoCapture(0)
        cap.set(3, 720)   # frame width
        cap.set(4, 576)   # frame height
        while self._run_flag:
            now1 = time.time()
            ret, cv_img = cap.read()
            if ret:
                cv_img = cv2.resize(cv_img, (1024, 768))
                self.change_pixmap_signal.emit(cv_img)
            now2 = time.time()
        cap.release()

    def stop(self):
        # stop capture
        self._run_flag = False
        self.wait()


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("window")
        self.disply_width = 1024
        self.display_height = 768
        # create the label that holds the image
        self.image_label.resize(self.disply_width, self.display_height)
        # create a vertical box layout and add the two labels
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        #vbox.addWidget(self.textLabel)
        # set the vbox layout as the widgets layout
        self.setLayout(vbox)

        # create the video capture thread
        self.thread = VideoThread()
        # connect its signal to the update_image slot
        self.thread.change_pixmap_signal.connect(self.update_image)
        # start the thread
        self.thread.start()

    def closeEvent(self, event):
        self.thread.stop()
        event.accept()

    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)

    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    #a.showFullScreen()
    sys.exit(app.exec_())
Use vbox.setContentsMargins(0, 0, 0, 0) to get rid of the white margins.
There are also many more bugs in your code. For example, you should not resize the image label with self.image_label.resize(self.disply_width, self.display_height) (I would expect this to crash, since the label has not been created yet, so I assume you did not show all of your code). You should resize the whole window instead, i.e. self.resize(self.disply_width, self.display_height). It seems layouts are not yet fully clear to you, but this answer is only meant to address your question, not to teach layouts.
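To make the placement concrete, here is a trimmed sketch of the layout setup with the margin call added and the window, rather than the label, being resized (assuming the label is created right here, since that part of the code was not shown):

        self.image_label = QLabel(self)                       # the label must exist before it is used
        vbox = QVBoxLayout()
        vbox.setContentsMargins(0, 0, 0, 0)                   # removes the white border around the video
        vbox.addWidget(self.image_label)
        self.setLayout(vbox)
        self.resize(self.disply_width, self.display_height)   # size the window, not the label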
In my GUI application, I'm displaying a camera stream to the user. Now the thing is that the user can see the stream from only one camera at a time, and to see streams from other cameras he must enter the credentials of the new camera, i.e. username, password and camera IP.
I want to do this using a dialog box. I was able to do that, but every time a new window popped up. I do know how to switch between different cameras using QStackedLayout, but this time I can't use that because the camera objects are created at runtime.
All I want is that on the press of a button a dialog box appears, and the camera is replaced once the credentials are entered.
code:
from PyQt5 import QtCore, QtGui, QtWidgets
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils


class CameraWidget(QtWidgets.QWidget):
    """Independent camera feed
    Uses threading to grab IP camera frames in the background

    @param width - Width of the video frame
    @param height - Height of the video frame
    @param stream_link - IP/RTSP/Webcam link
    @param aspect_ratio - Whether to maintain frame aspect ratio or force into frame
    """

    def __init__(self, username, password, camera_ip, width=0, height=0, stream_link=0, aspect_ratio=False, parent=None, deque_size=1):
        super(CameraWidget, self).__init__(parent)

        # Initialize deque used to store frames read from the stream
        self.deque = deque(maxlen=deque_size)

        # Slight offset is needed since PyQt layouts have a built in padding
        # So add offset to counter the padding
        self.screen_width = 640
        self.screen_height = 480
        self.maintain_aspect_ratio = aspect_ratio

        self.camera_stream_link = 'rtsp://{}:{}@{}/Streaming/Channels/2'.format(username, password, camera_ip)

        # Flag to check if camera is valid/working
        self.online = False
        self.capture = None
        self.video_frame = QtWidgets.QLabel()

        self.load_network_stream()

        # Start background frame grabbing
        self.get_frame_thread = Thread(target=self.get_frame, args=())
        self.get_frame_thread.daemon = True
        self.get_frame_thread.start()

        # Periodically set video frame to display
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.set_frame)
        self.timer.start(.5)

        print('Started camera: {}'.format(self.camera_stream_link))

    def load_network_stream(self):
        """Verifies stream link and open new stream if valid"""

        def load_network_stream_thread():
            if self.verify_network_stream(self.camera_stream_link):
                self.capture = cv2.VideoCapture(self.camera_stream_link)
                self.online = True
        self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
        self.load_stream_thread.daemon = True
        self.load_stream_thread.start()

    def verify_network_stream(self, link):
        """Attempts to receive a frame from given link"""
        cap = cv2.VideoCapture(link)
        if not cap.isOpened():
            return False
        cap.release()
        return True

    def get_frame(self):
        """Reads frame, resizes, and converts image to pixmap"""
        while True:
            try:
                if self.capture.isOpened() and self.online:
                    # Read next frame from stream and insert into deque
                    status, frame = self.capture.read()
                    if status:
                        self.deque.append(frame)
                    else:
                        self.capture.release()
                        self.online = False
                else:
                    # Attempt to reconnect
                    print('attempting to reconnect', self.camera_stream_link)
                    self.load_network_stream()
                    self.spin(2)
                self.spin(.001)
            except AttributeError:
                pass

    def spin(self, seconds):
        """Pause for set amount of seconds, replaces time.sleep so program doesn't stall"""
        time_end = time.time() + seconds
        while time.time() < time_end:
            QtWidgets.QApplication.processEvents()

    def set_frame(self):
        """Sets pixmap image to video frame"""
        if not self.online:
            self.spin(1)
            return

        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]

            # Keep frame aspect ratio
            if self.maintain_aspect_ratio:
                self.frame = imutils.resize(frame, width=self.screen_width)
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))

            self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
            h, w, ch = self.frame.shape
            bytesPerLine = ch * w

            # Convert to pixmap and set to video frame
            self.img = QtGui.QImage(self.frame, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
            self.pix = QtGui.QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)

    def get_video_frame(self):
        return self.video_frame


class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, username, password, camera_ip, parent=None):
        super(MainWindow, self).__init__(parent)

        # Top frame
        self.top_frame = QtWidgets.QFrame()
        self.top_frame.setStyleSheet("background-color: rgb(153, 187, 255)")

        self.camera = CameraWidget(username, password, camera_ip)
        self.top_layout = QtWidgets.QHBoxLayout()
        self.top_layout.addWidget(self.camera.get_video_frame())
        self.top_frame.setLayout(self.top_layout)

        # Bottom frame
        self.btm_frame = QtWidgets.QFrame()
        self.btm_frame.setStyleSheet("background-color: rgb(208, 208, 225)")

        self.button = QtWidgets.QPushButton('Change Camera')
        self.button.clicked.connect(self.onClick)
        self.btm_layout = QtWidgets.QHBoxLayout()
        self.btm_layout.addStretch()
        self.btm_layout.addWidget(self.button)
        self.btm_layout.setContentsMargins(5, 5, 5, 5)
        self.btm_frame.setLayout(self.btm_layout)

        self.widget = QtWidgets.QWidget()
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(self.top_frame, 20)
        self.layout.addWidget(self.btm_frame, 1)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.widget.setLayout(self.layout)

        self.setCentralWidget(self.widget)

    def onClick(self):
        """
        I want this function to open a dialog box
        asking user to enter new cameras credentials
        and display it.
        """


if __name__ == '__main__':

    # Create main application window
    app = QtWidgets.QApplication([])
    app.setStyle(QtWidgets.QStyleFactory.create("Fusion"))

    w = MainWindow('admin', 'vaaan#123', '192.168.1.51')
    w.showMaximized()
    sys.exit(app.exec_())
Initial answer
Is this close to what you have in mind?
from PyQt5 import QtCore, QtGui, QtWidgets
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils


class CameraWidget(QtWidgets.QWidget):
    # no change
    ...


class ChangeDialog(QtWidgets.QDialog):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        QBtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
        buttonBox = QtWidgets.QDialogButtonBox(QBtn)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)

        self.layout = QtWidgets.QVBoxLayout()
        self.setLayout(self.layout)

        vlayout = QtWidgets.QVBoxLayout()
        self.usernameEdit = QtWidgets.QLineEdit()
        self.passwordEdit = QtWidgets.QLineEdit()
        self.passwordEdit.setEchoMode(QtWidgets.QLineEdit.Password)
        self.ipAddrEdit = QtWidgets.QLineEdit()
        vlayout.addWidget(self.usernameEdit)
        vlayout.addWidget(self.passwordEdit)
        vlayout.addWidget(self.ipAddrEdit)

        self.layout.addLayout(vlayout)
        self.layout.addWidget(buttonBox)

    @property
    def username(self):
        return self.usernameEdit.text()

    @property
    def password(self):
        return self.passwordEdit.text()

    @property
    def ipAddress(self):
        return self.ipAddrEdit.text()


class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, username, password, camera_ip, parent=None):
        super(MainWindow, self).__init__(parent)

        # Top frame
        self.top_frame = QtWidgets.QFrame()
        self.top_frame.setStyleSheet("background-color: rgb(153, 187, 255)")

        self.camera = CameraWidget(username, password, camera_ip)
        self.top_layout = QtWidgets.QHBoxLayout()
        self.top_layout.addWidget(self.camera.get_video_frame())
        self.top_frame.setLayout(self.top_layout)

        # Bottom frame
        self.btm_frame = QtWidgets.QFrame()
        self.btm_frame.setStyleSheet("background-color: rgb(208, 208, 225)")

        self.button = QtWidgets.QPushButton('Change Camera')
        self.button.clicked.connect(self.onClick)
        self.btm_layout = QtWidgets.QHBoxLayout()
        self.btm_layout.addStretch()
        self.btm_layout.addWidget(self.button)
        self.btm_layout.setContentsMargins(5, 5, 5, 5)
        self.btm_frame.setLayout(self.btm_layout)

        self.widget = QtWidgets.QWidget()
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(self.top_frame, 20)
        self.layout.addWidget(self.btm_frame, 1)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.widget.setLayout(self.layout)

        self.setCentralWidget(self.widget)

        self.changeDialog = ChangeDialog()
        self.changeDialog.accepted.connect(self.changeCamera)

    def changeCamera(self):
        self.camera = CameraWidget(
            self.changeDialog.username,
            self.changeDialog.password,
            self.changeDialog.ipAddress)
        # not sure if this is necessary
        self.top_layout.takeAt(0)
        self.top_layout.addWidget(self.camera.get_video_frame())

    def onClick(self):
        """
        I want this function to open a dialog box
        asking user to enter new cameras credentials
        and display it.
        """
        self.changeDialog.exec()


if __name__ == '__main__':

    # Create main application window
    app = QtWidgets.QApplication([])
    app.setStyle(QtWidgets.QStyleFactory.create("Fusion"))

    w = MainWindow('admin', 'vaaan#123', '192.168.1.51')
    w.showMaximized()
    sys.exit(app.exec_())
Without being able to actually see something it's hard to tell if something is missing, but this should be the right direction.
Answering comments
About mandatory fields
First of all this proposal is very rough.
You should add QLabels before each QLineEdit.
Then you should code some validation logic. You do this by removing the default "OK" button that I put in and adding your own button. When this button is pressed, you check that each input in self (the dialog) is valid.
If this is the case you can call accept(). Otherwise you can use setFocus() on the first input that is invalid.
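A rough sketch of that validation flow, replacing the button box with a plain button (the method name onOk is made up for illustration):

class ChangeDialog(QtWidgets.QDialog):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # line edits as in the proposal above
        self.usernameEdit = QtWidgets.QLineEdit()
        self.passwordEdit = QtWidgets.QLineEdit()
        self.passwordEdit.setEchoMode(QtWidgets.QLineEdit.Password)
        self.ipAddrEdit = QtWidgets.QLineEdit()
        okButton = QtWidgets.QPushButton('OK')
        okButton.clicked.connect(self.onOk)

        layout = QtWidgets.QVBoxLayout(self)
        for w in (self.usernameEdit, self.passwordEdit, self.ipAddrEdit, okButton):
            layout.addWidget(w)

    def onOk(self):
        # accept only when every field is filled in; otherwise focus the first empty one
        for edit in (self.usernameEdit, self.passwordEdit, self.ipAddrEdit):
            if not edit.text():
                edit.setFocus()
                return
        self.accept()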
Display previously entered data
In my proposal I created a dialog that is stored with the MainWindow.
It is never destroyed so all the data is still alive. When you display the dialog for the second time it still holds the previous data.
You can create a new dialog object each time if you prefer, or clear all inputs.
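If you go the clearing route instead, a small sketch of what onClick could do before showing the dialog again:

    def onClick(self):
        # wipe the previously entered credentials before re-opening the dialog
        for edit in (self.changeDialog.usernameEdit,
                     self.changeDialog.passwordEdit,
                     self.changeDialog.ipAddrEdit):
            edit.clear()
        self.changeDialog.exec()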
# Importing necessary libraries, mainly the OpenCV and PyQt libraries
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal


class ShowVideo(QtCore.QObject):
    # initiating the built in camera
    camera_port = -1
    camera = cv2.VideoCapture(camera_port)
    VideoSignal = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self, parent=None):
        super(ShowVideo, self).__init__(parent)

    @QtCore.pyqtSlot()
    def startVideo(self):
        run_video = True
        while run_video:
            ret, image = self.camera.read()
            color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            height, width, _ = color_swapped_image.shape

            qt_image = QtGui.QImage(color_swapped_image.data,
                                    width,
                                    height,
                                    color_swapped_image.strides[0],
                                    QtGui.QImage.Format_RGB888)

            pixmap = QtGui.QPixmap(qt_image)
            qt_image = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
            qt_image = QtGui.QImage(qt_image)
            self.VideoSignal.emit(qt_image)

    @QtCore.pyqtSlot()
    def makeScreenshot(self):
        #cv2.imwrite("test.jpg", self.image)
        print("Screenshot saved")
        #self.qt_image.save('test.jpg')


class ImageViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.image = QtGui.QImage()
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()

    def initUI(self):
        self.setWindowTitle('Test')

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("viewer dropped frame!")

        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)

    thread = QtCore.QThread()
    thread.start()
    vid = ShowVideo()
    vid.moveToThread(thread)

    image_viewer = ImageViewer()
    #image_viewer.resize(200,400)

    vid.VideoSignal.connect(image_viewer.setImage)

    # Button to start the videocapture:
    push_button = QtWidgets.QPushButton('Start')
    push_button.clicked.connect(vid.startVideo)
    push_button2 = QtWidgets.QPushButton('Screenshot')
    push_button2.clicked.connect(vid.makeScreenshot)

    vertical_layout = QtWidgets.QVBoxLayout()
    vertical_layout.addWidget(image_viewer)
    vertical_layout.addWidget(push_button)
    vertical_layout.addWidget(push_button2)

    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(vertical_layout)

    main_window = QtWidgets.QMainWindow()
    main_window.setCentralWidget(layout_widget)
    main_window.resize(640, 480)
    main_window.show()
    sys.exit(app.exec_())
This code shows video from the camera in an endless loop using OpenCV and PyQt5. But how do I take a screenshot without stopping the video? I think the loop needs to be stopped for a moment, the screenshot taken, and the loop run again.
You can use cv2.waitKey() for the same, as shown below:
while run_video:
    ret, image = self.camera.read()
    if cv2.waitKey(10) & 0xFF == ord('s'):
        cv2.imwrite("screenshot.jpg", image)
(I'm guessing that by the term "screenshot", you mean the camera frame, and not the image of the entire screen.)
When you press 's' on the keyboard, it'll perform imwrite.
Note that if you wish to save multiple images, you'd have to vary the filename. The above code will overwrite screenshot.jpg to save only the latest frame.
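Note that cv2.waitKey() only processes key presses while an OpenCV HighGUI window exists and has focus, and this PyQt application never creates one. An alternative that stays entirely inside the Qt event loop, and that the commented-out self.qt_image.save('test.jpg') line in the question already hints at, is to keep the last QImage that reaches the viewer and save it when the screenshot button is pressed. A sketch of that idea; the last_image attribute and the viewer-side makeScreenshot are illustrative additions, not part of the original code:

class ImageViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.image = QtGui.QImage()
        self.last_image = QtGui.QImage()       # most recent frame, kept for screenshots
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("viewer dropped frame!")
        self.last_image = image
        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()

    @QtCore.pyqtSlot()
    def makeScreenshot(self):
        # runs in the GUI thread, so the capture loop keeps going undisturbed
        if not self.last_image.isNull():
            self.last_image.save('test.jpg')
            print("Screenshot saved")

and in the __main__ block, connect the screenshot button to the viewer instead of the video object:

    push_button2.clicked.connect(image_viewer.makeScreenshot)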
I have read all three or four current threads on this subject that are on the internet, and so far none accurately answer the question.
I am fairly new to wxPython, although I have some experience with FLTK. I am new to OpenCV.
I am attempting to capture an image from a webcam with openCV and paint that image into wxPython. I have had limited success (I can get an image and paint it, but it's faint and not aligned properly). I can confirm that my webcam and openCV are working on their own, because sample code like this works as expected.
Here is an example of my latest effort, which I've cobbled together from the internet and my own efforts with opencv2.
import wx
import cv2


class viewWindow(wx.Frame):
    imgSizer = (480, 360)

    def __init__(self, parent, title="View Window"):
        super(viewWindow, self).__init__(parent)

        self.pnl = wx.Panel(self)
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.image = wx.EmptyImage(self.imgSizer[0], self.imgSizer[1])
        self.imageBit = wx.BitmapFromImage(self.image)
        self.staticBit = wx.StaticBitmap(self.pnl, wx.ID_ANY,
                                         self.imageBit)
        self.vbox.Add(self.staticBit)
        self.pnl.SetSizer(self.vbox)

        self.timex = wx.Timer(self, wx.ID_OK)
        self.timex.Start(1000/12)
        self.Bind(wx.EVT_TIMER, self.redraw, self.timex)
        self.capture = cv2.VideoCapture(0)
        self.SetSize(self.imgSizer)
        self.Show()

    def redraw(self, e):
        ret, frame = self.capture.read()
        #print('tick')
        self.imageBit = wx.BitmapFromImage(self.image)
        self.staticBit = wx.StaticBitmap(self.pnl,
                                         wx.ID_ANY, self.imageBit)
        self.Refresh()


def main():
    app = wx.PySimpleApp()
    frame = viewWindow(None)
    frame.Center()
    frame.Show()
    app.MainLoop()


if __name__ == '__main__':
    main()
OpenCV is not a hard requirement; I'm open to other options as long as the solution is cross-platform. (If I'm not mistaken, GStreamer is not cross-platform, and PyGame has been difficult to embed in wxPython, but I'm open to ideas.)
wxPython is a hard requirement.
If OpenCV is not a hard requirement,
try libvlc and its Python bindings:
https://wiki.videolan.org/Python_bindings
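For reference, a minimal sketch of that idea with the python-vlc bindings rendering into a wx.Panel; "video.mp4" is only a placeholder source, and the handle call differs per platform:

import sys
import wx
import vlc   # pip install python-vlc


class PlayerFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, title="libvlc in wxPython", size=(640, 480))
        panel = wx.Panel(self)

        self.instance = vlc.Instance()
        self.player = self.instance.media_player_new()
        self.player.set_media(self.instance.media_new("video.mp4"))  # placeholder source

        # tell libvlc which native window to render into
        handle = panel.GetHandle()
        if sys.platform.startswith("win"):
            self.player.set_hwnd(handle)
        elif sys.platform == "darwin":
            self.player.set_nsobject(handle)
        else:
            self.player.set_xwindow(handle)

        self.player.play()


if __name__ == "__main__":
    app = wx.App()
    PlayerFrame().Show()
    app.MainLoop()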
You can try this modified code; it should show the webcam image from opencv2 inside your wxPython environment:
import wx
import cv2


class viewWindow(wx.Frame):
    def __init__(self, parent, title="View Window"):
        # super(viewWindow,self).__init__(parent)
        wx.Frame.__init__(self, parent)

        self.imgSizer = (480, 360)
        self.pnl = wx.Panel(self)
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.image = wx.EmptyImage(self.imgSizer[0], self.imgSizer[1])
        self.imageBit = wx.BitmapFromImage(self.image)
        self.staticBit = wx.StaticBitmap(self.pnl, wx.ID_ANY, self.imageBit)
        self.vbox.Add(self.staticBit)

        self.capture = cv2.VideoCapture(0)
        ret, self.frame = self.capture.read()
        if ret:
            self.height, self.width = self.frame.shape[:2]
            self.bmp = wx.BitmapFromBuffer(self.width, self.height, self.frame)

            self.timex = wx.Timer(self)
            self.timex.Start(1000./24)
            self.Bind(wx.EVT_TIMER, self.redraw)
            self.SetSize(self.imgSizer)
        else:
            print("Error no webcam image")
        self.pnl.SetSizer(self.vbox)
        self.vbox.Fit(self)
        self.Show()

    def redraw(self, e):
        ret, self.frame = self.capture.read()
        if ret:
            self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
            self.bmp.CopyFromBuffer(self.frame)
            self.staticBit.SetBitmap(self.bmp)
        self.Refresh()


def main():
    app = wx.PySimpleApp()
    frame = viewWindow(None)
    frame.Center()
    frame.Show()
    app.MainLoop()


if __name__ == '__main__':
    main()
I'm integrating openCV 3.0 with Qt5 in Python 3.4.3 using pyqt5. I've been trying to build an app to process videos from files, but ran into some trouble with pyqt. Specifically, I will be loading videos through a file dialog multiple times and these videos will not be the same size. Therefore, I want the main window in my app to wrap/expand to the size of the video being played.
Below is a simplified version of my code with the three core classes for showing the video: one for the main window, one for a frame viewer widget that shows each video frame in the GUI, and one for a video processor that reads and processes the video through OpenCV, transforms it to a QImage, and sends it to the viewer.
import sys
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets


class videoProcessor(QtCore.QObject):
    filename = None
    cap = None
    videoSignal = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self):
        super().__init__()
        self.filename = "test.mp4"

    @QtCore.pyqtSlot()
    def runVideoProcessor(self):
        self.cap = cv2.VideoCapture(self.filename)
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if ret:
                outimg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                imgh, imgw, bytesPerComponent = outimg.shape
                bytesPerLine = bytesPerComponent * imgw
                qtimg = QtGui.QImage(outimg.data, imgw, imgh, bytesPerLine, QtGui.QImage.Format_RGB888)
                self.videoSignal.emit(qtimg)
            else:
                break
        self.cap.release()


class frameViewer(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        self.image = QtGui.QImage()
        self.imageAvailable = False

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()
        painter.end()

    @QtCore.pyqtSlot(QtGui.QImage)
    def setFrame(self, image):
        self.image = image
        self.setFixedSize(self.image.size())
        self.repaint()


class mainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.fview = frameViewer()
        self.vproc = videoProcessor()
        self.vproc.videoSignal.connect(self.fview.setFrame)

        self.startButton = QtWidgets.QPushButton("Start")
        self.startButton.clicked.connect(self.vproc.runVideoProcessor)

        self.mainLayout = QtWidgets.QVBoxLayout()
        self.mainLayout.addWidget(self.fview)
        self.mainLayout.addWidget(self.startButton)

        self.mainWidget = QtWidgets.QWidget()
        self.mainWidget.setLayout(self.mainLayout)
        self.mainWidget.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        self.setCentralWidget(self.mainWidget)

        self.statusBar().showMessage('Ready')
        self.setGeometry(50, 50, 300, 300)
        self.setWindowTitle('OpenCV PyQt Test')
        self.show()


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    mw = mainWindow()
    sys.exit(app.exec_())
So far, the program can run videos, but there are two main problems:
1. The window does not adjust to the size of the video frame until the end of the video. However, any repeat playback of the same video is shown at the correct size.
2. If I don't set self.image = QtGui.QImage() in paintEvent after drawing the image, the program crashes. However, if I put that line in, then at the end of the video the window goes blank, because an empty QImage is drawn in the last frame's place whenever the window is updated.
Any ideas on how to solve these issues? Thank you.
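Two observations that may help, sketched below as a guess rather than a verified fix. First, a QImage built on top of a numpy buffer does not own that memory, so once outimg is garbage-collected any later repaint reads freed data, which is probably why clearing self.image after painting avoids the crash; taking a copy in setFrame lets paintEvent keep the image alive instead. Second, runVideoProcessor loops in the GUI thread, so resize and layout events are only processed once it returns; calling QtWidgets.QApplication.processEvents() inside that loop (or moving the processor to a QThread) should let the window wrap to the frame size right away. A sketch of the viewer side under those assumptions:

class frameViewer(QtWidgets.QWidget):
    # __init__ unchanged

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)   # the image is kept, so no need to blank it here
        painter.end()

    @QtCore.pyqtSlot(QtGui.QImage)
    def setFrame(self, image):
        self.image = image.copy()             # deep copy: the QImage now owns its pixels
        self.setFixedSize(self.image.size())
        self.repaint()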