In a QVideoWidget container in PyQt, I need to play a video from the computer and run object detection on it with TensorFlow (OpenCV, cv2). The problem is that when the button is pressed, the video shows only one frame and nothing else. What could be the problem? Made in PyCharm, Python 3.7.
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import os
import cv2
import numpy as np
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QLabel, QVBoxLayout
from PyQt5.QtCore import QThread, pyqtSignal, Qt
from PyQt5.QtGui import QImage, QPixmap
class ThreadOpenCV(QThread):
changePixmap = pyqtSignal(QImage)
def __init__(self, source):
super().__init__()
def run(self):
# MODEL_NAME = 'inference_graph'
VIDEO_NAME = '20201024161726.mp4'
#
# # Grab path to current working directory
CWD_PATH = os.getcwd()
PATH_TO_VIDEO = os.path.join(CWD_PATH, VIDEO_NAME)
cap = cv2.VideoCapture(PATH_TO_VIDEO)
while True:
ret, frame = cap.read()
if ret:
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_expanded = np.expand_dims(frame_rgb, axis=0)
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
self.changePixmap.emit(p)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
class Widget(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
uic.loadUi('fire_detection.ui', self)
self.show()
self.label_video = QLabel()
layout = QVBoxLayout()
layout.addWidget(self.label_video)
self.widget.setLayout(layout)
self.thread = ThreadOpenCV('20201024161726.mp4')
self.thread.changePixmap.connect(self.setImage)
self.btn1.clicked.connect(self.playVideo)
def playVideo(self):
self.thread.start()
def setImage(self, image):
self.label_video.setPixmap(QPixmap.fromImage(image))
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
mw = Widget()
mw.show()
sys.exit(app.exec_())
The whole problem is that you have wrong indentation: you run cap.release() inside the while loop, so it releases the stream after the first frame.
while True:
ret, frame = cap.read()
if ret:
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_expanded = np.expand_dims(frame_rgb, axis=0)
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
self.changePixmap.emit(p)
# --- after `while` ---
cap.release()
You don't create any window with cv2, so you don't need cv2.destroyAllWindows().
cv2.waitKey(1) is also useless here, because the system sends key/mouse events to cv2 only when cv2 displays a window and that window is active (focused).
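If you need to pace the loop or stop it from the GUI without a cv2 window, you can use a flag plus QThread.msleep() instead of cv2.waitKey(). A minimal sketch (self.running and self.source are assumptions here; the full example below stores both in __init__):
def run(self):
    cap = cv2.VideoCapture(self.source)
    self.running = True
    while self.running:          # the GUI sets self.running = False to stop
        ret, frame = cap.read()
        if not ret:
            break
        # ... convert the frame and emit changePixmap as above ...
        self.msleep(30)          # sleep ~30 ms between frames instead of cv2.waitKey(1)
    cap.release()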
EDIT:
Full working code. It uses a webcam.
It doesn't need a UI file.
It has buttons to start and stop streaming, and buttons to switch RGB <-> GRAYSCALE and NORMAL <-> BLURED.
import os
import sys
import numpy as np
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import QLabel, QVBoxLayout, QPushButton, QWidget
from PyQt5.QtGui import QImage, QPixmap
class ThreadOpenCV(QThread):
changePixmap = pyqtSignal(QImage)
def __init__(self, source):
super().__init__()
self.source = source
self.running = True
self.grayscale = False
self.blur = False
def run(self):
print('start')
cap = cv2.VideoCapture(self.source)
self.running = True
while self.running:
ret, frame = cap.read()
if ret:
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
else:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.blur:
frame = cv2.blur(frame, (15, 15))
h, w, ch = frame.shape
bytes_per_line = ch * w # PEP8: `lower_case_names` for variables
image = QImage(frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
image = image.scaled(640, 480, Qt.KeepAspectRatio)
self.changePixmap.emit(image)
cap.release()
print('stop')
def stop(self):
self.running = False
class Widget(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
# MODEL_NAME = 'inference_graph'
VIDEO_NAME = '20201024161726.mp4'
CWD_PATH = os.getcwd()
PATH_TO_VIDEO = os.path.join(CWD_PATH, VIDEO_NAME)
# webcam
PATH_TO_VIDEO = 0
self.thread = ThreadOpenCV(PATH_TO_VIDEO)
self.thread.changePixmap.connect(self.setImage)
layout = QVBoxLayout()
self.label_video = QLabel()
layout.addWidget(self.label_video)
self.btn1 = QPushButton("PLAY")
self.btn1.clicked.connect(self.playVideo)
layout.addWidget(self.btn1)
self.btn_stop = QPushButton("STOP")
self.btn_stop.clicked.connect(self.stopVideo)
layout.addWidget(self.btn_stop)
self.btn_gray = QPushButton("RGB <-> GRAYSCALE")
self.btn_gray.clicked.connect(self.grayVideo)
layout.addWidget(self.btn_gray)
self.btn_blur = QPushButton("NORMAL <-> BLURED")
self.btn_blur.clicked.connect(self.blurVideo)
layout.addWidget(self.btn_blur)
self.widget = QWidget()
self.widget.setLayout(layout)
self.setCentralWidget(self.widget)
def playVideo(self):
self.thread.start()
def stopVideo(self):
self.thread.running = False
def grayVideo(self):
self.thread.grayscale = not self.thread.grayscale
def blurVideo(self):
self.thread.blur = not self.thread.blur
def setImage(self, image):
self.label_video.setPixmap(QPixmap.fromImage(image))
if __name__ == '__main__':
app = QtWidgets.QApplication([])
mw = Widget()
mw.show()
app.exec()
Screenshots (omitted here): RGB / NORMAL and GRAYSCALE / BLURED output.
I'm trying to create a UI that streams a webcam. This is what I referenced; I tested the answer and it works well.
Now I'm trying to change from the linked QThread run() approach to the QThread moveToThread() approach, but the UI shows nothing but an empty gray screen, and the output terminal prints 'QThread: Destroyed while thread is still running'.
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot, QObject
from PyQt5.QtGui import QImage, QPixmap
class Thread(QObject): # Changed here
changePixmap = pyqtSignal(QImage)
@pyqtSlot() # Changed here
def run(self):
print('run!') # Changed here
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
while True:
ret, frame = cap.read()
if ret:
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
self.changePixmap.emit(p)
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 Video'
self.left = 100
self.top = 100
self.width = 640
self.height = 480
self.initUI()
@pyqtSlot(QImage)
def setImage(self, image):
self.label.setPixmap(QPixmap.fromImage(image))
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.resize(1800, 1200)
# create a label
self.label = QLabel(self)
self.label.move(280, 120)
self.label.resize(640, 480)
# Changed here
# th = Thread(self)
# th.changePixmap.connect(self.setImage)
# th.start()
qth = QThread()
qth.start()
th = Thread()
th.changePixmap.connect(self.setImage)
th.moveToThread(qth)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
It never prints 'run!', so it seems I can't reach the run method of the Thread class. I'm working on Windows 10 Pro with Python 3.8.
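For reference, the usual moveToThread wiring (a sketch with hypothetical attribute names, not the poster's exact code) keeps references to both objects on the window, so the QThread is not garbage-collected when initUI returns, and starts the worker's loop from the thread's started signal, which is what actually makes run() execute in the new thread:
self.qth = QThread()                        # keep a reference, or the thread is destroyed
self.th = Thread()                          # the QObject worker defined above
self.th.changePixmap.connect(self.setImage)
self.th.moveToThread(self.qth)              # move the worker before starting the thread
self.qth.started.connect(self.th.run)       # run() is invoked inside the new thread
self.qth.start()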
I have been trying to read frames from a webcam using OpenCV and PyQt. I know there are a lot of examples, but I would like to use MVC (model-view-controller).
OpenCV handles the controller; I created models.py for the model and views.py for the GUI.
I don't see any error before running the code. When I run it, the GUI opens; then I press the open-camera button and I see this warning and error:
[ WARN:0] global C:\projects\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (674) SourceReaderCB::~SourceReaderCB terminating async callback
Process finished with exit code 1
Here is views.py
from PyQt5.QtCore import QThread, QTimer
from PyQt5.QtWidgets import QLabel, QWidget, QPushButton, QVBoxLayout, QApplication, QHBoxLayout, QMessageBox
from models import Camera
class UI_Window(QWidget):
def __init__(self, camera = None):
super().__init__()
self.camera = camera
# Create a timer.
self.timer = QTimer()
self.timer.timeout.connect(Camera.nextFrameSlot)
# Create a layout.
layout = QVBoxLayout()
# Add a button
button_layout = QHBoxLayout()
btnCamera = QPushButton("Open camera")
btnCamera.clicked.connect(Camera.openCamera)
button_layout.addWidget(btnCamera)
layout.addLayout(button_layout)
# Add a label
self.label = QLabel()
self.label.setFixedSize(640, 640)
layout.addWidget(self.label)
# Set the layout
self.setLayout(layout)
self.setWindowTitle("First GUI with QT")
self.setFixedSize(800, 800)
# https://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application
class MovieThread(QThread):
def __init__(self, camera):
super().__init__()
self.camera = camera
def run(self):
self.camera.acquire_movie(200)
if __name__ == '__main__':
app = QApplication([])
window = UI_Window()
window.show()
models.py
import cv2
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QPixmap, QImage
class Camera:
def __init__(self, camera):
self.camera = camera
self.cap = None
def openCamera(self):
self.vc = cv2.VideoCapture(0)
# vc.set(5, 30) #set FPS
self.vc.set(3, 640) # set width
self.vc.set(4, 480) # set height
if not self.vc.isOpened():
msgBox = QMessageBox()
msgBox.setText("Failed to open camera.")
msgBox.exec_()
return
self.timer.start(1000. / 24)
# https://stackoverflow.com/questions/41103148/capture-webcam-video-using-pyqt
def nextFrameSlot(self):
rval, frame = self.vc.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
pixmap = QPixmap.fromImage(image)
self.label.setPixmap(pixmap)
def initialize(self):
self.cap = cv2.VideoCapture(self.camera)
start.py
from PyQt5.QtWidgets import QApplication
from models import Camera
from views import UI_Window
camera = Camera(0)
camera.initialize()
app = QApplication([])
start_window = UI_Window(camera)
start_window.show()
app.exit(app.exec_())
This code works for me. For the test, I put everything in one file.
I removed camera.initialize().
I moved nextFrameSlot from Camera to UI_Window.
I also created start() in UI_Window to move self.timer.start() from Camera to UI_Window.
import cv2
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QPixmap, QImage
class Camera:
def __init__(self, camera):
self.camera = camera
self.cap = None
def openCamera(self):
self.vc = cv2.VideoCapture(0)
# vc.set(5, 30) #set FPS
self.vc.set(3, 640) # set width
self.vc.set(4, 480) # set height
if not self.vc.isOpened():
print('failure')
msgBox = QMessageBox()
msgBox.setText("Failed to open camera.")
msgBox.exec_()
return
# https://stackoverflow.com/questions/41103148/capture-webcam-video-using-pyqt
def initialize(self):
self.cap = cv2.VideoCapture(self.camera)
from PyQt5.QtCore import QThread, QTimer
from PyQt5.QtWidgets import QLabel, QWidget, QPushButton, QVBoxLayout, QApplication, QHBoxLayout, QMessageBox
#from models import Camera
class UI_Window(QWidget):
def __init__(self, camera = None):
super().__init__()
self.camera = camera
print('UI')
# Create a timer.
self.timer = QTimer()
self.timer.timeout.connect(self.nextFrameSlot)
# Create a layout.
layout = QVBoxLayout()
# Add a button
button_layout = QHBoxLayout()
btnCamera = QPushButton("Open camera")
btnCamera.clicked.connect(self.start)
button_layout.addWidget(btnCamera)
layout.addLayout(button_layout)
# Add a label
self.label = QLabel()
self.label.setFixedSize(640, 640)
layout.addWidget(self.label)
# Set the layout
self.setLayout(layout)
self.setWindowTitle("First GUI with QT")
self.setFixedSize(800, 800)
def start(self):
camera.openCamera()
self.timer.start(1000. / 24)
def nextFrameSlot(self):
rval, frame = camera.vc.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
pixmap = QPixmap.fromImage(image)
self.label.setPixmap(pixmap)
# https://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application
class MovieThread(QThread):
def __init__(self, camera):
super().__init__()
self.camera = camera
def run(self):
self.camera.acquire_movie(200)
#if __name__ == '__main__':
# app = QApplication([])
# window = UI_Window()
# window.show()
#start.py
from PyQt5.QtWidgets import QApplication
#from models import Camera
#from views import UI_Window
camera = Camera(0)
#camera.initialize()
print('test')
app = QApplication([])
start_window = UI_Window(camera)
start_window.show()
app.exit(app.exec_())
EDIT: code tested in separate files.
I also added read_gray() and negative colors: read(negative=True), read_gray(negative=True).
BTW: in many places I check that the frame is not empty, but you can't use "if not frame"; it has to be "if frame is not None", because frame is a numpy.array and "if not some_array:" may give a wrong result or raise an error (see the short example below).
I moved all GUI widgets from the model to the view.
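As a quick illustration of that frame check (a standalone sketch, not part of the project files):
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stands in for a decoded frame
# if not frame:  -> raises ValueError: the truth value of an array with more
#                   than one element is ambiguous
if frame is not None:                             # the safe way to test for a frame
    print(frame.shape)                            # (480, 640, 3)

frame = None                                      # what you get when read() fails
if frame is not None:
    print('never reached')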
models.py
import cv2
class Camera:
def __init__(self, camera):
self.camera = camera
self.vp = None
def open(self, width=640, height=480, fps=30):
self.vc = cv2.VideoCapture(self.camera)
self.width = width
self.height = height
self.fps = fps
# vc.set(5, fps) #set FPS
self.vc.set(3, width) # set width
self.vc.set(4, height) # set height
return self.vc.isOpened()
def read(self, negative=False):
rval, frame = self.vc.read()
if frame is not None:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if negative:
frame = cv2.bitwise_not(frame)
return frame
def read_gray(self, negative=False):
rval, frame = self.vc.read()
if frame is not None:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
if negative:
frame = cv2.bitwise_not(frame)
return frame
views.py
from PyQt5.QtCore import QThread, QTimer
from PyQt5.QtWidgets import QLabel, QWidget, QPushButton, QVBoxLayout, QApplication, QHBoxLayout, QMessageBox
from PyQt5.QtGui import QPixmap, QImage
from models import Camera
class UI_Window(QWidget):
def __init__(self, camera = None):
super().__init__()
self.camera = camera
print('UI')
# Create a timer.
self.timer = QTimer()
self.timer.timeout.connect(self.nextFrameSlot)
# Create a layout.
layout = QVBoxLayout()
# Add a button
button_layout = QHBoxLayout()
btnCamera = QPushButton("Open camera")
btnCamera.clicked.connect(self.start)
button_layout.addWidget(btnCamera)
layout.addLayout(button_layout)
# Add a label
self.label = QLabel()
self.label.setFixedSize(640, 640)
layout.addWidget(self.label)
# Set the layout
self.setLayout(layout)
self.setWindowTitle("First GUI with QT")
#self.setFixedSize(800, 800)
def start(self):
if not self.camera.open():
print('failure')
msgBox = QMessageBox()
msgBox.setText("Failed to open camera.")
msgBox.exec_()
return
self.timer.start(1000. / 24)
def nextFrameSlot(self):
frame = self.camera.read()
#frame = self.camera.read_gray()
if frame is not None:
image = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
pixmap = QPixmap.fromImage(image)
self.label.setPixmap(pixmap)
class MovieThread(QThread):
def __init__(self, camera):
super().__init__()
self.camera = camera
def run(self):
self.camera.acquire_movie(200)
if __name__ == '__main__':
app = QApplication([])
window = UI_Window()
window.show()
main.py
from PyQt5.QtWidgets import QApplication
from views import UI_Window
from models import Camera
if __name__ == '__main__':
camera = Camera(0)
app = QApplication([])
start_window = UI_Window(camera)
start_window.show()
app.exit(app.exec_())
Here is a concise alternative to @furas's answer, if you're just looking for a barebones answer to the question:
"How to read frames using PyQt and OpenCV?"
import sys
from PyQt5.QtCore import Qt, QSize, QTimer, QThread
from PyQt5.QtWidgets import QApplication, QWidget, QGridLayout, QLabel
from PyQt5.QtGui import QPixmap, QImage
import cv2
def main():
app = QApplication([])
window = QWidget()
window.setLayout(QGridLayout(window))
window.setMinimumSize(QSize(640, 480))
label = QLabel()
label.setFixedSize(640, 640)
window.layout().addWidget(label, 0, 0)
window.show()
vc = cv2.VideoCapture(0)
vc.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
vc.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
timer = QTimer()
timer.timeout.connect(lambda: nextFrameSlot(vc, label))
timer.start(1000. / 24)
return app.exit(app.exec_())
def nextFrameSlot(vc: cv2.VideoCapture, label: QLabel):
rval, frame = vc.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
pixmap = QPixmap.fromImage(image)
label.setPixmap(pixmap)
if __name__ == '__main__':
exit_code = main()
sys.exit(exit_code)
Tested with Python 3.7.9
requirements.txt:
numpy==1.19.4
opencv-python==4.4.0.46
PyQt5==5.15.1
PyQt5-sip==12.8.1
I am new to classes and PyQt5 and am trying to build a video player using PyQt5 and OpenCV. Using the code in OpenCV Video Capture with PyQt4, converting the PyQt4 commands to PyQt5, and adding a pause button, I now have a video player with pause, play, end, and quit buttons. This is the code, which is working fine:
import cv2
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import (QApplication, QPushButton,
    QVBoxLayout, QFileDialog)
fileName = 'C:/Users/Art/Downloads/testVideo.mp4'
class Capture():
def __init__(self):
self.capturing = False
self.c = cv2.VideoCapture(fileName)
def startCapture(self):
self.capturing = True
cap = self.c
while(self.capturing):
ret, frame = cap.read()
cv2.imshow("Capture", frame)
cv2.waitKey(5)
cv2.destroyAllWindows()
def endCapture(self):
self.capturing = False
def pauseCapture(self):
if cv2.waitKey(0) & 0xFF == ord('p'): # Pause
self.capturing = False
def quitCapture(self):
cap = self.c
cv2.destroyAllWindows()
cap.release()
QtCore.QCoreApplication.quit()
class Window(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.setWindowTitle('Control Panel')
self.capture = Capture()
self.start_button = QPushButton('Start', self)
self.start_button.clicked.connect(self.capture.startCapture)
self.end_button = QPushButton('End', self)
self.end_button.clicked.connect(self.capture.endCapture)
self.pause_button = QPushButton('Pause', self)
self.pause_button.clicked.connect(self.capture.pauseCapture)
self.quit_button = QPushButton('Quit', self)
self.quit_button.clicked.connect(self.capture.quitCapture)
vbox = QVBoxLayout(self)
vbox.addWidget(self.start_button)
vbox.addWidget(self.end_button)
vbox.addWidget(self.pause_button)
vbox.addWidget(self.quit_button)
self.setLayout(vbox)
self.setGeometry(100, 100, 200, 200)
self.show()
if __name__== '__main__':
import sys
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec())
So far, I have hard-coded the video file name and its path into the code (fileName). Now I want to add a load button that lets the user select the video. Something like this:
self.load_button = QPushButton('Load', self)
self.load_button.clicked.connect(self.pick_video)
def pick_video():
dialog = QtGui.QFileDialog()
fileName = dialog.getExistingDirectory(None,
"Select Folder")
return fileName
And adding the load button to the list of existing buttons, like this:
vbox.addWidget(self.load_button)
My problem is that I don't know how to merge this into the existing code; if I put it inside the Window class, it throws an error. My question is how I can add something like this to my existing code, so that the user can select the video file after pressing a load button.
Edit: After changing the code based on @ekhumoro's answer, I got something like this:
import cv2
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import (QApplication, QPushButton,
    QVBoxLayout, QFileDialog)
class Capture():
def __init__(self):
self.capturing = False
def startCapture(self, path):
self.capturing = True
self.c = cv2.VideoCapture(path)
while self.capturing:
ret, frame = self.c.read()
cv2.imshow("Capture", frame)
cv2.waitKey(5)
cv2.destroyAllWindows()
def endCapture(self):
self.capturing = False
def pauseCapture(self):
if cv2.waitKey(0) & 0xFF == ord('p'): # Pause
self.capturing = False
def quitCapture(self):
cap = self.c
cv2.destroyAllWindows()
cap.release()
QtCore.QCoreApplication.quit()
class Window(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.setWindowTitle('Control Panel')
self.capture = Capture()
self.start_button = QPushButton('Start', self)
self.start_button.clicked.connect(self.start)
self.end_button = QPushButton('End', self)
self.end_button.clicked.connect(self.capture.endCapture)
self.pause_button = QPushButton('Pause', self)
self.pause_button.clicked.connect(self.capture.pauseCapture)
self.quit_button = QPushButton('Quit', self)
self.quit_button.clicked.connect(self.capture.quitCapture)
vbox = QVBoxLayout(self)
vbox.addWidget(self.start_button)
vbox.addWidget(self.end_button)
vbox.addWidget(self.pause_button)
vbox.addWidget(self.quit_button)
self.setLayout(vbox)
self.setGeometry(100, 100, 200, 200)
self.show()
def start(self):
path = QtWidgets.QFileDialog.getOpenFileName(self)[0]
if path:
self.capture.startCapture(path)
if __name__== '__main__':
import sys
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec())
But when I run this code, I get this error: AttributeError: 'Window' object has no attribute 'start'. Another thing is that I want a separate button for this process, meaning that once the user runs the code, in the window that opens, they can click on that button (let's call it a load button) and then select the video file, which I don't see in this code. Am I missing something somewhere? Or maybe the rearranged code is not what @ekhumoro meant.
Rearrange the code so that startCapture takes a path parameter. Then pass in the path from the file-dialog in the slot for the start button:
class Capture():
def __init__(self):
self.capturing = False
def startCapture(self, path):
self.capturing = True
self.c = cv2.VideoCapture(path)
while self.capturing:
ret, frame = self.c.read()
cv2.imshow("Capture", frame)
cv2.waitKey(5)
cv2.destroyAllWindows()
class Window(QtWidgets.QWidget):
def __init__(self):
...
self.start_button = QPushButton('Start', self)
self.start_button.clicked.connect(self.start)
...
def start(self):
path = QtWidgets.QFileDialog.getOpenFileName(self)[0]
if path:
self.capture.startCapture(path)
Here is a complete alternative implementation:
import cv2
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QApplication, QPushButton,QVBoxLayout, QFileDialog
class Capture():
def __init__(self):
self.capturing = False
self.c = None
def setVideoFile(self, path):
if self.c is not None:
cv2.destroyAllWindows()
self.c.release()
self.c = cv2.VideoCapture(path)
self.startCapture()
def startCapture(self):
self.capturing = True
cap = self.c
while(self.capturing):
ret, frame = cap.read()
cv2.imshow("Capture", frame)
cv2.waitKey(5)
cv2.destroyAllWindows()
def endCapture(self):
self.capturing = False
def pauseCapture(self):
if cv2.waitKey(0) & 0xFF == ord('p'): # Pause
self.capturing = False
def quitCapture(self):
cap = self.c
cv2.destroyAllWindows()
cap.release()
QtCore.QCoreApplication.quit()
class Window(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.setWindowTitle('Control Panel')
self.capture = Capture()
self.open_button = QPushButton('Open', self)
self.open_button.clicked.connect(self.open)
self.start_button = QPushButton('Start', self)
self.start_button.clicked.connect(self.capture.startCapture)
self.end_button = QPushButton('End', self)
self.end_button.clicked.connect(self.capture.endCapture)
self.pause_button = QPushButton('Pause', self)
self.pause_button.clicked.connect(self.capture.pauseCapture)
self.quit_button = QPushButton('Quit', self)
self.quit_button.clicked.connect(self.capture.quitCapture)
vbox = QVBoxLayout(self)
vbox.addWidget(self.open_button)
vbox.addWidget(self.start_button)
vbox.addWidget(self.end_button)
vbox.addWidget(self.pause_button)
vbox.addWidget(self.quit_button)
self.setLayout(vbox)
self.setGeometry(100, 100, 200, 200)
self.show()
def open(self):
path = QtWidgets.QFileDialog.getOpenFileName(self)[0]
if path:
self.capture.setVideoFile(path)
if __name__== '__main__':
import sys
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec())
I'm trying to link PyQt and an OpenCV video feed and can't understand how to apply a while loop for continuously streaming video. It just takes a still picture. Please, can anyone help solve the problem?
PyQt = 5
Python = 3.6.1
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 Video'
self.left = 100
self.top = 100
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.resize(1800, 1200)
#create a label
label = QLabel(self)
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
QtGui.QImage.Format_RGB888)
convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
pixmap = QPixmap(convertToQtFormat)
resizeImage = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
QApplication.processEvents()
label.setPixmap(resizeImage)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
The problem is that the function that obtains the image is executed only once, so the label is never updated.
The correct way is to place it inside a loop, but that would block the main window. This can be solved by using the QThread class and sending a QImage through a signal to update the label. For example:
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
class Thread(QThread):
changePixmap = pyqtSignal(QImage)
def run(self):
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if ret:
# https://stackoverflow.com/a/55468544/6622587
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
self.changePixmap.emit(p)
class App(QWidget):
def __init__(self):
super().__init__()
[...]
self.initUI()
@pyqtSlot(QImage)
def setImage(self, image):
self.label.setPixmap(QPixmap.fromImage(image))
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.resize(1800, 1200)
# create a label
self.label = QLabel(self)
self.label.move(280, 120)
self.label.resize(640, 480)
th = Thread(self)
th.changePixmap.connect(self.setImage)
th.start()
self.show()
Updating this for PySide2 and qimage2ndarray
from PySide2.QtCore import *
from PySide2.QtGui import *
import cv2 # OpenCV
import qimage2ndarray # for a memory leak,see gist
import sys # for exiting
# Minimal implementation...
def displayFrame():
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = qimage2ndarray.array2qimage(frame)
label.setPixmap(QPixmap.fromImage(image))
app = QApplication([])
window = QWidget()
# OPENCV
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
# timer for getting frames
timer = QTimer()
timer.timeout.connect(displayFrame)
timer.start(60)
label = QLabel('No Camera Feed')
button = QPushButton("Quiter")
button.clicked.connect(sys.exit) # quiter button
layout = QVBoxLayout()
layout.addWidget(button)
layout.addWidget(label)
window.setLayout(layout)
window.show()
app.exec_()
# See also: https://gist.github.com/bsdnoobz/8464000
Thank you, Taimur Islam, for the question.
Thank you, eyllanesc, for the wonderful answer; I have modified your code a little bit. I used PyQt4 and Python 2.7, and I didn't use OpenCV.
import sys
import numpy as np
import flycapture2 as fc2
from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)
class Thread(QThread):
changePixmap = pyqtSignal(QImage)
def __init__(self, parent=None):
QThread.__init__(self, parent=parent)
self.cameraSettings()
def run(self):
while True:
im = fc2.Image()
self.c.retrieve_buffer(im)
a = np.array(im)
rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)
self.changePixmap.emit(rawImage)
def cameraSettings(self):
print(fc2.get_library_version())
self.c = fc2.Context()
numberCam = self.c.get_num_of_cameras()
print(numberCam)
self.c.connect(*self.c.get_camera_from_index(0))
print(self.c.get_camera_info())
m, f = self.c.get_video_mode_and_frame_rate()
print(m, f)
print(self.c.get_property_info(fc2.FRAME_RATE))
p = self.c.get_property(fc2.FRAME_RATE)
print(p)
self.c.set_property(**p)
self.c.start_capture()
class App(QWidget):
def __init__(self):
super(App,self).__init__()
self.title = 'PyQt4 Video'
self.left = 100
self.top = 100
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.resize(800, 600)
# create a label
self.label = QLabel(self)
self.label.move(0, 0)
self.label.resize(640, 480)
th = Thread(self)
th.changePixmap.connect(lambda p: self.setPixMap(p))
th.start()
def setPixMap(self, p):
p = QPixmap.fromImage(p)
p = p.scaled(640, 480, Qt.KeepAspectRatio)
self.label.setPixmap(p)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
ex.show()
sys.exit(app.exec_())