I am trying to make the code at the link (https://github.com/god233012yamil/Streaming-IP-Cameras-Using-PyQt-and-OpenCV/blob/main/Streaming_IP_Camera_Using_PyQt_OpenCV.py) more generalized. The issue when I run the code below, camera frame 3 shows blinking frames from camera 3 and camera 1.
The camera urls are rtsp links. I also added some youtube video links but it doesn't show them. I also do not know why it doesn't show them but right now my main concern is the blinking video.
When you run the code you can see 2x2 (total 4) camera streams on screen. when you double click one of them it becomes full screen. this is like a CCTV camera software.
I feel like I need to make some variable instance variable but right now in fact I am keeping record of everything in an array cameras where each camera has variables as instance variables.
I am using linux ubuntu.
I would be grateful if you can help
# import the require packages.
import cv2
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, \
QLabel, QGridLayout, QScrollArea, QSizePolicy
from PyQt5.QtGui import QPixmap, QIcon, QImage, QPalette
from PyQt5.QtCore import QThread, pyqtSignal, Qt, QEvent, QObject
from PyQt5 import QtCore
import sys
import math
class CaptureIpCameraFramesWorker(QThread):
# Signal emitted when a new image or a new frame is ready.
ImageUpdated = pyqtSignal(QImage)
def __init__(self, url) -> None:
super(CaptureIpCameraFramesWorker, self).__init__()
# Declare and initialize instance variables.
self.url = url
self.__thread_active = True
self.fps = 0
self.__thread_pause = False
def run(self) -> None:
# Capture video from a network stream.
cap = cv2.VideoCapture(self.url, cv2.CAP_FFMPEG)
# Get default video FPS.
self.fps = cap.get(cv2.CAP_PROP_FPS)
# If video capturing has been initialized already.q
if cap.isOpened():
# While the thread is active.
while self.__thread_active:
#
if not self.__thread_pause:
# Grabs, decodes and returns the next video frame.
ret, frame = cap.read()
# Get the frame height, width and channels.
height, width, channels = frame.shape
# Calculate the number of bytes per line.
bytes_per_line = width * channels
# If frame is read correctly.
if ret:
# Convert image from BGR (cv2 default color format) to RGB (Qt default color format).
cv_rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Convert the image to Qt format.
qt_rgb_image = QImage(cv_rgb_image.data, width, height, bytes_per_line, QImage.Format_RGB888)
# Scale the image.
qt_rgb_image_scaled = qt_rgb_image.scaled(1280, 720, Qt.KeepAspectRatio) # 720p
# qt_rgb_image_scaled = qt_rgb_image.scaled(1920, 1080, Qt.KeepAspectRatio)
# Emit this signal to notify that a new image or frame is available.
self.ImageUpdated.emit(qt_rgb_image_scaled)
else:
break
# When everything done, release the video capture object.
cap.release()
# Tells the thread's event loop to exit with return code 0 (success).
self.quit()
def stop(self) -> None:
self.__thread_active = False
def pause(self) -> None:
self.__thread_pause = True
def unpause(self) -> None:
self.__thread_pause = False
class cameraObj:
#this object is used to store various variables about a camera including GUI properties and camera url
def __init__(self,cameraNumg,cameralabelg,urlg,scrollAreag):
self.scrollArea=scrollAreag
self.cameraNumber=cameraNumg
self.cameralabel=cameralabelg
self.url=urlg
self.ipworker=None
self.state="Normal"
def setIpWorker(self,ipw):
self.ipworker=ipw
class MainWindow(QMainWindow):
def __init__(self) -> None:
super(MainWindow, self).__init__()
self.urls=[]
self.url_1 = "rtsp://admin:12346#192.168.1.12/1"
#self.url_2 = "https://www.youtube.com/watch?v=hGhqPpVZR4A"
self.url_3 = "rtsp://admin:12345#192.168.1.13/1"
#self.url_4 = "https://www.youtube.com/watch?v=YMlUtdaPbHY"
self.urls.append(self.url_1)
#self.urls.append(self.url_2)
#self.urls.append(self.url_4)
self.urls.append(self.url_3)
self.numberofcameras=len(self.urls)
self.cameraNames=[]
self.cameras=[]
for camnum in range(self.numberofcameras):
# Create an instance of a QLabel class to show camera 1.
camera_1 = QLabel()
camname="Camera_"+str(camnum+1)
camera_1.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
camera_1.setScaledContents(True)
camera_1.installEventFilter(self)
camera_1.setObjectName(camname)
# Create an instance of a QScrollArea class to scroll camera 1 image.
QScrollArea_1 = QScrollArea()
QScrollArea_1.setBackgroundRole(QPalette.Dark)
QScrollArea_1.setWidgetResizable(True)
QScrollArea_1.setWidget(camera_1)
cobj=cameraObj(camnum+1,camera_1,self.urls[camnum],QScrollArea_1)
self.cameras.append(cobj)
self.cameraNames.append(camname)
# Set the UI elements for this Widget class.
self.__SetupUI()
for camnum in range(self.numberofcameras):
# Create an instance of CaptureIpCameraFramesWorker.
cWorker = CaptureIpCameraFramesWorker(self.cameras[camnum].url)
cWorker.ImageUpdated.connect(lambda image: self.ShowCamera(image,camnum))
self.cameras[camnum].setIpWorker(cWorker)
for camnum in range(self.numberofcameras):
self.cameras[camnum].ipworker.start()
def hidescroll(self,snum:int):
for i in range(len(self.cameras)):
if i != snum:
self.cameras[i].scrollArea.hide()
def showscroll(self,snum:int):
for i in range(len(self.cameras)):
if i != snum:
self.cameras[i].scrollArea.show()
def __SetupUI(self) -> None:
# Create an instance of a QGridLayout layout.
grid_layout = QGridLayout()
grid_layout.setContentsMargins(0, 0, 0, 0)
sqroot=int(math.sqrt(self.numberofcameras))
for i in range(self.numberofcameras):
grid_layout.addWidget(self.cameras[i].scrollArea, int(i/sqroot), i%sqroot)
# Create a widget instance.
self.widget = QWidget(self)
self.widget.setLayout(grid_layout)
# Set the central widget.
self.setCentralWidget(self.widget)
self.setMinimumSize(800, 600)
self.showMaximized()
self.setStyleSheet("QMainWindow {background: 'black';}")
self.setWindowIcon(QIcon(QPixmap("camera_2.png")))
# Set window title.
self.setWindowTitle("IP Camera System")
#QtCore.pyqtSlot()
def ShowCamera(self, frame: QImage,cnum:int) -> None:
self.cameras[cnum].cameralabel.setPixmap(QPixmap.fromImage(frame))
# Override method for class MainWindow.
def eventFilter(self, source: QObject, event: QEvent) -> bool:
"""
Method to capture the events for objects with an event filter installed.
:param source: The object for whom an event took place.
:param event: The event that took place.
:return: True if event is handled.
"""
#
if event.type() == QtCore.QEvent.MouseButtonDblClick:
camname=source.objectName()
camerapos=int(camname[-1])-1
if not camname in self.cameraNames:
return super(MainWindow, self).eventFilter(source, event)
if self.cameras[camerapos].state == "Normal":
self.hidescroll(camerapos)
self.cameras[camerapos].state = "Maximized"
else:
self.showscroll(camerapos)
self.cameras[camerapos].state = "Normal"
return True
else:
return super(MainWindow, self).eventFilter(source, event)
# Overwrite method closeEvent from class QMainWindow.
def closeEvent(self, event) -> None:
# If thread getIpCameraFrameWorker_1 is running, then exit it.
for i in range(self.numberofcameras):
if self.cameras[i].ipworker.isRunning():
self.cameras[i].ipworker.quit()
event.accept()
def main() -> None:
# Create a QApplication object. It manages the GUI application's control flow and main settings.camnum
# It handles widget specific initialization, finalization.
# For any GUI application using Qt, there is precisely one QApplication object
app = QApplication(sys.argv)
# Create an instance of the class MainWindow.
window = MainWindow()
# Show the window.
window.show()
# Start Qt event loop.
sys.exit(app.exec_())
if __name__ == '__main__':
main()
Related
I need to play a .mov video (ProRes4444) with alpha channel in a scene. The scene has a background image and I need to use the alpha channel of the video so it overlays on the background.
If I open the video normally with QMediaPlayer, the alpha channel appears in black.
screen with background pic & video with black alpha:
How can I make the output of the QMediaPlayer (QGraphicsVideoItem) respect the alpha and make the overlay effect possible?
The closest I got to the answer based on online research is code in cpp that I've found that shows the necessity to create a subclass of a QAbstractVideoSurface that receives videoframes converts to ARGB, then forwards those to a QLabel that displays them.
Displaying a video with an alpha channel using qt
I've also tried that unsuccessfully. Is this the right course or I'm just missing something simple on my current code?
EDIT:
Link to files (background image and video .mov)
https://drive.google.com/drive/folders/1LIZzTg1E8wkaD0YSvkkcfSATdlDTggyh?usp=sharing
import sys
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class VideoWindow(QMainWindow):
def __init__(self):
super(VideoWindow, self).__init__()
self.setWindowTitle('QMediaPlayer TEST')
self.resize(1920, 1080)
self.vista = QGraphicsView(self)
self.vista.setGeometry(QRect(0, 0, 1920, 1080))
self.scene = QGraphicsScene(self.vista)
self.scene.setSceneRect(0, 0, 1920, 1080)
self.vista.setScene(self.scene)
self.graphvitem1 = QGraphicsVideoItem()
#SET BACKGROUND IMAGE ON SCENE
self.tempImg = QPixmap("/Users/elemental/Desktop/pyvids/fons.jpeg")
self.tempImg = self.tempImg.scaled(self.scene.width(), self.scene.height())
self.graphicsPixmapItem = QGraphicsPixmapItem(self.tempImg)
self.scene.addItem(self.graphicsPixmapItem)
#SET VIDEO 1 WITH LOOP
self.mediaPlayer1 = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.mediaPlayer1.setVideoOutput(self.graphvitem1)
self.playlist1 = QMediaPlaylist(self)
self.playlist1.addMedia(QMediaContent(QUrl.fromLocalFile("/Users/elemental/Desktop/pyvids/vida1.mov")))
self.playlist1.setCurrentIndex(1)
self.playlist1.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)
self.mediaPlayer1.setPlaylist(self.playlist1)
self.graphvitem1.setPos(500, 100)
self.graphvitem1.setSize(QSizeF(1000, 500))
self.scene.addItem(self.graphvitem1)
self.mediaPlayer1.play()
self.vista.show()
if __name__ == '__main__':
app = QApplication([])
window = VideoWindow()
window.show()
sys.exit(app.exec_())
From what I can see, QVideoWidget doesn't support alpha channels by default, so it falls back to the "basic" black background.
But, implementation seems possible, by properly subclassing QAbstractVideoSurface.
Consider that the following code is experimental, my knowledge of QMediaPlayer and the Qt video surface isn't that deep (the former is an abstract for multiple platforms and multiple libraries that can behave very differently on different configurations), and I could only test it on two Linux platforms, so I don't know how it behaves under Windows nor MacOS.
The assumption is that the video surface provides a default dedicated QWidget subclass (VideoWidget) unless another class with a suitable setImage is provided, and updates its image whenever the media player requires it.
Note that I only tested it with a couple of videos (including the provided one), and further testing might be required.
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class VideoWidget(QWidget):
image = QImage()
def __init__(self, **kwargs):
super().__init__(**kwargs)
def setImage(self, image):
self.image = image
self.update()
def sizeHint(self):
return QSize(640, 480)
def paintEvent(self, event):
qp = QPainter(self)
# ensure that smooth transformation is used while scaling pixmaps
qp.setRenderHints(qp.SmoothPixmapTransform)
# provide compliancy with background set using stylesheets, see:
# https://doc.qt.io/qt-5/stylesheet-reference.html#qwidget-widget
opt = QStyleOption()
opt.initFrom(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, qp, self)
# draw the image, scaled to the widget size; if you need fixed sizes
# or keep aspect ratio, implement this (or the widget) accordingly
qp.drawImage(self.rect(), self.image, self.image.rect())
class AlphaVideoDrawer(QAbstractVideoSurface):
def __init__(self, videoWidget=None, widgetOptions=None):
super().__init__()
if videoWidget:
if not hasattr(videoWidget, 'setImage'):
raise NotImplementedError(
'setImage() must be implemented for videoWidget!')
else:
if not isinstance(widgetOptions, dict):
widgetOptions = {}
elif not 'styleSheet' in widgetOptions:
# just a default background for testing purposes
widgetOptions = {'styleSheet': 'background: darkGray;'}
videoWidget = VideoWidget(**widgetOptions)
self.videoWidget = videoWidget
# QVideoFrame.image() has been introduced since Qt 5.15
version, majVersion, minVersion = map(int, QT_VERSION_STR.split('.'))
if version < 6 and majVersion < 15:
self.imageFromFrame = self._imageFromFrameFix
else:
self.imageFromFrame = lambda frame: frame.image()
def _imageFromFrameFix(self, frame):
clone_frame = QVideoFrame(frame)
clone_frame.map(QAbstractVideoBuffer.ReadOnly)
image = QImage(
clone_frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(),
QVideoFrame.imageFormatFromPixelFormat(frame.pixelFormat()))
clone_frame.unmap()
return image
def supportedPixelFormats(self, type):
return [QVideoFrame.Format_ARGB32]
def present(self, frame: QVideoFrame):
if frame.isValid():
self.videoWidget.setImage(self.imageFromFrame(frame))
if self.surfaceFormat().pixelFormat() != frame.pixelFormat() or \
self.surfaceFormat().frameSize() != frame.size():
self.setError(QAbstractVideoSurface.IncorrectFormatError)
self.stop()
return False
else:
return True
class AlphaVideoTest(QMainWindow):
def __init__(self):
super().__init__()
self.setStyleSheet('''
QFrame#mainFrame {
background: green;
}
''')
mainFrame = QFrame(objectName='mainFrame')
self.setCentralWidget(mainFrame)
layout = QVBoxLayout(mainFrame)
self.playButton = QPushButton('Play', checkable=True)
layout.addWidget(self.playButton)
self.drawer = AlphaVideoDrawer()
layout.addWidget(self.drawer.videoWidget)
self.mediaPlayer1 = QMediaPlayer(self, QMediaPlayer.VideoSurface)
self.playlist = QMediaPlaylist(self)
path = QDir.current().absoluteFilePath('vida1.mov')
self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(path)))
self.playlist.setCurrentIndex(1)
self.playlist.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)
self.mediaPlayer1.setPlaylist(self.playlist)
self.mediaPlayer1.setVideoOutput(self.drawer)
self.playButton.toggled.connect(self.togglePlay)
def togglePlay(self, play):
if play:
self.mediaPlayer1.play()
self.playButton.setText('Pause')
else:
self.mediaPlayer1.pause()
self.playButton.setText('Play')
import sys
app = QApplication(sys.argv)
test = AlphaVideoTest()
test.show()
sys.exit(app.exec_())
I based the above code on the following sources:
wrong video frame of present() of qabstractvideosurface in pyqt5
Displaying a video with an alpha channel using qt
Note that I limited the supportedPixelFormats output, as using the full list of formats provided in the related question didn't work; this doesn't mean that this would work anyway, but that further testing is probably required, possibly on different machines and different OS/System configurations and video formats: remember that QMediaPlayer completely relies on the underlying OS and default media backend.
Finally, if you only need this for "limited" and predefined animations, consider implementing your own subclass of QWidget that uses a list of loaded PNG images and shows them by implementing paintEvent() that would be called by updates based on a QVariantAnimation. While this kind of implementation might result less performant or ideal, it has the major benefit of providing cross platform compatibility.
I'm developing an application with PyQt5 and QtDesigner. For one of the pages (page 2), I'm trying to embed a live video stream from a camera with OpenCV. The code has a thread running and I confirmed that it is sending good frames. The problem I'm facing is dynamically updating a QLabel with the OpenCV frame.
The program currently crashes when this line of code (in the MainWindow class) is uncommented: Why?
self.ui.Worker1.ImageUpdate.connect(self.ui.ImageUpdateSlot)
Below is the main code
# by: reevve
# Import Modules
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import cv2
# Import UI files
from ui_main import Ui_MainWindow
from ui_splashscreen import Ui_SplashScreen
# Global Variables
counter = 0
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow,self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# add page btn click functionality
self.ui.btn_page_1.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_1))
self.ui.btn_page_2.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))
self.ui.btn_page_3.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_3))
# set up the video feed
self.ui.CancelBTN.clicked.connect(lambda: self.ui.CancelFeed)
self.ui.Worker1 = Worker1()
self.ui.Worker1.start()
# the line below is causing the program to crash
#self.ui.Worker1.ImageUpdate.connect(self.ui.ImageUpdateSlot)
def ImageUpdateSlot(self, Image):
print('recieve frames')
self.ui.FeedLabel.setPixmap(QPixmap.fromImage(Image))
def CancelFeed(self):
print('cancel feed')
self.ui.Worker1.stop()
class SplashScreen(QMainWindow):
def __init__(self):
super(SplashScreen,self).__init__()
self.ui = Ui_SplashScreen()
self.ui.setupUi(self)
# remove title bar
self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
# drop shadow effect
self.shadow = QGraphicsDropShadowEffect(self)
self.shadow.setBlurRadius(20)
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.shadow.setColor(QColor(0, 0, 0, 60))
self.ui.dropShadowFrame.setGraphicsEffect(self.shadow)
# start timer
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.progress)
# specify duration of launcher
self.timer.start(15)
# initial text
self.ui.label_description.setText("<strong>UD ASAE</strong> Ground Station GUI")
# change texts during loading process
QtCore.QTimer.singleShot(1500, lambda: self.ui.label_description.setText("<strong>LOADING</strong> the good stuff"))
QtCore.QTimer.singleShot(3000, lambda: self.ui.label_description.setText("<strong>GATHERING</strong> remaining braincells"))
# show main window
self.show()
def progress(self):
global counter
self.ui.progressBar.setValue(counter)
# close splash screen and open main gui
if counter > 100:
self.timer.stop()
self.main = MainWindow()
self.main.show()
self.close()
counter += 1
# FPV thread
class Worker1(QThread):
ImageUpdate = pyqtSignal(QImage)
def run(self):
print('\nrun feed')
self.ThreadActive = True
Capture = cv2.VideoCapture(0)
while self.ThreadActive:
ret, frame = Capture.read()
if ret:
Image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
ConvertToQtFormat = QImage(Image.data, Image.shape[1], Image.shape[0], QImage.Format_RGB888)
Pic = ConvertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
self.ImageUpdate.emit(Pic)
print('send good frames')
def stop(self):
print('stop feed')
self.ThreadActive = False
self.quit()
def window():
app = QApplication(sys.argv)
win = SplashScreen()
sys.exit(app.exec_())
window()
Again, the Worker1 thread seems to be sending good frames (confirmed with print statement), but I'm having trouble updating my QLabel (called FeedLabel) as the frames come in.
I did not attach the supporting .ui files to this post.
I changed a bunch of things in your code, indicated in the comments. Essentially the methods of your program were defined in a strange way and you stored many things in self.ui instead of self.
I made myself a minimal UI to be able to test the changes and it works. Below you can see the back of the sticky note I put on my laptop's camera:
Here is your modified code:
class MainWindow(QMainWindow):
def __init__(self):
super().__init__() # (optional) removed the args of `super`
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# add page btn click functionality
...
# set up the video feed
self.ui.CancelBTN.clicked.connect(self.CancelFeed) # removed `lambda` and `.ui`
self.Worker1 = Worker1() # (optional) removed `.ui` because your thread should be an attr of the program, not of the ui. This is a matter of preference though.
self.Worker1.start() # (optional) removed `.ui`
self.Worker1.ImageUpdate.connect(self.ImageUpdateSlot) # removed `.ui`
#pyqtSlot(QImage) # (optional) decorator to indicate what object the signal will provide.
def ImageUpdateSlot(self, Image): # Unindented by 4 spaces.
print('recieve frames')
self.ui.FeedLabel.setPixmap(QPixmap.fromImage(Image))
def CancelFeed(self): # Unindented by 4 spaces.
print('cancel feed')
self.Worker1.stop() # (optional) removed `.ui`
Please excuse me if my description isn't perfect, I'm still pretty new at PyQt and also Python in general. If you have recommendations on how to improve the question, please let me know.
I'm trying to draw on a Pixmap-QLabel, which is part of a QMainWindow, with QPainter. The QPainter is called in a loop, because the drawing is updated after a fixed duration. Drawing on the Pixmap works as intended, the problem I have is that the label always opens in a new window, instead of being placed on the QLabel inside the original QMainWindow.
I suspect that the reason for that is that I'm calling the QPainter from a Worker-class-object which is created by the QThreadpool-object. If I call the QPainter from inside the initialization of the GUI, the Pixmap-label is created as part of the QMainWindow as intended. Unfortunately the multithreading is necessary so the GUI stays responsive while the QLabel is updating.
The GUI itself is created with QtCreator, and simply loaded into the script.
Here's my code:
import os
import sys
import time
from PyQt5 import QtWidgets, QtCore, uic
from PyQt5.QtWidgets import QLabel, QPushButton, QMainWindow
from PyQt5.QtGui import QPixmap, QPainter, QPen, QPaintEvent
from PyQt5.QtCore import *
class Ui(QMainWindow):
def __init__(self):
super(Ui, self).__init__()
self.counter = 0
# load ui which can be designed with Qt Creator
uic.loadUi("ui/paintEvent_Loop.ui", self)
# find the QLabel where the picture should be placed
self.pixmap_label = self.findChild(QtWidgets.QLabel, "pixmap_label")
# creating the pixmap-label here works as intended
'''self.draw_label = PixmapLabel(self.pixmap_label)
self.draw_label.setGeometry(130, 50, 911, 512)
self.draw_label.show()'''
self.label = self.findChild(QLabel, "label")
# find the button with the name "cancel_button"
self.cancel_button = self.findChild(QtWidgets.QPushButton, "cancel_button")
self.cancel_button.clicked.connect(self.close_application)
# find the start_button button
self.start_button = self.findChild(QtWidgets.QPushButton, "start_button")
self.start_button.clicked.connect(self.start_loop)
self.pause_cont_button = self.findChild(QPushButton, "pause_cont_button")
self.pause_cont_button.clicked.connect(self.toggle_pause_continue)
self.pause_cont_button.hide()
# create the QThreadPool object to manage multiple threads
self.threadpool = QThreadPool()
print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
self.run_loop = True
# show application
self.show()
def close_application(self):
app.quit()
def toggle_pause_continue(self):
"""
changes the value of boolean run_loop to pause and continue the loop through the samples in the chosen scene
:return:
"""
if self.run_loop:
self.run_loop = False
else:
self.run_loop = True
def start_loop(self):
# hide start_button and show pause_cont_button
self.start_button.hide()
self.pause_cont_button.show()
self.pause_cont_button.setCheckable(True)
# start one further thread managed by threadpool
worker = Worker()
self.threadpool.start(worker)
class PixmapLabel(QLabel):
def __init__(self, parent=None):
super(PixmapLabel, self).__init__(parent=parent)
def paintEvent(self, a0: QPaintEvent) -> None:
# initiate QPainter instance
painter = QPainter(window.draw_label)
# open image
picture = QPixmap(os.getcwd() + '/test-image.png')
myPicturePixmap = picture.scaled(self.size(), QtCore.Qt.KeepAspectRatio)
self.setPixmap(myPicturePixmap)
# draw red box on it
painter.drawPixmap(self.rect(), myPicturePixmap)
pen = QPen(Qt.red, 3)
painter.setPen(pen)
painter.drawRect(10, 10, 100, 100)
class Worker(QRunnable):
# worker thread
def __init__(self):
super().__init__()
#pyqtSlot()
def run(self):
print("Thread start")
for self.i in range(0, 50):
# create pixmap_label with drawings
# FIXME: make pixmap-label part of original GUI
window.draw_label = PixmapLabel(window.pixmap_label)
window.draw_label.setGeometry(130, 50, 911, 512)
window.draw_label.show()
window.label.setText(str(self.i))
while window.run_loop == False:
time.sleep(0.05)
# show image for 0.5 seconds, then update image
time.sleep(0.5)
window.draw_label.destroy()
time.sleep(0.05)
# print in terminal to know that we are finished
print("Thread complete")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = Ui()
app.exec_()
The image I'm using:
I'm attempting to get webcam data from a camera using opencv and then display that in a PyQt gui. I have done this before with Tkinter by gaining access to Tkinter main window loop with the .after function. However, PyQt doesn't seem to have the same usability and in order to have another loop running with an application you need to use a separate thread. So this is what I have come up with:
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget().__init__()
self.camera = None
self.camera = cv2.VideoCapture(0)
b, self.frame = self.camera.read()
self.label = QtGui.QLabel()
self.workThread = WorkThread(self)
self.connect(self.workThread, QtCore.SIGNAL('update_Camera'), self.draw)
self.workThread.start()
def closeEvent(self, event):
self.workThread.stop()
def draw(self):
print "I should Redraw"
height, width, channel = self.frame.shape
bpl = 3 * width
self.qImg = QImage(self.frame.data, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(self.qImg)
self.label.setPixmap(pix)
self.label.show()
class WorkThread(QtCore.QThread):
def __init__(self, parent):
QtCore.QThread.__init__(self)
self.parent = parent
def __del__(self):
self.wait()
def run(self):
while True:
self.emit(QtCore.SIGNAL('update_Camera'), "_")
self.terminate()
app = QtGui.QApplication(sys.argv)
test = VideoCapture()
test.draw()
sys.exit(app.exec_())
My idea was simple: I'll just create a thread with a loop which emits a signal telling the main application to update. (Obviously I don't I want a thread with a while True loop but I just used it for convenience and planned on replacing it once I could guarantee this idea would work). However, the signal doesn't appear to be registering because the draw() function is never called. Any idea what i'm doing wrong?
I don't know anything about OpenCV, so I can only guess at the problems.
My guess is that you are only reading the video data once. If it is a video stream then you have to continually read and interpret the data.
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
update_video = QtCore.pyqtSignal()
def __init__(self, parent = None):
QtGui.QWidget().__init__()
self.camera = cv2.VideoCapture(0)
self.label = QtGui.QLabel()
layout = QtGui.QHBoxLayout()
self.setLayout(layout)
layout.addWidget(self.label)
# Create the worker Thread
self.workThread = WorkThread(self.readVideo)
self.update_video.connect(self.draw)
def start(self):
self.workerThread.start()
def stop(self):
self.workThread.alive = False
self.workThread.stop()
def readVideo(self):
"""Note this method is executed in a thread. No drawing can happen in a thread. Emit a signal to draw items."""
b, self.frame = self.camera.read()
self.update_video.emit() # Signals are slow this may happen too fast
def closeEvent(self, event):
self.stop()
return QtGui.QWidget.closeEvent(self, event)
#self.workThread.alive = False
#self.workThread.stop()
def draw(self):
print "I should Redraw"
height, width, channel = self.frame.shape
bpl = 3 * width
qImg = QImage(self.frame.data, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(qImg)
self.label.setPixmap(pix)
# self.label.show() # The label is now a part of the widget layout
class WorkThread(QtCore.QThread):
def __init__(self, target=None, args=(), kwargs={}):
QtCore.QThread.__init__(self)
# I don't know how Qt's threads work, so I am treating it like a python thread
self.target = target
self.args = args
self.kwargs = kwargs
self.alive = True
def run(self):
while self.alive:
self.target(*self.args, **self.kwargs)
app = QtGui.QApplication(sys.argv)
test = VideoCapture()
test.start()
sys.exit(app.exec_())
Since you are only updating so many times per second you could probably use a timer for this instead of a thread. The timer is probably easier and safer to use.
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget().__init__()
self.camera = cv2.VideoCapture(0)
self.label = QtGui.QLabel()
layout = QtGui.QHBoxLayout()
self.setLayout(layout)
layout.addWidget(self.label)
# Create the worker Thread
self.timer= QtCore.QTimer()
self.timer.setInterval(300)
self.timer.timeout.connect(self.draw_camera)
def start(self):
self.timer.start()
def stop(self):
self.timer.stop()
def draw_camera(self):
"""You can draw in a timer, so just read the data and draw however fast you want."""
print "I should Redraw"
b, frame = self.camera.read()
height, width, channel = frame.shape
bpl = 3 * width
qImg = QImage(frame.data, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(qImg)
self.label.setPixmap(pix)
def closeEvent(self, event):
self.stop()
return QtGui.QWidget.closeEvent(self, event)
app = QtGui.QApplication(sys.argv)
test = VideoCapture()
test.start()
sys.exit(app.exec_())
I've been working on things very similar to your problem. I modified your code and tested it on my Windows PC.
The key point here is that you have to put the cv2 camera object in the WorkThread, read each frame in the main while loop in the run() method, and finally emit the image to the QWidget object to display it. In this way you get a continuous iteration of image capturing and display.
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time
class VideoCapture(QtGui.QWidget):
def __init__(self, parent = None):
# Use super() to call __init__() methods in the parent classes
super(VideoCapture, self).__init__()
# The instantiated QLabel object should belong to the 'self' QWidget object
self.label = QtGui.QLabel(self) # <- So put 'self' in the parenthesis
# Set the QLabel geometry to fit the image dimension (640, 480)
# The top left corner (0, 0) is the position within the QWidget main window
self.label.setGeometry(0,0,640,480)
# Instantiate a QThread object. No need to pass in the parent QWidget object.
self.workThread = WorkThread()
# Connect signal from self.workThread to the slot self.draw
self.connect(self.workThread, QtCore.SIGNAL('update_Camera'), self.draw)
self.workThread.start()
def closeEvent(self, event):
self.workThread.stop()
event.accept()
def draw(self, img):
print "I should Redraw"
height, width, channel = img.shape
bpl = 3 * width
self.qImg = QImage(img, width, height, bpl, QImage.Format_RGB888)
pix = QtGui.QPixmap(self.qImg)
self.label.setPixmap(pix)
self.label.show()
class WorkThread(QtCore.QThread):
def __init__(self):
# Use super() to call __init__() methods in the parent classes
super(WorkThread, self).__init__()
# Place the camera object in the WorkThread
self.camera = cv2.VideoCapture(0)
# The boolean variable to break the while loop in self.run() method
self.running = True
def run(self):
while self.running:
# Read one frame
b, self.frame = self.camera.read()
# Emit self.frame to the QWidget object
self.emit(QtCore.SIGNAL('update_Camera'), self.frame)
def stop(self):
# Terminate the while loop in self.run() method
self.running = False
app = QtGui.QApplication(sys.argv)
video_capture_widget = VideoCapture()
video_capture_widget.show()
sys.exit(app.exec_())
I want to use python OpenCV bindings to display webcam stream in a QLabel. I found some previous posts here:
updating QLabel in non-GUI thread continuously
Displaying a video stream in QLabel with PySide
In the beginning I tried a simple "while" loop:
def onRun(self):
self.playing = True
capture = cv2.VideoCapture(0)
while self.playing:
_, data = capture.read()
data = cv2.cvtColor(data, cv2.cv.CV_BGR2RGB)
qImage = QtGui.QImage(data, data.shape[1], data.shape[0],
QtGui.QImage.Format_RGB888)
self.lblImage.setPixmap(QtGui.QPixmap.fromImage(qImage))
self.lblImage.adjustSize()
time.sleep(0.02)
But I met with a "white-window" problem. I found that proper way to solve this is to create a new thread. My question is: what is it all about new thread? should I create QThread or something? And what is it signal/slot emitting in a thread?
I've never used threads so it's totally new thing to me.
I can't test this myself, but would it not be enough to simply process the pending events within the loop?
That is:
def onRun(self):
self.playing = True
capture = cv2.VideoCapture(0)
while self.playing:
...
QtGui.qApp.processEvents()
time.sleep(0.02)
A solution is to use pyqtSignal. Here is an example:
import time
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QVBoxLayout
from PyQt5.QtGui import QPixmap, QImage
import sys
import threading
import numpy as np
class FakeCamera(QWidget):
"""Simulate a camera"""
image_taken = pyqtSignal(np.ndarray)
def __init__(self, callback, time_cost=0.01, image_shape=(256, 256)):
super(FakeCamera, self).__init__()
self.time_cost = time_cost # the time cost to take a frame, determine the frame rate
self.image_shape = image_shape
self.image_taken.connect(callback)
self._stop = threading.Event()
def start(self):
"""start a thread to take images"""
def run():
while not self._stop.is_set():
time.sleep(self.time_cost)
image = np.random.randint(0, 256, self.image_shape, np.uint8)
self.image_taken.emit(image)
threading.Thread(target=run).start()
def stop(self):
self._stop.set()
class WindowDemo(QWidget):
def __init__(self):
super(WindowDemo, self).__init__()
# label
self.label = QLabel()
self.label.setScaledContents(True)
self.label.setMinimumSize(1, 1)
# layout
vbox = QVBoxLayout()
vbox.addWidget(self.label)
self.setLayout(vbox)
# camera
self.camera = FakeCamera(self.show_image)
# start a thread to take images
self.camera.start()
#pyqtSlot(np.ndarray)
def show_image(self, image):
qimage = QImage(image, image.shape[1], image.shape[0], QImage.Format_Grayscale8)
self.label.setPixmap(QPixmap.fromImage(qimage))
def closeEvent(self, e):
self.camera.stop()
e.accept()
if __name__ == '__main__':
app = QApplication(sys.argv)
win = WindowDemo()
win.show()
sys.exit(app.exec_())