Unable to stream frames from camera to QML - python

I am using PyQt5. I want to send frames from OpenCV to QML using QQuickPaintedItem. I wrote a sample implementation below. I can't figure out why the paint event is called only once — only while the application is loading. It paints only one frame from the camera onto the QML component, and self.update() never triggers another paint event.
from OpenGL import GL
from PyQt5.QtQuick import QQuickPaintedItem, QQuickView
from PyQt5.QtGui import QPainter, QPixmap, QImage
from PyQt5.QtQml import qmlRegisterType
import sys
from PyQt5.QtGui import QColor
from PyQt5.QtCore import QUrl,QObject,pyqtSignal
import cv2.cv2 as cv2
from PyQt5.QtWidgets import QApplication
class ImageWriter(QQuickPaintedItem):
    """QQuickPaintedItem that paints the most recent OpenCV camera frame.

    NOTE(review): cam_frame is deliberately a class attribute because the QML
    engine instantiates its own ImageWriter, distinct from the one created in
    get_frames() — sharing state at class level is what makes either instance
    see the frame (see the accepted answer for the proper singleton fix).
    """

    cam_frame = None  # latest frame as a QPixmap, shared across instances

    def __init__(self, *args, **kwargs):
        super(ImageWriter, self).__init__(*args, **kwargs)
        self.setRenderTarget(QQuickPaintedItem.FramebufferObject)

    def paint(self, painter):
        """Draw the latest frame, if any has been captured yet."""
        # BUG FIX: paint() is called once while the scene loads, before any
        # frame exists; drawPixmap(None) would raise a TypeError.
        if ImageWriter.cam_frame is None:
            return
        painter.drawPixmap(0, 0, ImageWriter.cam_frame)

    def update_frame(self, frame):
        """Convert a BGR OpenCV frame to a QPixmap and request a repaint."""
        frame = cv2.resize(frame, (700, 500), cv2.INTER_AREA)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        # BUG FIX: pass bytesPerLine and a named format instead of the magic
        # number 17 (17 == QImage.Format_RGBA8888); without the stride QImage
        # can misread rows when the buffer is padded.
        image = QImage(frame.data, frame.shape[1], frame.shape[0],
                       frame.strides[0], QImage.Format_RGBA8888)
        ImageWriter.cam_frame = QPixmap.fromImage(image)
        self.update()
def get_frames(app):
    """Capture frames from the default camera forever and push them to QML."""
    capture = cv2.VideoCapture(0)
    writer = ImageWriter()
    # Warm-up: throw away the first 30 frames so the sensor settles.
    # (The original counter-based loop only ever ran on the first pass.)
    for _ in range(30):
        capture.read()
    while True:
        _, frame = capture.read()
        writer.update_frame(frame)
        print("get frames")
        # Keep the Qt event loop alive since this function never returns.
        app.processEvents()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Expose ImageWriter to QML as the 'imageWriter 1.0' module.
    qmlRegisterType(ImageWriter, "imageWriter", 1, 0, "ImageWriter")
    view = QQuickView()
    view.setSource(QUrl('test.qml'))
    rootObject = view.rootObject()
    view.show()
    # Blocks forever; pumps the event loop itself via processEvents().
    get_frames(app)
    sys.exit(app.exec_())
Here is the QML i wrote for this,
import QtQuick 2.0
import imageWriter 1.0
Item {
    width: 800
    height: 600

    // Python-backed item registered under 'imageWriter 1.0'
    ImageWriter {
        id: imageWriter
        width: 800
        height: 600
    }
}
I cannot work out why the paint event is not triggered by self.update(). I can't use QWidgets; I have to use this approach. Is there something I am missing here?

The problem is caused by having 2 ImageWriter objects, one created in imgw = ImageWriter() and the other created by QML. You can confirm this by adding prints in the .py and .qml files:
*.py
def get_frames(app):
cap = cv2.VideoCapture(0)
num = 0
imgw = ImageWriter()
print("Python:", imgw)
...
*.qml
...
Component.onCompleted: console.log("QML:", imageWriter)
...
Output:
qml: >>>> ImageWriter(0x55bf2927e770)
Python: <__main__.ImageWriter object at 0x7fce8e4ff798>
As you can see, there are 2 objects that point to different memory addresses, so a possible solution is to create a singleton using this library:
from OpenGL import GL
import sys
from PyQt5 import QtCore, QtGui, QtQml, QtQuick
import cv2
try: from PyQt5.QtCore import pyqtWrapperType
except ImportError:
from sip import wrappertype as pyqtWrapperType
class Singleton(pyqtWrapperType, type):
    """Metaclass: every instantiation of the class returns one shared object."""

    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        # No instance exists until the class is first called.
        cls.instance = None

    def __call__(cls, *args, **kwargs):
        # Lazily create the single instance, then always hand back the same one.
        if cls.instance is None:
            cls.instance = super().__call__(*args, **kwargs)
        return cls.instance
class ImageWriter(QtQuick.QQuickPaintedItem, metaclass=Singleton):
    """Paints the latest camera frame; the Singleton metaclass guarantees the
    Python-side instance and the QML-created instance are the same object."""

    def __init__(self, *args, **kwargs):
        super(ImageWriter, self).__init__(*args, **kwargs)
        self.setRenderTarget(QtQuick.QQuickPaintedItem.FramebufferObject)
        self.cam_frame = QtGui.QImage()  # starts null; drawImage of a null image is a no-op

    def paint(self, painter):
        painter.drawImage(0, 0, self.cam_frame)

    def update_frame(self, frame):
        """Convert a BGR OpenCV frame to a QImage and request a repaint."""
        frame = cv2.resize(frame, (700, 500), cv2.INTER_AREA)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        # BUG FIX: pass bytesPerLine and a named format instead of the magic
        # number 17 (17 == QImage.Format_RGBA8888).
        image = QtGui.QImage(frame.data, frame.shape[1], frame.shape[0],
                             frame.strides[0], QtGui.QImage.Format_RGBA8888)
        # copy() detaches from the numpy buffer, which dies when this returns.
        self.cam_frame = image.copy()
        self.update()
def get_frames(app):
    """Capture frames forever and hand them to the (singleton) ImageWriter."""
    cap = cv2.VideoCapture(0)
    imgw = ImageWriter()
    # Warm-up: discard the first 30 frames (the original counter loop only
    # ever executed on the first iteration of the outer loop).
    for _ in range(30):
        cap.read()
    while True:
        ok, frame = cap.read()
        if ok:
            imgw.update_frame(frame)
        # print("get frames")
        app.processEvents()
if __name__ == '__main__':
    app = QtGui.QGuiApplication(sys.argv)
    QtQml.qmlRegisterType(ImageWriter, "imageWriter", 1, 0, "ImageWriter")
    view = QtQuick.QQuickView()
    view.setSource(QtCore.QUrl('test.qml'))
    rootObject = view.rootObject()
    view.show()
    # Never returns; drives the event loop via processEvents().
    get_frames(app)
    sys.exit(app.exec_())
The above should make the image acquisition work, but I think there is a better way; below I show a better option.
Using my previous answer as a base I created a module that implements a handler of the camera using opencv, in addition to a viewer, and a generic class that allows adding filters, for this the project must have the following structure
├── main.py
├── main.qml
└── PyCVQML
├── cvcapture.py
├── cvitem.py
└── __init__.py
PyCVQML/cvcapture.py
import numpy as np
import threading
import cv2
from PyQt5 import QtCore, QtGui, QtQml
gray_color_table = [QtGui.qRgb(i, i, i) for i in range(256)]
class CVAbstractFilter(QtCore.QObject):
    """Base class for image filters; subclasses override process_image()."""

    def process_image(self, src):
        """Identity filter: return the input unchanged."""
        return src
class CVCapture(QtCore.QObject):
    """Grabs frames from an OpenCV VideoCapture, runs them through a chain of
    CVAbstractFilter objects on a worker thread, and exposes the result as a
    QImage 'image' property for QML consumers.

    NOTE(review): every decorator below had its '@' mangled into '#' in the
    original transcription (e.g. '#QtCore.pyqtSlot()'); they are restored here.
    """

    started = QtCore.pyqtSignal()
    imageReady = QtCore.pyqtSignal()
    indexChanged = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(CVCapture, self).__init__(parent)
        self._image = QtGui.QImage()
        self._index = 0                             # camera device index
        self.m_videoCapture = cv2.VideoCapture()
        self.m_timer = QtCore.QBasicTimer()
        self.m_filters = []
        self.m_busy = False                         # True while a frame is being processed

    @QtCore.pyqtSlot()
    @QtCore.pyqtSlot(int)
    def start(self, *args):
        """Open the capture device (optionally switching index) and start polling."""
        if args:
            self.setIndex(args[0])
        self.m_videoCapture.release()
        self.m_videoCapture = cv2.VideoCapture(self._index)
        if self.m_videoCapture.isOpened():
            # Interval 0: grab as fast as the event loop allows.
            self.m_timer.start(0, self)
            self.started.emit()

    @QtCore.pyqtSlot()
    def stop(self):
        self.m_timer.stop()

    def timerEvent(self, e):
        if e.timerId() != self.m_timer.timerId():
            return
        ret, frame = self.m_videoCapture.read()
        if not ret:
            self.m_timer.stop()
            return
        # Drop frames while a previous one is still being filtered.
        if not self.m_busy:
            threading.Thread(target=self.process_image, args=(np.copy(frame),)).start()

    @QtCore.pyqtSlot(np.ndarray)
    def process_image(self, frame):
        """Run the filter chain, then marshal the QImage back to the GUI thread."""
        self.m_busy = True
        for f in self.m_filters:
            frame = f.process_image(frame)
        image = CVCapture.ToQImage(frame)
        self.m_busy = False
        # QueuedConnection: setImage runs on this object's (GUI) thread.
        QtCore.QMetaObject.invokeMethod(self,
                                        "setImage",
                                        QtCore.Qt.QueuedConnection,
                                        QtCore.Q_ARG(QtGui.QImage, image))

    @staticmethod
    def ToQImage(im):
        """Convert a uint8 grayscale or BGR numpy image into a detached QImage."""
        if im is None:
            return QtGui.QImage()
        if im.dtype == np.uint8:
            if len(im.shape) == 2:
                qim = QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0],
                                   QtGui.QImage.Format_Indexed8)
                qim.setColorTable(gray_color_table)
                return qim.copy()
            elif len(im.shape) == 3:
                if im.shape[2] == 3:
                    w, h, _ = im.shape
                    rgb_image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
                    flip_image = cv2.flip(rgb_image, 1)  # mirror for a webcam feel
                    qim = QtGui.QImage(flip_image.data, h, w, QtGui.QImage.Format_RGB888)
                    return qim.copy()
        return QtGui.QImage()

    def image(self):
        return self._image

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if self._image == image:
            return
        self._image = image
        self.imageReady.emit()

    def index(self):
        return self._index

    def setIndex(self, index):
        if self._index == index:
            return
        self._index = index
        self.indexChanged.emit()

    @QtCore.pyqtProperty(QtQml.QQmlListProperty)
    def filters(self):
        # QML list property: 'filters: [f1, f2]' appends into self.m_filters.
        return QtQml.QQmlListProperty(CVAbstractFilter, self, self.m_filters)

    image = QtCore.pyqtProperty(QtGui.QImage, fget=image, notify=imageReady)
    index = QtCore.pyqtProperty(int, fget=index, fset=setIndex, notify=indexChanged)
PyCVQML/cvitem.py
from PyQt5 import QtCore, QtGui, QtQuick
class CVItem(QtQuick.QQuickPaintedItem):
    """QML item that paints whatever QImage is bound to its 'image' property."""

    imageChanged = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(CVItem, self).__init__(parent)
        # self.setRenderTarget(QtQuick.QQuickPaintedItem.FramebufferObject)
        self.m_image = QtGui.QImage()

    def paint(self, painter):
        """Scale the held image to the item's size and draw it."""
        if self.m_image.isNull():
            return
        scaled = self.m_image.scaled(self.size().toSize())
        painter.drawImage(QtCore.QPoint(), scaled)

    def image(self):
        return self.m_image

    def setImage(self, image):
        """Store a new frame, notify bindings, and schedule a repaint."""
        if self.m_image == image:
            return
        self.m_image = image
        self.imageChanged.emit()
        self.update()

    image = QtCore.pyqtProperty(QtGui.QImage, fget=image, fset=setImage, notify=imageChanged)
PyCVQML/__init__.py
from PyQt5 import QtQml
from .cvcapture import CVCapture, CVAbstractFilter
from .cvitem import CVItem
def registerTypes(uri="PyCVQML"):
    """Register this package's QML types under the given import uri."""
    for cls, qml_name in ((CVCapture, "CVCapture"), (CVItem, "CVItem")):
        QtQml.qmlRegisterType(cls, uri, 1, 0, qml_name)
Then you use it in main.py — I have added 2 example filters. For this, CVCapture has the filters property where the filters are passed to it, and they will be executed in the order they are given. To implement a new filter you must inherit from CVAbstractFilter and override the process_image() method, which receives the image as an np.ndarray and should return the result after the filter.
main.py
import cv2
import numpy as np
from PyQt5 import QtGui, QtCore, QtQuick, QtQml
import PyCVQML
def max_rgb_filter(image):
    """Keep only each pixel's maximum-intensity channel(s).

    For every (x, y) coordinate, any channel strictly below that pixel's
    maximum channel value is set to zero; channels equal to the maximum are
    preserved. Channel order (BGR) and dtype are unchanged.

    Parameters:
        image: H x W x C numpy array (uint8 BGR in this project).
    Returns:
        New array of the same shape and dtype.
    """
    # Vectorized numpy equivalent of the original cv2.split / per-channel
    # masking / cv2.merge sequence — identical output, one pass, no cv2 calls.
    peak = image.max(axis=2, keepdims=True)
    return np.where(image < peak, 0, image)
def rgb_to_gray(image):
    """Convert a BGR image to a single-channel grayscale image."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
class MaxRGBFilter(PyCVQML.CVAbstractFilter):
    """Filter wrapper around max_rgb_filter()."""

    def process_image(self, src):
        return max_rgb_filter(src)
class GrayFilter(PyCVQML.CVAbstractFilter):
    """Filter wrapper around rgb_to_gray()."""

    def process_image(self, src):
        return rgb_to_gray(src)
if __name__ == '__main__':
    import os
    import sys

    app = QtGui.QGuiApplication(sys.argv)
    PyCVQML.registerTypes()
    QtQml.qmlRegisterType(MaxRGBFilter, "Filters", 1, 0, "MaxRGBFilter")
    QtQml.qmlRegisterType(GrayFilter, "Filters", 1, 0, "GrayFilter")
    view = QtQuick.QQuickView()
    view.setTitle("PyCVQML Example")
    # BUG FIX: the original line was missing its closing parenthesis,
    # which is a syntax error.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    view.setSource(QtCore.QUrl.fromLocalFile(QtCore.QDir(dir_path).absoluteFilePath("main.qml")))
    view.show()
    sys.exit(app.exec_())
main.qml
import QtQuick 2.0
import PyCVQML 1.0
import Filters 1.0
Item {
    width: 800
    height: 600

    // Displays capture's current frame.
    CVItem {
        id: imageWriter
        anchors.fill: parent
        image: capture.image
    }

    MaxRGBFilter {
        id: max_rgb_filter
    }
    GrayFilter {
        id: gray_filter
    }

    CVCapture {
        id: capture
        index: 0
        // Filters are applied in the order listed.
        filters: [max_rgb_filter, gray_filter]
        Component.onCompleted: capture.start()
    }
}

Related

Porting from PyOpenGL GLUT to PyQT: Rendering image in QOpenGLWidget fails

I am writing an application that shows the user's webcam video feed in a PyQt5 window. Using a QLabel and updating the label's pixmap for every frame is too slow due to the target device's performance.
I therefore tried to gain some speed by using OpenGL to display the video frames as 2D texture on a rectangle. I found this earlier question by user Arijit that contains a working example using GLUT. However, I fail to port the code from using GLUT to a QOpenGLWidget.
For the init and reshape functions in GLUT it is clear that they correspond to the initializeGL and resizeGL functions in QT. But where in the QOpenGLWidget lifecycle do the display and idle functions belong? Executing them inside paintGL shows no effect. The current code does not crash, but the widget stays black.
What is the right way to do this?
#!/usr/bin/env python
import sys, time
import cv2
import numpy as np
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class GLWidget(QOpenGLWidget):
    """Shows camera frames as a 2D texture on a quad covering the widget."""

    def __init__(self, parent=None, width=1280, height=720):
        self.parent = parent
        self.width = width
        self.height = height
        QOpenGLWidget.__init__(self, parent)

    def sizeHint(self):
        return QSize(self.width, self.height)

    def setImage(self, image):
        """Accept a new frame (flipped vertically for GL) and upload it."""
        self.image = np.flipud(image).tobytes()
        self._idle()

    def initializeGL(self):
        profile = QOpenGLVersionProfile()
        profile.setVersion(2, 0)
        self.gl = self.context().versionFunctions(profile)
        self.gl.glClearColor(0.0, 0.0, 0.0, 1.0)
        # Seed with a black frame so the first paint has texture data.
        self.setImage(np.zeros((self.width, self.height, 3)))

    def _idle(self):
        # Upload the current frame into the 2D texture, then repaint.
        self.gl.glTexImage2D(self.gl.GL_TEXTURE_2D, 0, self.gl.GL_RGB,
                             self.width, self.height, 0,
                             self.gl.GL_RGB, self.gl.GL_UNSIGNED_BYTE,
                             self.image)
        self.update()

    def _display(self):
        gl = self.gl
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glEnable(gl.GL_TEXTURE_2D)
        gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        gl.glOrtho(0, self.width, 0, self.height, -1, 1)
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        # One textured quad spanning the whole projection.
        gl.glBegin(gl.GL_QUADS)
        gl.glTexCoord2f(0.0, 0.0)
        gl.glVertex2f(0.0, 0.0)
        gl.glTexCoord2f(1.0, 0.0)
        gl.glVertex2f(self.width, 0.0)
        gl.glTexCoord2f(1.0, 1.0)
        gl.glVertex2f(self.width, self.height)
        gl.glTexCoord2f(0.0, 1.0)
        gl.glVertex2f(0.0, self.height)
        gl.glEnd()
        gl.glFlush()

    def resizeGL(self, w, h):
        gl = self.gl
        if h == 0:
            h = 1  # avoid division by zero in the aspect computation
        gl.glViewport(0, 0, w, h)
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        # Preserve aspect ratio whichever dimension dominates.
        if w <= h:
            gl.glOrtho(-1, 1, -1 * h / w, h / w, -1, 1)
        else:
            gl.glOrtho(-1 * w / h, w / h, -1, 1, -1, 1)
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

    def paintGL(self):
        self._display()
class VideoThread(QThread):
    """Background thread that emits webcam frames as RGB numpy arrays."""

    change_image_signal = pyqtSignal(np.ndarray)

    def __init__(self):
        super().__init__()
        self._run_flag = True

    def run(self):
        capture = cv2.VideoCapture(0)
        capture.set(cv2.CAP_PROP_BUFFERSIZE, 3)
        while self._run_flag:
            _, frame = capture.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.change_image_signal.emit(frame)
            time.sleep(0.4)  # throttle emission rate
        capture.release()

    def stop(self):
        """Request run() to finish and block until the thread exits."""
        self._run_flag = False
        self.wait()
class MainUI(QWidget):
    """Main window hosting the GL video widget and the capture thread."""

    def __init__(self):
        QWidget.__init__(self)
        self.video_size = QSize(394, 292)
        self.setup_ui()
        self.setup_camera()

    def setup_ui(self):
        self.video_widget = GLWidget(self, self.video_size.width(), self.video_size.height())
        self.main_layout = QGridLayout()
        self.main_layout.addWidget(self.video_widget, 0, 0)
        self.setLayout(self.main_layout)

    def closeEvent(self, event):
        # Stop the capture thread cleanly before the window closes.
        self.thread.stop()
        event.accept()

    def setup_camera(self):
        self.thread = VideoThread()
        self.thread.change_image_signal.connect(self.display_video_stream)
        self.thread.start()

    # BUG FIX(review): the decorator's '@' was mangled into '#' in the
    # original transcription, turning it into a comment; restored here.
    @pyqtSlot(np.ndarray)
    def display_video_stream(self, image):
        self.video_widget.setImage(image)
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainUI()
    window.show()
    sys.exit(app.exec())

Live video from GigE Cameras

I have a problem with live video stream from 2 GigE cameras in QML. I tried it before with QLabels and QPixmap and it worked without any problem. The QML Labels don't have pixmap property to send images using signal slots.
Here is my Python code:
import sys
import os
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlApplicationEngine
from PySide2.QtGui import QImage, QPixmap
from PySide2.QtCore import Slot, QThread, Signal, Qt, QObject
import cv2
from pypylon import pylon
# Enumerate the attached GigE cameras (at most two) and attach them.
tlFactory = pylon.TlFactory.GetInstance()
devices = tlFactory.EnumerateDevices()
if not devices:
    raise pylon.RuntimeException("No camera present.")
cameras = pylon.InstantCameraArray(min(len(devices), 2))
for i, cam in enumerate(cameras):
    cam.Attach(tlFactory.CreateDevice(devices[i]))
class CamThread(QThread):
    """Grabs frame pairs from the two GigE cameras and emits them as QImages."""

    cam1 = Signal(QImage)
    cam2 = Signal(QImage)

    def run(self):
        cameras.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        try:
            while cameras.IsGrabbing():
                grabResult1 = cameras[0].RetrieveResult(
                    5000, pylon.TimeoutHandling_ThrowException
                )
                grabResult2 = cameras[1].RetrieveResult(
                    5000, pylon.TimeoutHandling_ThrowException
                )
                if grabResult1.GrabSucceeded() and grabResult2.GrabSucceeded():
                    img1 = grabResult1.GetArray()
                    img2 = grabResult2.GetArray()
                    rgb1 = cv2.cvtColor(img1, cv2.COLOR_YUV2RGB_Y422)
                    rgb2 = cv2.cvtColor(img2, cv2.COLOR_YUV2RGB_Y422)
                    h1, w1, ch1 = rgb1.shape
                    h2, w2, ch2 = rgb2.shape
                    bytesPerLine1 = ch1 * w1
                    # BUG FIX: the original used w1 here, giving camera 2 a
                    # wrong stride whenever the two sensors differ in width.
                    bytesPerLine2 = ch2 * w2
                    # BUG FIX: the original wrapped the raw YUV buffers
                    # (img1/img2) in QImage even though the RGB conversions
                    # were computed above and left unused; wrap rgb1/rgb2.
                    convertToQtFormat1 = QImage(
                        rgb1.data, w1, h1, bytesPerLine1, QImage.Format_RGB888
                    )
                    convertToQtFormat2 = QImage(
                        rgb2.data, w2, h2, bytesPerLine2, QImage.Format_RGB888
                    )
                    p = convertToQtFormat1.scaled(800, 746, Qt.KeepAspectRatio)
                    q = convertToQtFormat2.scaled(800, 746, Qt.KeepAspectRatio)
                    self.cam1.emit(p)
                    self.cam2.emit(q)
        except Exception as error:
            print(error)
class MainWindow(QObject):
    """QML backend: starts the camera thread and receives its frames."""

    def __init__(self):
        QObject.__init__(self)
        self.CamThread = CamThread()
        self.CamThread.cam1.connect(self.camera1)
        self.CamThread.cam2.connect(self.camera2)
        self.CamThread.start()

    # BUG FIX(review): both decorators had their '@' mangled into '#' in the
    # original transcription; restored here.
    @Slot(QImage)
    def camera1(self, image):
        pass  # TODO: forward the frame to QML

    @Slot(QImage)
    def camera2(self, image):
        pass  # TODO: forward the frame to QML
if __name__ == "__main__":
    app = QGuiApplication(sys.argv)
    backend = MainWindow()
    engine = QQmlApplicationEngine()
    # Expose the Python backend to QML under the name 'backend'.
    engine.rootContext().setContextProperty("backend", backend)
    engine.load(os.path.join(os.path.dirname(__file__), "main.qml"))
    if not engine.rootObjects():
        sys.exit(-1)
    sys.exit(app.exec_())
So how to show live video stream using QML/PySide2?
I am using QT Design Studio.
Although the QQuickImageProvider option can be a good one, its drawback is that you have to generate different URLs; a better option is to use VideoOutput. For example, in your case the following implementation should work (not tested):
from functools import cached_property
import os
import random
import sys
import threading
import cv2
from PySide2.QtCore import Property, QObject, Qt, QSize, QTimer, Signal, Slot
from PySide2.QtGui import QColor, QGuiApplication, QImage
from PySide2.QtMultimedia import QAbstractVideoSurface, QVideoFrame, QVideoSurfaceFormat
from PySide2.QtQml import QQmlApplicationEngine
import shiboken2
from pypylon import pylon
class CameraProvider(QObject):
    """Polls the pylon camera array on a daemon thread and emits QImages."""

    imageChanged = Signal(int, QImage)

    def start(self, cameras):
        """Start grabbing in the background; returns immediately."""
        threading.Thread(target=self._execute, args=(cameras,), daemon=True).start()

    def _execute(self, cameras):
        while cameras.IsGrabbing():
            for i, camera in enumerate(cameras):
                try:
                    grab_result = cameras[i].RetrieveResult(
                        5000, pylon.TimeoutHandling_ThrowException
                    )
                    if grab_result.GrabSucceeded():
                        img = grab_result.GetArray()
                        # FIXME
                        # convert img to qimage
                        qimage = QImage(800, 746, QImage.Format_RGB888)
                        qimage.fill(QColor(*random.sample(range(0, 255), 3)))
                        # Guard: the QObject may already have been deleted.
                        if shiboken2.isValid(self):
                            self.imageChanged.emit(i, qimage.copy())
                except Exception as error:
                    print(error)
class CameraService(QObject):
    """Bridges QImages to a QML VideoOutput via its videoSurface property."""

    surfaceChanged = Signal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self._surface = None
        self._format = QVideoSurfaceFormat()
        self._format_is_valid = False  # surface format set from first frame

    def get_surface(self):
        return self._surface

    def set_surface(self, surface):
        if self._surface is surface:
            return
        # Stop the previously attached surface before swapping it out.
        if (
            self._surface is not None
            and self._surface is not surface
            and self._surface.isActive()
        ):
            self._surface.stop()
        self._surface = surface
        self.surfaceChanged.emit()
        if self._surface is not None:
            self._format = self._surface.nearestFormat(self._format)
            self._surface.start(self._format)

    videoSurface = Property(
        QAbstractVideoSurface,
        fget=get_surface,
        fset=set_surface,
        notify=surfaceChanged,
    )

    # BUG FIX(review): the decorator's '@' was mangled into '#' in the
    # original transcription; restored here.
    @Slot(QImage)
    def update_frame(self, qimage):
        """Present a single frame on the attached surface."""
        if self.videoSurface is None or qimage.isNull():
            return
        if not self._format_is_valid:
            # Configure the surface format from the first valid frame.
            self._set_format(qimage.width(), qimage.height(), QVideoFrame.Format_RGB32)
            self._format_is_valid = True
        qimage.convertTo(
            QVideoFrame.imageFormatFromPixelFormat(QVideoFrame.Format_RGB32)
        )
        self._surface.present(QVideoFrame(qimage))

    def _set_format(self, width, height, pixel_format):
        size = QSize(width, height)
        video_format = QVideoSurfaceFormat(size, pixel_format)
        self._format = video_format
        if self._surface is not None:
            if self._surface.isActive():
                self._surface.stop()
            self._format = self._surface.nearestFormat(self._format)
            self._surface.start(self._format)
class CameraManager(QObject):
    """Owns one CameraService per camera and routes provider frames to them."""

    def __init__(self, cameras, parent=None):
        super().__init__(parent)
        self._services = []
        self.provider.imageChanged.connect(self.handle_image_changed)
        self.provider.start(cameras)
        for _ in cameras:
            self._services.append(CameraService())

    # BUG FIX(review): the decorators below had their '@' mangled into '#'
    # in the original transcription; restored here.
    @cached_property
    def provider(self):
        return CameraProvider()

    @Slot(int, QImage)
    def handle_image_changed(self, index, qimage):
        self._services[index].update_frame(qimage)

    def get_services(self):
        return self._services

    services = Property("QVariantList", fget=get_services, constant=True)
def main():
    """Set up the pylon cameras, the manager, and the QML engine."""
    app = QGuiApplication(sys.argv)

    tlFactory = pylon.TlFactory.GetInstance()
    devices = tlFactory.EnumerateDevices()
    if len(devices) == 0:
        raise pylon.RuntimeException("No camera present.")
    cameras = pylon.InstantCameraArray(min(len(devices), 2))
    for i, cam in enumerate(cameras):
        cam.Attach(tlFactory.CreateDevice(devices[i]))

    manager = CameraManager(cameras)
    engine = QQmlApplicationEngine()
    engine.rootContext().setContextProperty("manager", manager)
    engine.load(os.path.join(os.path.dirname(__file__), "main.qml"))
    if not engine.rootObjects():
        sys.exit(-1)
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
import QtQuick 2.14
import QtQuick.Window 2.14
import QtMultimedia 5.14
Window {
    visible: true
    width: 640
    height: 480
    title: qsTr("Hello World")

    GridView {
        width: 300; height: 200
        // One delegate per CameraService exposed by the Python backend.
        model: manager !== null ? manager.services : []
        delegate: VideoOutput {
            width: 100
            height: 100
            fillMode: VideoOutput.PreserveAspectCrop
            source: model.modelData
        }
    }
}
Qt provides different methods to pass images/video streams to QML:
1. Converting pixmap to base64 encoding
// Serialize the pixmap as PNG into an in-memory buffer, then base64-encode
// it as a data: URL that QML's Image.source can consume directly.
QByteArray byteArray;
QBuffer buffer(&byteArray);
buffer.open(QIODevice::WriteOnly);
pixmap.save(&buffer,"PNG");
QString data("data:image/png;base64,");
data.append(QString::fromLatin1(byteArray.toBase64().data()));
This base64 encoded image may be passed to Image::source
2. Use QQuickImageProvider
This allows connecting a custom image://... url to a QPixmap or QImage directly. Check the docs for more information.
3. Use QtMultimedia
Especially VideoOutput may be useful.

How do I call a function to start Qthreading?

I am trying to make a program that can pixelate images. When the image is uploaded, the image is suppose to go through the color_quantization function then the pixelate function. But when I run the program, the image only goes through the pixelate function.
from PyQt5 import QtWidgets as Qtw, QtGui, QtCore
from PyQt5.QtWidgets import QFileDialog
from pixel import Ui_Form
from PyQt5.QtGui import QImage
import cv2
import numpy as np
class Worker(QtCore.QObject):
    """Worker meant to run on a QThread; performs k-means color quantization."""

    # BUG FIX: the original declared pyqtSignal() with no argument types yet
    # emitted a value below, which raises TypeError at runtime; declare the
    # payload (a numpy row) so the emit succeeds.
    hash = QtCore.pyqtSignal(object)

    def color_quantization(self, img, k=3):
        """Reduce img to k colors via k-means; emits each resulting row."""
        data = np.float32(img).reshape((-1, 3))
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)
        ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        result = center[label.flatten()]
        # BUG FIX: the original called result.reshpe(...) — a typo for
        # reshape — which raises AttributeError.
        result = result.reshape(img.shape)
        for x in result:
            self.hash.emit(x)
        return x
class Pixel(Qtw.QWidget):
    """Main widget: lets the user upload an image and shows a pixelated copy."""

    hash_requested = QtCore.pyqtSignal()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.ui.upload.clicked.connect(self.upload_image)
        # The worker lives on its own QThread so k-means won't block the GUI.
        self.worker = Worker()
        self.worker_thread = QtCore.QThread()
        self.worker.hash.connect(self.upload_image)
        self.hash_requested.connect(self.worker.color_quantization)
        self.worker.moveToThread(self.worker_thread)
        self.worker_thread.start()

    def upload_image(self):
        """Ask for an image file and pixelate it."""
        path = QFileDialog.getOpenFileName(filter="Image(*.*)")[0]
        self.pixelate(cv2.imread(path))

    def pixelate(self, image):
        """Downscale to 128x128, then upscale back with nearest-neighbour."""
        height, width = image.shape[:2]
        block_w, block_h = 128, 128
        shrunk = cv2.resize(image, (block_w, block_h), interpolation=cv2.INTER_LINEAR)
        output = cv2.resize(shrunk, (width, height), interpolation=cv2.INTER_NEAREST)
        frame = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        qimage = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0],
                        QImage.Format_RGB888)
        self.ui.label.setPixmap(QtGui.QPixmap.fromImage(qimage))
if __name__ == '__main__':
    app = Qtw.QApplication([])
    win = Pixel()
    win.show()
    app.exec_()

QLabel is not updating the image after OpenCV image manipulation

I've been trying to create an interactive OpenCV image viewer where I'll be able to view the image immediately after a manipulation. Like say, I'm applying a binary thresholding operation on an image and changing the threshold value from PyQt slider. Now, I want to see each thresholded image in the image viewer.
I've created a very basic program for this purpose using python OpenCV and PyQT5 lib. But, the image is not being updated in the QLabel.
Below is my code:
import sys
import cv2
import numpy as np
import imutils
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QLCDNumber, QSlider, QLabel, QCheckBox
from PyQt5.QtGui import QPixmap, QImage
class MyWindow(QWidget):
    """Interactive viewer: a checkbox+slider apply binary thresholding."""

    def __init__(self):
        super().__init__()
        self.imglabel = QLabel(self)
        self.imglabel.setFixedSize(1200, 900)
        ori_img = cv2.imread("../resources/omr-1-ans-ori.png", cv2.IMREAD_COLOR)
        ori_img = imutils.resize(ori_img, height=960)
        self.gray_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
        self.gray_img_c = ori_img      # BGR image currently shown
        self.thresh = False            # thresholding enabled?
        self.thresh_karnel_size = 11
        self.init_ui()

    def init_ui(self):
        # lcd = QLCDNumber(self)
        hbox1 = QHBoxLayout()
        cb_thresh = QCheckBox('thresh', self)
        cb_thresh.setChecked(False)
        cb_thresh.stateChanged.connect(self.changeTitleThresh)
        hbox1.addWidget(cb_thresh)

        thresh_slider = QSlider(Qt.Horizontal, self)
        thresh_slider.setFocusPolicy(Qt.StrongFocus)
        thresh_slider.setTickPosition(QSlider.TicksBothSides)
        thresh_slider.setTickInterval(1)
        thresh_slider.setSingleStep(1)
        thresh_slider.setPageStep(1)
        thresh_slider.setMinimum(1)
        thresh_slider.setMaximum(127)
        thresh_slider.valueChanged[int].connect(self.threshSliderChangeValue)

        vbox = QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addWidget(thresh_slider)
        vbox.addWidget(self.imglabel)
        self.setLayout(vbox)
        self.setGeometry(50, 50, 1200, 768)
        self.setWindowTitle('Learning PyQT5')
        self.updateImage()
        self.show()

    def changeTitleThresh(self, state):
        # print("thresh checkbox: ", state, Qt.Checked)
        self.thresh = state == Qt.Checked

    def threshSliderChangeValue(self, value):
        ksize = (value * 2) + 1
        print("ksize: ", ksize)
        if ksize > 1 and ksize % 2 != 0 and self.thresh:
            self.thresh_karnel_size = ksize
            self.gray_img = cv2.threshold(self.gray_img, self.thresh_karnel_size, 255, cv2.THRESH_BINARY)[1]
            self.gray_img_c = cv2.cvtColor(self.gray_img.copy(), cv2.COLOR_GRAY2BGR)
            self.updateImage()

    def updateImage(self):
        height, width, channel = self.gray_img_c.shape
        bytesPerLine = 3 * width
        qImg = QImage(self.gray_img_c.data, width, height, bytesPerLine, QImage.Format_RGB888)
        pixMap = QPixmap.fromImage(qImg)
        pixMap = pixMap.scaled(700, 500, Qt.KeepAspectRatio)
        self.imglabel.setPixmap(pixMap)
        self.imglabel.show()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MyWindow()
    sys.exit(app.exec_())
I've tried every solution found through google search. But, could not fix it.
Any help or hint will be much appreciated.
The original image must remain intact but you are applying the filter and modifying it every time, in the following example I show the correct way to do it
import sys
import cv2
import imutils
from PyQt5 import QtCore, QtGui, QtWidgets
class MyWindow(QtWidgets.QWidget):
    """Viewer that thresholds a copy each time, keeping the original intact."""

    def __init__(self):
        super().__init__()
        ori_img = cv2.imread("../resources/omr-1-ans-ori.png", cv2.IMREAD_COLOR)
        # Keep pristine copies; filtering always starts from these.
        self.original_image_color = imutils.resize(ori_img, height=960)
        self.original_image_gray = cv2.cvtColor(self.original_image_color, cv2.COLOR_BGR2GRAY)
        self.thresh = False
        self.thresh_karnel_size = 11
        self.init_ui()

    def init_ui(self):
        self.imglabel = QtWidgets.QLabel(alignment=QtCore.Qt.AlignCenter)
        self.imglabel.setFixedSize(1200, 900)
        cb_thresh = QtWidgets.QCheckBox('thresh', checked=False)
        cb_thresh.stateChanged.connect(self.changeTitleThresh)
        self.thresh_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal,
                                               focusPolicy=QtCore.Qt.StrongFocus,
                                               tickPosition=QtWidgets.QSlider.TicksBothSides,
                                               tickInterval=1,
                                               singleStep=1,
                                               pageStep=1,
                                               minimum=1,
                                               maximum=127)
        self.thresh_slider.valueChanged[int].connect(self.threshSliderChangeValue)
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(cb_thresh)
        vbox.addWidget(self.thresh_slider)
        vbox.addWidget(self.imglabel)
        self.threshSliderChangeValue(self.thresh_slider.value())
        self.setGeometry(50, 50, 1200, 768)
        self.setWindowTitle('Learning PyQT5')
        self.show()

    # BUG FIX(review): both decorators had their '@' mangled into '#' in the
    # original transcription; restored here.
    @QtCore.pyqtSlot(int)
    def changeTitleThresh(self, state):
        self.thresh = state == QtCore.Qt.Checked
        self.threshSliderChangeValue(self.thresh_slider.value())

    @QtCore.pyqtSlot(int)
    def threshSliderChangeValue(self, value):
        ksize = (value * 2) + 1
        if ksize > 1 and ksize % 2 != 0 and self.thresh:
            self.thresh_karnel_size = ksize
            # Threshold the untouched grayscale original every time.
            _, gray_img = cv2.threshold(self.original_image_gray, self.thresh_karnel_size, 255, cv2.THRESH_BINARY)
            gray_img_c = cv2.cvtColor(gray_img.copy(), cv2.COLOR_GRAY2BGR)
            self.updateImage(gray_img_c)
        else:
            self.updateImage(self.original_image_color)

    def updateImage(self, image):
        height, width, channel = image.shape
        bytesPerLine = 3 * width
        qImg = QtGui.QImage(image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
        pixMap = QtGui.QPixmap.fromImage(qImg).scaled(700, 500, QtCore.Qt.KeepAspectRatio)
        self.imglabel.setPixmap(pixMap)
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = MyWindow()
    sys.exit(app.exec_())

How to make screenshot while showing video from cam?

#Importing necessary libraries, mainly the OpenCV, and PyQt libraries
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal
class ShowVideo(QtCore.QObject):
    """Streams frames from the built-in camera as QImage signals."""

    # initiating the built in camera
    camera_port = -1
    camera = cv2.VideoCapture(camera_port)
    VideoSignal = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self, parent=None):
        super(ShowVideo, self).__init__(parent)

    # BUG FIX(review): both slot decorators had their '@' mangled into '#'
    # in the original transcription; restored here.
    @QtCore.pyqtSlot()
    def startVideo(self):
        run_video = True
        while run_video:
            ret, image = self.camera.read()
            color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            height, width, _ = color_swapped_image.shape
            qt_image = QtGui.QImage(color_swapped_image.data,
                                    width,
                                    height,
                                    color_swapped_image.strides[0],
                                    QtGui.QImage.Format_RGB888)
            pixmap = QtGui.QPixmap(qt_image)
            qt_image = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
            qt_image = QtGui.QImage(qt_image)
            self.VideoSignal.emit(qt_image)

    @QtCore.pyqtSlot()
    def makeScreenshot(self):
        # cv2.imwrite("test.jpg", self.image)
        print("Screenshot saved")
        # self.qt_image.save('test.jpg')
class ImageViewer(QtWidgets.QWidget):
    """Widget that paints the most recently received QImage."""

    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.image = QtGui.QImage()
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        # Drop the reference once drawn; the next frame arrives via setImage().
        self.image = QtGui.QImage()

    def initUI(self):
        self.setWindowTitle('Test')

    # BUG FIX(review): the decorator's '@' was mangled into '#' in the
    # original transcription; restored here.
    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("viewer dropped frame!")
        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)

    # Run the capture object on a worker thread so the GUI stays responsive.
    thread = QtCore.QThread()
    thread.start()
    vid = ShowVideo()
    vid.moveToThread(thread)

    image_viewer = ImageViewer()
    # image_viewer.resize(200,400)
    vid.VideoSignal.connect(image_viewer.setImage)

    # Button to start the videocapture:
    push_button = QtWidgets.QPushButton('Start')
    push_button.clicked.connect(vid.startVideo)
    push_button2 = QtWidgets.QPushButton('Screenshot')
    push_button2.clicked.connect(vid.makeScreenshot)

    vertical_layout = QtWidgets.QVBoxLayout()
    vertical_layout.addWidget(image_viewer)
    vertical_layout.addWidget(push_button)
    vertical_layout.addWidget(push_button2)
    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(vertical_layout)

    main_window = QtWidgets.QMainWindow()
    main_window.setCentralWidget(layout_widget)
    main_window.resize(640, 480)
    main_window.show()
    sys.exit(app.exec_())
This code shows video from the camera in an endless loop using OpenCV and PyQt5. But how can I take a screenshot without stopping the video? I think the loop needs to pause briefly, take the screenshot, and then resume.
You can use cv2.waitKey() for the same, as shown below:
while run_video:
    ret, image = self.camera.read()
    # Pressing 's' saves the current frame without interrupting the stream.
    if cv2.waitKey(10) & 0xFF == ord('s'):
        cv2.imwrite("screenshot.jpg", image)
(I'm guessing that by the term "screenshot", you mean the camera frame, and not the image of the entire screen.)
When you press 's' on the keyboard, it'll perform imwrite.
Note that if you wish to save multiple images, you'd have to vary the filename. The above code will overwrite screenshot.jpg to save only the latest frame.

Categories