Live video from GigE cameras - Python

I have a problem with a live video stream from 2 GigE cameras in QML. I tried it before with QLabels and QPixmap and it worked without any problem, but QML Labels don't have a pixmap property to send images to through signals and slots.
Here is my Python code:
import sys
import os
from PySide2.QtGui import QGuiApplication, QImage, QPixmap
from PySide2.QtQml import QQmlApplicationEngine
from PySide2.QtCore import Slot, QThread, Signal, Qt, QObject
import cv2
from pypylon import pylon

tlFactory = pylon.TlFactory.GetInstance()
devices = tlFactory.EnumerateDevices()
if len(devices) == 0:
    raise pylon.RuntimeException("No camera present.")
cameras = pylon.InstantCameraArray(min(len(devices), 2))
for i, cam in enumerate(cameras):
    cam.Attach(tlFactory.CreateDevice(devices[i]))
class CamThread(QThread):
    cam1 = Signal(QImage)
    cam2 = Signal(QImage)

    def run(self):
        cameras.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        try:
            while cameras.IsGrabbing():
                grabResult1 = cameras[0].RetrieveResult(
                    5000, pylon.TimeoutHandling_ThrowException
                )
                grabResult2 = cameras[1].RetrieveResult(
                    5000, pylon.TimeoutHandling_ThrowException
                )
                if grabResult1.GrabSucceeded() and grabResult2.GrabSucceeded():
                    img1 = grabResult1.GetArray()
                    img2 = grabResult2.GetArray()
                    rgb1 = cv2.cvtColor(img1, cv2.COLOR_YUV2RGB_Y422)
                    rgb2 = cv2.cvtColor(img2, cv2.COLOR_YUV2RGB_Y422)
                    h1, w1, ch1 = rgb1.shape
                    h2, w2, ch2 = rgb2.shape
                    bytesPerLine1 = ch1 * w1
                    bytesPerLine2 = ch2 * w2
                    # build the QImages from the converted RGB buffers
                    convertToQtFormat1 = QImage(
                        rgb1.data, w1, h1, bytesPerLine1, QImage.Format_RGB888
                    )
                    convertToQtFormat2 = QImage(
                        rgb2.data, w2, h2, bytesPerLine2, QImage.Format_RGB888
                    )
                    p = convertToQtFormat1.scaled(800, 746, Qt.KeepAspectRatio)
                    q = convertToQtFormat2.scaled(800, 746, Qt.KeepAspectRatio)
                    self.cam1.emit(p)
                    self.cam2.emit(q)
        except Exception as error:
            print(error)
class MainWindow(QObject):
    def __init__(self):
        QObject.__init__(self)
        self.CamThread = CamThread()
        self.CamThread.cam1.connect(self.camera1)
        self.CamThread.cam2.connect(self.camera2)
        self.CamThread.start()

    @Slot(QImage)
    def camera1(self, image):
        pass

    @Slot(QImage)
    def camera2(self, image):
        pass
if __name__ == "__main__":
    app = QGuiApplication(sys.argv)
    backend = MainWindow()
    engine = QQmlApplicationEngine()
    engine.rootContext().setContextProperty("backend", backend)
    engine.load(os.path.join(os.path.dirname(__file__), "main.qml"))
    if not engine.rootObjects():
        sys.exit(-1)
    sys.exit(app.exec_())
So how can I show a live video stream using QML/PySide2?
I am using Qt Design Studio.

Although the QQuickImageProvider option can be a good one, its drawback is that you have to generate different urls; a better option is to use VideoOutput. For example, in your case the following implementation should work (not tested):
from functools import cached_property
import os
import random
import sys
import threading

import cv2
from PySide2.QtCore import Property, QObject, Qt, QSize, QTimer, Signal, Slot
from PySide2.QtGui import QColor, QGuiApplication, QImage
from PySide2.QtMultimedia import QAbstractVideoSurface, QVideoFrame, QVideoSurfaceFormat
from PySide2.QtQml import QQmlApplicationEngine
import shiboken2

from pypylon import pylon


class CameraProvider(QObject):
    imageChanged = Signal(int, QImage)

    def start(self, cameras):
        threading.Thread(target=self._execute, args=(cameras,), daemon=True).start()

    def _execute(self, cameras):
        while cameras.IsGrabbing():
            for i, camera in enumerate(cameras):
                try:
                    grab_result = cameras[i].RetrieveResult(
                        5000, pylon.TimeoutHandling_ThrowException
                    )
                    if grab_result.GrabSucceeded():
                        img = grab_result.GetArray()
                        # FIXME
                        # convert img to qimage
                        qimage = QImage(800, 746, QImage.Format_RGB888)
                        qimage.fill(QColor(*random.sample(range(0, 255), 3)))
                        if shiboken2.isValid(self):
                            self.imageChanged.emit(i, qimage.copy())
                except Exception as error:
                    print(error)
class CameraService(QObject):
    surfaceChanged = Signal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self._surface = None
        self._format = QVideoSurfaceFormat()
        self._format_is_valid = False

    def get_surface(self):
        return self._surface

    def set_surface(self, surface):
        if self._surface is surface:
            return
        if (
            self._surface is not None
            and self._surface is not surface
            and self._surface.isActive()
        ):
            self._surface.stop()
        self._surface = surface
        self.surfaceChanged.emit()
        if self._surface is not None:
            self._format = self._surface.nearestFormat(self._format)
            self._surface.start(self._format)

    videoSurface = Property(
        QAbstractVideoSurface,
        fget=get_surface,
        fset=set_surface,
        notify=surfaceChanged,
    )

    @Slot(QImage)
    def update_frame(self, qimage):
        if self.videoSurface is None or qimage.isNull():
            return
        if not self._format_is_valid:
            self._set_format(qimage.width(), qimage.height(), QVideoFrame.Format_RGB32)
            self._format_is_valid = True
        qimage.convertTo(
            QVideoFrame.imageFormatFromPixelFormat(QVideoFrame.Format_RGB32)
        )
        self._surface.present(QVideoFrame(qimage))

    def _set_format(self, width, height, pixel_format):
        size = QSize(width, height)
        video_format = QVideoSurfaceFormat(size, pixel_format)
        self._format = video_format
        if self._surface is not None:
            if self._surface.isActive():
                self._surface.stop()
            self._format = self._surface.nearestFormat(self._format)
            self._surface.start(self._format)
class CameraManager(QObject):
    def __init__(self, cameras, parent=None):
        super().__init__(parent)
        self._services = []
        self.provider.imageChanged.connect(self.handle_image_changed)
        self.provider.start(cameras)
        for _ in cameras:
            self._services.append(CameraService())

    @cached_property
    def provider(self):
        return CameraProvider()

    @Slot(int, QImage)
    def handle_image_changed(self, index, qimage):
        self._services[index].update_frame(qimage)

    def get_services(self):
        return self._services

    services = Property("QVariantList", fget=get_services, constant=True)
def main():
    app = QGuiApplication(sys.argv)

    tlFactory = pylon.TlFactory.GetInstance()
    devices = tlFactory.EnumerateDevices()
    if len(devices) == 0:
        raise pylon.RuntimeException("No camera present.")
    cameras = pylon.InstantCameraArray(min(len(devices), 2))
    for i, cam in enumerate(cameras):
        cam.Attach(tlFactory.CreateDevice(devices[i]))

    manager = CameraManager(cameras)
    engine = QQmlApplicationEngine()
    engine.rootContext().setContextProperty("manager", manager)
    engine.load(os.path.join(os.path.dirname(__file__), "main.qml"))
    if not engine.rootObjects():
        sys.exit(-1)
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
import QtQuick 2.14
import QtQuick.Window 2.14
import QtMultimedia 5.14

Window {
    visible: true
    width: 640
    height: 480
    title: qsTr("Hello World")

    GridView {
        width: 300; height: 200
        model: manager !== null ? manager.services : []
        delegate: VideoOutput {
            width: 100
            height: 100
            fillMode: VideoOutput.PreserveAspectCrop
            source: model.modelData
        }
    }
}
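As a sketch of how the FIXME in CameraProvider._execute could be filled in, reusing the YUV422 conversion from the question's own code (the color-conversion constant is taken from the question, not verified against these cameras):

def to_qimage(img):
    # assumes a packed-YUV422 pylon frame, as in the question's code
    rgb = cv2.cvtColor(img, cv2.COLOR_YUV2RGB_Y422)
    h, w, ch = rgb.shape
    # copy() detaches the QImage from the temporary numpy buffer
    return QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888).copy()

The emitted image would then be to_qimage(img) instead of the random-color placeholder.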

Qt provides different methods to pass images/video streams to QML:
1. Converting pixmap to base64 encoding
QByteArray byteArray;
QBuffer buffer(&byteArray);
buffer.open(QIODevice::WriteOnly);
pixmap.save(&buffer,"PNG");
QString data("data:image/png;base64,");
data.append(QString::fromLatin1(byteArray.toBase64().data()));
This base64 encoded image may be passed to Image::source
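A rough PySide2 equivalent of the snippet above could look like this sketch, where pixmap is assumed to already hold the frame:

from PySide2.QtCore import QBuffer, QByteArray, QIODevice

byte_array = QByteArray()
buffer = QBuffer(byte_array)
buffer.open(QIODevice.WriteOnly)
pixmap.save(buffer, "PNG")  # encode the pixmap as PNG into the byte array
data = "data:image/png;base64," + bytes(byte_array.toBase64()).decode("ascii")
# expose `data` to QML and bind it to Image.source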
2. Use QQuickImageProvider
This allows connecting a custom image://... url to a QPixmap or QImage directly. Check the docs for more information.
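A minimal PySide2 provider might look like the following sketch; the provider name "camera" and the counter-based refresh are illustrative assumptions:

from PySide2.QtGui import QImage
from PySide2.QtQuick import QQuickImageProvider

class FrameProvider(QQuickImageProvider):
    def __init__(self):
        super().__init__(QQuickImageProvider.Image)
        self.frame = QImage()

    def requestImage(self, id, size, requestedSize):
        # called by QML whenever the Image source url changes
        return self.frame

# engine.addImageProvider("camera", FrameProvider())
# QML: Image { source: "image://camera/" + counter }  // bump counter to force a reload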
3. Use QtMultimedia
Especially VideoOutput may be useful.

Related

How to update QML property from within function

I'm trying to display a text message on screen when a barcode is detected via OpenCV, from within a long-running background thread, but nothing that I've tried has worked so far.
I'm confused by the slight differences between PySide and PyQt, and most examples deal with user interactions.
Could someone give me a simple example, if possible, of how to achieve that?
Much appreciated.
import os, sys, cv2, threading, time
from multiprocessing import Process
from PySide2.QtCore import QObject, Signal, Property, QUrl, QTimer, QDateTime
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlApplicationEngine
from pyzbar import pyzbar


class TextManager(QObject):
    textChanged = Signal()

    def __init__(self, parent=None):
        QObject.__init__(self, parent)
        self._text = ""

    def get_text(self):
        return self._text

    def set_text(self, value):
        if self._text == value:
            return
        self._text = value
        self.textChanged.emit()


class CamWorker(QObject):
    def scanner(self):
        camera = cv2.VideoCapture(0)

        def decodeCam(image):
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            barcodes = pyzbar.decode(gray)
            if barcodes:
                # how do I change the QML property from here?
                pass
            return image

        while True:
            # Read current frame
            try:
                ret, frame = camera.read()
                im = decodeCam(frame)
            except ValueError:
                print("error")


def run():
    app = QGuiApplication(sys.argv)
    manager = TextManager()
    directory = os.path.dirname(os.path.abspath(__file__))
    engine = QQmlApplicationEngine()
    engine.quit.connect(app.quit)
    engine.load('qml/main.qml')
    # engine.rootContext().setContextProperty(...)  # also tried this
    if not engine.rootObjects():
        sys.exit(-1)
    engine.rootObjects()[0].setProperty('manager', manager)
    worker = CamWorker()
    threading.Thread(target=worker.scanner, daemon=True).start()
    return app.exec_()


if __name__ == "__main__":
    sys.exit(run())
import QtQuick 2.5
import QtQuick.Window 2.2

Window {
    id: testing
    visible: true
    width: 640
    height: 480

    Text {
        anchors.fill: parent
        text: manager.text
    }
}
You can use QML types to get video from the camera; here is a simple example:
File main.py
import sys
from pathlib import Path

from PySide6.QtCore import QObject, Slot, Signal, QRunnable, QThreadPool
from PySide6.QtGui import QGuiApplication, QImage
from PySide6.QtQml import QQmlApplicationEngine, QmlElement
import cv2
import numpy as np
from pyzbar import pyzbar
import random

QML_IMPORT_NAME = "com.myapp.components"
QML_IMPORT_MAJOR_VERSION = 1


class Worker(QRunnable):
    def __init__(self, emiter: Signal, image: QImage):
        super().__init__(None)
        self.image = image
        self.emiter = emiter

    def qimage_to_array(self, image: QImage) -> np.ndarray:
        """Converts a QImage into an opencv MAT format"""
        image = image.convertToFormat(QImage.Format.Format_RGBA8888)
        width = image.width()
        height = image.height()
        ptr = image.constBits()
        return np.array(ptr).reshape(height, width, 4)

    def run(self):
        arr = self.qimage_to_array(self.image)
        gray = cv2.cvtColor(arr, cv2.COLOR_BGR2GRAY)
        barcodes = pyzbar.decode(gray)
        # logic here.
        # returning a random boolean for now
        self.emiter.emit(random.choice([True, False]))


@QmlElement
class Cv2Capture(QObject):
    imageAnalayized = Signal(bool)

    @Slot(int, QImage)
    def receive(self, req_id, image: QImage):
        worker = Worker(self.imageAnalayized, image)
        QThreadPool.globalInstance().start(worker)


if __name__ == "__main__":
    app = QGuiApplication(sys.argv)
    engine = QQmlApplicationEngine()
    qml_file = Path(__file__).parent / "main.qml"
    engine.load(qml_file)
    if not engine.rootObjects():
        sys.exit(-1)
    sys.exit(app.exec())
File main.qml
import QtQuick
import QtQuick.Controls
import QtMultimedia
import com.myapp.components
import QtQuick.Controls.Material

ApplicationWindow {
    id: mainFrame
    Material.theme: Material.Dark
    width: 640
    height: 480
    visible: true
    title: qsTr("Cam Test")

    Cv2Capture {
        id: bridge
        onImageAnalayized: function(res){ console.log(res) }
    }

    Rectangle {
        id: rect
        width: 640
        height: 400

        MediaDevices {
            id: mediaDevices
        }

        CaptureSession {
            imageCapture: ImageCapture {
                id: capture
                onImageCaptured: function(req_id, preview){ bridge.receive(req_id, preview) }
            }
            camera: Camera {
                id: camera
            }
            videoOutput: output
        }

        VideoOutput {
            id: output
            anchors.fill: parent
        }

        Button {
            id: startCamButton
            text: "Start Cam"
            anchors.top: output.bottom
            anchors.left: output.left
            onClicked: {
                camera.start()
                camImage.opacity = 0
            }
        }

        Button {
            id: takePicButton
            text: "take pic"
            anchors.top: output.bottom
            anchors.left: startCamButton.right
            onClicked: {
                capture.capture()
                camImage.opacity = 1
            }
        }

        Image {
            id: camImage
            anchors.fill: parent
            source: capture.preview
        }
    }
}
This example uses PySide6, which has better support for QML than PySide2.
"I'm confused by slight differences between PySide and PyQt"
PyQt and PySide are both Python bindings of the C++ library called Qt.
PyQt - made by Riverbank Computing.
PySide - by The Qt Company.
Explanation:
The QmlElement decorator, as described here, will register the class to be used in QML.
Every Signal you create in that class will be available in QML in the type instantiation, e.g.:

Cv2Capture {
    id: bridge
    onImageAnalayized: function(res){ console.log(res) }
}

Where onImageAnalayized was imageAnalayized = Signal(bool) in Python.
Slots are also available by calling them on the component id.
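For instance, a hypothetical extra slot added to the same class (the reset method below is purely illustrative) could be called from QML as bridge.reset():

@QmlElement
class Cv2Capture(QObject):
    imageAnalayized = Signal(bool)

    @Slot()
    def reset(self):
        # hypothetical slot, invokable from QML as bridge.reset()
        print("reset requested from QML")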

How to change the audio output device of QMediaPlayer in PySide2/PyQt5

How can I set the audio output of a QMediaPlayer to a specific output in Windows 7 and later?
This was really easy in PySide (using Phonon) but I can't find a way to do it in PySide2.
There are some related questions already, like this old but still unsolved one, or this one that asks exactly what I want.
They are both in C++ and it's difficult to convert them to PySide2.
The second one is answered with this code:
QMediaService *svc = player->service();
if (svc != nullptr)
{
    QAudioOutputSelectorControl *out = qobject_cast<QAudioOutputSelectorControl *>
        (svc->requestControl(QAudioOutputSelectorControl_iid));
    if (out != nullptr)
    {
        out->setActiveOutput(this->ui->comboBox->currentText());
        svc->releaseControl(out);
    }
}
Another answer with an attempted Python conversion didn't work either.
I tried to convert them to Python code myself, but the result was not successful.
Here is my minimal attempt:
import sys

from PySide2 import QtMultimedia
from PySide2.QtCore import QUrl, Qt
from PySide2.QtMultimedia import QMediaPlayer, QMediaContent
from PySide2.QtWidgets import (QPushButton, QSlider, QHBoxLayout, QVBoxLayout,
                               QFileDialog, QStyle, QApplication, QDialog, QComboBox)


class Window(QDialog):
    def __init__(self):
        super().__init__()

        self.out_combo = QComboBox()
        mode = QtMultimedia.QAudio.AudioOutput
        devices = QtMultimedia.QAudioDeviceInfo.availableDevices(mode)
        for item in [(dev.deviceName(), dev) for dev in devices]:
            self.out_combo.addItem(item[0], item[1])
        self.out_combo.currentIndexChanged.connect(self.out_device_changed)

        openBtn = QPushButton('Open file')
        openBtn.clicked.connect(self.open_file)

        self.playBtn = QPushButton()
        self.playBtn.setEnabled(False)
        self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playBtn.clicked.connect(self.play_file)

        self.slider = QSlider(Qt.Horizontal)
        self.slider.setRange(0, 0)
        self.slider.sliderMoved.connect(self.set_position)

        hor_layout = QHBoxLayout()
        hor_layout.setContentsMargins(0, 0, 0, 0)
        hor_layout.addWidget(openBtn)
        hor_layout.addWidget(self.playBtn)
        hor_layout.addWidget(self.slider)

        ver_layout = QVBoxLayout()
        ver_layout.addWidget(self.out_combo)
        ver_layout.addLayout(hor_layout)
        self.setLayout(ver_layout)

        self.player = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.player.stateChanged.connect(self.mediastate_changed)
        self.player.positionChanged.connect(self.position_changed)
        self.player.durationChanged.connect(self.duration_changed)

        self.show()

    def open_file(self):
        file_name, _ = QFileDialog.getOpenFileName(self, "Open file")
        if file_name != '':
            self.player.setMedia(QMediaContent(QUrl.fromLocalFile(file_name)))
            # self.label.setText(basename(file_name))
            self.playBtn.setEnabled(True)

    def play_file(self):
        if self.player.state() == QMediaPlayer.PlayingState:
            self.player.pause()
        else:
            self.player.play()

    def mediastate_changed(self, state):
        if self.player.state() == QMediaPlayer.PlayingState:
            self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        else:
            self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))

    def position_changed(self, position):
        self.slider.setValue(position)

    def duration_changed(self, duration):
        self.slider.setRange(0, duration)

    def set_position(self, position):
        self.player.setPosition(position)

    def out_device_changed(self, idx):
        device = self.out_combo.itemData(idx)
        service = self.player.service()
        if service:
            out = service.requestControl("org.qt-project.qt.mediastreamscontrol/5.0")
            if out:
                out.setActiveOutput(self.out_combo.currentText())
                service.releaseControl(out)
            else:
                print("No output found!")


app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())

Store audio from QAudioInput and pass it to SciPy FFT

How can I get audio input in real time from QAudioInput, store it in a NumPy array, and pass it to SciPy FFT? What I have tried:
import sys

from PyQt5.QtCore import QByteArray
from PyQt5.QtMultimedia import QAudioDeviceInfo, QAudioFormat, QAudioInput
from PyQt5.QtWidgets import QMainWindow


class Window(QMainWindow):
    def __init__(self):
        info = QAudioDeviceInfo()
        input_device = info.defaultInputDevice()
        if input_device.isNull():
            # If no available device is found, we display an error
            print("There is no audio input device available.")
            exit(-1)
        audio_format = QAudioFormat()
        audio_format.setSampleRate(44100)
        audio_format.setSampleSize(8)
        audio_format.setChannelCount(1)
        audio_format.setCodec("audio/pcm")
        audio_format.setSampleType(QAudioFormat.UnSignedInt)
        if sys.byteorder == "little":
            audio_format.setByteOrder(QAudioFormat.LittleEndian)
        else:
            audio_format.setByteOrder(QAudioFormat.BigEndian)
        self.audioInput = QAudioInput(input_device, audio_format, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self.read_audio)

    def read_audio(self):
        data: QByteArray = self.ioDevice.readAll()
        print(data.toUInt())  # Prints (0, False) which means error converting data
Inspired by the official Audio Example, I have created a QIODevice that allows obtaining the data. The following example takes the last N samples every T seconds, calculates their FFT, and displays it using matplotlib.
import sys
import collections
from functools import cached_property

from PyQt5.QtCore import QIODevice, QObject, pyqtSignal, QTimer
from PyQt5.QtMultimedia import QAudioDeviceInfo, QAudioFormat, QAudioInput
from PyQt5.QtWidgets import QApplication, QMainWindow

from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
from scipy.fft import fft, fftfreq
import numpy as np

FS = 44100
SAMPLE_COUNT = 2 * 1000


class AudioDevice(QIODevice):
    data_changed = pyqtSignal(list, name="dataChanged")

    def __init__(self, interval=1000, parent: QObject = None):
        super().__init__(parent)
        self.m_buffer = collections.deque(
            [0 for _ in range(SAMPLE_COUNT)], maxlen=SAMPLE_COUNT
        )
        self.timer.timeout.connect(self.send_data)
        self.timer.setInterval(interval)
        self.timer.start()

    @cached_property
    def timer(self):
        return QTimer()

    def send_data(self):
        self.data_changed.emit(list(self.m_buffer))

    def readData(self, data, max_size):
        # write-only device: nothing to read back
        return -1

    def writeData(self, data):
        max_size = len(data)
        resolution = 4
        start = 0
        available_samples = int(max_size) // resolution
        if available_samples < self.m_buffer.maxlen:
            start = self.m_buffer.maxlen - available_samples
        pos = 0
        for _ in range(start, self.m_buffer.maxlen):
            y = (1.0 * (data[pos] - 128)) / 128.0
            self.m_buffer.append(y)
            pos += resolution
        return (self.m_buffer.maxlen - start) * resolution


class PlotWidget(QMainWindow):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.canvas = FigureCanvas(Figure(figsize=(5, 3)))
        self.setCentralWidget(self.canvas)
        self.ax = self.canvas.figure.subplots()
        self._line = None

    def update_data(self, data):
        T = 1 / FS
        N = SAMPLE_COUNT
        yf = fft(data)
        xf = fftfreq(N, T)[: N // 2]
        x = xf
        y = 2.0 / N * np.abs(yf[0 : N // 2])
        if self._line is None:
            (self._line,) = self.ax.plot(x, y)
        else:
            self._line.set_data(x, y)
        self.canvas.draw()


def main(args):
    app = QApplication(args)
    plot_widget = PlotWidget()
    plot_widget.resize(640, 480)
    plot_widget.show()

    info = QAudioDeviceInfo()
    input_device = info.defaultInputDevice()
    if input_device.isNull():
        print("There is no audio input device available.")
        exit(-1)
    audio_format = QAudioFormat()
    audio_format.setSampleRate(FS)
    audio_format.setSampleSize(8)
    audio_format.setChannelCount(1)
    audio_format.setCodec("audio/pcm")
    audio_format.setSampleType(QAudioFormat.UnSignedInt)
    if sys.byteorder == "little":
        audio_format.setByteOrder(QAudioFormat.LittleEndian)
    else:
        audio_format.setByteOrder(QAudioFormat.BigEndian)

    audio_input = QAudioInput(input_device, audio_format, None)
    audio_device = AudioDevice(interval=100)
    audio_device.data_changed.connect(plot_widget.update_data)
    audio_device.open(QIODevice.WriteOnly)
    audio_input.start(audio_device)

    app.exec_()


if __name__ == "__main__":
    main(sys.argv)
data.toUInt() converts the whole byte array to one uint value - not what you want. To get the sample values you can use either numpy.frombuffer or struct.unpack.
import numpy

def read_audio(self):
    data = self.ioDevice.readAll()
    values = numpy.frombuffer(data.data(), dtype=numpy.uint8)
or
import struct

def read_audio(self):
    data = self.ioDevice.readAll()
    fmt = "@{}B".format(data.size())
    values = struct.unpack(fmt, data.data())
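From there, handing the samples to SciPy is straightforward; a minimal sketch, assuming the 8-bit unsigned mono format and 44100 Hz sample rate configured above:

import numpy as np
from scipy.fft import fft, fftfreq

def spectrum(values, fs=44100):
    # values: unsigned 8-bit samples from readAll()
    samples = (np.asarray(values, dtype=np.float32) - 128.0) / 128.0  # center around 0
    n = len(samples)
    yf = 2.0 / n * np.abs(fft(samples)[: n // 2])  # single-sided magnitude
    xf = fftfreq(n, 1.0 / fs)[: n // 2]            # frequency bins in Hz
    return xf, yf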
I added a widget that shows the waveform to demonstrate that the samples actually reflect the signal from the microphone - not random numbers.
from PyQt5.QtMultimedia import QAudioDeviceInfo, QAudioFormat, QAudioInput
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget
from PyQt5.QtGui import QPainter, QPolygonF
from PyQt5.QtCore import QPointF
import sys
import numpy


class WaveWidget(QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self._values = None

    def setValues(self, values):
        self._values = values
        self.update()

    def paintEvent(self, event):
        if self._values is None:
            return
        painter = QPainter(self)
        ys = self._values / 255 * self.height()
        xs = numpy.linspace(0, self.width(), num=len(ys))
        points = QPolygonF([QPointF(x, y) for x, y in zip(xs, ys)])
        painter.drawPolyline(points)


class Window(QMainWindow):
    def __init__(self):
        super().__init__()
        info = QAudioDeviceInfo()
        input_device = info.defaultInputDevice()
        if input_device.isNull():
            # If no available device is found, we display an error
            print("There is no audio input device available.")
            exit(-1)
        audio_format = QAudioFormat()
        audio_format.setSampleRate(44100)
        audio_format.setSampleSize(8)
        audio_format.setChannelCount(1)
        audio_format.setCodec("audio/pcm")
        audio_format.setSampleType(QAudioFormat.UnSignedInt)
        if sys.byteorder == "little":
            audio_format.setByteOrder(QAudioFormat.LittleEndian)
        else:
            audio_format.setByteOrder(QAudioFormat.BigEndian)
        self.audioInput = QAudioInput(input_device, audio_format, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self.read_audio)
        widget = WaveWidget()
        self._widget = widget
        self.setCentralWidget(widget)

    def read_audio(self):
        data = self.ioDevice.readAll()
        values = numpy.frombuffer(data.data(), dtype=numpy.uint8)
        self._widget.setValues(values)


if __name__ == "__main__":
    app = QApplication([])
    window = Window()
    window.show()
    app.exec()

Unable to stream frames from camera to QML

I am using PyQt5. I want to send frames from OpenCV to QML using QQuickPaintedItem. I wrote a sample implementation here. I can't figure out why the paint event is called only once, when the application is loading. It paints only one frame from the camera to the QML component, and self.update() is not triggering the paint event.
from OpenGL import GL
import sys

from PyQt5.QtQuick import QQuickPaintedItem, QQuickView
from PyQt5.QtGui import QPainter, QPixmap, QImage, QColor
from PyQt5.QtQml import qmlRegisterType
from PyQt5.QtCore import QUrl, QObject, pyqtSignal
from PyQt5.QtWidgets import QApplication
import cv2.cv2 as cv2


class ImageWriter(QQuickPaintedItem):
    cam_frame = None

    def __init__(self, *args, **kwargs):
        super(ImageWriter, self).__init__(*args, **kwargs)
        self.setRenderTarget(QQuickPaintedItem.FramebufferObject)

    def paint(self, painter):
        print(ImageWriter.cam_frame)
        painter.drawPixmap(0, 0, ImageWriter.cam_frame)

    def update_frame(self, frame):
        frame = cv2.resize(frame, (700, 500), cv2.INTER_AREA)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        frame = QImage(frame, frame.shape[1], frame.shape[0], 17)  # 17 == QImage.Format_RGBA8888
        ImageWriter.cam_frame = QPixmap.fromImage(frame)
        self.update()


def get_frames(app):
    cap = cv2.VideoCapture(0)
    num = 0
    imgw = ImageWriter()
    while True:
        while num != 30:
            _, bgframe = cap.read()
            num += 1
        _, frame = cap.read()
        imgw.update_frame(frame)
        print("get frames")
        app.processEvents()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    qmlRegisterType(ImageWriter, "imageWriter", 1, 0, "ImageWriter")
    view = QQuickView()
    view.setSource(QUrl('test.qml'))
    rootObject = view.rootObject()
    view.show()
    get_frames(app)
    sys.exit(app.exec_())
Here is the QML I wrote for this:
import QtQuick 2.0
import imageWriter 1.0

Item {
    width: 800
    height: 600

    ImageWriter {
        id: imageWriter
        width: 800
        height: 600
    }
}
I am not quite able to work out why the paint event is not called by self.update(). I can't use QWidgets, I have to use this. Is there something I am missing here?
The problem is caused by having 2 ImageWriter objects, one created in imgw = ImageWriter() and the other created by QML; you can confirm it by adding prints in the .py and .qml:
*.py

def get_frames(app):
    cap = cv2.VideoCapture(0)
    num = 0
    imgw = ImageWriter()
    print("Python:", imgw)
    ...

*.qml

...
Component.onCompleted: console.log("QML:", imageWriter)
...

Output:

qml: >>>> ImageWriter(0x55bf2927e770)
Python: <__main__.ImageWriter object at 0x7fce8e4ff798>
As you can see, there are 2 objects that point to different memory addresses, so a possible solution is to create a singleton using this library:
from OpenGL import GL
import sys
from PyQt5 import QtCore, QtGui, QtQml, QtQuick
import cv2

try:
    from PyQt5.QtCore import pyqtWrapperType
except ImportError:
    from sip import wrappertype as pyqtWrapperType


class Singleton(pyqtWrapperType, type):
    def __init__(cls, name, bases, dict):
        super().__init__(name, bases, dict)
        cls.instance = None

    def __call__(cls, *args, **kw):
        if cls.instance is None:
            cls.instance = super().__call__(*args, **kw)
        return cls.instance


class ImageWriter(QtQuick.QQuickPaintedItem, metaclass=Singleton):
    def __init__(self, *args, **kwargs):
        super(ImageWriter, self).__init__(*args, **kwargs)
        self.setRenderTarget(QtQuick.QQuickPaintedItem.FramebufferObject)
        self.cam_frame = QtGui.QImage()

    def paint(self, painter):
        painter.drawImage(0, 0, self.cam_frame)

    def update_frame(self, frame):
        frame = cv2.resize(frame, (700, 500), cv2.INTER_AREA)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        frame = QtGui.QImage(frame, frame.shape[1], frame.shape[0], 17)
        self.cam_frame = frame.copy()
        self.update()


def get_frames(app):
    cap = cv2.VideoCapture(0)
    num = 0
    imgw = ImageWriter()
    while True:
        while num != 30:
            _, bgframe = cap.read()
            num += 1
        ret, frame = cap.read()
        if ret:
            imgw.update_frame(frame)
        # print("get frames")
        app.processEvents()


if __name__ == '__main__':
    app = QtGui.QGuiApplication(sys.argv)
    QtQml.qmlRegisterType(ImageWriter, "imageWriter", 1, 0, "ImageWriter")
    view = QtQuick.QQuickView()
    view.setSource(QtCore.QUrl('test.qml'))
    rootObject = view.rootObject()
    view.show()
    get_frames(app)
    sys.exit(app.exec_())
With the above, the acquisition of images should work, but I think there is a better way; below is a better option.
Using my previous answer as a base, I created a module that implements a camera handler using OpenCV, plus a viewer and a generic class that allows adding filters. For this, the project must have the following structure:
├── main.py
├── main.qml
└── PyCVQML
├── cvcapture.py
├── cvitem.py
└── __init__.py
PyCVQML/cvcapture.py
import numpy as np
import threading

import cv2
from PyQt5 import QtCore, QtGui, QtQml

gray_color_table = [QtGui.qRgb(i, i, i) for i in range(256)]


class CVAbstractFilter(QtCore.QObject):
    def process_image(self, src):
        dst = src
        return dst


class CVCapture(QtCore.QObject):
    started = QtCore.pyqtSignal()
    imageReady = QtCore.pyqtSignal()
    indexChanged = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(CVCapture, self).__init__(parent)
        self._image = QtGui.QImage()
        self._index = 0
        self.m_videoCapture = cv2.VideoCapture()
        self.m_timer = QtCore.QBasicTimer()
        self.m_filters = []
        self.m_busy = False

    @QtCore.pyqtSlot()
    @QtCore.pyqtSlot(int)
    def start(self, *args):
        if args:
            self.setIndex(args[0])
        self.m_videoCapture.release()
        self.m_videoCapture = cv2.VideoCapture(self._index)
        if self.m_videoCapture.isOpened():
            self.m_timer.start(0, self)
            self.started.emit()

    @QtCore.pyqtSlot()
    def stop(self):
        self.m_timer.stop()

    def timerEvent(self, e):
        if e.timerId() != self.m_timer.timerId():
            return
        ret, frame = self.m_videoCapture.read()
        if not ret:
            self.m_timer.stop()
            return
        if not self.m_busy:
            threading.Thread(target=self.process_image, args=(np.copy(frame),)).start()

    @QtCore.pyqtSlot(np.ndarray)
    def process_image(self, frame):
        self.m_busy = True
        for f in self.m_filters:
            frame = f.process_image(frame)
        image = CVCapture.ToQImage(frame)
        self.m_busy = False
        QtCore.QMetaObject.invokeMethod(self,
                                        "setImage",
                                        QtCore.Qt.QueuedConnection,
                                        QtCore.Q_ARG(QtGui.QImage, image))

    @staticmethod
    def ToQImage(im):
        if im is None:
            return QtGui.QImage()
        if im.dtype == np.uint8:
            if len(im.shape) == 2:
                qim = QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_Indexed8)
                qim.setColorTable(gray_color_table)
                return qim.copy()
            elif len(im.shape) == 3:
                if im.shape[2] == 3:
                    w, h, _ = im.shape
                    rgb_image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
                    flip_image = cv2.flip(rgb_image, 1)
                    qim = QtGui.QImage(flip_image.data, h, w, QtGui.QImage.Format_RGB888)
                    return qim.copy()
        return QtGui.QImage()

    def image(self):
        return self._image

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if self._image == image:
            return
        self._image = image
        self.imageReady.emit()

    def index(self):
        return self._index

    def setIndex(self, index):
        if self._index == index:
            return
        self._index = index
        self.indexChanged.emit()

    @QtCore.pyqtProperty(QtQml.QQmlListProperty)
    def filters(self):
        return QtQml.QQmlListProperty(CVAbstractFilter, self, self.m_filters)

    image = QtCore.pyqtProperty(QtGui.QImage, fget=image, notify=imageReady)
    index = QtCore.pyqtProperty(int, fget=index, fset=setIndex, notify=indexChanged)
PyCVQML/cvitem.py
from PyQt5 import QtCore, QtGui, QtQuick


class CVItem(QtQuick.QQuickPaintedItem):
    imageChanged = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(CVItem, self).__init__(parent)
        # self.setRenderTarget(QtQuick.QQuickPaintedItem.FramebufferObject)
        self.m_image = QtGui.QImage()

    def paint(self, painter):
        if self.m_image.isNull():
            return
        image = self.m_image.scaled(self.size().toSize())
        painter.drawImage(QtCore.QPoint(), image)

    def image(self):
        return self.m_image

    def setImage(self, image):
        if self.m_image == image:
            return
        self.m_image = image
        self.imageChanged.emit()
        self.update()

    image = QtCore.pyqtProperty(QtGui.QImage, fget=image, fset=setImage, notify=imageChanged)
PyCVQML/__init__.py
from PyQt5 import QtQml

from .cvcapture import CVCapture, CVAbstractFilter
from .cvitem import CVItem


def registerTypes(uri="PyCVQML"):
    QtQml.qmlRegisterType(CVCapture, uri, 1, 0, "CVCapture")
    QtQml.qmlRegisterType(CVItem, uri, 1, 0, "CVItem")
Then you use it in main.py; I have added 2 example filters. For this, CVCapture has the filters property, where the filters are passed to it and executed in the order they are set. To implement a new filter you must inherit from CVAbstractFilter and override the process_image() method, which receives the image as an np.ndarray and should return the result after the filter.
main.py
import cv2
import numpy as np
from PyQt5 import QtGui, QtCore, QtQuick, QtQml

import PyCVQML


def max_rgb_filter(image):
    # split the image into its BGR components
    (B, G, R) = cv2.split(image)
    # find the maximum pixel intensity values for each
    # (x, y)-coordinate, then set all pixel values less
    # than M to zero
    M = np.maximum(np.maximum(R, G), B)
    R[R < M] = 0
    G[G < M] = 0
    B[B < M] = 0
    # merge the channels back together and return the image
    return cv2.merge([B, G, R])


def rgb_to_gray(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return gray


class MaxRGBFilter(PyCVQML.CVAbstractFilter):
    def process_image(self, src):
        return max_rgb_filter(src)


class GrayFilter(PyCVQML.CVAbstractFilter):
    def process_image(self, src):
        return rgb_to_gray(src)


if __name__ == '__main__':
    import os
    import sys

    app = QtGui.QGuiApplication(sys.argv)
    PyCVQML.registerTypes()
    QtQml.qmlRegisterType(MaxRGBFilter, "Filters", 1, 0, "MaxRGBFilter")
    QtQml.qmlRegisterType(GrayFilter, "Filters", 1, 0, "GrayFilter")
    view = QtQuick.QQuickView()
    view.setTitle("PyCVQML Example")
    dir_path = os.path.dirname(os.path.realpath(__file__))
    view.setSource(QtCore.QUrl.fromLocalFile(QtCore.QDir(dir_path).absoluteFilePath("main.qml")))
    view.show()
    sys.exit(app.exec_())
main.qml
import QtQuick 2.0
import PyCVQML 1.0
import Filters 1.0

Item {
    width: 800
    height: 600

    CVItem {
        id: imageWriter
        anchors.fill: parent
        image: capture.image
    }

    MaxRGBFilter {
        id: max_rgb_filter
    }
    GrayFilter {
        id: gray_filter
    }

    CVCapture {
        id: capture
        index: 0
        filters: [max_rgb_filter, gray_filter]
        Component.onCompleted: capture.start()
    }
}

PyQt5 Access Frames with QMediaPlayer

I am creating a program to play videos and then process them. I am able to play the videos with QMediaPlayer. How do I access specific frames as images or something similar? My end goal would be to format the video into a 4-d tensor of size [num_of_frames, width_of_video, height_of_video, channels].
Here is the code that loads my video:
self.clear_layout(self.vlayout)
videoItem = QVideoWidget()
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.mediaPlayer.durationChanged.connect(self.update_duration)
self.mediaPlayer.positionChanged.connect(self.update_slider_position)
self.vlayout.addWidget(videoItem)
self.mediaPlayer.setVideoOutput(videoItem)
local = QUrl.fromLocalFile(self.video_paths)
media = QMediaContent(local)
self.mediaPlayer.setMedia(media)
self.play_video()
Here is a working example that I converted to Python from the C++ version available in this question: How to save a frame using QMediaPlayer?
import sys
import uuid

import PyQt5
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, QObject, QUrl, QRect, pyqtSignal, QPoint
from PyQt5.QtGui import QPainter, QImage
from PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, QGridLayout, QToolBar, QAction
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent, QAbstractVideoBuffer, \
    QVideoFrame, QVideoSurfaceFormat, QAbstractVideoSurface
from PyQt5.QtMultimediaWidgets import QVideoWidget


class VideoFrameGrabber(QAbstractVideoSurface):
    frameAvailable = pyqtSignal(QImage)

    def __init__(self, widget: QWidget, parent: QObject):
        super().__init__(parent)
        self.widget = widget

    def supportedPixelFormats(self, handleType):
        return [QVideoFrame.Format_ARGB32, QVideoFrame.Format_ARGB32_Premultiplied,
                QVideoFrame.Format_RGB32, QVideoFrame.Format_RGB24, QVideoFrame.Format_RGB565,
                QVideoFrame.Format_RGB555, QVideoFrame.Format_ARGB8565_Premultiplied,
                QVideoFrame.Format_BGRA32, QVideoFrame.Format_BGRA32_Premultiplied, QVideoFrame.Format_BGR32,
                QVideoFrame.Format_BGR24, QVideoFrame.Format_BGR565, QVideoFrame.Format_BGR555,
                QVideoFrame.Format_BGRA5658_Premultiplied, QVideoFrame.Format_AYUV444,
                QVideoFrame.Format_AYUV444_Premultiplied, QVideoFrame.Format_YUV444,
                QVideoFrame.Format_YUV420P, QVideoFrame.Format_YV12, QVideoFrame.Format_UYVY,
                QVideoFrame.Format_YUYV, QVideoFrame.Format_NV12, QVideoFrame.Format_NV21,
                QVideoFrame.Format_IMC1, QVideoFrame.Format_IMC2, QVideoFrame.Format_IMC3,
                QVideoFrame.Format_IMC4, QVideoFrame.Format_Y8, QVideoFrame.Format_Y16,
                QVideoFrame.Format_Jpeg, QVideoFrame.Format_CameraRaw, QVideoFrame.Format_AdobeDng]

    def isFormatSupported(self, format):
        imageFormat = QVideoFrame.imageFormatFromPixelFormat(format.pixelFormat())
        size = format.frameSize()
        return imageFormat != QImage.Format_Invalid and not size.isEmpty() and \
            format.handleType() == QAbstractVideoBuffer.NoHandle

    def start(self, format: QVideoSurfaceFormat):
        imageFormat = QVideoFrame.imageFormatFromPixelFormat(format.pixelFormat())
        size = format.frameSize()
        if imageFormat != QImage.Format_Invalid and not size.isEmpty():
            self.imageFormat = imageFormat
            self.imageSize = size
            self.sourceRect = format.viewport()
            super().start(format)
            self.widget.updateGeometry()
            self.updateVideoRect()
            return True
        else:
            return False

    def stop(self):
        self.currentFrame = QVideoFrame()
        self.targetRect = QRect()
        super().stop()
        self.widget.update()

    def present(self, frame):
        if frame.isValid():
            cloneFrame = QVideoFrame(frame)
            cloneFrame.map(QAbstractVideoBuffer.ReadOnly)
            image = QImage(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(),
                           QVideoFrame.imageFormatFromPixelFormat(cloneFrame.pixelFormat()))
            self.frameAvailable.emit(image)  # this is very important
            cloneFrame.unmap()
        if self.surfaceFormat().pixelFormat() != frame.pixelFormat() or \
                self.surfaceFormat().frameSize() != frame.size():
            self.setError(QAbstractVideoSurface.IncorrectFormatError)
            self.stop()
            return False
        else:
            self.currentFrame = frame
            self.widget.repaint(self.targetRect)
            return True

    def updateVideoRect(self):
        size = self.surfaceFormat().sizeHint()
        size.scale(self.widget.size().boundedTo(size), Qt.KeepAspectRatio)
        self.targetRect = QRect(QPoint(0, 0), size)
        self.targetRect.moveCenter(self.widget.rect().center())

    def paint(self, painter):
        if self.currentFrame.map(QAbstractVideoBuffer.ReadOnly):
            oldTransform = painter.transform()
            if self.surfaceFormat().scanLineDirection() == QVideoSurfaceFormat.BottomToTop:
                painter.scale(1, -1)
                painter.translate(0, -self.widget.height())
            image = QImage(self.currentFrame.bits(), self.currentFrame.width(), self.currentFrame.height(),
                           self.currentFrame.bytesPerLine(), self.imageFormat)
            painter.drawImage(self.targetRect, image, self.sourceRect)
            painter.setTransform(oldTransform)
            self.currentFrame.unmap()


class App(QApplication):
    def __init__(self, sys_argv):
        super().__init__(sys_argv)
        # Show main window
        self.view = QMainWindow()
        self.centralWidget = QWidget(self.view)
        self.gridLayout = QGridLayout(self.centralWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.video_item = QVideoWidget()
        self.gridLayout.addWidget(self.video_item)
        self.view.setCentralWidget(self.centralWidget)
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.grabber = VideoFrameGrabber(self.video_item, self)
        self.mediaPlayer.setVideoOutput(self.grabber)
        self.grabber.frameAvailable.connect(self.process_frame)
        self.mediaPlayer.durationChanged.connect(self.update_duration)
        self.mediaPlayer.positionChanged.connect(self.update_slider_position)
        local = QUrl.fromLocalFile('c:/temp/lorem.mp4')
        media = QMediaContent(local)
        self.mediaPlayer.setMedia(media)
        self.mediaPlayer.play()
        self.view.show()

    def process_frame(self, image):
        # Save image here
        image.save('c:/temp/{}.jpg'.format(str(uuid.uuid4())))

    def update_duration(self):
        pass

    def update_slider_position(self):
        pass


if __name__ == '__main__':
    def except_hook(cls, exception, traceback):
        sys.__excepthook__(cls, exception, traceback)

    if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
        PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
    if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
        PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
    app = App(sys.argv)
    app.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    sys.excepthook = except_hook
    sys.exit(app.exec_())
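Since the question's end goal is a 4-d tensor, the save-to-disk step above could be replaced by accumulating arrays in memory. A sketch, assuming PyQt5's sip-based constBits() and a constant frame size across the video:

import numpy as np

frames = []

def qimage_to_array(image):
    image = image.convertToFormat(QImage.Format_RGB888)
    w, h = image.width(), image.height()
    ptr = image.constBits()
    ptr.setsize(h * image.bytesPerLine())  # PyQt5's voidptr needs an explicit size
    arr = np.frombuffer(ptr, np.uint8).reshape(h, image.bytesPerLine())
    return arr[:, : w * 3].reshape(h, w, 3).copy()  # drop per-line padding bytes

# in App.process_frame:
#     frames.append(qimage_to_array(image))
# after playback: tensor = np.stack(frames)  # [num_frames, height, width, channels]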
