I am dynamically resizing the image frame received from a webcam via OpenCV, but it shows a distorted image. The code and UI file are given below. To resize, press and hold the right mouse button and drag.
Some window sizes display correctly, but others show a scrambled image (screenshots omitted).
I have tried rounding the resolution to even numbers, e.g. converting 623x420 to 624x420. This reduces the scrambling, but it still occurs for some sizes. Adding wait keys had no effect.
I also tried video files instead of the webcam; the result is the same.
import sys
from PyQt5 import QtWidgets, QtGui, QtCore
import cv2
from we import Ui_MainWindow


class MainWindow_exec(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.fps = 24
        self.start_camera()
        self.rightClick = None
        self.x = 600
        self.y = 400

    def mousePressEvent(self, QMouseEvent):
        self.timer.stop()
        self.press = QMouseEvent.pos()
        if QMouseEvent.button() == QtCore.Qt.RightButton:
            self.rdragx = QMouseEvent.x()
            self.rdragy = QMouseEvent.y()
            self.currentx = self.width()
            self.currenty = self.height()
            self.rightClick = True

    def mouseMoveEvent(self, event):
        if event.buttons() == QtCore.Qt.LeftButton:
            globalPos = event.globalPos()
            self.move(globalPos - self.press)
        if self.rightClick == True:
            self.x = self.currentx + event.x() - self.rdragx
            self.y = self.currenty + event.y() - self.rdragy
            self.resize(self.x, self.y)
            self.label.setFixedSize(self.x, self.y)

    def mouseReleaseEvent(self, event):
        self.rightClick = False
        self.timer.start(1000. / self.fps)

    def next_frame(self):
        ret, frame = self.cap.read()
        frame = cv2.resize(frame, (self.x, self.y))
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = QtGui.QImage(frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)
        pix = QtGui.QPixmap.fromImage(img)
        self.label.setPixmap(pix)

    def start_camera(self):
        self.cap = cv2.VideoCapture(0)
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.next_frame)
        self.timer.start(1000. / self.fps)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    MainWindow1 = MainWindow_exec()
    MainWindow1.show()
    sys.exit(app.exec_())
UI file we.py
from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(532, 353)
        MainWindow.setWindowOpacity(1.0)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setStyleSheet("background:grey")
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
The problem is that QImage expects each row of image data to be padded to a multiple of 4 bytes.
Add this to the beginning of the file:
import numpy as np


def padded_row(row_size, padding=4):
    # Round row_size up to the next multiple of `padding` bytes
    return (row_size + padding - 1) // padding * padding
Add padding to the frame before you pass it to QImage:
def next_frame(self):
    ret, frame = self.cap.read()
    frame = cv2.resize(frame, (self.x, self.y))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Copy the frame into a buffer whose rows are padded to a multiple of 4 bytes
    padded_frame = np.zeros((self.y, padded_row(self.x * 3)), 'u1')
    padded_frame[:, :self.x * 3] = frame.reshape(self.y, -1)
    frame = padded_frame
    img = QtGui.QImage(frame, self.x, self.y, QtGui.QImage.Format_RGB888)
    pix = QtGui.QPixmap.fromImage(img)
    self.label.setPixmap(pix)
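An alternative, as a sketch of my own rather than part of the answer above: instead of padding manually, you can pass the row stride of the numpy array to QImage through its bytesPerLine argument, which that constructor overload accepts. This assumes the same attributes (self.cap, self.x, self.y, self.label) as the question's code:
def next_frame(self):
    ret, frame = self.cap.read()
    if not ret:
        return
    frame = cv2.resize(frame, (self.x, self.y))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # frame.strides[0] is the number of bytes per row, so QImage no longer
    # has to assume 4-byte-aligned scanlines
    img = QtGui.QImage(frame.data, self.x, self.y, frame.strides[0],
                       QtGui.QImage.Format_RGB888)
    self.label.setPixmap(QtGui.QPixmap.fromImage(img))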
Related
I'm trying to create a UI that streams a webcam. This is the answer I referenced; I tested it and it works well.
Now I'm trying to change from the linked answer's QThread-subclass (run) approach to the QThread + moveToThread approach, but the UI shows nothing except an empty gray screen, and the terminal prints 'QThread: Destroyed while thread is still running'.
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot, QObject
from PyQt5.QtGui import QImage, QPixmap


class Thread(QObject):  # Changed here
    changePixmap = pyqtSignal(QImage)

    @pyqtSlot()  # Changed here
    def run(self):
        print('run!')  # Changed here
        cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        while True:
            ret, frame = cap.read()
            if ret:
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        # Changed here
        # th = Thread(self)
        # th.changePixmap.connect(self.setImage)
        # th.start()
        qth = QThread()
        qth.start()
        th = Thread()
        th.changePixmap.connect(self.setImage)
        th.moveToThread(qth)
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
It never prints 'run!', so it seems the run method of the Thread class is never called. I'm working on Windows 10 Pro with Python 3.8.
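For reference, a minimal sketch of the usual moveToThread wiring (my own, not taken from an answer in this thread): keep references to the QThread and the worker on self so they are not garbage collected when initUI returns, and connect the thread's started signal to the worker's run slot so that run is actually invoked in the new thread:
# inside App.initUI, replacing the block marked "Changed here"
self.qth = QThread()                  # store on self so it outlives initUI
self.worker = Thread()                # the QObject-based worker defined above
self.worker.moveToThread(self.qth)
self.worker.changePixmap.connect(self.setImage)
self.qth.started.connect(self.worker.run)   # run() executes once the thread starts
self.qth.start()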
I am using Windows, Python 3.6, OpenCV 3, and PyQt5. I have a main window with two QLabel widgets (label1 and label2). I want to drag and drop different video files onto each QLabel widget. My script displays the first frame of each of the two video files.
If I drag a file through label1 into label2 and then release the mouse, the first video frame is displayed in label1 (not what I want).
If I drag a file around label1 into label2 and then release the mouse, the first video frame is displayed in label2 (desired effect).
If I drag a file through label2 into label1 and then release the mouse, the first video frame is displayed in label1 (desired effect).
I want the video to display in label2 regardless of whether I drag the file around or through label1. Suggestions?
import sys, cv2
from PyQt5.QtWidgets import QApplication, QLabel, QFrame, QMainWindow
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import Qt


class Example(QMainWindow):
    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.setGeometry(200, 300, 800, 600)
        self.setAcceptDrops(True)
        self.setMouseTracking(True)
        self.label1 = QLabel(self)
        self.label1.move(10, 10)
        self.label1.resize(780, 280)
        self.label1.setFrameShape(QFrame.Box)
        self.label1.setAcceptDrops(True)
        self.label2 = QLabel(self)
        self.label2.move(10, 310)
        self.label2.resize(780, 280)
        self.label2.setFrameShape(QFrame.Box)
        self.label2.setAcceptDrops(True)
        self.label1.setText("Label 1")
        self.label2.setText("Label 2")
        self.show()

    def dragEnterEvent(self, e):
        if e.mimeData().hasUrls:
            e.accept()
        else:
            e.ignore()

    def dropEvent(self, e):
        if e.mimeData().hasUrls:
            e.accept()
            for url in e.mimeData().urls():
                if self.label1.underMouse():
                    fname = str(url.toLocalFile())
                    self.openFile1(fname)
                elif self.label2.underMouse():
                    fname = str(url.toLocalFile())
                    self.openFile2(fname)
        else:
            e.ignore()

    def openFile1(self, filename):
        self.cap1 = cv2.VideoCapture(str(filename))
        self.cap1.set(cv2.CAP_PROP_POS_FRAMES, 0)
        width = self.cap1.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = self.cap1.get(cv2.CAP_PROP_FRAME_HEIGHT)
        ret, frame = self.cap1.read()
        if ret == True:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
            pix = QPixmap.fromImage(img)
            pix = self.scalePix(self.label1, pix, width, height)
            self.label1.setPixmap(pix)

    def openFile2(self, filename):
        self.cap2 = cv2.VideoCapture(str(filename))
        self.cap2.set(cv2.CAP_PROP_POS_FRAMES, 0)
        width = self.cap2.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = self.cap2.get(cv2.CAP_PROP_FRAME_HEIGHT)
        ret, frame = self.cap2.read()
        if ret == True:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
            pix = QPixmap.fromImage(img)
            pix = self.scalePix(self.label1, pix, width, height)
            self.label2.setPixmap(pix)

    def scalePix(self, label, p, width, height):
        window_width = label.width()
        ratio = height / width
        window_height = int(ratio * window_width)
        window_height = label.height()
        window_width = int(1 / ratio * window_height)
        p = p.scaledToWidth(window_width, Qt.SmoothTransformation)
        p = p.scaledToHeight(window_height, Qt.SmoothTransformation)
        return p


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
    ex.show()
    sys.exit(app.exec_())
The documentation indicates that underMouse() may fail during drag and drop operations:
bool QWidget::underMouse() const
Returns true if the widget is under the mouse cursor; otherwise returns false.
This value is not updated properly during drag and drop operations.
So it is better not to use it; instead, we can create a custom QLabel that implements the drag and drop:
import sys, cv2
from PyQt5 import QtCore, QtGui, QtWidgets


class OpenCVLabel(QtWidgets.QLabel):
    def __init__(self, *args, **kwargs):
        super(OpenCVLabel, self).__init__(*args, **kwargs)
        self.setFrameShape(QtWidgets.QFrame.Box)
        self.setAcceptDrops(True)

    def dragEnterEvent(self, e):
        if e.mimeData().hasUrls():
            e.accept()
        else:
            e.ignore()

    def dropEvent(self, e):
        if e.mimeData().hasUrls():
            e.accept()
            for url in e.mimeData().urls():
                self.openFile(url.toLocalFile())
        else:
            e.ignore()

    def openFile(self, filename):
        cap = cv2.VideoCapture(str(filename))
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img = QtGui.QImage(frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)
            pix = QtGui.QPixmap.fromImage(img)
            pix = pix.scaled(self.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
            self.setPixmap(pix)


class Example(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        label1 = OpenCVLabel("label1")
        label2 = OpenCVLabel("label2")
        central_widget = QtWidgets.QWidget()
        self.setCentralWidget(central_widget)
        lay = QtWidgets.QVBoxLayout(central_widget)
        lay.addWidget(label1)
        lay.addWidget(label2)
        self.resize(780, 560)


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    ex = Example()
    ex.show()
    sys.exit(app.exec_())
I am trying to link PyQt and an OpenCV video feed, but I can't understand how to apply a while loop to stream the video continuously. It just takes a still picture. Can anyone please help solve the problem?
PyQt = 5
Python = 3.6.1
import sys
import cv2
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
from PyQt5.QtGui import QPixmap


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                         QtGui.QImage.Format_RGB888)
        convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
        pixmap = QPixmap(convertToQtFormat)
        resizeImage = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
        QApplication.processEvents()
        label.setPixmap(resizeImage)
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
The problem is that the function that obtains the image is executed only once, so the label is never updated.
The obvious fix is to place it inside a loop, but that would block the main window. The blocking can be avoided by using the QThread class and sending a QImage through a signal to update the label. For example:
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap


class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)


class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()
Updating this for PySide2 and qimage2ndarray
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *  # QApplication, QWidget, QLabel, etc. live here in Qt5
import cv2                       # OpenCV
import qimage2ndarray            # avoids a memory leak, see the gist below
import sys                       # for exiting


# Minimal implementation...
def displayFrame():
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))


app = QApplication([])
window = QWidget()

# OPENCV
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# timer for getting frames
timer = QTimer()
timer.timeout.connect(displayFrame)
timer.start(60)

label = QLabel('No Camera Feed')
button = QPushButton("Quiter")
button.clicked.connect(sys.exit)  # quiter button

layout = QVBoxLayout()
layout.addWidget(button)
layout.addWidget(label)
window.setLayout(layout)
window.show()
app.exec_()

# See also: https://gist.github.com/bsdnoobz/8464000
Thank you, Taimur Islam, for the question.
Thank you, eyllanesc, for the wonderful answer; I have modified your code a little. I used PyQt4 and Python 2.7, and I didn't use OpenCV.
import sys
import numpy as np
import flycapture2 as fc2
from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)


class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)
        self.cameraSettings()

    def run(self):
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)
            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)
            self.changePixmap.emit(rawImage)

    def cameraSettings(self):
        print(fc2.get_library_version())
        self.c = fc2.Context()
        numberCam = self.c.get_num_of_cameras()
        print(numberCam)
        self.c.connect(*self.c.get_camera_from_index(0))
        print(self.c.get_camera_info())
        m, f = self.c.get_video_mode_and_frame_rate()
        print(m, f)
        print(self.c.get_property_info(fc2.FRAME_RATE))
        p = self.c.get_property(fc2.FRAME_RATE)
        print(p)
        self.c.set_property(**p)
        self.c.start_capture()


class App(QWidget):
    def __init__(self):
        super(App, self).__init__()
        self.title = 'PyQt4 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.move(0, 0)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(lambda p: self.setPixMap(p))
        th.start()

    def setPixMap(self, p):
        p = QPixmap.fromImage(p)
        p = p.scaled(640, 480, Qt.KeepAspectRatio)
        self.label.setPixmap(p)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
Why doesn't the code work? The video works fine, and when I tried to show text in the second label it also showed up. But if I want to change the value continuously, like the current time, in setText(), what can I do? I am new to multithreading.
import sys
import cv2
from PyQt5.QtCore import QThread, pyqtSignal, Qt
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
import datetime


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        label.move(280, 120)
        label.resize(640, 480)
        label1 = QLabel(self)
        label1.move(680, 820)
        th = Thread(self)
        th.changePixmap.connect(lambda p: label.setPixmap(p))
        th.changeLabel.connect(lambda n: label1.setText("A"))
        th.start()


class Thread(QThread):
    changePixmap = pyqtSignal(QPixmap)
    changeLabel = pyqtSignal(QLabel)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)

    def run(self):
        cap = cv2.VideoCapture(0)
        n = QLabel()
        while True:
            ret, frame = cap.read()
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0], QImage.Format_RGB888)
            convertToQtFormat = QPixmap.fromImage(convertToQtFormat)
            p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
            self.changePixmap.emit(p)
            now = datetime.datetime.now()
            sec = now.second
            try:
                self.changeLabel.emit(n)
            except Exception as e:
                print(str(e))


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
You should not send a new widget (QLabel) to the main view, since that will be a new label and not the original one; instead, you should send the text, which is of type str.
class Thread(QThread):
    changePixmap = pyqtSignal(QPixmap)
    changeLabel = pyqtSignal(str)

    def run(self):
        [...]
        now = datetime.datetime.now()
        sec = now.second
        self.changeLabel.emit(str(sec))
And then connect it to the setText function:
th.changeLabel.connect(label1.setText)
Complete code:
import sys
import cv2
from PyQt5.QtCore import QThread, pyqtSignal, Qt
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
import datetime


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        label.move(280, 120)
        label.resize(640, 480)
        label1 = QLabel(self)
        label1.move(580, 620)
        self.th = Thread(self)
        self.th.changePixmap.connect(label.setPixmap)
        self.th.changeLabel.connect(label1.setText)
        self.th.start()

    def closeEvent(self, event):
        self.th.stop()
        QWidget.closeEvent(self, event)


class Thread(QThread):
    changePixmap = pyqtSignal(QPixmap)
    changeLabel = pyqtSignal(str)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)
        self.isRunning = True

    def run(self):
        cap = cv2.VideoCapture(0)
        while self.isRunning:
            ret, frame = cap.read()
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0], QImage.Format_RGB888)
            convertToQtFormat = QPixmap.fromImage(convertToQtFormat)
            p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
            self.changePixmap.emit(p)
            now = datetime.datetime.now()
            sec = now.second
            self.changeLabel.emit(str(sec))

    def stop(self):
        self.isRunning = False
        self.quit()
        self.wait()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
Note: I have added a stop method to be able to stop the thread and close properly; for this I also overrode the closeEvent method. Another change: I moved label1, since my screen is not that big.
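As a side note (my own sketch, not part of the answer above), QThread already provides a built-in interruption flag, so the custom isRunning attribute could be replaced by requestInterruption() and isInterruptionRequested():
class Thread(QThread):
    changePixmap = pyqtSignal(QPixmap)
    changeLabel = pyqtSignal(str)

    def run(self):
        cap = cv2.VideoCapture(0)
        # loop until the main window requests interruption in closeEvent
        while not self.isInterruptionRequested():
            ret, frame = cap.read()
            if not ret:
                continue
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgbImage.shape
            image = QImage(rgbImage.data, w, h, ch * w, QImage.Format_RGB888)
            self.changePixmap.emit(QPixmap.fromImage(image).scaled(640, 480, Qt.KeepAspectRatio))
            self.changeLabel.emit(str(datetime.datetime.now().second))
        cap.release()

# and in App.closeEvent:
#     self.th.requestInterruption()
#     self.th.wait()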