I have a UI I built in Qt Designer, and when I try to connect the signal and the slot, Python gives the following error:
TypeError: connect() failed between clicked(bool) and loadClicked()
If I remove the @pyqtSlot() decorator, the UI will launch, but when I click the button, Python errors out and shuts down. What could be causing the issue?
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot
import cv2
import sys
class Ui_Dialog(object):
def setupUi(self, Dialog):
# Other UI Setup and config attributes
self.SelectImage.clicked.connect(self.loadClicked)
@pyqtSlot()
def loadClicked(self):
super(self.loadImage("Wilkerson.png"))
def loadImage(self, fname):
self.image = cv2.imread(fname)
self.displayImage()
def displayImage(self):
qformat=QImage.Format_Indexed8
if len(self.image.shape) == 3:
if (self.image.shape[2]) == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(self.image, self.image.shape[1], self.image.shape[0], self.image.strides[0], qformat)
img = img.rgbSwapped()
self.importedImageFrame.setPixmap(QPixmap.fromImage(img))
self.importedImageFrame.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
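For context, @pyqtSlot() only registers a slot when the decorated method belongs to a QObject subclass; the Designer-generated Ui_Dialog above inherits from plain object, which matches the connect() error. A minimal sketch of the working case (the class and widget names here are illustrative, not from the project):
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QDialog, QPushButton

class Demo(QDialog):                  # QObject subclass, so the decorator is valid
    def __init__(self):
        super().__init__()
        self.button = QPushButton("Load", self)
        self.button.clicked.connect(self.loadClicked)   # connects cleanly

    @pyqtSlot()
    def loadClicked(self):
        print("clicked")

app = QApplication([])
demo = Demo()
demo.show()
app.exec_()
Decorating a method of a plain-object class the same way is what typically produces "connect() failed between clicked(bool) and loadClicked()".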
I got rid of the Qt Designer-generated UI code and tried to just load the .ui file instead. That still hasn't fixed the issue. The revised code is below.
class ezNPR(QDialog):
def __init__(self):
super(ezNPR,self).__init__()
loadUi('ezNPRApp.ui',self)
self.Image=None
# Insert Application control commands here
self.AppStackedWidget.setCurrentIndex(0)
self.ExitPushButton.clicked.connect(QDialog.close)
self.ImagePushButton.clicked.connect(lambda:self.AppStackedWidget.setCurrentIndex(0))
self.SignaturePushButton.clicked.connect(lambda:self.AppStackedWidget.setCurrentIndex(1))
self.HelpPushButton.clicked.connect(lambda:self.AppStackedWidget.setCurrentIndex(2))
self.SelectImage.clicked.connect(self.loadClicked)
self.setWindowTitle("ezNPR Image and Photo Cropping Tool")
@pyqtSlot()
def loadClicked(self):
fname, filter = QFileDialog.getOpenFileName(self, "Open File", 'C:\\', "Image Files (*.jpg,*.png,*.bmp,*.tif)")
if fname:
self.loadImage(fname)
else:
msg = QtWidgets.QMessageBox()
msg.setText("Invalid image type selected. Please select a new image.")
msg.setInformativeText("You must select a BMP, JPG, PNG, or TIFF")
msg.setWindowTitle("Error Loading Image")
msg.setStandardButtons(msg.Ok)
def loadImage(self, fname):
self.displayImage()
self.image = cv2.imread(fname)
def displayImage(self):
qformat = QImage.Format_Indexed8
if len(self.image.shape) == 3:
if (self.image.shape[2]) == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(self.image, self.image.shape[1], self.image.shape[0], self.image.strides[0], qformat)
img = img.rgbSwapped()
self.importedImageFrame.setPixmap(QPixmap.fromImage(img))
self.importedImageFrame.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
I went back and re-looked at a few things and was able to work out the problem. However, now I have encountered a new problem: I can't get the output image to fit in the Qt Designer label I created to display it. The pre-transformed image displays, but it is not scaled to fit the label. My code is hanging at the second-to-last line in the display_image function...
import sys, traceback
import os
import cv2
from PyQt5 import QtCore
from PyQt5.QtCore import *
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QFileDialog, QApplication, QComboBox, QStackedWidget
from PyQt5.uic import loadUi
class ezNPR(QDialog):
def __init__(self):
super(ezNPR, self).__init__()
loadUi('ezNPRApp.ui', self)
self.image = None
self.AppStackedWidget.setCurrentIndex(0)
self.ExitPushButton.clicked.connect(QDialog.close)
self.ImagePushButton.clicked.connect(lambda: self.AppStackedWidget.setCurrentIndex(0))
self.SignaturePushButton.clicked.connect(lambda: self.AppStackedWidget.setCurrentIndex(1))
self.HelpPushButton.clicked.connect(lambda: self.AppStackedWidget.setCurrentIndex(2))
self.SelectImage.clicked.connect(self.load_clicked)
@pyqtSlot()
def load_clicked(self):
fname, filter = QFileDialog.getOpenFileName(self, "Open File", os.getenv("HOME"))
try:
if fname:
self.image = cv2.imread(fname)
#cv2.imshow("First Image", self.image)
self.display_image()
else:
print('No image selected')
# msg = QtWidgets.QMessageBox()
# msg.setText("Invalid image type selected. Please select a new image.")
# msg.setInformativeText("You must select a BMP, JPG, PNG, or TIFF")
# msg.setWindowTitle("Error Loading Image")
# msg.setStandardButtons(msg.Ok)
finally:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("*** print_tb:")
traceback.print_tb(exc_traceback, limit=5, file=sys.stdout)
def display_image(self):
displayorig = self.image
dih, diw = displayorig.shape[:2]
if dih > diw:
resizedisplayorig = cv2.resize(displayorig, None, fx=0.2, fy=0.5, interpolation=cv2.INTER_LINEAR)
else:
resizedisplayorig = cv2.resize(displayorig, None, fx=0.5, fy=0.2, interpolation=cv2.INTER_LINEAR)
resizedisplayorig = cv2.cvtColor(resizedisplayorig, cv2.COLOR_BGR2RGB)
rdih, rdiw = resizedisplayorig.shape[:2]
#cv2.imshow("Final Image", resizedisplayorig)
self.importedImageFrame.setPixmap(QPixmap.fromImage(resizedisplayorig)) #Hangs here
self.importedImageFrame.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.inputImageLabel.setText(rdih + " X " + rdiw)
if __name__ == "__main__":
app=QApplication(sys.argv)
window=ezNPR()
window.show()
sys.exit(app.exec_())
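For reference, QPixmap.fromImage() expects a QImage, not the NumPy array that cv2 returns, which is consistent with the failure at the flagged line. A rough sketch of display_image that converts the array and lets Qt scale it to the label (widget names are taken from the posted code; the rest is an assumption about the intended behaviour):
def display_image(self):
    # convert the BGR OpenCV array to RGB and wrap it in a QImage
    rgb = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
    h, w, _ = rgb.shape
    qimg = QImage(rgb.data, w, h, rgb.strides[0], QImage.Format_RGB888)
    # scale the pixmap to the label instead of resizing with cv2
    pix = QPixmap.fromImage(qimg).scaled(
        self.importedImageFrame.size(),
        QtCore.Qt.KeepAspectRatio,
        QtCore.Qt.SmoothTransformation)
    self.importedImageFrame.setPixmap(pix)
    self.importedImageFrame.setAlignment(QtCore.Qt.AlignCenter)
    self.inputImageLabel.setText("{} x {}".format(w, h))
Formatting the dimensions as a string also avoids the int/str concatenation in the original setText() call.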
How can I set the audio output of a QMediaPlayer to a specific output in Windows 7 and later?
This was really easy in PySide (using Phonon) but I can't find a way to do it in PySide2.
There are some related questions already, like this old but still not solved one, or this one that asks exactly what I want.
They are both in C++ and it's difficult to convert them to PySide2.
The second one is answered with this code:
QMediaService *svc = player->service();
if (svc != nullptr)
{
QAudioOutputSelectorControl *out = qobject_cast<QAudioOutputSelectorControl *>
(svc->requestControl(QAudioOutputSelectorControl_iid));
if (out != nullptr)
{
out->setActiveOutput(this->ui->comboBox->currentText());
svc->releaseControl(out);
}
}
Another one, with an attempted Python conversion, didn't work either.
I tried to convert them to Python code, but the result was not successful.
Here is my minimal attempt:
import sys
from PySide2 import QtMultimedia
from PySide2.QtCore import QUrl, Qt
from PySide2.QtMultimedia import QMediaPlayer, QMediaContent
from PySide2.QtWidgets import (QPushButton, QSlider, QHBoxLayout, QVBoxLayout,
QFileDialog, QStyle, QApplication, QDialog, QComboBox)
class Window(QDialog):
def __init__(self):
super().__init__()
self.out_combo = QComboBox()
mode = QtMultimedia.QAudio.AudioOutput
devices = QtMultimedia.QAudioDeviceInfo.availableDevices(mode)
for item in [(dev.deviceName(), dev) for dev in devices]:
self.out_combo.addItem(item[0], item[1])
self.out_combo.currentIndexChanged.connect(self.out_device_changed)
openBtn = QPushButton('Open file')
openBtn.clicked.connect(self.open_file)
self.playBtn = QPushButton()
self.playBtn.setEnabled(False)
self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
self.playBtn.clicked.connect(self.play_file)
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(0, 0)
self.slider.sliderMoved.connect(self.set_position)
hor_layout = QHBoxLayout()
hor_layout.setContentsMargins(0, 0, 0, 0)
hor_layout.addWidget(openBtn)
hor_layout.addWidget(self.playBtn)
hor_layout.addWidget(self.slider)
ver_layout = QVBoxLayout()
ver_layout.addWidget(self.out_combo)
ver_layout.addLayout(hor_layout)
self.setLayout(ver_layout)
self.player = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.player.stateChanged.connect(self.mediastate_changed)
self.player.positionChanged.connect(self.position_changed)
self.player.durationChanged.connect(self.duration_changed)
self.show()
def open_file(self):
file_name, _ = QFileDialog.getOpenFileName(self, "Open file")
if file_name != '':
self.player.setMedia(QMediaContent(QUrl.fromLocalFile(file_name)))
# self.label.setText(basename(file_name))
self.playBtn.setEnabled(True)
def play_file(self):
if self.player.state() == QMediaPlayer.PlayingState:
self.player.pause()
else:
self.player.play()
def mediastate_changed(self, state):
if self.player.state() == QMediaPlayer.PlayingState:
self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
else:
self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
def position_changed(self, position):
self.slider.setValue(position)
def duration_changed(self, duration):
self.slider.setRange(0, duration)
def set_position(self, position):
self.player.setPosition(position)
def out_device_changed(self, idx):
device = self.out_combo.itemData(idx)
service = self.player.service()
if service:
out = service.requestControl("org.qt-project.qt.mediastreamscontrol/5.0")
if out:
out.setActiveOutput(self.out_combo.currentText())
service.releaseControl(out)
else:
print("No output found!")
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())
In the main window I can open, save, or create an image. If I modify this image in a second window, how can I send the result back to the main window when I close the second window?
import sys
import cv2
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.uic import loadUi
This is the second window, where I pass in the image loaded in the main window and make some modifications.
class toolWindows(QtWidgets.QMainWindow):
def __init__(self, image):
super(toolWindows, self).__init__()
self._image = image
self._result = None
menubar = self.menuBar()
modifyImage = QtWidgets.QAction('&modifyTheImage', self)
modifyImage.setShortcut('Ctrl+M')
modifyImage.setStatusTip('modify the image')
modifyImage.triggered.connect(self.doSomething)
fileMenu = menubar.addMenu('&modify')
fileMenu.addAction(modifyImage)
self.show()
def doSomething(self):
self._result = cv2.Canny(self._image,100,200)
cv2.imshow('doSomething',self._result)
return self._result
def getResult(self):
return self._result
This is the main window:
class mainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(mainWindow, self).__init__(*args, **kwargs)
self.mainWindowImageSource = None
self.mainWindowImageResult = None
menubar = self.menuBar()
LoadImage = QtWidgets.QAction('&LoadImage', self)
LoadImage.setShortcut('Ctrl+L')
LoadImage.setStatusTip('Load an image')
LoadImage.triggered.connect(self.loadImage)
toolWin = QtWidgets.QAction('&tool', self)
toolWin.setShortcut('Ctrl+T')
toolWin.setStatusTip('open the tool win')
toolWin.triggered.connect(self.openToolWindow)
checkResult = QtWidgets.QAction('&Check', self)
checkResult.setShortcut('Ctrl+K')
checkResult.setStatusTip('check that tool win pass the image on close')
checkResult.triggered.connect(self.checkResultImage)
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(LoadImage)
fileMenu.addAction(toolWin)
fileMenu.addAction(checkResult)
self.show()
def loadImage(self):
fileName, filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', 'C:\\', "Image Files (*.jpg)")
if fileName:
self.mainWindowImageSource = cv2.imread(fileName, cv2.IMREAD_COLOR)
else:
print('Invalid Image')
def openToolWindow(self):
self.tw = toolWindows(self.mainWindowImageSource)
self.tw.show()
self.mainWindowImageResult = self.tw.getResult()
def checkResultImage(self):
print("ol")
cv2.imshow('test', self.mainWindowImageResult)
cv2.waitKey(0)
cv2.destroyAllWindows()
##### APP MAIN ####
app = QtWidgets.QApplication(sys.argv)
window = mainWindow()
# this starts the event loop until exit
sys.exit(app.exec_())
I found a solution based on various posts I came across. To get a result from the second window, the simplest way is probably to use a QDialog. In the main window I changed the code in this way:
def openToolWindow(self):
self.tw = toolWindows(self.mainWindowImageSource)
self.tw.show()
check = True
if self.tw.exec_() == QtWidgets.QDialog.Accepted:
self.mainWindowImageResult = self.tw.getResult()
cv2.imshow('test', self.mainWindowImageResult)
cv2.waitKey(0)
cv2.destroyAllWindows()
In this way, when the second window (the tool window) opens, the main window waits for the accept button and then takes the result with getResult(), a function that simply returns the image.
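For completeness, a sketch of what the tool window could look like as a QDialog, so that exec_() blocks until it is accepted (the button wiring below is an assumption; the post only shows the main-window side of the change):
import cv2
from PyQt5 import QtWidgets

class toolWindows(QtWidgets.QDialog):
    def __init__(self, image, parent=None):
        super(toolWindows, self).__init__(parent)
        self._image = image
        self._result = None
        modify_btn = QtWidgets.QPushButton('Modify', self)
        done_btn = QtWidgets.QPushButton('Done', self)
        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(modify_btn)
        layout.addWidget(done_btn)
        modify_btn.clicked.connect(self.doSomething)
        done_btn.clicked.connect(self.accept)  # ends exec_() with QDialog.Accepted

    def doSomething(self):
        self._result = cv2.Canny(self._image, 100, 200)

    def getResult(self):
        return self._result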
I have a window with six symmetrically placed labels, all showing images (designed using Qt Designer with the help of layouts). I would like to resize these images according to the changing window size. I have found some help in previous questions such as: PyQt: Detect resizing in Widget-window resized signal
At present, using resizeEvent() in my case does not shrink the images along with the resize. It is already triggered when my form window is first displayed, which makes the pushButton useless. Above all, the resulting execution is very slow. My images are 2058x1536 and are displayed with transparency.
My qt-designer code is given here: https://pastebin.com/TzM6qiKZ
import Ui_ImageCrop_Test
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtGui import QPixmap, QImage, QPainter, QColor
from PyQt5.QtCore import Qt
class ImageCrop(Ui_ImageCrop_Test.Ui_MainWindow, QMainWindow):
def __init__(self, parent=None):
super(ImageCrop, self).__init__()
self.setupUi(self)
self.transparency = 220
with open("Img_files.txt") as file:
self.img_files = file.read().splitlines()
self.length = len(self.img_files)
self.pushButton_1.clicked.connect(self.click1)
self.label_1.resizeEvent = self.click1
def click1(self, event):
for i in range(6):
image = QImage(self.img_files[i])
image = image.convertToFormat(QImage.Format_ARGB8565_Premultiplied)
p = QPainter(image)
p.setCompositionMode(QPainter.CompositionMode_DestinationIn)
p.fillRect(image.rect(), QColor(0, 0, 0, self.transparency))
p.end()
pixmap = QPixmap(image)
w = int(self.label_1.width() - 4.0)
h = int(self.label_1.height() - 4.0)
smaller_pixmap = pixmap.scaled(w, h, Qt.IgnoreAspectRatio, Qt.FastTransformation)
if i == 0:
self.label_1.setPixmap(smaller_pixmap)
if i == 1:
self.label_2.setPixmap(smaller_pixmap)
if i == 2:
self.label_3.setPixmap(smaller_pixmap)
if i == 3:
self.label_4.setPixmap(smaller_pixmap)
if i == 4:
self.label_5.setPixmap(smaller_pixmap)
if i == 5:
self.label_6.setPixmap(smaller_pixmap)
def main():
app = QApplication(sys.argv)
form1 = ImageCrop()
form1.show()
app.exec_()
if __name__ == '__main__': main()
Is there any way to make this code run faster? For example, I was thinking of making all my labels turn blank while the mouse is pressed at the edge of my window and having the images reappear after the mouse button is released, but that does not seem very neat. Also, I am not sure whether using paintEvent can reduce my lag. Thank you for your suggestions and comments.
QLabel has the scaledContents property that allows the image to scale automatically:
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import Ui_ImageCrop_Test
class ImageCrop(QtWidgets.QMainWindow, Ui_ImageCrop_Test.Ui_MainWindow):
def __init__(self, parent=None):
super(ImageCrop, self).__init__()
self.setupUi(self)
self.pushButton_1.clicked.connect(self.click1)
self.transparency = 220
with open("Img_files.txt") as file:
self.img_files = file.read().splitlines()
@QtCore.pyqtSlot()
def click1(self):
labels = [self.label_1, self.label_2, self.label_3,
self.label_4, self.label_5, self.label_6]
for label, filename in zip(labels, self.img_files):
image = QtGui.QImage(filename)
image = image.convertToFormat(QtGui.QImage.Format_ARGB8565_Premultiplied)
p = QtGui.QPainter(image)
p.setCompositionMode(QtGui.QPainter.CompositionMode_DestinationIn)
p.fillRect(image.rect(), QtGui.QColor(0, 0, 0, self.transparency))
p.end()
pixmap = QtGui.QPixmap(image)
w = int(label.width() - 4.0)
h = int(label.height() - 4.0)
smaller_pixmap = pixmap.scaled(w, h, QtCore.Qt.IgnoreAspectRatio, QtCore.Qt.FastTransformation)
label.setPixmap(smaller_pixmap)
label.setScaledContents(True)
def main():
app = QtWidgets.QApplication(sys.argv)
form1 = ImageCrop()
form1.show()
app.exec_()
if __name__ == '__main__': main()
I am creating a program to play videos and then process them. I am able to play the videos with QMediaPlayer. How do I access specific frames as images or something similar? My end goal is to format the video into a 4-D tensor of size [Num of frames, width_of_video, height_of_video, channels].
Here is the code that loads my video:
self.clear_layout(self.vlayout)
videoItem = QVideoWidget()
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.mediaPlayer.durationChanged.connect(self.update_duration)
self.mediaPlayer.positionChanged.connect(self.update_slider_position)
self.vlayout.addWidget(videoItem)
self.mediaPlayer.setVideoOutput(videoItem)
local = QUrl.fromLocalFile(self.video_paths)
media = QMediaContent(local)
self.mediaPlayer.setMedia(media)
self.play_video()
Here is a working example that I converted to Python from the C++ version available in this question: How to save a frame using QMediaPlayer?
import sys
import uuid
import PyQt5
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, QObject, QUrl, QRect, pyqtSignal, QPoint
from PyQt5.QtGui import QPainter, QImage
from PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, QGridLayout, QToolBar, QAction
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent, QAbstractVideoBuffer, \
QVideoFrame, QVideoSurfaceFormat, QAbstractVideoSurface
from PyQt5.QtMultimediaWidgets import QVideoWidget
class VideoFrameGrabber(QAbstractVideoSurface):
frameAvailable = pyqtSignal(QImage)
def __init__(self, widget: QWidget, parent: QObject):
super().__init__(parent)
self.widget = widget
def supportedPixelFormats(self, handleType):
return [QVideoFrame.Format_ARGB32, QVideoFrame.Format_ARGB32_Premultiplied,
QVideoFrame.Format_RGB32, QVideoFrame.Format_RGB24, QVideoFrame.Format_RGB565,
QVideoFrame.Format_RGB555, QVideoFrame.Format_ARGB8565_Premultiplied,
QVideoFrame.Format_BGRA32, QVideoFrame.Format_BGRA32_Premultiplied, QVideoFrame.Format_BGR32,
QVideoFrame.Format_BGR24, QVideoFrame.Format_BGR565, QVideoFrame.Format_BGR555,
QVideoFrame.Format_BGRA5658_Premultiplied, QVideoFrame.Format_AYUV444,
QVideoFrame.Format_AYUV444_Premultiplied, QVideoFrame.Format_YUV444,
QVideoFrame.Format_YUV420P, QVideoFrame.Format_YV12, QVideoFrame.Format_UYVY,
QVideoFrame.Format_YUYV, QVideoFrame.Format_NV12, QVideoFrame.Format_NV21,
QVideoFrame.Format_IMC1, QVideoFrame.Format_IMC2, QVideoFrame.Format_IMC3,
QVideoFrame.Format_IMC4, QVideoFrame.Format_Y8, QVideoFrame.Format_Y16,
QVideoFrame.Format_Jpeg, QVideoFrame.Format_CameraRaw, QVideoFrame.Format_AdobeDng]
def isFormatSupported(self, format):
imageFormat = QVideoFrame.imageFormatFromPixelFormat(format.pixelFormat())
size = format.frameSize()
return imageFormat != QImage.Format_Invalid and not size.isEmpty() and \
format.handleType() == QAbstractVideoBuffer.NoHandle
def start(self, format: QVideoSurfaceFormat):
imageFormat = QVideoFrame.imageFormatFromPixelFormat(format.pixelFormat())
size = format.frameSize()
if imageFormat != QImage.Format_Invalid and not size.isEmpty():
self.imageFormat = imageFormat
self.imageSize = size
self.sourceRect = format.viewport()
super().start(format)
self.widget.updateGeometry()
self.updateVideoRect()
return True
else:
return False
def stop(self):
self.currentFrame = QVideoFrame()
self.targetRect = QRect()
super().stop()
self.widget.update()
def present(self, frame):
if frame.isValid():
cloneFrame = QVideoFrame(frame)
cloneFrame.map(QAbstractVideoBuffer.ReadOnly)
image = QImage(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(),
QVideoFrame.imageFormatFromPixelFormat(cloneFrame.pixelFormat()))
self.frameAvailable.emit(image) # this is very important
cloneFrame.unmap()
if self.surfaceFormat().pixelFormat() != frame.pixelFormat() or \
self.surfaceFormat().frameSize() != frame.size():
self.setError(QAbstractVideoSurface.IncorrectFormatError)
self.stop()
return False
else:
self.currentFrame = frame
self.widget.repaint(self.targetRect)
return True
def updateVideoRect(self):
size = self.surfaceFormat().sizeHint()
size.scale(self.widget.size().boundedTo(size), Qt.KeepAspectRatio)
self.targetRect = QRect(QPoint(0, 0), size)
self.targetRect.moveCenter(self.widget.rect().center())
def paint(self, painter):
if self.currentFrame.map(QAbstractVideoBuffer.ReadOnly):
oldTransform = painter.transform()
if self.surfaceFormat().scanLineDirection() == QVideoSurfaceFormat.BottomToTop:
painter.scale(1, -1)
painter.translate(0, -self.widget.height())
image = QImage(self.currentFrame.bits(), self.currentFrame.width(), self.currentFrame.height(),
self.currentFrame.bytesPerLine(), self.imageFormat)
painter.drawImage(self.targetRect, image, self.sourceRect)
painter.setTransform(oldTransform)
self.currentFrame.unmap()
class App(QApplication):
def __init__(self, sys_argv):
super().__init__(sys_argv)
# Show main window
self.view = QMainWindow()
self.centralWidget = QWidget(self.view)
self.gridLayout = QGridLayout(self.centralWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.video_item = QVideoWidget()
self.gridLayout.addWidget(self.video_item)
self.view.setCentralWidget(self.centralWidget)
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.grabber = VideoFrameGrabber(self.video_item, self)
self.mediaPlayer.setVideoOutput(self.grabber)
self.grabber.frameAvailable.connect(self.process_frame)
self.mediaPlayer.durationChanged.connect(self.update_duration)
self.mediaPlayer.positionChanged.connect(self.update_slider_position)
local = QUrl.fromLocalFile('c:/temp/lorem.mp4')
media = QMediaContent(local)
self.mediaPlayer.setMedia(media)
self.mediaPlayer.play()
self.view.show()
def process_frame(self, image):
# Save image here
image.save('c:/temp/{}.jpg'.format(str(uuid.uuid4())))
def update_duration(self):
pass
def update_slider_position(self):
pass
if __name__ == '__main__':
def except_hook(cls, exception, traceback):
sys.__excepthook__(cls, exception, traceback)
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
app = App(sys.argv)
app.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
sys.excepthook = except_hook
sys.exit(app.exec_())
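Since the stated goal is a [frames, height, width, channels] tensor, the QImage delivered by frameAvailable can also be turned into a NumPy array instead of being written to disk. A hedged sketch of such a process_frame (self.frames is a hypothetical list created in __init__; the pixel-format conversion is an assumption):
import numpy as np
from PyQt5.QtGui import QImage

def process_frame(self, image):
    # force a known 4-channel layout, then wrap the pixel buffer
    image = image.convertToFormat(QImage.Format_RGBA8888)
    w, h = image.width(), image.height()
    ptr = image.bits()
    ptr.setsize(image.byteCount())
    frame = np.frombuffer(ptr, dtype=np.uint8).reshape(h, w, 4).copy()
    self.frames.append(frame)  # later: np.stack(self.frames) -> [N, H, W, 4]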
#Importing necessary libraries, mainly the OpenCV, and PyQt libraries
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal
class ShowVideo(QtCore.QObject):
#initiating the built in camera
camera_port = -1
camera = cv2.VideoCapture(camera_port)
VideoSignal = QtCore.pyqtSignal(QtGui.QImage)
def __init__(self, parent = None):
super(ShowVideo, self).__init__(parent)
@QtCore.pyqtSlot()
def startVideo(self):
run_video = True
while run_video:
ret, image = self.camera.read()
color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, _ = color_swapped_image.shape
qt_image = QtGui.QImage(color_swapped_image.data,
width,
height,
color_swapped_image.strides[0],
QtGui.QImage.Format_RGB888)
pixmap = QtGui.QPixmap(qt_image)
qt_image = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
qt_image = QtGui.QImage(qt_image)
self.VideoSignal.emit(qt_image)
@QtCore.pyqtSlot()
def makeScreenshot(self):
#cv2.imwrite("test.jpg", self.image)
print("Screenshot saved")
#self.qt_image.save('test.jpg')
class ImageViewer(QtWidgets.QWidget):
def __init__(self, parent = None):
super(ImageViewer, self).__init__(parent)
self.image = QtGui.QImage()
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawImage(0,0, self.image)
self.image = QtGui.QImage()
def initUI(self):
self.setWindowTitle('Test')
@QtCore.pyqtSlot(QtGui.QImage)
def setImage(self, image):
if image.isNull():
print("viewer dropped frame!")
self.image = image
if image.size() != self.size():
self.setFixedSize(image.size())
self.update()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
thread = QtCore.QThread()
thread.start()
vid = ShowVideo()
vid.moveToThread(thread)
image_viewer = ImageViewer()
#image_viewer.resize(200,400)
vid.VideoSignal.connect(image_viewer.setImage)
#Button to start the videocapture:
push_button = QtWidgets.QPushButton('Start')
push_button.clicked.connect(vid.startVideo)
push_button2 = QtWidgets.QPushButton('Screenshot')
push_button2.clicked.connect(vid.makeScreenshot)
vertical_layout = QtWidgets.QVBoxLayout()
vertical_layout.addWidget(image_viewer)
vertical_layout.addWidget(push_button)
vertical_layout.addWidget(push_button2)
layout_widget = QtWidgets.QWidget()
layout_widget.setLayout(vertical_layout)
main_window = QtWidgets.QMainWindow()
main_window.setCentralWidget(layout_widget)
main_window.resize(640,480)
main_window.show()
sys.exit(app.exec_())
This code shows video from the camera in an endless loop using OpenCV and PyQt5. But how do I take a screenshot without stopping the video? I think the loop needs to be paused briefly, the screenshot taken, and the loop run again.
You can use cv2.waitKey() for the same, as shown below:
while run_video:
ret, image = self.camera.read()
if(cv2.waitKey(10) & 0xFF == ord('s')):
cv2.imwrite("screenshot.jpg",image)
(I'm guessing that by the term "screenshot", you mean the camera frame, and not the image of the entire screen.)
When you press 's' on the keyboard, it'll perform imwrite.
Note that if you wish to save multiple images, you'd have to vary the filename. The above code will overwrite screenshot.jpg to save only the latest frame.
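If you do need multiple screenshots, one simple (hypothetical) variation is to put a timestamp in the filename:
import time

while run_video:
    ret, image = self.camera.read()
    if cv2.waitKey(10) & 0xFF == ord('s'):
        cv2.imwrite("screenshot_{}.jpg".format(int(time.time())), image)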