i want to click a button to start to capture frame and click another button to release the webcam which allows me to redo the process.
but a problem occurs when i run this code below and click the button:
import cv2
...
self.StartVideo.clicked.connect(self.startVideo)
...
def startVideo(self):
self.StartVideo.setDisabled(True)
self.cap = cv2.VideoCapture(0)
self.cap.release()
the system crashed and said Process finished with exit code 139 (interrupted by signal 11: SIGSEGV)
and then i tried this way:
import cv2
...
self.StartVideo.clicked.connect(self.startVideo)
self.StartVideo.clicked.connect(self.closeVideo)
...
def startVideo(self):
from Video_Analysis.video_threads import video_thread
self.videoThread = video_thread()
self.videoThread.start()
def closeVideo(self):
self.videoThread.stop()
Video_Analysis.video_threads.py is:
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtGui, QtCore
import threading
import numpy as np
from time import sleep
import cv2
class video_thread(QtCore.QThread):
# finishSignal_frame = QtCore.pyqtSignal(list)
# finishSignal_mark = QtCore.pyqtSignal(np.ndarray)
def __init__(self, parent=None):
super(video_thread, self).__init__(parent)
self.cap = cv2.VideoCapture(0)
def run(self):
self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
self.cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
while True:
ret, frame = self.cap.read()
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
QtGui.QImage.Format_RGB888)
convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
pixmap = QtGui.QPixmap(convertToQtFormat)
def stop(self):
self.cap.release()
self.terminate()
but the problem is same, how can i sovle this?
Thanks a lot!
Related
Im trying to resize the video to full screen but its not working? how to fix it?
from asyncore import loop
from distutils import command
import tkinter
from tkinter import *
from tkvideo import tkvideo
from tkinter import ttk
from tkinter import font
import PIL
from PIL import ImageTk
from PIL import Image
from StudentDetails import *
from entry import EntryLogsSystem
from main import *
import os, sys, subprocess
from training import Training
from recognition import Recognition
from datetime import datetime
import time
from entry import *
import os
import tkinter as tk
from tkVideoPlayer import TkinterVideo
import matplotlib
import matplotlib.pyplot as plt
class home:
def __init__(self, root):
self.root = root
self.root.geometry("1530x790+0+0")
self.root.title("Face Recognition System")
self.root.config(bg="black")
cap = cv2.VideoCapture('v2.mp4')
if (cap.isOpened()== False):
print("Error opening video file")
while(cap.isOpened()):
ret, frame = cap.read()
if ret == True:
cv2.imshow('Smart Access Face Recognition Portal', frame)
frame = cv2.resize(frame, (1920, 1080))
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
cap.release()
os.system('python loading.py')
# Resource: https://coding-engineer.com/2020/09/09/python-opencv-reading-a-video/
if __name__ == "__main__":
root = Tk()
obj = home(root)
root.mainloop()
i tried this code but nothing works. i tried to resize using cv2.resize but it resize it to a specific ration not full screen even when i increase the values.
as i tried this code too but nothing worked too
class home:
def _init_(self, root):
self.root = root
self.root.geometry("1530x790+0+0")
self.root.title("Face Recognition System")
self.root.config(bg="black")
cap = cv2.VideoCapture("v2.mp4")
while(cap.isOpened()):
ret, frame = cap.read()
frame = cv2.resize(frame, (3000,3000))
cv2.imshow("video", frame)
if cv2.waitKey(10) & 0xFF == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
root = Tk()
obj = home(root)
root.mainloop()
You can create a namedWindow with fullscreen attribute, then show your image in it.
Creation of window:
cv2.namedWindow("winname", cv2.WINDOW_FULLSCREEN)
cv2.setWindowProperty("winname", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
Displaying image:
cv2.imshow("winname", frame)
It'll automatically resize the image to your window width/height.
I am trying to make a multi camera video streaming using OpenCV and have used PyQt for GUI. The code is running fine in Full HD but the streams are getting distorted when I change the resolution of the display. Can anyone tell me why is this happening?
Following are the screenshots for your reference:
Screenshot in 1920*1080(Full HD)
Screenshot in 1600*900
Screenshot in 1366*768
The code is below:
from PyQt4 import QtCore, QtGui
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils
class CameraWidget(QtGui.QWidget):
def __init__(self, width, height, stream_link=0, aspect_ratio=False, parent=None, deque_size=1):
super(CameraWidget, self).__init__(parent)
# Initialize deque used to store frames read from the stream
self.deque = deque(maxlen=deque_size)
self.screen_width = width
self.screen_height = height
self.maintain_aspect_ratio = aspect_ratio
self.camera_stream_link = stream_link
# Flag to check if camera is valid/working
self.online = False
self.capture = None
self.video_frame = QtGui.QLabel()
self.load_network_stream()
# Start background frame grabbing
self.get_frame_thread = Thread(target=self.get_frame, args=())
self.get_frame_thread.daemon = True
self.get_frame_thread.start()
# Periodically set video frame to display
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.set_frame)
self.timer.start(.5)
print('Started camera: {}'.format(self.camera_stream_link))
def load_network_stream(self):
"""Verifies stream link and open new stream if valid"""
def load_network_stream_thread():
if self.verify_network_stream(self.camera_stream_link):
self.capture = cv2.VideoCapture(self.camera_stream_link)
self.online = True
self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
self.load_stream_thread.daemon = True
self.load_stream_thread.start()
def verify_network_stream(self, link):
"""Attempts to receive a frame from given link"""
cap = cv2.VideoCapture(link)
if not cap.isOpened():
return False
cap.release()
return True
def get_frame(self):
"""Reads frame, resizes, and converts image to pixmap"""
while True:
try:
if self.capture.isOpened() and self.online:
# Read next frame from stream and insert into deque
status, frame = self.capture.read()
if status:
self.deque.append(frame)
else:
self.capture.release()
self.online = False
else:
# Attempt to reconnect
print('attempting to reconnect', self.camera_stream_link)
self.load_network_stream()
self.spin(2)
self.spin(.001)
except AttributeError:
pass
def spin(self, seconds):
"""Pause for set amount of seconds, replaces time.sleep so program doesnt stall"""
time_end = time.time() + seconds
while time.time() < time_end:
QtGui.QApplication.processEvents()
def set_frame(self):
"""Sets pixmap image to video frame"""
if not self.online:
self.spin(1)
return
if self.deque and self.online:
# Grab latest frame
frame = self.deque[-1]
# Keep frame aspect ratio
if self.maintain_aspect_ratio:
self.frame = imutils.resize(frame, width=self.screen_width)
# Force resize
else:
self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))
# Convert to pixmap and set to video frame
self.img = QtGui.QImage(self.frame, self.frame.shape[1], self.frame.shape[0], QtGui.QImage.Format_RGB888).rgbSwapped()
self.pix = QtGui.QPixmap.fromImage(self.img)
self.video_frame.setPixmap(self.pix)
def get_video_frame(self):
return self.video_frame
I found the solution, QImage was responsible for the trouble.
else:
self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
h, w, ch = self.frame.shape
bytesPerLine = ch * w
# Convert to pixmap and set to video frame
self.img = QtGui.QImage(self.frame, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
self.pix = QtGui.QPixmap.fromImage(self.img)
self.video_frame.setPixmap(self.pix)
I have a UI I built in QDesigner and when I try to connect the signal and the slot, Python gives the following error:
TypeError: connect() failed between clicked(bool) and loadClicked()
If I remove the #pyqtSlot() designer, the UI will launch, but when I click the button, Python errors out and shuts down. What could be causing the issue?
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot
import cv2
import sys
class Ui_Dialog(object):
def setupUi(self, Dialog):
# Other UI Setup and config attributes
self.SelectImage.clicked.connect(self.loadClicked)
#pyqtSlot()
def loadClicked(self):
super(self.loadImage("Wilkerson.png"))
def loadImage(self, fname):
self.image = cv2.imread(fname)
self.displayImage()
def displayImage(self):
qformat=QImage.Format_Indexed8
if len(self.image.shape) == 3:
if (self.image.shape[2]) == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(self.image, self.image.shape[1], self.image.shape[0], self.image.strides[0], qformat)
img = img.rgbSwapped()
self.importedImageFrame.setPixmap(QPixmap.fromImage(img))
self.importedImageFrame.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
I got rid of the QDesigner UI code and tried to just import the UI file. It still hasn't fixed the issue. The revised code is below.
class ezNPR(QDialog):
def __init__(self):
super(ezNPR,self).__init__()
loadUi('ezNPRApp.ui',self)
self.Image=None
# Insert Application control commands here
self.AppStackedWidget.setCurrentIndex(0)
self.ExitPushButton.clicked.connect(QDialog.close)
self.ImagePushButton.clicked.connect(lambda:self.AppStackedWidget.setCurrentIndex(0))
self.SignaturePushButton.clicked.connect(lambda:self.AppStackedWidget.setCurrentIndex(1))
self.HelpPushButton.clicked.connect(lambda:self.AppStackedWidget.setCurrentIndex(2))
self.SelectImage.clicked.connect(self.loadClicked)
self.setWindowTitle("ezNPR Image and Photo Cropping Tool")
#pyqtSlot()
def loadClicked(self):
fname, filter = QFileDialog.getOpenFileName(self, "Open File", 'C:\\', "Image Files (*.jpg,*.png,*.bmp,*.tif)")
if fname:
self.loadImage(fname)
else:
msg = QtWidgets.QMessageBox()
msg.setText("Invalid image type selected. Please select a new image.")
msg.setInformativeText("You must selct a BMP, JPG, PNG, or TIFF")
msg.setWindowTitle("Error Loading Image")
msg.setStandardButtons(msg.Ok)
def loadImage(self, fname):
self.displayImage()
self.image = cv2.imread(fname)
def displayImage(self):
qformat = QImage.Format_Indexed8
if len(self.image.shape) == 3:
if (self.image.shape[2]) == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(self.image, self.image.shape[1], self.image.shape[0], self.image.strides[0], qformat)
img = img.rgbSwapped()
self.importedImageFrame.setPixmap(QPixmap.fromImage(img))
self.importedImageFrame.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
I went back and re-looked at a few things and was able to work out the problem. However, Now I have encountered a new problem of not being able to get the output image to fit in the QDsigner Label I created to display the image. The pre-transformed image displays, but it is not scaled to fit the label. My code is hanging at the second to last line in the display_image function...
import sys, traceback
import os
import cv2
from PyQt5 import QtCore
from PyQt5.QtCore import *
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QFileDialog, QApplication, QComboBox, QStackedWidget
from PyQt5.uic import loadUi
class ezNPR(QDialog):
def __init__(self):
super(ezNPR, self).__init__()
loadUi('ezNPRApp.ui', self)
self.image = None
self.AppStackedWidget.setCurrentIndex(0)
self.ExitPushButton.clicked.connect(QDialog.close)
self.ImagePushButton.clicked.connect(lambda: self.AppStackedWidget.setCurrentIndex(0))
self.SignaturePushButton.clicked.connect(lambda: self.AppStackedWidget.setCurrentIndex(1))
self.HelpPushButton.clicked.connect(lambda: self.AppStackedWidget.setCurrentIndex(2))
self.SelectImage.clicked.connect(self.load_clicked)
#pyqtSlot()
def load_clicked(self):
fname, filter = QFileDialog.getOpenFileName(self, "Open File", os.getenv("HOME"))
try:
if fname:
self.image = cv2.imread(fname)
#cv2.imshow("First Image", self.image)
self.display_image()
else:
print('No image selected')
# msg = QtWidgets.QMessageBox()
# msg.setText("Invalid image type selected. Please select a new image.")
# msg.setInformativeText("You must select a BMP, JPG, PNG, or TIFF")
# msg.setWindowTitle("Error Loading Image")
# msg.setStandardButtons(msg.Ok)
finally:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("*** print_tb:")
traceback.print_tb(exc_traceback, limit=5, file=sys.stdout)
def display_image(self):
displayorig = self.image
dih, diw = displayorig.shape[:2]
if dih > diw:
resizedisplayorig = cv2.resize(displayorig, None, fx=0.2, fy=0.5, interpolation=cv2.INTER_LINEAR)
else:
resizedisplayorig = cv2.resize(displayorig, None, fx=0.5, fy=0.2, interpolation=cv2.INTER_LINEAR)
resizedisplayorig = cv2.cvtColor(resizedisplayorig, cv2.COLOR_BGR2RGB)
rdih, rdiw = resizedisplayorig.shape[:2]
#cv2.imshow("Final Image", resizedisplayorig)
self.importedImageFrame.setPixmap(QPixmap.fromImage(resizedisplayorig)) #Hangs here
self.importedImageFrame.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.inputImageLabel.setText(rdih + " X " + rdiw)
if __name__ == "__main__":
app=QApplication(sys.argv)
window=ezNPR()
window.show()
sys.exit(app.exec_())
#Importing necessary libraries, mainly the OpenCV, and PyQt libraries
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal
class ShowVideo(QtCore.QObject):
#initiating the built in camera
camera_port = -1
camera = cv2.VideoCapture(camera_port)
VideoSignal = QtCore.pyqtSignal(QtGui.QImage)
def __init__(self, parent = None):
super(ShowVideo, self).__init__(parent)
#QtCore.pyqtSlot()
def startVideo(self):
run_video = True
while run_video:
ret, image = self.camera.read()
color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, _ = color_swapped_image.shape
qt_image = QtGui.QImage(color_swapped_image.data,
width,
height,
color_swapped_image.strides[0],
QtGui.QImage.Format_RGB888)
pixmap = QtGui.QPixmap(qt_image)
qt_image = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
qt_image = QtGui.QImage(qt_image)
self.VideoSignal.emit(qt_image)
#QtCore.pyqtSlot()
def makeScreenshot(self):
#cv2.imwrite("test.jpg", self.image)
print("Screenshot saved")
#self.qt_image.save('test.jpg')
class ImageViewer(QtWidgets.QWidget):
def __init__(self, parent = None):
super(ImageViewer, self).__init__(parent)
self.image = QtGui.QImage()
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawImage(0,0, self.image)
self.image = QtGui.QImage()
def initUI(self):
self.setWindowTitle('Test')
#QtCore.pyqtSlot(QtGui.QImage)
def setImage(self, image):
if image.isNull():
print("viewer dropped frame!")
self.image = image
if image.size() != self.size():
self.setFixedSize(image.size())
self.update()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
thread = QtCore.QThread()
thread.start()
vid = ShowVideo()
vid.moveToThread(thread)
image_viewer = ImageViewer()
#image_viewer.resize(200,400)
vid.VideoSignal.connect(image_viewer.setImage)
#Button to start the videocapture:
push_button = QtWidgets.QPushButton('Start')
push_button.clicked.connect(vid.startVideo)
push_button2 = QtWidgets.QPushButton('Screenshot')
push_button2.clicked.connect(vid.makeScreenshot)
vertical_layout = QtWidgets.QVBoxLayout()
vertical_layout.addWidget(image_viewer)
vertical_layout.addWidget(push_button)
vertical_layout.addWidget(push_button2)
layout_widget = QtWidgets.QWidget()
layout_widget.setLayout(vertical_layout)
main_window = QtWidgets.QMainWindow()
main_window.setCentralWidget(layout_widget)
main_window.resize(640,480)
main_window.show()
sys.exit(app.exec_())
This code showing video from camera in endless loop using OpenCV and PyQt5. But how to make screenshot and don't stop showing video. I think it needs to be stop loop for a little, make screnshot, and run loop again.
You can use cv2.waitKey() for the same, as shown below:
while run_video:
ret, image = self.camera.read()
if(cv2.waitKey(10) & 0xFF == ord('s')):
cv2.imwrite("screenshot.jpg",image)
(I'm guessing that by the term "screenshot", you mean the camera frame, and not the image of the entire screen.)
When you press 's' on the keyboard, it'll perform imwrite.
Note that if you wish to save multiple images, you'd have to vary the filename. The above code will overwrite screenshot.jpg to save only the latest frame.
I want to use python OpenCV bindings to display webcam stream in a QLabel. I found some previous posts here:
updating QLabel in non-GUI thread continuously
Displaying a video stream in QLabel with PySide
In the beginning I tried a simple "while" loop:
def onRun(self):
self.playing = True
capture = cv2.VideoCapture(0)
while self.playing:
_, data = capture.read()
data = cv2.cvtColor(data, cv2.cv.CV_BGR2RGB)
qImage = QtGui.QImage(data, data.shape[1], data.shape[0],
QtGui.QImage.Format_RGB888)
self.lblImage.setPixmap(QtGui.QPixmap.fromImage(qImage))
self.lblImage.adjustSize()
time.sleep(0.02)
But I met with a "white-window" problem. I found that proper way to solve this is to create a new thread. My question is: what is it all about new thread? should I create QThread or something? And what is it signal/slot emitting in a thread?
I've never used threads so it's totally new thing to me.
I can't test this myself, but would it not be enough to simply process the pending events within the loop?
That is:
def onRun(self):
self.playing = True
capture = cv2.VideoCapture(0)
while self.playing:
...
QtGui.qApp.processEvents()
time.sleep(0.02)
A solution is to use pyqtSignal. Here is an example:
import time
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QVBoxLayout
from PyQt5.QtGui import QPixmap, QImage
import sys
import threading
import numpy as np
class FakeCamera(QWidget):
"""Simulate a camera"""
image_taken = pyqtSignal(np.ndarray)
def __init__(self, callback, time_cost=0.01, image_shape=(256, 256)):
super(FakeCamera, self).__init__()
self.time_cost = time_cost # the time cost to take a frame, determine the frame rate
self.image_shape = image_shape
self.image_taken.connect(callback)
self._stop = threading.Event()
def start(self):
"""start a thread to take images"""
def run():
while not self._stop.is_set():
time.sleep(self.time_cost)
image = np.random.randint(0, 256, self.image_shape, np.uint8)
self.image_taken.emit(image)
threading.Thread(target=run).start()
def stop(self):
self._stop.set()
class WindowDemo(QWidget):
def __init__(self):
super(WindowDemo, self).__init__()
# label
self.label = QLabel()
self.label.setScaledContents(True)
self.label.setMinimumSize(1, 1)
# layout
vbox = QVBoxLayout()
vbox.addWidget(self.label)
self.setLayout(vbox)
# camera
self.camera = FakeCamera(self.show_image)
# start a thread to take images
self.camera.start()
#pyqtSlot(np.ndarray)
def show_image(self, image):
qimage = QImage(image, image.shape[1], image.shape[0], QImage.Format_Grayscale8)
self.label.setPixmap(QPixmap.fromImage(qimage))
def closeEvent(self, e):
self.camera.stop()
e.accept()
if __name__ == '__main__':
app = QApplication(sys.argv)
win = WindowDemo()
win.show()
sys.exit(app.exec_())