I'm trying to process an image sequence and make a video of the results using OpenCV and PyQt5. I've got some code that loops through a directory, reads in the images, and tries to display them on a QGraphicsView.
def on_start(self):
    for f in self.image_list:
        img = cv2.imread(f)
        img = cv2qimage(img, False)
        self.scene.set_qimage(img)
self.scene is an instance of a class that inherits from QGraphicsScene:
def set_qimage(self, qimage):
    self.pixmap = QPixmap.fromImage(qimage)
    self.addPixmap(self.pixmap)
The problem is that every time I call addPixmap() the image is just added on top of all the other images, and soon I run out of memory and everything crashes.
The current code doesn't include any of the processing steps; it just converts the numpy ndarray to a QImage and adds the QPixmap to the scene.
What is the proper way to update the QGraphicsScene so that I can stream a sequence of images?
Every time you call addPixmap() you create a new QGraphicsPixmapItem, consuming memory unnecessarily. The solution is to create a single QGraphicsPixmapItem and reuse it. In addition, the processing task can block the main thread, so you must move the heavy work to a worker thread and send each QImage back through a signal.
import sys

import cv2
from PyQt5.QtCore import QObject, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import (QApplication, QGraphicsPixmapItem, QGraphicsScene,
                             QGraphicsView, QVBoxLayout, QWidget)


class ProcessWorker(QObject):
    imageChanged = pyqtSignal(QImage)

    def doWork(self):
        # self.image_list and cv2qimage() are assumed to be provided as in the question
        for f in self.image_list:
            img = cv2.imread(f)
            img = cv2qimage(img, False)
            self.imageChanged.emit(img)
            QThread.msleep(1)


class Widget(QWidget):
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        lay = QVBoxLayout(self)
        gv = QGraphicsView()
        lay.addWidget(gv)
        scene = QGraphicsScene(self)
        gv.setScene(scene)
        self.pixmap_item = QGraphicsPixmapItem()
        scene.addItem(self.pixmap_item)

        self.workerThread = QThread()
        self.worker = ProcessWorker()
        self.worker.moveToThread(self.workerThread)
        self.workerThread.finished.connect(self.worker.deleteLater)
        self.workerThread.started.connect(self.worker.doWork)
        self.worker.imageChanged.connect(self.setImage)
        self.workerThread.start()

    @pyqtSlot(QImage)
    def setImage(self, image):
        pixmap = QPixmap.fromImage(image)
        self.pixmap_item.setPixmap(pixmap)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = Widget()
    w.show()
    sys.exit(app.exec_())
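Both snippets call a cv2qimage() helper that isn't shown in the post. A minimal sketch of what such a BGR-ndarray-to-QImage converter could look like (an assumption, not the original helper; the meaning of the original's second argument is unknown, so it is accepted but unused here):

def cv2qimage(frame, flag=False):
    # Assumes an 8-bit BGR OpenCV frame: convert to RGB and wrap it in a QImage.
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    h, w, _ = rgb.shape
    qimg = QImage(rgb.data, w, h, rgb.strides[0], QImage.Format_RGB888)
    # QImage does not own the numpy buffer, so return a deep copy to keep it valid.
    return qimg.copy()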
I'm developing an application with PyQt5 and QtDesigner. For one of the pages (page 2), I'm trying to embed a live video stream from a camera with OpenCV. The code has a thread running and I confirmed that it is sending good frames. The problem I'm facing is dynamically updating a QLabel with the OpenCV frame.
The program crashes when the following line of code (in the MainWindow class) is uncommented. Why?
self.ui.Worker1.ImageUpdate.connect(self.ui.ImageUpdateSlot)
Below is the main code
# by: reevve

# Import Modules
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import cv2

# Import UI files
from ui_main import Ui_MainWindow
from ui_splashscreen import Ui_SplashScreen

# Global Variables
counter = 0


class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow,self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        # add page btn click functionality
        self.ui.btn_page_1.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_1))
        self.ui.btn_page_2.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_2))
        self.ui.btn_page_3.clicked.connect(lambda: self.ui.stackedWidget.setCurrentWidget(self.ui.page_3))

        # set up the video feed
        self.ui.CancelBTN.clicked.connect(lambda: self.ui.CancelFeed)
        self.ui.Worker1 = Worker1()
        self.ui.Worker1.start()
        # the line below is causing the program to crash
        #self.ui.Worker1.ImageUpdate.connect(self.ui.ImageUpdateSlot)

        def ImageUpdateSlot(self, Image):
            print('recieve frames')
            self.ui.FeedLabel.setPixmap(QPixmap.fromImage(Image))

        def CancelFeed(self):
            print('cancel feed')
            self.ui.Worker1.stop()


class SplashScreen(QMainWindow):
    def __init__(self):
        super(SplashScreen,self).__init__()
        self.ui = Ui_SplashScreen()
        self.ui.setupUi(self)

        # remove title bar
        self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)

        # drop shadow effect
        self.shadow = QGraphicsDropShadowEffect(self)
        self.shadow.setBlurRadius(20)
        self.shadow.setXOffset(0)
        self.shadow.setYOffset(0)
        self.shadow.setColor(QColor(0, 0, 0, 60))
        self.ui.dropShadowFrame.setGraphicsEffect(self.shadow)

        # start timer
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.progress)
        # specify duration of launcher
        self.timer.start(15)

        # initial text
        self.ui.label_description.setText("<strong>UD ASAE</strong> Ground Station GUI")
        # change texts during loading process
        QtCore.QTimer.singleShot(1500, lambda: self.ui.label_description.setText("<strong>LOADING</strong> the good stuff"))
        QtCore.QTimer.singleShot(3000, lambda: self.ui.label_description.setText("<strong>GATHERING</strong> remaining braincells"))

        # show main window
        self.show()

    def progress(self):
        global counter
        self.ui.progressBar.setValue(counter)
        # close splash screen and open main gui
        if counter > 100:
            self.timer.stop()
            self.main = MainWindow()
            self.main.show()
            self.close()
        counter += 1


# FPV thread
class Worker1(QThread):
    ImageUpdate = pyqtSignal(QImage)

    def run(self):
        print('\nrun feed')
        self.ThreadActive = True
        Capture = cv2.VideoCapture(0)
        while self.ThreadActive:
            ret, frame = Capture.read()
            if ret:
                Image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                ConvertToQtFormat = QImage(Image.data, Image.shape[1], Image.shape[0], QImage.Format_RGB888)
                Pic = ConvertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.ImageUpdate.emit(Pic)
                print('send good frames')

    def stop(self):
        print('stop feed')
        self.ThreadActive = False
        self.quit()


def window():
    app = QApplication(sys.argv)
    win = SplashScreen()
    sys.exit(app.exec_())


window()
Again, the Worker1 thread seems to be sending good frames (confirmed with print statement), but I'm having trouble updating my QLabel (called FeedLabel) as the frames come in.
I did not attach the supporting .ui files to this post.
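(For readers who want to run the snippet without those files: a hypothetical, hand-written stand-in for Ui_MainWindow, containing only the widget names the code above references, could look like the sketch below. A similar stub would be needed for Ui_SplashScreen, which additionally uses dropShadowFrame, progressBar and label_description. This is not the asker's actual UI.)

# Hypothetical stand-in for ui_main.py: only the widgets referenced by the code above.
from PyQt5 import QtWidgets

class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        central = QtWidgets.QWidget(MainWindow)
        lay = QtWidgets.QVBoxLayout(central)

        # navigation buttons
        self.btn_page_1 = QtWidgets.QPushButton("Page 1", central)
        self.btn_page_2 = QtWidgets.QPushButton("Page 2", central)
        self.btn_page_3 = QtWidgets.QPushButton("Page 3", central)

        # stacked pages; page_2 hosts the video feed
        self.stackedWidget = QtWidgets.QStackedWidget(central)
        self.page_1 = QtWidgets.QWidget()
        self.page_2 = QtWidgets.QWidget()
        self.page_3 = QtWidgets.QWidget()
        for page in (self.page_1, self.page_2, self.page_3):
            self.stackedWidget.addWidget(page)

        page2_lay = QtWidgets.QVBoxLayout(self.page_2)
        self.FeedLabel = QtWidgets.QLabel("No feed yet", self.page_2)
        self.CancelBTN = QtWidgets.QPushButton("Cancel feed", self.page_2)
        page2_lay.addWidget(self.FeedLabel)
        page2_lay.addWidget(self.CancelBTN)

        for w in (self.btn_page_1, self.btn_page_2, self.btn_page_3, self.stackedWidget):
            lay.addWidget(w)
        MainWindow.setCentralWidget(central)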
I changed a bunch of things in your code, indicated in the comments. Essentially the methods of your program were defined in a strange way and you stored many things in self.ui instead of self.
I made myself a minimal UI to be able to test the changes, and it works (my test screenshot just shows the back of the sticky note I keep over my laptop's camera).
Here is your modified code:
class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()  # (optional) removed the args of `super`
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        # add page btn click functionality
        ...

        # set up the video feed
        self.ui.CancelBTN.clicked.connect(self.CancelFeed)  # removed `lambda` and `.ui`
        self.Worker1 = Worker1()  # (optional) removed `.ui` because your thread should be an attr of the program, not of the ui. This is a matter of preference though.
        self.Worker1.start()  # (optional) removed `.ui`
        self.Worker1.ImageUpdate.connect(self.ImageUpdateSlot)  # removed `.ui`

    @pyqtSlot(QImage)  # (optional) decorator to indicate what object the signal will provide.
    def ImageUpdateSlot(self, Image):  # Unindented by 4 spaces.
        print('recieve frames')
        self.ui.FeedLabel.setPixmap(QPixmap.fromImage(Image))

    def CancelFeed(self):  # Unindented by 4 spaces.
        print('cancel feed')
        self.Worker1.stop()  # (optional) removed `.ui`
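One further optional addition, not part of the answer above: stopping the worker when the main window closes avoids leaving the camera open and the "QThread: Destroyed while thread is still running" warning. A possible closeEvent for the MainWindow class, reusing the names above:

    def closeEvent(self, event):
        # ask the capture loop to stop and wait for Worker1.run() to return before closing
        self.Worker1.stop()
        self.Worker1.wait()
        super().closeEvent(event)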
# Importing necessary libraries, mainly the OpenCV, and PyQt libraries
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal


class ShowVideo(QtCore.QObject):
    # initiating the built in camera
    camera_port = -1
    camera = cv2.VideoCapture(camera_port)
    VideoSignal = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self, parent=None):
        super(ShowVideo, self).__init__(parent)

    @QtCore.pyqtSlot()
    def startVideo(self):
        run_video = True
        while run_video:
            ret, image = self.camera.read()
            color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            height, width, _ = color_swapped_image.shape

            qt_image = QtGui.QImage(color_swapped_image.data,
                                    width,
                                    height,
                                    color_swapped_image.strides[0],
                                    QtGui.QImage.Format_RGB888)

            pixmap = QtGui.QPixmap(qt_image)
            qt_image = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
            qt_image = QtGui.QImage(qt_image)
            self.VideoSignal.emit(qt_image)

    @QtCore.pyqtSlot()
    def makeScreenshot(self):
        #cv2.imwrite("test.jpg", self.image)
        print("Screenshot saved")
        #self.qt_image.save('test.jpg')


class ImageViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.image = QtGui.QImage()
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()

    def initUI(self):
        self.setWindowTitle('Test')

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("viewer dropped frame!")

        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)

    thread = QtCore.QThread()
    thread.start()
    vid = ShowVideo()
    vid.moveToThread(thread)

    image_viewer = ImageViewer()
    #image_viewer.resize(200,400)

    vid.VideoSignal.connect(image_viewer.setImage)

    # Button to start the videocapture:
    push_button = QtWidgets.QPushButton('Start')
    push_button.clicked.connect(vid.startVideo)

    push_button2 = QtWidgets.QPushButton('Screenshot')
    push_button2.clicked.connect(vid.makeScreenshot)

    vertical_layout = QtWidgets.QVBoxLayout()
    vertical_layout.addWidget(image_viewer)
    vertical_layout.addWidget(push_button)
    vertical_layout.addWidget(push_button2)

    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(vertical_layout)

    main_window = QtWidgets.QMainWindow()
    main_window.setCentralWidget(layout_widget)
    main_window.resize(640, 480)
    main_window.show()
    sys.exit(app.exec_())
This code shows video from the camera in an endless loop using OpenCV and PyQt5. But how can I take a screenshot without stopping the video? I think the loop needs to pause briefly, take the screenshot, and then resume.
You can use cv2.waitKey() for the same, as shown below:
while run_video:
    ret, image = self.camera.read()

    if cv2.waitKey(10) & 0xFF == ord('s'):
        cv2.imwrite("screenshot.jpg", image)
(I'm guessing that by the term "screenshot", you mean the camera frame, and not the image of the entire screen.)
When you press 's' on the keyboard, it'll perform imwrite.
Note that if you wish to save multiple images, you'd have to vary the filename. The above code will overwrite screenshot.jpg to save only the latest frame.
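One simple variation (a sketch, not part of the answer above) is to build the filename from the current time, so each press of 's' writes a new file:

import time

while run_video:
    ret, image = self.camera.read()

    if cv2.waitKey(10) & 0xFF == ord('s'):
        # e.g. screenshot_20240131-142501.jpg: one new file per keypress instead of overwriting
        filename = "screenshot_{}.jpg".format(time.strftime("%Y%m%d-%H%M%S"))
        cv2.imwrite(filename, image)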
I'm attempting to get webcam data from a camera using OpenCV and then display it in a PyQt GUI. I have done this before with Tkinter by hooking into the Tkinter main window loop with the .after function. However, PyQt doesn't seem to offer the same facility, and in order to have another loop running alongside the application you need to use a separate thread. So this is what I have come up with:
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time


class VideoCapture(QtGui.QWidget):
    def __init__(self, parent = None):
        QtGui.QWidget().__init__()
        self.camera = None
        self.camera = cv2.VideoCapture(0)
        b, self.frame = self.camera.read()
        self.label = QtGui.QLabel()
        self.workThread = WorkThread(self)
        self.connect(self.workThread, QtCore.SIGNAL('update_Camera'), self.draw)
        self.workThread.start()

    def closeEvent(self, event):
        self.workThread.stop()

    def draw(self):
        print "I should Redraw"
        height, width, channel = self.frame.shape
        bpl = 3 * width
        self.qImg = QImage(self.frame.data, width, height, bpl, QImage.Format_RGB888)
        pix = QtGui.QPixmap(self.qImg)
        self.label.setPixmap(pix)
        self.label.show()


class WorkThread(QtCore.QThread):
    def __init__(self, parent):
        QtCore.QThread.__init__(self)
        self.parent = parent

    def __del__(self):
        self.wait()

    def run(self):
        while True:
            self.emit(QtCore.SIGNAL('update_Camera'), "_")
        self.terminate()


app = QtGui.QApplication(sys.argv)
test = VideoCapture()
test.draw()
sys.exit(app.exec_())
My idea was simple: I'll just create a thread with a loop which emits a signal telling the main application to update. (Obviously I don't want a thread with a while True loop; I just used it for convenience and planned on replacing it once I could guarantee this idea would work.) However, the signal doesn't appear to be registering, because the draw() function is never called. Any idea what I'm doing wrong?
I don't know anything about OpenCV, so I can only guess at the problems.
My guess is that you are only reading the video data once. If it is a video stream then you have to continually read and interpret the data.
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time


class VideoCapture(QtGui.QWidget):
    update_video = QtCore.pyqtSignal()

    def __init__(self, parent = None):
        QtGui.QWidget.__init__(self, parent)  # initialise the QWidget base class on *self*
        self.camera = cv2.VideoCapture(0)

        self.label = QtGui.QLabel()
        layout = QtGui.QHBoxLayout()
        self.setLayout(layout)
        layout.addWidget(self.label)

        # Create the worker Thread
        self.workThread = WorkThread(self.readVideo)
        self.update_video.connect(self.draw)

    def start(self):
        self.workThread.start()

    def stop(self):
        self.workThread.alive = False
        self.workThread.wait()  # wait for run() to finish

    def readVideo(self):
        """Note this method is executed in a thread. No drawing can happen in a thread. Emit a signal to draw items."""
        b, self.frame = self.camera.read()
        self.update_video.emit()  # Signals are slow this may happen too fast

    def closeEvent(self, event):
        self.stop()
        return QtGui.QWidget.closeEvent(self, event)
        #self.workThread.alive = False
        #self.workThread.stop()

    def draw(self):
        print "I should Redraw"
        height, width, channel = self.frame.shape
        bpl = 3 * width
        qImg = QImage(self.frame.data, width, height, bpl, QImage.Format_RGB888)
        pix = QtGui.QPixmap(qImg)
        self.label.setPixmap(pix)
        # self.label.show()  # The label is now a part of the widget layout


class WorkThread(QtCore.QThread):
    def __init__(self, target=None, args=(), kwargs={}):
        QtCore.QThread.__init__(self)
        # I don't know how Qt's threads work, so I am treating it like a python thread
        self.target = target
        self.args = args
        self.kwargs = kwargs
        self.alive = True

    def run(self):
        while self.alive:
            self.target(*self.args, **self.kwargs)


app = QtGui.QApplication(sys.argv)
test = VideoCapture()
test.show()  # show the widget so the embedded label is visible
test.start()
sys.exit(app.exec_())
Since you are only updating so many times per second you could probably use a timer for this instead of a thread. The timer is probably easier and safer to use.
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time


class VideoCapture(QtGui.QWidget):
    def __init__(self, parent = None):
        QtGui.QWidget.__init__(self, parent)  # initialise the QWidget base class on *self*
        self.camera = cv2.VideoCapture(0)

        self.label = QtGui.QLabel()
        layout = QtGui.QHBoxLayout()
        self.setLayout(layout)
        layout.addWidget(self.label)

        # Create the update timer
        self.timer = QtCore.QTimer()
        self.timer.setInterval(300)
        self.timer.timeout.connect(self.draw_camera)

    def start(self):
        self.timer.start()

    def stop(self):
        self.timer.stop()

    def draw_camera(self):
        """You can draw in a timer, so just read the data and draw however fast you want."""
        print "I should Redraw"
        b, frame = self.camera.read()
        height, width, channel = frame.shape
        bpl = 3 * width
        qImg = QImage(frame.data, width, height, bpl, QImage.Format_RGB888)
        pix = QtGui.QPixmap(qImg)
        self.label.setPixmap(pix)

    def closeEvent(self, event):
        self.stop()
        return QtGui.QWidget.closeEvent(self, event)


app = QtGui.QApplication(sys.argv)
test = VideoCapture()
test.show()  # show the widget so the embedded label is visible
test.start()
sys.exit(app.exec_())
I've been working on things very similar to your problem. I modified your code and tested it on my Windows PC.
The key point here is that you have to put the cv2 camera object in the WorkThread, read each frame in the main while loop in the run() method, and finally emit the image to the QWidget object to display it. In this way you get a continuous iteration of image capturing and display.
import sys
import cv2
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import QImage
import time


class VideoCapture(QtGui.QWidget):
    def __init__(self, parent = None):
        # Use super() to call __init__() methods in the parent classes
        super(VideoCapture, self).__init__()

        # The instantiated QLabel object should belong to the 'self' QWidget object
        self.label = QtGui.QLabel(self)  # <- So put 'self' in the parenthesis

        # Set the QLabel geometry to fit the image dimension (640, 480)
        # The top left corner (0, 0) is the position within the QWidget main window
        self.label.setGeometry(0, 0, 640, 480)

        # Instantiate a QThread object. No need to pass in the parent QWidget object.
        self.workThread = WorkThread()

        # Connect signal from self.workThread to the slot self.draw
        self.connect(self.workThread, QtCore.SIGNAL('update_Camera'), self.draw)

        self.workThread.start()

    def closeEvent(self, event):
        self.workThread.stop()
        event.accept()

    def draw(self, img):
        print "I should Redraw"
        # img arrives in OpenCV's BGR channel order; convert it (or call rgbSwapped()
        # on the QImage) if the red and blue channels appear swapped on screen
        height, width, channel = img.shape
        bpl = 3 * width
        self.qImg = QImage(img, width, height, bpl, QImage.Format_RGB888)
        pix = QtGui.QPixmap(self.qImg)
        self.label.setPixmap(pix)
        self.label.show()


class WorkThread(QtCore.QThread):
    def __init__(self):
        # Use super() to call __init__() methods in the parent classes
        super(WorkThread, self).__init__()

        # Place the camera object in the WorkThread
        self.camera = cv2.VideoCapture(0)

        # The boolean variable to break the while loop in self.run() method
        self.running = True

    def run(self):
        while self.running:
            # Read one frame
            b, self.frame = self.camera.read()

            # Emit self.frame to the QWidget object
            self.emit(QtCore.SIGNAL('update_Camera'), self.frame)

    def stop(self):
        # Terminate the while loop in self.run() method
        self.running = False


app = QtGui.QApplication(sys.argv)
video_capture_widget = VideoCapture()
video_capture_widget.show()
sys.exit(app.exec_())
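A side note, not part of the answer above: the string-based QtCore.SIGNAL syntax only exists in PyQt4. A new-style equivalent of the worker, which also works under PyQt5, could be sketched as:

from PyQt4 import QtCore
import cv2

class WorkThread(QtCore.QThread):
    # new-style signal declared as a class attribute; a numpy frame can be carried as `object`
    update_Camera = QtCore.pyqtSignal(object)

    def __init__(self):
        super(WorkThread, self).__init__()
        self.camera = cv2.VideoCapture(0)
        self.running = True

    def run(self):
        while self.running:
            ok, frame = self.camera.read()
            if ok:
                self.update_Camera.emit(frame)

    def stop(self):
        self.running = False

# the connection in VideoCapture.__init__ then reads:
#     self.workThread.update_Camera.connect(self.draw)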
I'm integrating openCV 3.0 with Qt5 in Python 3.4.3 using pyqt5. I've been trying to build an app to process videos from files, but ran into some trouble with pyqt. Specifically, I will be loading videos through a file dialog multiple times and these videos will not be the same size. Therefore, I want the main window in my app to wrap/expand to the size of the video being played.
Below is a simplified version of my code with the 3 core classes for showing the video. One for the Main Window, one for a frame viewer widget to show each video frame in the GUI, and one for a video processor to read and process the video through opencv, transform it to a QImage then send it to the viewer.
# imports used by the snippet below
import sys

import cv2
from PyQt5 import QtCore, QtGui, QtWidgets


class videoProcessor(QtCore.QObject):
    filename = None
    cap = None
    videoSignal = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self):
        super().__init__()
        self.filename = "test.mp4"

    @QtCore.pyqtSlot()
    def runVideoProcessor(self):
        self.cap = cv2.VideoCapture(self.filename)
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if ret:
                outimg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                imgh, imgw, bytesPerComponent = outimg.shape
                bytesPerLine = bytesPerComponent * imgw
                qtimg = QtGui.QImage(outimg.data, imgw, imgh, bytesPerLine, QtGui.QImage.Format_RGB888)
                self.videoSignal.emit(qtimg)
            else:
                break
        self.cap.release()


class frameViewer(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        self.image = QtGui.QImage()
        self.imageAvailable = False

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()
        painter.end()

    @QtCore.pyqtSlot(QtGui.QImage)
    def setFrame(self, image):
        self.image = image
        self.setFixedSize(self.image.size())
        self.repaint()


class mainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.fview = frameViewer()
        self.vproc = videoProcessor()
        self.vproc.videoSignal.connect(self.fview.setFrame)

        self.startButton = QtWidgets.QPushButton("Start")
        self.startButton.clicked.connect(self.vproc.runVideoProcessor)

        self.mainLayout = QtWidgets.QVBoxLayout()
        self.mainLayout.addWidget(self.fview)
        self.mainLayout.addWidget(self.startButton)

        self.mainWidget = QtWidgets.QWidget()
        self.mainWidget.setLayout(self.mainLayout)
        self.mainWidget.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        self.setCentralWidget(self.mainWidget)

        self.statusBar().showMessage('Ready')
        self.setGeometry(50, 50, 300, 300)
        self.setWindowTitle('OpenCV PyQt Test')
        self.show()


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    mw = mainWindow()
    sys.exit(app.exec_())
So far, the program can run videos but there are two main problems:
1. The window does not adjust to the size of the video frame until the end of the video. However, any repeat playback of the same video is shown at the correct size.
2. If I don't set self.image = QtGui.QImage() in paintEvent after drawing the image, the program crashes. However, if I put that line in, the window goes blank at the end of the video, because an empty QImage is drawn in the last frame's place whenever the window is updated.
Any ideas on how to solve these issues? Thank you.
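For context on point 2: a QImage constructed from a numpy buffer does not copy the pixel data, so once runVideoProcessor() moves on and outimg is reassigned, the stored image points at freed memory, which is consistent with the crash described. Copying the image in the slot is one common workaround; a possible setFrame(), reusing the names from the code above:

    @QtCore.pyqtSlot(QtGui.QImage)
    def setFrame(self, image):
        # copy() detaches the QImage from the numpy buffer created in runVideoProcessor(),
        # so the stored image stays valid after the loop reuses the original array
        self.image = image.copy()
        self.setFixedSize(self.image.size())
        self.repaint()

With the copy in place, the self.image = QtGui.QImage() line in paintEvent() should no longer be needed, which also avoids the blank window at the end of the video. The delayed resize in point 1 is likely a separate effect of runVideoProcessor() looping in the GUI thread, so the event loop cannot process resize events until the video finishes.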
I have made an OpenCV project which processes the input stream from a video and displays the processed output. I have used PyQt buttons to switch from one output to another. My PyQt window covers almost the entire screen, and when I click on my buttons, the OpenCV window remains behind the PyQt window. Also, I have made the main PyQt window my parent window. How can I bring the OpenCV window on top of the PyQt window? I searched for cvGetWindowHandle(), but didn't find its implementation for Python.
I have used PyQt4 and opencv2, and the PyQt window has not been designed using Qt Designer.
You can always wrap the OpenCV capture in a Qt widget...
import cv2
from PyQt4 import QtCore, QtGui


class QtCapture(QtGui.QWidget):
    def __init__(self, *args):
        super(QtGui.QWidget, self).__init__()

        self.fps = 24
        self.cap = cv2.VideoCapture(*args)

        self.video_frame = QtGui.QLabel()
        lay = QtGui.QVBoxLayout()
        lay.setMargin(0)
        lay.addWidget(self.video_frame)
        self.setLayout(lay)

    def setFPS(self, fps):
        self.fps = fps

    def nextFrameSlot(self):
        ret, frame = self.cap.read()
        # OpenCV yields frames in BGR format
        frame = cv2.cvtColor(frame, cv2.cv.CV_BGR2RGB)
        img = QtGui.QImage(frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)
        pix = QtGui.QPixmap.fromImage(img)
        self.video_frame.setPixmap(pix)

    def start(self):
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.nextFrameSlot)
        self.timer.start(1000./self.fps)

    def stop(self):
        self.timer.stop()

    def deleteLater(self):
        self.cap.release()
        super(QtGui.QWidget, self).deleteLater()
...and do with it whatever you will:
class ControlWindow(QtGui.QWidget):
    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.capture = None

        self.start_button = QtGui.QPushButton('Start')
        self.start_button.clicked.connect(self.startCapture)
        self.quit_button = QtGui.QPushButton('End')
        self.quit_button.clicked.connect(self.endCapture)
        self.end_button = QtGui.QPushButton('Stop')

        vbox = QtGui.QVBoxLayout(self)
        vbox.addWidget(self.start_button)
        vbox.addWidget(self.end_button)
        vbox.addWidget(self.quit_button)
        self.setLayout(vbox)
        self.setWindowTitle('Control Panel')
        self.setGeometry(100, 100, 200, 200)
        self.show()

    def startCapture(self):
        if not self.capture:
            self.capture = QtCapture(0)
            self.end_button.clicked.connect(self.capture.stop)
            self.capture.setFPS(30)
            self.capture.setParent(self)
            self.capture.setWindowFlags(QtCore.Qt.Tool)
        self.capture.start()
        self.capture.show()

    def endCapture(self):
        self.capture.deleteLater()
        self.capture = None


if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    window = ControlWindow()
    sys.exit(app.exec_())