I'm using OpenCV for some image processing and want to create a transparent overlay on my screen using PyQt widgets. Below is a basic example that sends a frame from OpenCV to PyQt through a signal/slot and displays it on the window. The issue is that I can't get a transparent background with this method; instead, the background is just black:
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np
import os
class VideoThread(QThread):
change_pixmap_signal = pyqtSignal(np.ndarray)
def run(self):
img = np.zeros((500, 500, 4), dtype=np.uint8)
cv2.rectangle(img, (0, 0), (200, 200), (0, 0, 255), 2)
        while True:
self.change_pixmap_signal.emit(img)
class App(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Qt live label demo")
self.disply_width = 1920
self.display_height = 1080
# create the label that holds the image
self.image_label = QLabel(self)
self.image_label.resize(self.disply_width, self.display_height)
# create a text label
self.textLabel = QLabel('Webcam')
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setStyleSheet("background-color:transparent;")
# create a vertical box layout and add the two labels
vbox = QVBoxLayout()
vbox.addWidget(self.image_label)
vbox.addWidget(self.textLabel)
# set the vbox layout as the widgets layout
self.setLayout(vbox)
# create the video capture thread
self.thread = VideoThread()
# connect its signal to the update_image slot
self.thread.change_pixmap_signal.connect(self.update_image)
# start the thread
self.thread.start()
    @pyqtSlot(np.ndarray)
def update_image(self, cv_img):
"""Updates the image_label with a new opencv image"""
qt_img = self.convert_cv_qt(cv_img)
self.image_label.setPixmap(qt_img)
def convert_cv_qt(self, cv_img):
"""Convert from an opencv image to QPixmap"""
rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
h, w, ch = rgb_image.shape
bytes_per_line = ch * w
convert_to_Qt_format = QtGui.QImage(
rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
p = convert_to_Qt_format.scaled(
self.disply_width, self.display_height, Qt.KeepAspectRatio)
return QPixmap.fromImage(p)
if __name__ == "__main__":
app = QApplication(sys.argv)
a = App()
a.show()
sys.exit(app.exec_())
If it isn't possible to send a frame with an alpha channel to PyQt, I was wondering if it's possible to just send the rectangle's 4-point location and use PyQt's painting to draw a rectangle on screen. I think this would require a widget.update(), but I'm not sure where to invoke that.
If you are going to use transparency, then both the color and the image must be 4-channel; in your case you are passing a 3-channel color to the cv2.rectangle method and then using a 3-channel format in the QImage conversion. cv2.rectangle also returns the drawn image, so you can emit its return value directly.
def run(self):
img = np.zeros((500, 500, 4), dtype=np.uint8)
output = cv2.rectangle(img, (0, 0), (200, 200), (0, 0, 255, 255), 2)
while True:
self.change_pixmap_signal.emit(output)
QThread.msleep(1)
def convert_cv_qt(self, cv_img):
h, w, ch = cv_img.shape
bytes_per_line = ch * w
convert_to_Qt_format = QtGui.QImage(
cv_img.data, w, h, bytes_per_line, QtGui.QImage.Format_RGBA8888
)
p = convert_to_Qt_format.scaled(
self.disply_width, self.display_height, Qt.KeepAspectRatio
)
return QPixmap.fromImage(p)
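For the second part of the question -- emitting only the rectangle's corner points and letting Qt draw it -- a minimal sketch could look like the one below, using the same imports as in the question. It assumes the thread's signal is changed to something like rect_signal = pyqtSignal(tuple) that emits (x1, y1, x2, y2); the names rect_signal, update_rect and rect_points are illustrative, not from the original code. Calling self.update() inside the slot is what schedules the repaint, so paintEvent is invoked by Qt and never called directly:

class OverlayWidget(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.rect_points = None  # latest (x1, y1, x2, y2) received from the thread

    @pyqtSlot(tuple)
    def update_rect(self, pts):
        # store the newest rectangle and schedule a repaint
        self.rect_points = pts
        self.update()  # Qt will call paintEvent when it repaints the widget

    def paintEvent(self, event):
        if self.rect_points is None:
            return
        x1, y1, x2, y2 = self.rect_points
        painter = QtGui.QPainter(self)
        painter.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0), 2))
        painter.drawRect(QtCore.QRect(QtCore.QPoint(x1, y1), QtCore.QPoint(x2, y2)))
        painter.end()

With this approach no alpha-channel conversion is needed at all, because the widget background is already translucent and only the rectangle is painted on top of it.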
Related
In a nutshell, I am currently trying to load a picture and view it using PyQt5, via QtWidgets.QGraphicsScene.
However, after loading the picture, I run some opencv2 commands (Canny, Gaussian) to get some regions of interest. These regions of interest are simply X-Y coordinates. This then breaks the displayed picture -- and I get lots of blue screens.
The picture and the opencv2 operations are shown below in a working example:
macOS
Python 3.7
PyQt5
OpenCV
import sys
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PIL.ImageQt import ImageQt
from PIL import Image
import cv2
# Convert an opencv image to QPixmap
def convertCvImage2QtImage(cv_img):
rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
PIL_image = Image.fromarray(rgb_image).convert('RGB')
return QtGui.QPixmap.fromImage(ImageQt(PIL_image))
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel
from PyQt5.QtCore import Qt, QMimeData
from PyQt5.QtGui import QDrag, QPixmap
import numpy as np
class Ui_Form(object):
def setupUi(self, Form):
if not Form.objectName():
Form.setObjectName(u"Form")
Form.resize(900, 712)
Form.setAcceptDrops(True)
self.frame_graphics_view = QGraphicsView(Form)
self.frame_graphics_view.setObjectName(u"frame_graphics_view")
self.frame_graphics_view.setEnabled(True)
self.frame_graphics_view.setGeometry(QRect(10, 10, 700, 300))
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_graphics_view.sizePolicy().hasHeightForWidth())
self.frame_graphics_view.setSizePolicy(sizePolicy)
self.frame_graphics_view.setMouseTracking(True)
self.frame_graphics_view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.frame_graphics_view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.frame_graphics_view.setSizeAdjustPolicy(QAbstractScrollArea.AdjustIgnored)
self.frame_graphics_view.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignTop)
self.frame_graphics_view.setRubberBandSelectionMode(Qt.ContainsItemBoundingRect)
self.refresh_frame_button = QPushButton(Form)
self.refresh_frame_button.setObjectName(u"refresh_frame_button")
self.refresh_frame_button.setGeometry(QRect(720, 10, 80, 26))
self.show_roi_checkbox = QCheckBox(Form)
self.show_roi_checkbox.setObjectName(u"show_roi_checkbox")
self.show_roi_checkbox.setGeometry(QRect(720, 50, 85, 21))
self.retranslateUi(Form)
QMetaObject.connectSlotsByName(Form)
# setupUi
def retranslateUi(self, Form):
Form.setWindowTitle(QCoreApplication.translate("Form", u"Form", None))
self.refresh_frame_button.setText(QCoreApplication.translate("Form", u"refresh", None))
self.show_roi_checkbox.setText(QCoreApplication.translate("Form", u"Show Roi", None))
class MyScene(QtWidgets.QGraphicsScene):
def __init__(self, parent):
super(MyScene, self).__init__()
self.parent = parent
self.red_color = QColor(255, 0, 0, 180)
self.green_color = QColor(255, 255, 0, 180)
self.blue_color = QColor(255, 0, 255, 180)
self.greenBrush = QBrush(self.green_color)
self.pen = QPen(self.red_color)
self.current_ellipses = []
def add_ellipses(self, pts):
to_add = []
for (x,y) in pts:
ellipse = QGraphicsEllipseItem()
ellipse.setPen(self.pen)
ellipse.setBrush(self.greenBrush)
ellipse.setRect(x, y, 10, 10)
ellipse.setFlag(QGraphicsItem.ItemIsMovable)
ellipse.setFlag(QGraphicsItem.ItemIsFocusable)
ellipse.setFlag(QGraphicsItem.ItemIsSelectable)
to_add.append(ellipse)
for ellipse in to_add:
self.addItem(ellipse)
self.current_ellipses+=to_add
def remove_current_ellipses(self):
for item in self.current_ellipses:
self.removeItem(item)
self.current_ellipses = []
class ImageLoader(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.ui.refresh_frame_button.clicked.connect(self.refresh_frame_only)
self.ui.show_roi_checkbox.toggled.connect(self.show_roi)
self.scene = QtWidgets.QGraphicsScene(self)
self.scene = MyScene(self)
self.ui.frame_graphics_view.setScene(self.scene)
self.pixmap_item = self.scene.addPixmap(QtGui.QPixmap())
# unclear why the UI is not setting this.. it should have.
self.ui.frame_graphics_view.setDragMode(QGraphicsView.RubberBandDrag)
self.go_to_image()
def go_to_image(self):
# get image
img = self.get_image_to_show()
if img is None:
return
# convert image
pixmap = convertCvImage2QtImage(img)
if pixmap.isNull():
return
# show image and determine if show ROI
self.pixmap_item.setPixmap(pixmap)
self.show_roi()
def refresh_frame_only(self):
# just refreshing, sometimes it fixes the underlying picture!
self.go_to_image()
def show_roi(self):
'''Show region of interest in the picture. This may require calculations.
Lots of testing has shown that the more opencv2 commands that you call,
the more likely it is to get the picture to "break".
'''
self.scene.remove_current_ellipses()
if self.ui.show_roi_checkbox.isChecked():
image = self.get_image_to_show() # the image
def calculate_roi():
# run some opencv2 operations to find the ROI
mask = cv2.Canny(image, 150, 250, 3)
mask = cv2.bitwise_not(mask)
image[mask!=0] = np.array((255,255,255))
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Performing OTSU threshold
ret, image_thresh = cv2.threshold(gray_image, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
                # we ignore the outputs
ellipses = list([i, i] for i in [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 120, 130, 140, 150, 160, 170])
return ellipses
# as we decrease the range to say 1, it significantly decreases the chance of breaking the picture.
for i in range(5):
ellipses = calculate_roi()
self.scene.add_ellipses(ellipses)
else:
self.scene.remove_current_ellipses()
def get_image_to_show(self):
'''LOAD A PICTURE'''
image = cv2.imread('ducks.png')
return image
#
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
imageLoader = ImageLoader()
imageLoader.show()
sys.exit(app.exec_())
To reproduce the issue:
Run the app.
Click the checkbox so that it is toggled on.
Click Refresh many times. It can break after 1 click or after 5-10 clicks.
To "fix", untoggle the checkbox and hit refresh.
Things I have tried:
Removing the self.scene.add_ellipses call
-- this removes the yellow dots, but the blue screens still occur.
Decreasing the number of opencv2 commands; this definitely helps.
Replacing the opencv2 commands with something like for i in range(10000000): print(i); this runs slowly... but nothing breaks.
Very confused! Any ideas are helpful.
Example screenshots below:
Loading pics. Family of ducks! Loaded yellow dots (note the toggle)
Clicking refresh a few times: blue screens. Note the partial loading of the ducks!
I am trying to make a program that can pixelate images. When an image is uploaded, it is supposed to go through the color_quantization function and then the pixelate function. But when I run the program, the image only goes through the pixelate function.
from PyQt5 import QtWidgets as Qtw, QtGui, QtCore
from PyQt5.QtWidgets import QFileDialog
from pixel import Ui_Form
from PyQt5.QtGui import QImage
import cv2
import numpy as np
class Worker(QtCore.QObject):
hash = QtCore.pyqtSignal()
def color_quantization(self, img, k=3):
data = np.float32(img).reshape((-1, 3))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)
ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
result = center[label.flatten()]
        result = result.reshape(img.shape)
for x in result:
self.hash.emit(x)
return x
class Pixel(Qtw.QWidget):
hash_requested = QtCore.pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.ui.upload.clicked.connect(self.upload_image)
self.worker = Worker()
self.worker_thread = QtCore.QThread()
self.worker.hash.connect(self.upload_image)
self.hash_requested.connect(self.worker.color_quantization)
self.worker.moveToThread(self.worker_thread)
self.worker_thread.start()
def upload_image(self):
filename = QFileDialog.getOpenFileName(filter="Image(*.*)")[0]
img = cv2.imread(filename)
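        # note: nothing in this slot emits hash_requested, so the connected color_quantization never runs; the image goes straight to pixelate()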
self.pixelate(img)
def pixelate(self, image):
height, width = image.shape[:2]
w, h = (128, 128)
temp = cv2.resize(image, (w, h), interpolation=cv2.INTER_LINEAR)
output = cv2.resize(temp, (width, height), interpolation=cv2.INTER_NEAREST)
frame = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
self.ui.label.setPixmap(QtGui.QPixmap.fromImage(image))
if __name__ == '__main__':
app = Qtw.QApplication([])
win = Pixel()
win.show()
app.exec_()
I have a custom LED indicator widget (from GitHub), which looks like this on a QMainWindow:
I have an image which I can put in the background of the QMainWindow like this:
Now my question is: how do I put the LED indicator widget OVER the background image (I want to put one in each of the boxes)? The LED indicator widget is not showing up at all when I put a background image in the program.
This is the code:
import sys
from LedIndicatorWidget import *
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QCheckBox, QWidget
from PyQt5.QtCore import QSize
class ExampleWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setMinimumSize(QSize(300, 300))
self.setWindowTitle("Checkbox")
oImage = QImage("map.png")
sImage = oImage.scaled(QSize(800,277))
palette = QPalette()
palette.setBrush(QPalette.Window, QBrush(sImage))
self.setPalette(palette)
self.show()
self.led = LedIndicator(self)
self.led.setDisabled(True)
self.led.move(10,20)
self.led.resize(100,100)
self.led1 = LedIndicator(self)
self.led1.setDisabled(True)
self.led1.move(150,20)
self.led1.resize(100,100)
self.led2 = LedIndicator(self)
self.led2.setDisabled(True)
self.led2.move(300,20)
self.led2.resize(100,100)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.onPressButton)
self.timer.start()
def onPressButton(self):
self.led.setChecked(not self.led.isChecked())
self.timer.stop()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = ExampleWindow()
mainWin.show()
sys.exit( app.exec_() )
I've used QPalette for the background image; if you have a better way to add the image and make it work, feel free to suggest it, because I couldn't find one.
To use the LED indicator widget, make a file "LedIndicatorWidget.py" and copy this code:
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class LedIndicator(QAbstractButton):
scaledSize = 1000.0
def __init__(self, parent=None):
QAbstractButton.__init__(self, parent)
self.setMinimumSize(24, 24)
self.setCheckable(True)
# Green
self.on_color_1 = QColor(0, 255, 0)
self.on_color_2 = QColor(0, 192, 0)
self.off_color_1 = QColor(0, 28, 0)
self.off_color_2 = QColor(0, 128, 0)
def resizeEvent(self, QResizeEvent):
self.update()
def paintEvent(self, QPaintEvent):
realSize = min(self.width(), self.height())
painter = QPainter(self)
pen = QPen(Qt.black)
pen.setWidth(1)
painter.setRenderHint(QPainter.Antialiasing)
painter.translate(self.width() / 2, self.height() / 2)
painter.scale(realSize / self.scaledSize, realSize / self.scaledSize)
gradient = QRadialGradient(QPointF(-500, -500), 1500, QPointF(-500, -500))
gradient.setColorAt(0, QColor(224, 224, 224))
gradient.setColorAt(1, QColor(28, 28, 28))
painter.setPen(pen)
painter.setBrush(QBrush(gradient))
painter.drawEllipse(QPointF(0, 0), 500, 500)
gradient = QRadialGradient(QPointF(500, 500), 1500, QPointF(500, 500))
gradient.setColorAt(0, QColor(224, 224, 224))
gradient.setColorAt(1, QColor(28, 28, 28))
painter.setPen(pen)
painter.setBrush(QBrush(gradient))
painter.drawEllipse(QPointF(0, 0), 450, 450)
painter.setPen(pen)
if self.isChecked():
gradient = QRadialGradient(QPointF(-500, -500), 1500, QPointF(-500, -500))
gradient.setColorAt(0, self.on_color_1)
gradient.setColorAt(1, self.on_color_2)
else:
gradient = QRadialGradient(QPointF(500, 500), 1500, QPointF(500, 500))
gradient.setColorAt(0, self.off_color_1)
gradient.setColorAt(1, self.off_color_2)
painter.setBrush(gradient)
painter.drawEllipse(QPointF(0, 0), 400, 400)
    @pyqtProperty(QColor)
def onColor1(self):
return self.on_color_1
    @onColor1.setter
def onColor1(self, color):
self.on_color_1 = color
    @pyqtProperty(QColor)
def onColor2(self):
return self.on_color_2
    @onColor2.setter
def onColor2(self, color):
self.on_color_2 = color
    @pyqtProperty(QColor)
def offColor1(self):
return self.off_color_1
    @offColor1.setter
def offColor1(self, color):
self.off_color_1 = color
    @pyqtProperty(QColor)
def offColor2(self):
return self.off_color_2
    @offColor2.setter
def offColor2(self, color):
self.off_color_2 = color
The issue is not related to the background: the LED widgets are there. The problem is that adding widgets to a parent that is already shown (and without using a layout manager) does not make them visible; they must be explicitly shown by calling show() or setVisible(True).
You can see the difference if you remove the self.show() line after setting the palette (but leave the mainWin.show() at the end): in that case, the LEDs become visible automatically.
The solution is to either show the child widgets explicitly, or call show()/setVisible(True) on the parent after adding them.
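A minimal sketch of the first option, assuming the rest of __init__ stays as in the question (only the lines for one LED are shown; the same applies to led1 and led2):

        self.led = LedIndicator(self)
        self.led.setDisabled(True)
        self.led.move(10, 20)
        self.led.resize(100, 100)
        self.led.show()  # needed because the parent window has already been shown

Alternatively, move the self.show() call to the end of __init__ (or drop it entirely and rely on the mainWin.show() at the bottom of the script), so the LEDs are created before the window is first shown.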
I've been trying to create an interactive OpenCV image viewer where I can view the image immediately after a manipulation. Say, for example, I'm applying a binary thresholding operation on an image and changing the threshold value from a PyQt slider; I want to see each thresholded image in the image viewer.
I've created a very basic program for this purpose using Python OpenCV and the PyQt5 library, but the image is not being updated in the QLabel.
Below is my code:
import sys
import cv2
import numpy as np
import imutils
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QLCDNumber, QSlider, QLabel, QCheckBox
from PyQt5.QtGui import QPixmap, QImage
class MyWindow(QWidget):
def __init__(self):
super().__init__()
self.imglabel = QLabel(self)
self.imglabel.setFixedSize(1200, 900)
ori_img = cv2.imread("../resources/omr-1-ans-ori.png", cv2.IMREAD_COLOR)
ori_img = imutils.resize(ori_img, height=960)
self.gray_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
self.gray_img_c = ori_img
self.thresh = False
self.thresh_karnel_size = 11
self.init_ui()
def init_ui(self):
# lcd = QLCDNumber(self)
hbox1 = QHBoxLayout()
cb_thresh = QCheckBox('thresh', self)
cb_thresh.setChecked(False)
cb_thresh.stateChanged.connect(self.changeTitleThresh)
hbox1.addWidget(cb_thresh)
thresh_slider = QSlider(Qt.Horizontal, self)
thresh_slider.setFocusPolicy(Qt.StrongFocus)
thresh_slider.setTickPosition(QSlider.TicksBothSides)
thresh_slider.setTickInterval(1)
thresh_slider.setSingleStep(1)
thresh_slider.setPageStep(1)
thresh_slider.setMinimum(1)
thresh_slider.setMaximum(127)
thresh_slider.valueChanged[int].connect(self.threshSliderChangeValue)
vbox = QVBoxLayout()
vbox.addLayout(hbox1)
vbox.addWidget(thresh_slider)
vbox.addWidget(self.imglabel)
self.setLayout(vbox)
self.setGeometry(50, 50, 1200, 768)
self.setWindowTitle('Learning PyQT5')
self.updateImage()
self.show()
def changeTitleThresh(self, state):
# print("thresh checkbox: ", state, Qt.Checked)
if state == Qt.Checked:
self.thresh = True
else:
self.thresh = False
def threshSliderChangeValue(self, value):
ksize = (value * 2) + 1
print("ksize: ", ksize)
if ksize > 1 and ksize % 2 != 0 and self.thresh:
self.thresh_karnel_size = ksize
self.gray_img = cv2.threshold(self.gray_img, self.thresh_karnel_size, 255, cv2.THRESH_BINARY)[1]
self.gray_img_c = cv2.cvtColor(self.gray_img.copy(), cv2.COLOR_GRAY2BGR)
self.updateImage()
def updateImage(self):
height, width, channel = self.gray_img_c.shape
bytesPerLine = 3 * width
qImg = QImage(self.gray_img_c.data, width, height, bytesPerLine, QImage.Format_RGB888)
pixMap = QPixmap.fromImage(qImg)
pixMap = pixMap.scaled(700, 500, Qt.KeepAspectRatio)
self.imglabel.setPixmap(pixMap)
self.imglabel.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyWindow()
sys.exit(app.exec_())
I've tried every solution I found through a Google search, but could not fix it.
Any help or hint will be much appreciated.
The original image must remain intact, but you are applying the filter to it and modifying it every time. The following example shows the correct way to do it:
import sys
import cv2
import imutils
from PyQt5 import QtCore, QtGui, QtWidgets
class MyWindow(QtWidgets.QWidget):
def __init__(self):
super().__init__()
ori_img = cv2.imread("../resources/omr-1-ans-ori.png", cv2.IMREAD_COLOR)
self.original_image_color = imutils.resize(ori_img, height=960)
self.original_image_gray = cv2.cvtColor(self.original_image_color, cv2.COLOR_BGR2GRAY)
self.thresh = False
self.thresh_karnel_size = 11
self.init_ui()
def init_ui(self):
self.imglabel = QtWidgets.QLabel(alignment=QtCore.Qt.AlignCenter)
self.imglabel.setFixedSize(1200, 900)
cb_thresh = QtWidgets.QCheckBox('thresh', checked=False)
cb_thresh.stateChanged.connect(self.changeTitleThresh)
self.thresh_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal,
focusPolicy=QtCore.Qt.StrongFocus,
tickPosition=QtWidgets.QSlider.TicksBothSides,
tickInterval=1,
singleStep=1,
pageStep=1,
minimum=1,
maximum=127)
self.thresh_slider.valueChanged[int].connect(self.threshSliderChangeValue)
vbox = QtWidgets.QVBoxLayout(self)
vbox.addWidget(cb_thresh)
vbox.addWidget(self.thresh_slider)
vbox.addWidget(self.imglabel)
self.threshSliderChangeValue(self.thresh_slider.value())
self.setGeometry(50, 50, 1200, 768)
self.setWindowTitle('Learning PyQT5')
self.show()
    @QtCore.pyqtSlot(int)
def changeTitleThresh(self, state):
self.thresh = state == QtCore.Qt.Checked
self.threshSliderChangeValue(self.thresh_slider.value())
    @QtCore.pyqtSlot(int)
def threshSliderChangeValue(self, value):
ksize = (value * 2) + 1
if ksize > 1 and ksize % 2 != 0 and self.thresh:
self.thresh_karnel_size = ksize
_, gray_img = cv2.threshold(self.original_image_gray, self.thresh_karnel_size, 255, cv2.THRESH_BINARY)
gray_img_c = cv2.cvtColor(gray_img.copy(), cv2.COLOR_GRAY2BGR)
self.updateImage(gray_img_c)
else:
self.updateImage(self.original_image_color)
def updateImage(self, image):
height, width, channel = image.shape
bytesPerLine = 3 * width
qImg = QtGui.QImage(image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
pixMap = QtGui.QPixmap.fromImage(qImg).scaled(700, 500, QtCore.Qt.KeepAspectRatio)
self.imglabel.setPixmap(pixMap)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
ex = MyWindow()
sys.exit(app.exec_())
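Note the key point of this version: threshSliderChangeValue always thresholds self.original_image_gray, which is never reassigned, so moving the slider repeatedly no longer compounds the thresholding, and unchecking the box simply falls back to the untouched self.original_image_color.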
I am new to both Python and OpenCV and am trying to develop a simple object-tracking GUI using the OpenCV tracking API, testing it with my webcam. My purpose is to select the target object in the current frame. For this, I plot a rectangle in the center of the window and select the target object with reference to that rectangle. Since the drawn rectangle may make the tracking fail, I want to select the target object from the original frame.
In the code I create two different frames, called rawInp and outImage: rawInp is the input video, and outImage is the final output on which I want all the shapes plotted. I use an external function to plot the rectangle, and I also show rawInp to check it. However, I see the rectangle in that output as well. How is this possible, and how can I solve it? Also, how can I use only the rawInp variable for both operations? Copying the same variable around does not seem like a good way to handle it. I am adding the related part of my code, but I can add the whole thing if you want to see it. Thank you in advance for any answers.
import sys
import cv2
import numpy as np
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSlot, QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QApplication, QFileDialog, QMainWindow
from PyQt5.uic import loadUi
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Could not open video")
    sys.exit()
ok, frame = cap.read(0)
height, width, channels = frame.shape
upper_left = (3*int(width/8), 3*int(height/8))
bottom_right = (5*int(width/8), 5*int(height/8))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
class trackingApp(QMainWindow):
def __init__(self):
super(trackingApp,self).__init__()
loadUi('tracking_ui.ui',self)
self.rawInp = None
self.outImage = None
self.stBtclk = False
self.trckBtclk = False
self.startButton.clicked.connect(self.start_webcam)
self.trackingButton.clicked.connect(self.tracking_clicked)
    @pyqtSlot()
def start_webcam(self):
self.stBtclk = True
self.capture = cv2.VideoCapture(0)
self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
self.capture.set(cv2.CAP_PROP_FRAME_WIDTH,620)
self.timer=QTimer(self)
self.timer.timeout.connect(self.update_frame)
self.timer.start(5)
def update_frame(self):
ret, self.rawInp = self.capture.read()
self.rawInp = cv2.flip(self.rawInp,1)
self.rawInp = cv2.cvtColor(self.rawInp, cv2.COLOR_BGR2GRAY)
self.rawInp = clahe.apply(self.rawInp)
self.rawInp = cv2.cvtColor(self.rawInp, cv2.COLOR_GRAY2BGR)
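        # note: the next line binds outImage to the same numpy array as rawInp (no copy is made), so anything drawn on outImage also appears in rawInp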
self.outImage = self.rawInp
if self.trckBtclk: self.tracker_update()
self.displayImage(self.outImage,1)
def plotCenter(self, outImage):
cv2.rectangle(outImage, upper_left, bottom_right, (0, 255, 0), 2)
# Plot the central horizontal and vertical lines
cv2.line(outImage,(50,int(height/2)),(width-50,int(height/2)),(0,255,0),1)
cv2.line(outImage,(int(width/2),50),(int(width/2),height-50),(0,255,0),1)
cv2.imshow('rawInp',self.rawInp)
    @pyqtSlot()
def tracking_clicked(self):
if self.stBtclk:
self.trckBtclk = True
self.tracker = cv2.TrackerKCF_create()
bbox = (3*int(width/8), 3*int(height/8), 2*int(width/8), 2*int(height/8))
self.tracker.init(self.rawInp, bbox)
marker=self.rawInp[3*int(height/8):5*int(height/8), 3*int(width/8):5*int(width/8)]
self.surf = cv2.xfeatures2d.SURF_create(500)
kp, des = self.surf.detectAndCompute(marker,None)
marker = cv2.drawKeypoints(marker,kp,None,(0,0,255),4)
cv2.imshow("marker", marker)
else: pass
def tracker_update(self):
ok, bbox = self.tracker.update(self.outImage)
if ok:
# Tracking success
p1 = (int(bbox[0]), int(bbox[1]))
p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
cv2.rectangle(self.outImage, p1, p2, (0,255,255), 2, 1)
kp, des = self.surf.detectAndCompute(self.outImage,None)
# self.outImage = cv2.drawKeypoints(self.outImage,kp,None,(0,0,255),4)
cv2.putText(self.outImage, "Tracking", (5,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,255,0),2)
else:
# Tracking failure
cv2.putText(self.outImage, "Tracking failure detected", (5,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
def displayImage(self, outImage, window):
self.plotCenter(self.outImage)
qformat=QImage.Format_Indexed8
if len(outImage.shape)==3: #[0]=rows, [1]=columns, [2]=channels
if(outImage.shape[2])==4:
qformat=QImage.Format_RGBA8888
else:
qformat=QImage.Format_RGB888
outImg=QImage(outImage,outImage.shape[1],outImage.shape[0],outImage.strides[0],qformat)
outImg=outImg.rgbSwapped() #BRG>>RGB
if window==1:
self.trackingScreen.setPixmap(QPixmap.fromImage(outImg))
self.trackingScreen.setScaledContents(True)
if __name__=="__main__":
app = QApplication(sys.argv)
app.aboutToQuit.connect(app.deleteLater)
window = trackingApp()
window.show()
#sys.exit(app.exec_())
app.exec_()
And here is an example screenshot:
The first window shows the UI and the final output, while the second one, named "rawInp", shows the unprocessed input video. I do not expect to see the green rectangle in the second window.