I am new to both Python and OpenCV and I am trying to develop a simple object-tracking GUI using the OpenCV tracking API, testing it with my webcam. My goal is to select the target object in the current frame: I draw a rectangle in the center of the window and select the target object with reference to that rectangle. Since the drawn rectangle may interfere with the tracking operation, I want to select the target object from the original frame. In the code I create two different frames, called rawInp and outImage. rawInp is the input video; outImage is the final output, and I want all the shapes to be drawn only on this image. I use an external function to plot the rectangle, and I also show rawInp to check it. However, I see the rectangle in that output as well. How is this possible, and how can I solve it? Also, how can I use only the rawInp variable for both operations? Copying the same variable around does not seem like a good way to handle this. I am adding the relevant part of my code; if you want to see the whole code I can add it. Thank you in advance for any answers.
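For reference, here is a minimal sketch, separate from my GUI code, of the assignment behaviour I am asking about (plain NumPy arrays as returned by cv2; the variable names are made up):
import numpy as np
import cv2

raw = np.zeros((100, 100, 3), dtype=np.uint8)
out = raw                                        # plain assignment: both names refer to the same array
cv2.rectangle(out, (10, 10), (60, 60), (0, 255, 0), 2)
print(raw.any())                                 # True: the rectangle drawn on "out" is also in "raw"

out = raw.copy()                                 # .copy() gives an independent buffer
cv2.rectangle(out, (10, 10), (70, 70), (255, 0, 0), 2)  # this time "raw" stays untouched
Here is the relevant part of my actual code: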
import sys
import cv2
import numpy as np
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSlot, QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QApplication, QFileDialog, QMainWindow
from PyQt5.uic import loadUi
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Could not open video")
    sys.exit()
ok, frame = cap.read(0)
height, width, channels = frame.shape
upper_left = (3*int(width/8), 3*int(height/8))
bottom_right = (5*int(width/8), 5*int(height/8))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
class trackingApp(QMainWindow):
    def __init__(self):
        super(trackingApp, self).__init__()
        loadUi('tracking_ui.ui', self)
        self.rawInp = None
        self.outImage = None
        self.stBtclk = False
        self.trckBtclk = False
        self.startButton.clicked.connect(self.start_webcam)
        self.trackingButton.clicked.connect(self.tracking_clicked)

    @pyqtSlot()
    def start_webcam(self):
        self.stBtclk = True
        self.capture = cv2.VideoCapture(0)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 620)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(5)

    def update_frame(self):
        ret, self.rawInp = self.capture.read()
        self.rawInp = cv2.flip(self.rawInp, 1)
        self.rawInp = cv2.cvtColor(self.rawInp, cv2.COLOR_BGR2GRAY)
        self.rawInp = clahe.apply(self.rawInp)
        self.rawInp = cv2.cvtColor(self.rawInp, cv2.COLOR_GRAY2BGR)
        self.outImage = self.rawInp
        if self.trckBtclk: self.tracker_update()
        self.displayImage(self.outImage, 1)

    def plotCenter(self, outImage):
        cv2.rectangle(outImage, upper_left, bottom_right, (0, 255, 0), 2)
        # Plot the central horizontal and vertical lines
        cv2.line(outImage, (50, int(height/2)), (width-50, int(height/2)), (0, 255, 0), 1)
        cv2.line(outImage, (int(width/2), 50), (int(width/2), height-50), (0, 255, 0), 1)
        cv2.imshow('rawInp', self.rawInp)

    @pyqtSlot()
    def tracking_clicked(self):
        if self.stBtclk:
            self.trckBtclk = True
            self.tracker = cv2.TrackerKCF_create()
            bbox = (3*int(width/8), 3*int(height/8), 2*int(width/8), 2*int(height/8))
            self.tracker.init(self.rawInp, bbox)
            marker = self.rawInp[3*int(height/8):5*int(height/8), 3*int(width/8):5*int(width/8)]
            self.surf = cv2.xfeatures2d.SURF_create(500)
            kp, des = self.surf.detectAndCompute(marker, None)
            marker = cv2.drawKeypoints(marker, kp, None, (0, 0, 255), 4)
            cv2.imshow("marker", marker)
        else: pass

    def tracker_update(self):
        ok, bbox = self.tracker.update(self.outImage)
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(self.outImage, p1, p2, (0, 255, 255), 2, 1)
            kp, des = self.surf.detectAndCompute(self.outImage, None)
            # self.outImage = cv2.drawKeypoints(self.outImage, kp, None, (0, 0, 255), 4)
            cv2.putText(self.outImage, "Tracking", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
        else:
            # Tracking failure
            cv2.putText(self.outImage, "Tracking failure detected", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

    def displayImage(self, outImage, window):
        self.plotCenter(self.outImage)
        qformat = QImage.Format_Indexed8
        if len(outImage.shape) == 3:  # [0]=rows, [1]=columns, [2]=channels
            if (outImage.shape[2]) == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImg = QImage(outImage, outImage.shape[1], outImage.shape[0], outImage.strides[0], qformat)
        outImg = outImg.rgbSwapped()  # BGR >> RGB
        if window == 1:
            self.trackingScreen.setPixmap(QPixmap.fromImage(outImg))
            self.trackingScreen.setScaledContents(True)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    app.aboutToQuit.connect(app.deleteLater)
    window = trackingApp()
    window.show()
    # sys.exit(app.exec_())
    app.exec_()
And here is an example screenshot:
The first window shows the UI and the final output, while the second one, named "rawInp", shows the unprocessed input video. I do not expect to see the green rectangle in the second window.
Goal:
I am trying to display an image, edited with OpenCV, using PyQt5.
The above image shows a side-by-side comparison of my expected output (shown in an OpenCV window) and the actual output displayed in a PyQt5 QLabel pixmap.
The picture shows that the image is successfully resized but not displayed correctly.
About the QLabel (used to display the image):
The QLabel is within a Frame. Here's how it is defined:
self.ImageDisplayerLB = QtWidgets.QLabel(self.topFrame) #topFrame is a QFrame
self.ImageDisplayerLB.setEnabled(True)
self.ImageDisplayerLB.setText("")
self.ImageDisplayerLB.setPixmap(QtGui.QPixmap("./<image>.jpg"))
self.ImageDisplayerLB.setAlignment(QtCore.Qt.AlignCenter)
self.ImageDisplayerLB.setObjectName("ImageDisplayerLB")
self.gridLayout_2.addWidget(self.ImageDisplayerLB, 0, 0, 1, 1)
About the QFrame that contains the QLabel:
The QFrame has a minimum size (height and width) set so it doesn't look too small while displaying the image.
self.topFrame = QtWidgets.QFrame(self.frame)
self.topFrame.setMinimumSize(QtCore.QSize(831, 409))
self.topFrame.setStyleSheet("background-color: rgb(1,1,1);")
self.topFrame.setObjectName("topFrame")
self.topFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.topFrame.setFrameShadow(QtWidgets.QFrame.Raised)
The pixmap is set again with a different function call while handling an event. The code snippet below is where the error seems to be occurring.
if hw := self.__check_oversized_image(image):  # Returns height, width if image is larger than the QLabel size, else returns None.
    w, h = self.ImageDisplayerLB.width(), self.ImageDisplayerLB.height()
    self.ImageDisplayerLB.pixmap().detach()  # Tried the same without it, makes no difference
    thresh = min((self.ImageDisplayerLB.width(), self.ImageDisplayerLB.height()))
    r = thresh / image.shape[1]
    dim = (thresh, int(image.shape[0] * r))
    image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)  # Resize image maintaining the ratio
    self.ImageDisplayerLB.setScaledContents(True)  # Makes no difference with or without this
# End of if block
frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
self.qimage = QtGui.QImage(
    frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888
)
try:
    self.pimage = QtGui.QPixmap.fromImage(self.qimage, QtCore.Qt.AutoColor)
    self.ImageDisplayerLB.setPixmap(self.pimage)
except Exception as e:
    print(e)
When does this occur?
This issue occurs only when the image is found to be oversized and I resize it; it works fine when the image is not oversized.
Any help fixing the issue where the image appears grayscale and tilted would be appreciated.
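One thing I have been wondering about (an assumption on my part, not something I have confirmed): the QImage constructor used above does not receive a bytes-per-line value, and QImage assumes 32-bit-aligned scanlines when it is omitted, which could matter once the frame has been resized. A sketch of what I mean, passing the stride explicitly and copying the result before handing it to the label:
import numpy as np  # assumed to be available alongside cv2 and QtGui

frame = np.ascontiguousarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
h, w, ch = frame.shape
qimage = QtGui.QImage(frame.data, w, h, ch * w, QtGui.QImage.Format_RGB888)
self.ImageDisplayerLB.setPixmap(QtGui.QPixmap.fromImage(qimage.copy()))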
Minimal Reproducible Code:
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QMainWindow
import cv2 as cv
class main(QMainWindow):
    def __init__(self):
        super().__init__()
        self.mainFrame = QtWidgets.QFrame(self)
        self.grid = QtWidgets.QGridLayout(self.mainFrame)
        self.label = QtWidgets.QLabel(self.mainFrame)
        img = cv.imread("cert_template.png")  # Image from -> https://simplecert.net/certificate-templates/
        frame = img.copy()
        self.label.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(frame.data, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888,)))  # Not sure why this is grayscale and tilted
        self.mainFrame.setMinimumSize(QtCore.QSize(831, 409))
        self.label.setScaledContents(True)
        self.grid.addWidget(self.label, 0, 0, 1, 1)
        cv.imshow("image", img)  # Displays the predicted output
        cv.waitKey(0)

if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    window = main()
    window.show()
    app.exec_()
Much appreciated.
In a nutshell, I am currently trying to load a picture and view it with PyQt5 via QtWidgets.QGraphicsScene.
However, after loading the picture I run some OpenCV commands (Canny, Gaussian) to get some regions of interest, which are simply X-Y coordinates. This then breaks the displayed picture and I get lots of blue screens.
The picture and the OpenCV operations are shown below in a working example:
macOS
Python 3.7
PyQt5
OpenCV
import sys
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PIL.ImageQt import ImageQt
from PIL import Image
import cv2
# Convert an opencv image to QPixmap
def convertCvImage2QtImage(cv_img):
    rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    PIL_image = Image.fromarray(rgb_image).convert('RGB')
    return QtGui.QPixmap.fromImage(ImageQt(PIL_image))
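# A hedged alternative I have seen suggested (an assumption, not part of my original
# code): build the QPixmap directly from the numpy array and call .copy() on the
# QImage so Qt owns the pixel buffer, instead of borrowing memory from a temporary
# PIL image that may be freed later.
def convertCvImage2QtImage_copied(cv_img):
    rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    h, w, ch = rgb_image.shape
    qimage = QtGui.QImage(rgb_image.data, w, h, ch * w, QtGui.QImage.Format_RGB888)
    return QtGui.QPixmap.fromImage(qimage.copy())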
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel
from PyQt5.QtCore import Qt, QMimeData
from PyQt5.QtGui import QDrag, QPixmap
import numpy as np
class Ui_Form(object):
    def setupUi(self, Form):
        if not Form.objectName():
            Form.setObjectName(u"Form")
        Form.resize(900, 712)
        Form.setAcceptDrops(True)
        self.frame_graphics_view = QGraphicsView(Form)
        self.frame_graphics_view.setObjectName(u"frame_graphics_view")
        self.frame_graphics_view.setEnabled(True)
        self.frame_graphics_view.setGeometry(QRect(10, 10, 700, 300))
        sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.frame_graphics_view.sizePolicy().hasHeightForWidth())
        self.frame_graphics_view.setSizePolicy(sizePolicy)
        self.frame_graphics_view.setMouseTracking(True)
        self.frame_graphics_view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.frame_graphics_view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.frame_graphics_view.setSizeAdjustPolicy(QAbstractScrollArea.AdjustIgnored)
        self.frame_graphics_view.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignTop)
        self.frame_graphics_view.setRubberBandSelectionMode(Qt.ContainsItemBoundingRect)
        self.refresh_frame_button = QPushButton(Form)
        self.refresh_frame_button.setObjectName(u"refresh_frame_button")
        self.refresh_frame_button.setGeometry(QRect(720, 10, 80, 26))
        self.show_roi_checkbox = QCheckBox(Form)
        self.show_roi_checkbox.setObjectName(u"show_roi_checkbox")
        self.show_roi_checkbox.setGeometry(QRect(720, 50, 85, 21))
        self.retranslateUi(Form)
        QMetaObject.connectSlotsByName(Form)
    # setupUi

    def retranslateUi(self, Form):
        Form.setWindowTitle(QCoreApplication.translate("Form", u"Form", None))
        self.refresh_frame_button.setText(QCoreApplication.translate("Form", u"refresh", None))
        self.show_roi_checkbox.setText(QCoreApplication.translate("Form", u"Show Roi", None))
class MyScene(QtWidgets.QGraphicsScene):
    def __init__(self, parent):
        super(MyScene, self).__init__()
        self.parent = parent
        self.red_color = QColor(255, 0, 0, 180)
        self.green_color = QColor(255, 255, 0, 180)
        self.blue_color = QColor(255, 0, 255, 180)
        self.greenBrush = QBrush(self.green_color)
        self.pen = QPen(self.red_color)
        self.current_ellipses = []

    def add_ellipses(self, pts):
        to_add = []
        for (x, y) in pts:
            ellipse = QGraphicsEllipseItem()
            ellipse.setPen(self.pen)
            ellipse.setBrush(self.greenBrush)
            ellipse.setRect(x, y, 10, 10)
            ellipse.setFlag(QGraphicsItem.ItemIsMovable)
            ellipse.setFlag(QGraphicsItem.ItemIsFocusable)
            ellipse.setFlag(QGraphicsItem.ItemIsSelectable)
            to_add.append(ellipse)
        for ellipse in to_add:
            self.addItem(ellipse)
        self.current_ellipses += to_add

    def remove_current_ellipses(self):
        for item in self.current_ellipses:
            self.removeItem(item)
        self.current_ellipses = []
class ImageLoader(QtWidgets.QWidget):
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.ui.refresh_frame_button.clicked.connect(self.refresh_frame_only)
        self.ui.show_roi_checkbox.toggled.connect(self.show_roi)
        self.scene = QtWidgets.QGraphicsScene(self)
        self.scene = MyScene(self)
        self.ui.frame_graphics_view.setScene(self.scene)
        self.pixmap_item = self.scene.addPixmap(QtGui.QPixmap())
        # unclear why the UI is not setting this.. it should have.
        self.ui.frame_graphics_view.setDragMode(QGraphicsView.RubberBandDrag)
        self.go_to_image()

    def go_to_image(self):
        # get image
        img = self.get_image_to_show()
        if img is None:
            return
        # convert image
        pixmap = convertCvImage2QtImage(img)
        if pixmap.isNull():
            return
        # show image and determine if show ROI
        self.pixmap_item.setPixmap(pixmap)
        self.show_roi()

    def refresh_frame_only(self):
        # just refreshing, sometimes it fixes the underlying picture!
        self.go_to_image()

    def show_roi(self):
        '''Show region of interest in the picture. This may require calculations.
        Lots of testing has shown that the more opencv2 commands that you call,
        the more likely it is to get the picture to "break".
        '''
        self.scene.remove_current_ellipses()
        if self.ui.show_roi_checkbox.isChecked():
            image = self.get_image_to_show()  # the image

            def calculate_roi():
                # run some opencv2 operations to find the ROI
                mask = cv2.Canny(image, 150, 250, 3)
                mask = cv2.bitwise_not(mask)
                image[mask != 0] = np.array((255, 255, 255))
                gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                # Performing OTSU threshold
                ret, image_thresh = cv2.threshold(gray_image, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
                # we ignore the outputs
                ellipses = list([i, i] for i in [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 120, 130, 140, 150, 160, 170])
                return ellipses

            # as we decrease the range to say 1, it significantly decreases the chance of breaking the picture.
            for i in range(5):
                ellipses = calculate_roi()
            self.scene.add_ellipses(ellipses)
        else:
            self.scene.remove_current_ellipses()

    def get_image_to_show(self):
        '''LOAD A PICTURE'''
        image = cv2.imread('ducks.png')
        return image

#
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    imageLoader = ImageLoader()
    imageLoader.show()
    sys.exit(app.exec_())
To reproduce the issue:
Run the app.
Click the checkbox so that it is toggled on.
Click refresh many times. It can break after 1 click or after 5-10.
To "fix", untoggle the checkbox and hit refresh.
Things I have tried:
Removing the self.scene.add_ellipses call
-- this removes the yellow dots, but still causes the blue screen.
Decreasing the number of OpenCV commands; this definitely helps.
Replacing the OpenCV commands with something like for i in range(10000000): print(i); this runs slowly, but nothing breaks.
Very confused! Any ideas are helpful.
Example screenshots below:
Loading pictures: a family of ducks! Yellow dots loaded (note the toggle).
Clicking refresh a few times: blue screens. Note the partial loading of the ducks!
I'm using OpenCV for some image processing and want to create a transparent overlay on my screen using PyQt widgets. Below is a basic example of sending a frame from OpenCV to PyQt through a signal/slot and displaying it on the window. The issue is that I can't get a transparent background with this method; instead it is just a black background:
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np
import os
class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def run(self):
        img = np.zeros((500, 500, 4), dtype=np.uint8)
        cv2.rectangle(img, (0, 0), (200, 200), (0, 0, 255), 2)
        while True:
            self.change_pixmap_signal.emit(img)

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Qt live label demo")
        self.disply_width = 1920
        self.display_height = 1080
        # create the label that holds the image
        self.image_label = QLabel(self)
        self.image_label.resize(self.disply_width, self.display_height)
        # create a text label
        self.textLabel = QLabel('Webcam')
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.setStyleSheet("background-color:transparent;")
        # create a vertical box layout and add the two labels
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        # set the vbox layout as the widgets layout
        self.setLayout(vbox)
        # create the video capture thread
        self.thread = VideoThread()
        # connect its signal to the update_image slot
        self.thread.change_pixmap_signal.connect(self.update_image)
        # start the thread
        self.thread.start()

    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)

    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(
            rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(
            self.disply_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())
If it isn't possible to send a frame with an alpha channel to PyQt, I was wondering if it's possible to just send the rectangle's four point coordinates and use PyQt painting to draw a rectangle on screen. I think this would require a widget.update(), but I'm not sure where to invoke that.
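Here is a rough sketch of that second idea, in case it clarifies what I mean (OverlayWidget and set_rect are names I made up, and I have not wired this into the code above):
from PyQt5 import QtCore, QtGui, QtWidgets

class OverlayWidget(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self._rect = None  # last rectangle received from the OpenCV side

    @QtCore.pyqtSlot(int, int, int, int)
    def set_rect(self, x, y, w, h):
        self._rect = QtCore.QRect(x, y, w, h)
        self.update()  # schedule a repaint; paintEvent runs on the GUI thread

    def paintEvent(self, event):
        if self._rect is None:
            return
        painter = QtGui.QPainter(self)
        painter.setPen(QtGui.QPen(QtCore.Qt.red, 2))
        painter.drawRect(self._rect)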
If you are going to use transparency, then the colors and the image must be 4-channel; in your case you are passing a 3-channel color to the cv2.rectangle method and then using a 3-channel format in QImage. On the other hand, cv2.rectangle returns the drawn image; it does not modify the input array.
def run(self):
    img = np.zeros((500, 500, 4), dtype=np.uint8)
    output = cv2.rectangle(img, (0, 0), (200, 200), (0, 0, 255, 255), 2)
    while True:
        self.change_pixmap_signal.emit(output)
        QThread.msleep(1)

def convert_cv_qt(self, cv_img):
    h, w, ch = cv_img.shape
    bytes_per_line = ch * w
    convert_to_Qt_format = QtGui.QImage(
        cv_img.data, w, h, bytes_per_line, QtGui.QImage.Format_RGBA8888
    )
    p = convert_to_Qt_format.scaled(
        self.disply_width, self.display_height, Qt.KeepAspectRatio
    )
    return QPixmap.fromImage(p)
I've already succeeded in plotting a grayscale histogram of a video: for each image of the video, the histogram was updated to correspond to the current image. For that program I used the classic approach, with subplots, plot, set_ydata, etc., and I only had two windows: one with the video and one figure with the histogram. Now what I'm trying to do is to have a single window with the video and the histogram on it, and to add buttons like "pause", "play" or "restart". From my research, Tkinter looked like a way to do that, so I started using it.
I configured my whole window (buttons, video display and histogram) and the video is shown normally, but I can't update my histogram: my program only plots the first histogram (of the first image) and nothing else. I've already tried several things, like the Tkinter animation, or putting an ax clear and a draw() in my calc_hist() function (with draw() I get the error "draw_wrapper() missing 1 required positional argument: 'renderer'", and I couldn't find what it corresponded to), but it's not working. Maybe I misused these functions, so maybe you can find what's going wrong in my code.
Here's my App class, which configures the window and is supposed to display the histogram (I removed parts that are irrelevant to the problem, like other functions and button declarations, to shorten the code):
import tkinter
import cv2
import PIL.Image, PIL.ImageTk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import numpy as np
import matplotlib.pyplot as plt

class App:
    def __init__(self, window, window_title, video_source=0):
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        self.vid = MyVideoCapture(self.video_source)
        # Video
        self.canvas = tkinter.Canvas(window, width=640, height=480)
        self.canvas.grid(row=0, column=0)
        # Histogram
        self.frame_hist = tkinter.Frame(window)
        self.frame_hist.grid(row=0, column=1)
        self.figure = plt.Figure(figsize=(5, 4), dpi=100)
        self.ax = self.figure.add_subplot()
        self.canvas_hist = FigureCanvasTkAgg(self.figure, self.frame_hist)
        self.canvas_hist.get_tk_widget().pack(fill=tkinter.BOTH, side=tkinter.TOP)
        self.ax = self.figure.gca()
        x = np.linspace(0, 255, 256)
        y = np.linspace(10, 100000, 256)
        self.canvas_hist, = self.ax.plot(x, y)
        self.ax.set_ylabel('Nombre pixel', fontsize=15)
        self.ax.set_xlabel('Valeur pixel', fontsize=15)
        self.ax.set_yscale('log')
        self.delay = 15
        self.update()
        self.window.mainloop()

    def update(self):
        ret, frame = self.vid.get_frame()
        if ret:
            self.gris = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            self.smaller_image = cv2.resize(self.gris, (640, 480))
            self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(self.smaller_image))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
            self.calc_hist(self.gris)
        self.window.after(self.delay, self.update)

    def calc_hist(self, gris):
        self.histogram = cv2.calcHist([gris], [0], None, [256], [0, 256])
        self.canvas_hist.set_ydata(self.histogram)
And here's the second part of my code, with the video class that initializes the capture. I include it just in case, but I don't think anything in it matters for my problem:
class MyVideoCapture:
    def __init__(self, video_source=0):
        # Open the video source
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def get_frame(self):
        if self.vid.isOpened():
            ret, frame = self.vid.read()
            if ret:
                # Return a boolean success flag and the current frame converted to RGB
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            return (ret, None)

    # Release the video source when the object is destroyed
    def __del__(self):
        if self.vid.isOpened():
            self.vid.release()

# Create a window and pass it to the Application object
App(tkinter.Tk(), "Tkinter and OpenCV", "output.avi")
And here's my final interface:
When you update the y data, you need to refresh the graph using self.canvas_hist.draw().
However, self.canvas_hist (an instance of FigureCanvasTkAgg) is overwritten by the line:
self.canvas_hist, = self.ax.plot(x, y)
So I suggest changing the above line to:
self.graph, = self.ax.plot(x, y)
Then add self.canvas_hist.draw() at the end of calc_hist():
def calc_hist(self, gris):
    histogram = cv2.calcHist([gris], [0], None, [256], [0, 256])
    self.graph.set_ydata(histogram)
    self.canvas_hist.draw()
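For completeness, a minimal self-contained sketch of the same pattern without OpenCV (the random y data is only a placeholder): keep the FigureCanvasTkAgg and the Line2D under separate names, call set_ydata() on the line and draw() on the canvas.
import tkinter
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tkinter.Tk()
figure = plt.Figure(figsize=(5, 4), dpi=100)
ax = figure.add_subplot()
canvas_hist = FigureCanvasTkAgg(figure, root)        # the canvas keeps its own name
canvas_hist.get_tk_widget().pack(fill=tkinter.BOTH)
graph, = ax.plot(np.arange(256), np.zeros(256))      # the plotted line gets its own name
ax.set_ylim(0, 1)

def refresh():
    graph.set_ydata(np.random.rand(256))             # update the existing line
    canvas_hist.draw()                               # redraw the embedded figure
    root.after(100, refresh)

refresh()
root.mainloop()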
I am trying to integrate the pyqtgraph example into a class.
However, since the example uses global to access important objects, I am having trouble translating it into a class.
The example:
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
# Interpret image data as row-major instead of col-major
pg.setConfigOptions(imageAxisOrder='row-major')
pg.mkQApp()
win = pg.GraphicsLayoutWidget()
win.setWindowTitle('pyqtgraph example: Image Analysis')
# A plot area (ViewBox + axes) for displaying the image
p1 = win.addPlot()
# Item for displaying image data
img = pg.ImageItem()
p1.addItem(img)
# Custom ROI for selecting an image region
roi = pg.ROI([-8, 14], [6, 5])
roi.addScaleHandle([0.5, 1], [0.5, 0.5])
roi.addScaleHandle([0, 0.5], [0.5, 0.5])
p1.addItem(roi)
roi.setZValue(10) # make sure ROI is drawn above image
# Isocurve drawing
iso = pg.IsocurveItem(level=0.8, pen='g')
iso.setParentItem(img)
iso.setZValue(5)
# Contrast/color control
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
# Draggable line for setting isocurve level
isoLine = pg.InfiniteLine(angle=0, movable=True, pen='g')
hist.vb.addItem(isoLine)
hist.vb.setMouseEnabled(y=False) # makes user interaction a little easier
isoLine.setValue(0.8)
isoLine.setZValue(1000) # bring iso line above contrast controls
# Another plot area for displaying ROI data
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
win.resize(800, 800)
win.show()
# Generate image data
data = np.random.normal(size=(200, 100))
data[20:80, 20:80] += 2.
data = pg.gaussianFilter(data, (3, 3))
data += np.random.normal(size=(200, 100)) * 0.1
img.setImage(data)
hist.setLevels(data.min(), data.max())
# build isocurves from smoothed data
iso.setData(pg.gaussianFilter(data, (2, 2)))
# set position and scale of image
img.scale(0.2, 0.2)
img.translate(-50, 0)
# zoom to fit image
p1.autoRange()
# Callbacks for handling user interaction
def updatePlot():
    global img, roi, data, p2
    selected = roi.getArrayRegion(data, img)
    p2.plot(selected.mean(axis=0), clear=True)

roi.sigRegionChanged.connect(updatePlot)
updatePlot()

def updateIsocurve():
    global isoLine, iso
    iso.setLevel(isoLine.value())

isoLine.sigDragged.connect(updateIsocurve)

## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
What I tried: (only changed parts)
def updatePlot(img, roi, data, p2):
    #global img, roi, data, p2
    selected = roi.getArrayRegion()
    p2.plot(selected.mean(axis=0), clear=True)

roi.sigRegionChanged.connect(updatePlot(img, roi, data, p2))
updatePlot(img, roi, data, p2)

def updateIsocurve(isoLine, iso):
    # global isoLine, iso
    so.setLevel(isoLine.value())

isoLine.sigDragged.connect(updateIsocurve(isoLine, iso))
This gives an error, since the "img" object I am passing in (instead of accessing it through global) seems to be of type None.
I don't know how to give the update functions access to the necessary objects.
Make all the variables instance variables by using self:
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np

class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        # Interpret image data as row-major instead of col-major
        pg.setConfigOptions(imageAxisOrder='row-major')
        pg.mkQApp()
        self.win = pg.GraphicsLayoutWidget()
        self.win.setWindowTitle('pyqtgraph example: Image Analysis')
        # A plot1 area (ViewBox + axes) for displaying the image
        self.plot1 = self.win.addPlot()
        # Item for displaying image data
        self.item = pg.ImageItem()
        self.plot1.addItem(self.item)
        # Custom ROI for selecting an image region
        self.ROI = pg.ROI([-8, 14], [6, 5])
        self.ROI.addScaleHandle([0.5, 1], [0.5, 0.5])
        self.ROI.addScaleHandle([0, 0.5], [0.5, 0.5])
        self.plot1.addItem(self.ROI)
        self.ROI.setZValue(10)  # make sure ROI is drawn above image
        # Isocurve drawing
        self.iso = pg.IsocurveItem(level=0.8, pen='g')
        self.iso.setParentItem(self.item)
        self.iso.setZValue(5)
        # Contrast/color control
        self.hist = pg.HistogramLUTItem()
        self.hist.setImageItem(self.item)
        self.win.addItem(self.hist)
        # Draggable line for setting isocurve level
        self.isoLine = pg.InfiniteLine(angle=0, movable=True, pen='g')
        self.hist.vb.addItem(self.isoLine)
        self.hist.vb.setMouseEnabled(y=False)  # makes user interaction a little easier
        self.isoLine.setValue(0.8)
        self.isoLine.setZValue(1000)  # bring iso line above contrast controls
        # Another plot1 area for displaying ROI data
        self.win.nextRow()
        self.plot2 = self.win.addPlot(colspan=2)
        self.plot2.setMaximumHeight(250)
        self.win.resize(800, 800)
        self.win.show()
        # Generate image self.data
        self.data = np.random.normal(size=(200, 100))
        self.data[20:80, 20:80] += 2.
        self.data = pg.gaussianFilter(self.data, (3, 3))
        self.data += np.random.normal(size=(200, 100)) * 0.1
        self.item.setImage(self.data)
        self.hist.setLevels(self.data.min(), self.data.max())
        # build isocurves from smoothed self.data
        self.iso.setData(pg.gaussianFilter(self.data, (2, 2)))
        # set position and scale of image
        self.item.scale(0.2, 0.2)
        self.item.translate(-50, 0)
        # zoom to fit image
        self.plot1.autoRange()
        self.ROI.sigRegionChanged.connect(self.updatePlot)
        self.updatePlot()
        self.isoLine.sigDragged.connect(self.updateIsocurve)

    # Callbacks for handling user interaction
    def updatePlot(self):
        selected = self.ROI.getArrayRegion(self.data, self.item)
        self.plot2.plot(selected.mean(axis=0), clear=True)

    def updateIsocurve(self):
        self.iso.setLevel(self.isoLine.value())

## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    app = QtGui.QApplication([])
    app.setStyle(QtGui.QStyleFactory.create("Cleanlooks"))
    image_widget = ImageWidget()
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
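As an aside about the attempt shown in the question: connect() expects a callable, but updatePlot(img, roi, data, p2) calls the function immediately and passes its return value (None) to connect(). If you wanted to keep the module-level functions, a sketch under that assumption would wrap the call in a lambda:
# Assumes the module-level names from the original example (img, roi, data, p2, isoLine, iso).
roi.sigRegionChanged.connect(lambda *args: updatePlot(img, roi, data, p2))
updatePlot(img, roi, data, p2)
isoLine.sigDragged.connect(lambda *args: updateIsocurve(isoLine, iso))
Note that getArrayRegion would still need its data and image arguments, as in the class-based version above.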