Tkinter: problem updating a grayscale histogram of a video - Python

I've already succeeded in plotting a grayscale histogram of a video: for each frame of the video, the histogram was updated to match the current frame. For that program I used the classic approach, with the functions subplots, plot, set_ydata, etc. I only had two windows: one with the video and one figure with the histogram. Now what I'm trying to do is have a single window with both the video and the histogram on it, and add buttons like "pause", "play" or "restart". From my research I saw that Tkinter could be a way to do that, so I started using it.
I configured the whole window (with buttons, the video display and the histogram) and the video is shown normally, but I can't update my histogram: my program only plots the first histogram (of the first frame) and nothing else. I've already tried several things, like Tkinter animation, or putting an axis clear and a draw() in my function calc_hist() (with draw() I get the error "draw_wrapper() missing 1 required positional argument: 'renderer'", and I couldn't find what it corresponded to), but it's not working. Maybe I misused these functions, so perhaps you can find what's going wrong with my code.
Here's my App class, which configures the window and is supposed to display the histogram (I deleted parts that are irrelevant to my problem, like button functions and declarations, to shorten the code):
import tkinter
import cv2
import PIL.Image, PIL.ImageTk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import numpy as np
import matplotlib.pyplot as plt

class App:
    def __init__(self, window, window_title, video_source=0):
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        self.vid = MyVideoCapture(self.video_source)
        # Video
        self.canvas = tkinter.Canvas(window, width=640, height=480)
        self.canvas.grid(row=0, column=0)
        # Histogram
        self.frame_hist = tkinter.Frame(window)
        self.frame_hist.grid(row=0, column=1)
        self.figure = plt.Figure(figsize=(5, 4), dpi=100)
        self.ax = self.figure.add_subplot()
        self.canvas_hist = FigureCanvasTkAgg(self.figure, self.frame_hist)
        self.canvas_hist.get_tk_widget().pack(fill=tkinter.BOTH, side=tkinter.TOP)
        self.ax = self.figure.gca()
        x = np.linspace(0, 255, 256)
        y = np.linspace(10, 100000, 256)
        self.canvas_hist, = self.ax.plot(x, y)
        self.ax.set_ylabel('Nombre pixel', fontsize=15)
        self.ax.set_xlabel('Valeur pixel', fontsize=15)
        self.ax.set_yscale('log')
        self.delay = 15
        self.update()
        self.window.mainloop()

    def update(self):
        ret, frame = self.vid.get_frame()
        if ret:
            self.gris = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            self.smaller_image = cv2.resize(self.gris, (640, 480))
            self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(self.smaller_image))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
            self.calc_hist(self.gris)
        self.window.after(self.delay, self.update)

    def calc_hist(self, gris):
        self.histogram = cv2.calcHist([gris], [0], None, [256], [0, 256])
        self.canvas_hist.set_ydata(self.histogram)
And here's the second part of my code, with the video class that initializes the capture. I'm including it just in case, but I don't think it matters for my problem:
class MyVideoCapture:
    def __init__(self, video_source=0):
        # Open the video source
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def get_frame(self):
        if self.vid.isOpened():
            ret, frame = self.vid.read()
            if ret:
                # Return a boolean success flag and the current frame converted to RGB
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            return (ret, None)

    # Release the video source when the object is destroyed
    def __del__(self):
        if self.vid.isOpened():
            self.vid.release()

# Create a window and pass it to the Application object
App(tkinter.Tk(), "Tkinter and OpenCV", "output.avi")
And here's my final interface:

When you update the y-data, you need to refresh the graph using self.canvas_hist.draw().
However, self.canvas_hist (an instance of FigureCanvasTkAgg) is overwritten by the line:
self.canvas_hist, = self.ax.plot(x, y)
So I suggest changing the above line to:
self.graph, = self.ax.plot(x, y)
Then add self.canvas_hist.draw() at the end of calc_hist():
def calc_hist(self, gris):
    histogram = cv2.calcHist([gris], [0], None, [256], [0, 256])
    self.graph.set_ydata(histogram)
    self.canvas_hist.draw()
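If it helps to see the moving parts in isolation, here is a minimal, self-contained sketch (my addition, not the original program) of the same pattern: one FigureCanvasTkAgg, one Line2D kept in its own variable, and a periodic after() callback that calls set_ydata() followed by the canvas's draw(). This also suggests where the "draw_wrapper() missing 1 required positional argument: 'renderer'" error in the question likely came from: calling the Figure's own draw(), which expects a renderer, instead of the canvas's draw(), which does not.
import tkinter
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tkinter.Tk()
figure = plt.Figure(figsize=(5, 4), dpi=100)
ax = figure.add_subplot()
canvas = FigureCanvasTkAgg(figure, root)   # Tk drawing surface for the figure
canvas.get_tk_widget().pack(fill=tkinter.BOTH, side=tkinter.TOP)

x = np.arange(256)
line, = ax.plot(x, np.ones(256))           # keep the Line2D separate from the canvas
ax.set_ylim(0, 1.05)

def update():
    line.set_ydata(np.abs(np.sin(x / 25.0 + update.t)))  # new y-data, stands in for the histogram
    update.t += 0.1
    canvas.draw()                          # redraw the embedded figure; without this nothing changes
    root.after(15, update)

update.t = 0.0
update()
root.mainloop()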

Related

How to display the whole image on this canvas?

The code provided here is:
import tkinter as tk
from PIL import Image, ImageTk
from pathlib import Path

class App(tk.Tk):
    def __init__(self):
        super().__init__()
        self.geometry('600x600')
        self.img_path = Path(r'D:\Python\Lena.jpg')
        self.img = Image.open(self.img_path)
        self.img_rgb = self.img.convert('RGB')
        dim_x, dim_y = self.img_rgb.size
        self.img_tk = ImageTk.PhotoImage(self.img_rgb.resize((dim_x, dim_y)))
        self.canvas = tk.Canvas(self)
        self.canvas.create_image(dim_x // 2, dim_y // 2, image=self.img_tk)
        self.canvas.pack(expand=True, fill=tk.BOTH)
        self.rgb_var = tk.StringVar(self, '0 0 0')
        self.rgb_label = tk.Label(self, textvariable=self.rgb_var)
        self.rgb_label.pack()
        self.bind('<Motion>', lambda e: self.get_rgb(e))

    def get_rgb(self, event):
        x, y = event.x, event.y
        try:
            rgb = self.img_rgb.getpixel((x, y))
            self.rgb_var.set(rgb)
        except IndexError:
            pass  # ignore errors if the cursor is outside the image

if __name__ == '__main__':
    app = App()
    app.mainloop()
It displays an image and, below it, the RGB value of the pixel under the mouse pointer (when the pointer is over the image). The image used is this.
However, only the upper left quadrant of the image is displayed on the canvas. You can see that in the screenshot below.
How can I display the whole image and still have the RGB values of the pixel under the mouse pointer displayed (when the mouse pointer is over the image)?
I can see two possible solutions:
Expand image to fit window
Wrap window around image
To expand image to fit window
dim_x, dim_y = 600, 600
self.img_tk = ImageTk.PhotoImage(self.img_rgb.resize((dim_x, dim_y)))
OR
To wrap window around image
dim_x, dim_y = self.img_rgb.size
self.img_tk = ImageTk.PhotoImage(self.img_rgb)
Both approaches will display the entire image.
Here is the complete code, with both options available via the select flag.
import tkinter as tk
from PIL import Image, ImageTk
from pathlib import Path

class App(tk.Tk):
    def __init__(self, select=True):
        super().__init__()
        self.img_path = Path('D:\Lenna.jpg')
        self.img = Image.open(self.img_path)
        self.img_rgb = self.img.convert('RGB')
        if select:
            # resize image to fit window
            dim_x, dim_y = 600, 600
            self.img_tk = ImageTk.PhotoImage(self.img_rgb.resize((dim_x, dim_y)))
        else:
            # resize window to fit image
            dim_x, dim_y = self.img_rgb.size
            self.img_tk = ImageTk.PhotoImage(self.img_rgb)
        self.geometry(f'{dim_x}x{dim_y+21}')
        self.canvas = tk.Canvas(self, borderwidth=0, highlightthickness=0)
        self.canvas.create_image(0, 0, image=self.img_tk, anchor=tk.NW)
        self.canvas.pack(expand=True, fill=tk.BOTH)
        self.rgb_var = tk.StringVar(self, '0 0 0')
        self.rgb_label = tk.Label(self, textvariable=self.rgb_var)
        self.rgb_label.pack()
        self.bind('<Motion>', lambda e: self.get_rgb(e))

    def get_rgb(self, event):
        x, y = event.x, event.y
        try:
            rgb = self.img_rgb.getpixel((x, y))
            self.rgb_var.set(rgb)
        except IndexError:
            pass  # ignore errors if the cursor is outside the image

if __name__ == '__main__':
    app = App(False)
    app.mainloop()
Everything works as expected once the canvas's default borderwidth and highlightthickness are removed (set to 0, as above).

How to create box around selected image on GUI

My GUI is a slideshow of images, and there is a certain correct image on each page of the slideshow. The correct images are the top-left triangle, the heart balloon, the circle, and the hippo. I am completely new to Python, so I have been struggling with how to create a box around the selected image while a user is using the slideshow. This box should form around the correct or the incorrect image, whichever is clicked. To further explain, this means drawing a box around an image based on where the user clicks, but it does not need to say whether the selected image is the correct one or not. Thank you for your help.
import PIL.Image
import PIL.ImageDraw
import tkinter as tk
import PIL.ImageTk
import csv
from PIL import Image

MAX_HEIGHT = 500
# height of the window (dimensions of the image)

class App(tk.Frame):
    def __init__(self, imageData, master=None):
        tk.Frame.__init__(self, master)
        self.clickStatus = tk.StringVar()
        self.loadedImages = dict()
        self.master.title('Slideshow')
        fram = tk.Frame(self)
        tk.Button(fram, text="Previous Image", command=self.prev).pack(side=tk.LEFT)
        tk.Button(fram, text=" Next Image ", command=self.next).pack(side=tk.LEFT)
        tk.Label(fram, textvariable=self.clickStatus, font='Helvetica 18 bold').pack(side=tk.RIGHT)
        # inside or outside
        fram.pack(side=tk.TOP, fill=tk.BOTH)
        self.imageLabel = tk.Label(self)
        # drawing the image on the label
        self.imageData = imageData
        self.currentIndex = 0
        # start from 0th image
        self.__loadImage__()
        self.imageLabel.bind("<Button-1>", self.clicked_evt)
        # when you click the label, the clicked_evt handler runs
        self.imageLabel.pack()
        self.pack()

    def clicked_evt(self, evt):
        x, y = evt.x, evt.y
        imgData = self.loadedImages[self.imageData[self.currentIndex]['image_file']]
        (l, t), (r, b) = imgData['lt'], imgData['rb']
        if t <= y <= b and l <= x <= r:
            ##self.clickStatus.set('inside')
            print('Inside')
        else:
            ##self.clickStatus.set('outside')
            print('Outside')

    def __loadImage__(self):
        if self.imageData[self.currentIndex]['image_file'] not in self.loadedImages:
            self.im = PIL.Image.open(self.imageData[self.currentIndex]['image_file'])
            ratio = MAX_HEIGHT / self.im.height
            # ratio of the maximum height to the existing height -> constant scale factor
            height, width = int(self.im.height * ratio), int(self.im.width * ratio)
            # calculate the new h and w and then resize next
            self.im = self.im.resize((width, height))
            lt = (int(self.imageData[self.currentIndex]['left'] * ratio), int(self.imageData[self.currentIndex]['top'] * ratio))
            rb = (int(self.imageData[self.currentIndex]['right'] * ratio), int(self.imageData[self.currentIndex]['bottom'] * ratio))
            # scaling the bounding-box coordinates with the same ratio
            #shape = [lt, rb]
            #print(shape)
            #img1 = PIL.ImageDraw.Draw(self.im)
            #img1.rectangle(shape, outline="red")
            if self.im.mode == "1":
                self.img = PIL.ImageTk.BitmapImage(self.im, foreground="white")
            else:
                self.img = PIL.ImageTk.PhotoImage(self.im)
            imgData = self.loadedImages.setdefault(self.imageData[self.currentIndex]['image_file'], dict())
            imgData['image'] = self.img
            imgData['lt'] = lt
            imgData['rb'] = rb
            # cached for next/previous so the same image loads without redoing the calculations
        self.img = self.loadedImages[self.imageData[self.currentIndex]['image_file']]['image']
        self.imageLabel.config(image=self.img, width=self.img.width(), height=self.img.height())

    def prev(self):
        self.currentIndex = (self.currentIndex + len(self.imageData) - 1) % len(self.imageData)
        self.__loadImage__()
        # if I go to the first one and press back, it goes to the last (round robin)

    def next(self):
        self.currentIndex = (self.currentIndex + 1) % len(self.imageData)
        self.__loadImage__()
        # if I go to the last one and press next, it goes to the first (round robin)

def loadData(fname):
    with open(fname, mode='r') as f:
        reader = csv.DictReader(f)
        data = [dict(row) for row in reader]
        for row in data:
            row['top'], row['bottom'], row['left'], row['right'] = int(row['top']), int(row['bottom']), int(row['left']), int(row['right'])
    return data

if __name__ == "__main__":
    data = loadData('bounding_box.csv')
    app = App(data)
    app.mainloop()
As far as I understand, you simply want to draw a rectangle around an image when it is clicked (explanation in the code comments):
from tkinter import Tk, Canvas
from PIL import Image, ImageTk

# below code is used to create images, you can also load them from a file if you need
mode = 'RGB'
size = (150, 150)
color_lst = ['red', 'green', 'blue', 'yellow']
# best to get all the images for the slide in a single list for further easier workings
img_lst = [Image.new(mode, size, color) for color in color_lst]

# selecting function that will be called when user clicks on image
def select(id_):
    canvas.create_rectangle(canvas.bbox(id_), width=5)
    # here you will put other code to be executed after
    # user clicks on the image like going to the next frame or sth
    # additionally you could use this if you want the other rectangles to disappear
    # but kinda pointless if you will switch frames and stuff
    # for canvas_id in canvas_images:
    #     if canvas_id == id_:
    #         continue
    #     canvas.create_rectangle(canvas.bbox(canvas_id), width=5, outline='white')

root = Tk()
# here create a photoimage list from the above image list
photo_lst = [ImageTk.PhotoImage(image) for image in img_lst]
# create canvas
canvas = Canvas(root, width=400, height=400)
canvas.pack()
# set the coordinates for the four images
coordinates = [(100, 100), (300, 100), (100, 300), (300, 300)]
# create the images and append their id to a list
canvas_images = [
    canvas.create_image(pos[0], pos[1], image=photo) for pos, photo in zip(coordinates, photo_lst)
]
# bind the ids from `canvas_images` to being clicked and execute simple drawing method
for c_img in canvas_images:
    canvas.tag_bind(
        c_img, '<Button-1>', lambda e, i=c_img: select(i)
    )
root.mainloop()
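A side note on the tag_bind loop above (my addition, not part of the original answer): the i=c_img default argument is what captures each canvas id at definition time. A plain lambda e: select(c_img) would be evaluated lazily, and every callback would end up using the last value of c_img from the loop. A tiny sketch of the difference:
# Late binding: every callback sees the loop variable's final value.
late = [lambda e=None: print(c_img) for c_img in (1, 2, 3)]
# Default argument: each callback captures the value current at definition time.
bound = [lambda e=None, i=c_img: print(i) for c_img in (1, 2, 3)]

for cb in late:
    cb()    # prints 3, 3, 3
for cb in bound:
    cb()    # prints 1, 2, 3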

Resize video frame to fit tkinter window size when the window is resized

I've written a piece of code to play a video using OpenCV in tkinter. It's part of a game I've made and compiled into an application. But I've noticed that when I play the game on different computers, since the screen sizes are different, the video doesn't fit the screen exactly like I want it to. The same goes for the background images I used on different pages, but for those I wrote a piece of code to resize them to the screen size. Here it is:
def resizeimage(self, event):
    width, height = self.winfo_width(), self.winfo_height()
    image = self.bg_image.resize((width, height))
    self.image1 = ImageTk.PhotoImage(image)
    self.bg_label.config(image=self.image1)
I've bound this function to the label that displays the background image like this:
self.bg_image = Image.open("project_pics\\start_pg.png")
bg_image = ImageTk.PhotoImage(self.bg_image)
self.bg_label = Label(self,image=bg_image)
self.bg_label.image = bg_image
self.bg_label.bind('<Configure>',self.resizeimage)
self.bg_label.grid(sticky="nwse")
Here, self.bg_image is the image to be displayed as the background and self.bg_label is the label that displays it.
I know that I can implement something similar by resizing the frames in my code that plays the video, but I can't seem to figure out a quick, efficient way to do so. Here is the code for the video player:
from tkinter import *
from tkinter.ttk import Button
from PIL import Image, ImageTk
import time
import cv2 as cv2
from threading import Thread
from Scripts.music_player import m_player
from Scripts.styles import Styles

# The Video Player
class VideoPlayer:
    def __init__(self, parent):
        self.parent = parent
        self.play = False

    def player(self, vid_file, m_file, nxt_func):
        def get_frame():
            ret, frame = vid.read()
            if ret and self.play:
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)

        def update():
            ret, frame = get_frame()
            if ret and self.play:
                img = Image.fromarray(frame)
                photo = ImageTk.PhotoImage(image=img)
                photo.image = img
                self.canvas.itemconfig(self.vid_frame, image=photo)
                self.canvas.image = photo
                self.parent.after(delay, lambda: update())
            else:
                time.sleep(0.01)
                # stopping vid_music and starting game music
                m_player.music_control(m_file, True, -1, 0)
                m_player.music_control("project_media\\signal.ogg", False, -1, 0)
                nxt_func()

        def skip():
            self.play = False
            self.parent.clear()

        self.play = True
        # starting music
        m_player.music_control("project_media\\signal.ogg", True, -1, 0)
        m_player.music_control(m_file, False, -1, 0)
        vid = cv2.VideoCapture(vid_file)
        width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.canvas = Canvas(self.parent, width=width, height=height)
        self.canvas.place(relx=0.5, rely=0.5, anchor=CENTER)
        self.vid_frame = self.canvas.create_image(0, 0, anchor=NW)
        # Skip button
        if vid_file != "project_media\\glitch.mp4":
            skip_thread = Thread(target=skip)
            skip = Button(self.parent, text="Skip", command=skip_thread.start, style="skip.TButton")
            skip.place(relx=0.88, rely=0.04)
        delay = 5
        update()
My question is this: how could I efficiently resize my frames to fit the screen size without slowing down execution? Also, the function I'm using right now to resize my background images seems to slow down execution, so I see something like a glitch on the screen every time I change pages. Is there another way I can resize my background images? Sorry if the code is a bit messy; I'm a beginner and this is the first game I've made.
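One possible direction (a sketch under assumptions, not an answer from the original thread): resize each frame once per iteration with cv2.resize to the canvas's current size inside update(), so the only extra cost is a single interpolation per frame. The names below (self.canvas, self.vid_frame, get_frame, delay) mirror the player code above.
def update():
    ret, frame = get_frame()
    if ret and self.play:
        # read the canvas's current size and scale the frame to match it
        canvas_w = self.canvas.winfo_width()
        canvas_h = self.canvas.winfo_height()
        if canvas_w > 1 and canvas_h > 1:  # winfo_* returns 1 before the widget is mapped
            frame = cv2.resize(frame, (canvas_w, canvas_h), interpolation=cv2.INTER_LINEAR)
        img = Image.fromarray(frame)
        photo = ImageTk.PhotoImage(image=img)
        self.canvas.itemconfig(self.vid_frame, image=photo)
        self.canvas.image = photo          # keep a reference so Tk does not discard the image
        self.parent.after(delay, update)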

wx.grid.Grid doesn't load image

I'm trying to use the code from this tutorial, but the result is a grayed-out cell with no image in it (see screenshot). It's been days since I started looking for a way to add an image to a grid cell, and this solution is the least complicated I've found so far, but it won't work for me. Please, can someone help me with this issue so I can move on with my project? It would be greatly appreciated. Thank you.
Here is the code:
import wx
import wx.grid

class MyApp(wx.App):
    def OnInit(self):
        frame = wx.Frame(None, -1, title="wx.Grid - Bitmap example")
        grid = wx.grid.Grid(frame)
        grid.CreateGrid(1, 1)
        img = wx.Bitmap(r"E:\Dropbox2\Dropbox\Ubot\Ubot\Python\Magnify\Tkinter Magnify\Tests\python-logo.png", wx.BITMAP_TYPE_PNG)
        imageRenderer = MyImageRenderer(img)
        grid.SetCellRenderer(0, 0, imageRenderer)
        grid.SetColSize(0, img.GetWidth() + 2)
        grid.SetRowSize(0, img.GetHeight() + 2)
        frame.Show(True)
        return True

class MyImageRenderer(wx.grid.PyGridCellRenderer):
    def __init__(self, img):
        wx.grid.PyGridCellRenderer.__init__(self)
        self.img = img

    def Draw(self, grid, attr, dc, rect, row, col, isSelected):
        image = wx.MemoryDC()
        image.SelectObject(self.img)
        dc.SetBackgroundMode(wx.SOLID)
        if isSelected:
            dc.SetBrush(wx.Brush(wx.BLUE, wx.SOLID))
            dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
        else:
            dc.SetBrush(wx.Brush(wx.WHITE, wx.SOLID))
            dc.SetPen(wx.Pen(wx.WHITE, 1, wx.SOLID))
        dc.DrawRectangleRect(rect)
        width, height = self.img.GetWidth(), self.img.GetHeight()
        if width > rect.width - 2:
            width = rect.width - 2
        if height > rect.height - 2:
            height = rect.height - 2
        dc.Blit(rect.x + 1, rect.y + 1, width, height, image, 0, 0, wx.COPY, True)

app = MyApp(0)
app.MainLoop()
And the result I get:
You can use this image for tests:
I don't know if you are running this in an IDE, but if you run it on the command line you will see all of the warnings and errors, i.e.:
wxPyDeprecationWarning: Using deprecated class. Use GridCellRenderer instead.
wx.grid.PyGridCellRenderer.__init__(self)
Traceback (most recent call last):
File "20190519.py", line 30, in Draw
dc.DrawRectangleRect(rect)
AttributeError: 'PaintDC' object has no attribute 'DrawRectangleRect'
Acting on these, because the example is old and outdated, we can replace PyGridCellRenderer with GridCellRenderer and drop the dc.DrawRectangleRect(rect) line altogether. If the function doesn't exist, try not using it, then look for an alternative if that doesn't work.
Edit: that line should have been dc.DrawRectangle(rect)
We end up with this:
import wx
import wx.grid

class MyApp(wx.App):
    def OnInit(self):
        frame = wx.Frame(None, -1, title="wx.Grid - Bitmap example")
        grid = wx.grid.Grid(frame)
        grid.CreateGrid(2, 2)
        img = wx.Bitmap("wxPython.jpg", wx.BITMAP_TYPE_ANY)
        imageRenderer = MyImageRenderer(img)
        grid.SetCellRenderer(0, 0, imageRenderer)
        grid.SetColSize(0, img.GetWidth() + 2)
        grid.SetRowSize(0, img.GetHeight() + 2)
        frame.Show(True)
        return True

class MyImageRenderer(wx.grid.GridCellRenderer):
    def __init__(self, img):
        wx.grid.GridCellRenderer.__init__(self)
        self.img = img

    def Draw(self, grid, attr, dc, rect, row, col, isSelected):
        image = wx.MemoryDC()
        image.SelectObject(self.img)
        dc.SetBackgroundMode(wx.SOLID)
        if isSelected:
            dc.SetBrush(wx.Brush(wx.BLUE, wx.SOLID))
            dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
        else:
            dc.SetBrush(wx.Brush(wx.WHITE, wx.SOLID))
            dc.SetPen(wx.Pen(wx.WHITE, 1, wx.SOLID))
        dc.DrawRectangle(rect)
        width, height = self.img.GetWidth(), self.img.GetHeight()
        if width > rect.width - 2:
            width = rect.width - 2
        if height > rect.height - 2:
            height = rect.height - 2
        dc.Blit(rect.x + 1, rect.y + 1, width, height, image, 0, 0, wx.COPY, True)

app = MyApp(0)
app.MainLoop()
Which gives us this:
A full set of downloadable documentation is available here: https://extras.wxpython.org/wxPython4/extras/4.0.4/wxPython-docs-4.0.4.tar.gz
The Demos are here:
https://extras.wxpython.org/wxPython4/extras/4.0.4/wxPython-demo-4.0.4.tar.gz
If you get the error below, you probably called setlocale() directly instead of using wxLocale, creating a mismatch between the C/C++ and Windows locales.
Only change the locale by creating wxLocale objects to avoid this!
Use these two lines in the code and it will work fine:
import locale
# after the grid has been created
locale.setlocale(locale.LC_ALL, 'C')
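Alternatively, following the advice above about only changing the locale through wxLocale, a minimal sketch (my addition, not from the original answer) is to keep a wx.Locale instance alive on the app object instead of calling setlocale() yourself:
import wx
import wx.grid

class MyApp(wx.App):
    def OnInit(self):
        # keep a reference so the locale object stays alive for the app's lifetime
        self.locale = wx.Locale(wx.LANGUAGE_DEFAULT)
        frame = wx.Frame(None, title="wx.Grid - Bitmap example")
        grid = wx.grid.Grid(frame)
        grid.CreateGrid(2, 2)
        frame.Show(True)
        return True

app = MyApp(0)
app.MainLoop()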

Same plot in two different output images

I am new to both Python and OpenCV and am trying to develop a simple object tracking GUI using the OpenCV tracking API. I am using my webcam to test it. My goal is to select the target object in the current frame. For this, I plot a rectangle in the center of the window and select the target object with reference to that rectangle. Since the rectangular shape may make the tracking operation fail, I want to select the target object from the original frame. In the code I create two different frames, called rawInp and outImage. rawInp is the input video. outImage is the final outcome, and I want all the shapes to be plotted on this image. I use an external function to plot the rectangle. I also show rawInp to check it. However, I see the rectangular shape in this output too. How is that possible and how can I solve this problem? Also, how can I use only the rawInp variable for both operations? Copying the same variable does not seem like a good way to handle it. I am adding the relevant part of my code, but I can add the whole code if you want to see it. Thank you in advance for your answers.
import sys
import cv2
import numpy as np
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSlot, QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QApplication, QFileDialog, QMainWindow
from PyQt5.uic import loadUi

cap = cv2.VideoCapture(0)
if not cap.isOpened(): print("Could not open video"), sys.exit()
ok, frame = cap.read(0)
height, width, channels = frame.shape
upper_left = (3*int(width/8), 3*int(height/8))
bottom_right = (5*int(width/8), 5*int(height/8))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

class trackingApp(QMainWindow):
    def __init__(self):
        super(trackingApp, self).__init__()
        loadUi('tracking_ui.ui', self)
        self.rawInp = None
        self.outImage = None
        self.stBtclk = False
        self.trckBtclk = False
        self.startButton.clicked.connect(self.start_webcam)
        self.trackingButton.clicked.connect(self.tracking_clicked)

    #pyqtSlot()
    def start_webcam(self):
        self.stBtclk = True
        self.capture = cv2.VideoCapture(0)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 620)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(5)

    def update_frame(self):
        ret, self.rawInp = self.capture.read()
        self.rawInp = cv2.flip(self.rawInp, 1)
        self.rawInp = cv2.cvtColor(self.rawInp, cv2.COLOR_BGR2GRAY)
        self.rawInp = clahe.apply(self.rawInp)
        self.rawInp = cv2.cvtColor(self.rawInp, cv2.COLOR_GRAY2BGR)
        self.outImage = self.rawInp
        if self.trckBtclk: self.tracker_update()
        self.displayImage(self.outImage, 1)

    def plotCenter(self, outImage):
        cv2.rectangle(outImage, upper_left, bottom_right, (0, 255, 0), 2)
        # Plot the central horizontal and vertical lines
        cv2.line(outImage, (50, int(height/2)), (width-50, int(height/2)), (0, 255, 0), 1)
        cv2.line(outImage, (int(width/2), 50), (int(width/2), height-50), (0, 255, 0), 1)
        cv2.imshow('rawInp', self.rawInp)

    #pyqtSlot()
    def tracking_clicked(self):
        if self.stBtclk:
            self.trckBtclk = True
            self.tracker = cv2.TrackerKCF_create()
            bbox = (3*int(width/8), 3*int(height/8), 2*int(width/8), 2*int(height/8))
            self.tracker.init(self.rawInp, bbox)
            marker = self.rawInp[3*int(height/8):5*int(height/8), 3*int(width/8):5*int(width/8)]
            self.surf = cv2.xfeatures2d.SURF_create(500)
            kp, des = self.surf.detectAndCompute(marker, None)
            marker = cv2.drawKeypoints(marker, kp, None, (0, 0, 255), 4)
            cv2.imshow("marker", marker)
        else: pass

    def tracker_update(self):
        ok, bbox = self.tracker.update(self.outImage)
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(self.outImage, p1, p2, (0, 255, 255), 2, 1)
            kp, des = self.surf.detectAndCompute(self.outImage, None)
            # self.outImage = cv2.drawKeypoints(self.outImage, kp, None, (0, 0, 255), 4)
            cv2.putText(self.outImage, "Tracking", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
        else:
            # Tracking failure
            cv2.putText(self.outImage, "Tracking failure detected", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

    def displayImage(self, outImage, window):
        self.plotCenter(self.outImage)
        qformat = QImage.Format_Indexed8
        if len(outImage.shape) == 3:  # [0]=rows, [1]=columns, [2]=channels
            if (outImage.shape[2]) == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImg = QImage(outImage, outImage.shape[1], outImage.shape[0], outImage.strides[0], qformat)
        outImg = outImg.rgbSwapped()  # BGR >> RGB
        if window == 1:
            self.trackingScreen.setPixmap(QPixmap.fromImage(outImg))
            self.trackingScreen.setScaledContents(True)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    app.aboutToQuit.connect(app.deleteLater)
    window = trackingApp()
    window.show()
    #sys.exit(app.exec_())
    app.exec_()
And here is an example screenshot:
The first window shows the UI and the final output, while the second one, named "rawInp", shows the unprocessed input video. I do not expect to see the green rectangle in the second window.
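A likely explanation, added here as a note rather than an answer from the original thread: self.outImage = self.rawInp does not copy the image. NumPy array assignment only binds a second name to the same pixel buffer, so anything drawn on outImage also appears in rawInp. A small sketch of the difference, using numpy's copy():
import cv2
import numpy as np

raw = np.zeros((100, 100, 3), dtype=np.uint8)

view = raw                       # same underlying buffer, just another name
cv2.rectangle(view, (10, 10), (50, 50), (0, 255, 0), 2)
print(raw.any())                 # True: the rectangle also shows up in raw

raw[:] = 0                       # reset the image
out = raw.copy()                 # independent copy of the pixel data
cv2.rectangle(out, (10, 10), (50, 50), (0, 255, 0), 2)
print(raw.any())                 # False: raw is untouched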
