opencv window not closing after calling destroyAllWindows() function - python

I am implementing the CamShift algorithm in Python using OpenCV and using the position it gives to move my mouse and draw in the KolourPaint application.
When I press the button in the Tkinter UI, a frame launches, but
upon pressing the key 'q' the 'frame' window does not close. The frame just freezes and the option to force quit appears. I am using Tkinter for the UI.
global paint_open
paint_open = 0

def func():
    global frame, roiPts, inputMode, token
    camera = cv2.VideoCapture(0)
    cv2.namedWindow("frame")
    cv2.setMouseCallback("frame", selectROI)
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roiBox = None
    li = []
    while True:
        (grabbed, frame) = camera.read()
        if roiBox is not None:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
            (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
            pts = np.int0(cv2.cv.BoxPoints(r))
            li.append(pts)
            # coordinates contain the coordinates of the tracked object
            coordinates = r[0]
            # x, y contain the coordinates
            x = int(coordinates[0])
            y = int(coordinates[1])
            drawing(x, y)
            # draws a circle around the center from x, y
            cv2.circle(frame, (int(x), int(y)), 4, (0, 0, 255), 2)
            # draws a colored frame around the object
            cv2.polylines(frame, [pts], True, (255, 0, 0), 2)
        # here the imshow function starts
        cv2.imshow("frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # handle if the 'i' key is pressed, then go into ROI selection mode
        if key == ord("i") and len(roiPts) < 4:
            inputMode = True
            orig = frame.copy()
            while len(roiPts) < 4:
                cv2.imshow("frame", frame)
                cv2.waitKey(0)
            roiPts = np.array(roiPts)
            s = roiPts.sum(axis=1)
            tl = roiPts[np.argmin(s)]
            br = roiPts[np.argmax(s)]
            roi = orig[tl[1]:br[1], tl[0]:br[0]]
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
            roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
            roiBox = (tl[0], tl[1], br[0], br[1])
        # if the 'q' key is pressed, stop the loop
        elif key == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()

def drawing(x, y):
    global paint_open
    if paint_open == 0:
        launchapp('kolourpaint')
        paint_open = 1
    py.dragTo(x, y)

def main():
    root = tk.Tk()
    bkgcolor = '#D84315'
    root.configure(background=bkgcolor)
    label = tk.Label(root, text="Gesture Recognition Project", font=('arial black', 12), fg="white", width=90, height=3, bg="purple")
    label.pack()
    frame1 = tk.Frame(root, bg=bkgcolor)
    frame1.pack(padx=205, pady=25)
    bframe1 = tk.Frame(root, bg=bkgcolor)
    bframe1.pack(side=tk.BOTTOM, padx=205)
    photo3 = tk.PhotoImage(file="3.png")
    button2 = tk.Button(frame1, width=255, height=255, image=photo3, text="Slide Presenter", fg="purple", bg="white", command=func)
    button2.pack(side=tk.LEFT, padx=25)
    button2.image = photo1
    # start the event loop
    root.mainloop()

This question is a bit old but worth answering, since it comes up in searches. I experienced the same problem and eventually found an answer that worked. It seems hack-ish, but it allows OpenCV to go through its processing to close a window. OpenCV does its window housekeeping during waitKey(), so adding a few of those calls before and after destroyAllWindows() did the trick.
In your code, where it says
cv2.destroyAllWindows()
... try this instead. It worked for me.
# All these waitKey calls are a hack to get the OpenCV window to close
cv2.waitKey(1)
cv2.destroyAllWindows()
for i in range(1, 5):
    cv2.waitKey(1)
return
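If you need this in more than one place, the same hack can be wrapped in a small helper. This is just a sketch; the function name close_all_cv_windows is mine, not part of OpenCV:
import cv2

def close_all_cv_windows():
    # Pump the HighGUI event loop once before and a few times after
    # destroyAllWindows(), so OpenCV actually processes the close events.
    cv2.waitKey(1)
    cv2.destroyAllWindows()
    for _ in range(5):
        cv2.waitKey(1)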

I'm not sure, but cv2 may use Tkinter to display its windows, so root.mainloop() may be keeping the windows open. You can try ending root.mainloop() to close the program and all its windows.
import Tkinter as tk
import cv2

# create global variable (with some value)
root = None

def func():
    # inform function to use global variable instead of local one
    global root
    # ...
    cv2.destroyAllWindows()
    camera.release()
    # close root window and leave mainloop()
    root.destroy()

def main():
    # inform function to use global variable instead of local one
    global root
    root = tk.Tk()
    # ...
    root.mainloop()

main()

Since a new window is opened when the destroyAllWindows() function is called, you need to press the space key to let the command finish executing and the window close.

This works for me:
while True:
    _, frame = cap.read()
    cv2.imshow(windowName, frame)
    keyCode = cv2.waitKey(1)
    if cv2.getWindowProperty(windowName, cv2.WND_PROP_VISIBLE) < 1:
        break
cv2.destroyAllWindows()
from https://newbedev.com/closing-video-window-using-close-x-button-in-opencv-python
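If you also want to keep the 'q' shortcut from the question, the visibility check and the key check can be combined. A minimal sketch, assuming a webcam capture and a window named "frame":
import cv2

cap = cv2.VideoCapture(0)
windowName = "frame"
cv2.namedWindow(windowName)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow(windowName, frame)
    key = cv2.waitKey(1) & 0xFF
    # stop on 'q' or when the user closes the window with the mouse
    if key == ord("q") or cv2.getWindowProperty(windowName, cv2.WND_PROP_VISIBLE) < 1:
        break
cap.release()
cv2.destroyAllWindows()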

Related

Python, drawing a polygon over webcam video using mouse clicks to detect points

I'm using Python 3 and OpenCV (4.1.0) to write a script that:
displays the contents of the webcam;
records the coordinates of mouse clicks over video;
after pressing a certain button ('p' in my example), draws a polyline between the points identified by previous mouse clicks;
So far, I'm trying:
import numpy as np
import cv2

def main():
    cap = cv2.VideoCapture("files/long_video.mp4")  # Open video file
    points = []
    while (cap.isOpened()):
        ret, frame = cap.read()  # read a frame
        try:
            cv2.imshow('Frame', frame)
        except:
            print('EOF')
            break
        cv2.setMouseCallback('Frame', left_click_detect, points)

        # Abort and exit with 'Q'
        key = cv2.waitKey(25)
        if (key == ord('q')):
            break
        elif (key == ord('p')):  # HERE, IT SHOULD DRAW POLYLINE OVER VIDEO!!!
            pts_array = np.array([[x, y] for (x, y) in points], np.int0)
            frame = cv2.polylines(frame, np.int32(np.array(points)), False, (255, 0, 0), thickness=5)
            points = []
            cv2.imshow('Frame', frame)

    cap.release()  # release video file
    cv2.destroyAllWindows()  # close all openCV windows

def left_click_detect(event, x, y, flags, points):
    if (event == cv2.EVENT_LBUTTONDOWN):
        print(f"\tClick on {x}, {y}")
        points.append([x, y])
It kinda works, but after pressing 'p' it doesn't draw the polyline over the video.
Any suggestions?
There are 2 problems with your code:
cv2.polylines() accepts a list of arrays. So here:
frame = cv2.polylines(frame, np.int32(np.array(points)), False, (255, 0, 0), thickness=5)
replace np.int32(np.array(points)) with [np.int32(points)] to fix the exception (you also don't need np.array() here).
After you draw the polygon on the frame, you call cv2.imshow(), but almost immediately afterwards you show the next frame without the polygon on it, so you don't have time to see the polygon. To fix it you need to draw the polygon again on each frame, and to do that you need to save it until you press 'p' again (to show another polygon).
This will work:
import numpy as np
import cv2

def main():
    cap = cv2.VideoCapture("files/long_video.mp4")  # Open video file
    polygon = []
    points = []
    while (cap.isOpened()):
        ret, frame = cap.read()  # read a frame
        if not ret:
            print('EOF')
            break
        frame = cv2.polylines(frame, polygon, False, (255, 0, 0), thickness=5)
        cv2.imshow('Frame', frame)

        # Abort and exit with 'Q'
        key = cv2.waitKey(25)
        if (key == ord('q')):
            break
        elif (key == ord('p')):
            polygon = [np.int32(points)]
            points = []

        cv2.setMouseCallback('Frame', left_click_detect, points)

    cap.release()  # release video file
    cv2.destroyAllWindows()  # close all openCV windows

def left_click_detect(event, x, y, flags, points):
    if (event == cv2.EVENT_LBUTTONDOWN):
        print(f"\tClick on {x}, {y}")
        points.append([x, y])
        print(points)

How can I zoom my webcam in Open CV Python?

I want to zoom my webcam feed in OpenCV Python and I don't know how. Can anyone help me with my problem?
import cv2

video = cv2.VideoCapture(0)
while True:
    check, frame = video.read()
    cv2.imshow('Video', frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
video.release()
cv2.destroyAllWindows()
You can use this solution. It does the job: cropping plus zoom, with the arrow up and arrow down keys to change the zoom level.
import cv2

def show_webcam(mirror=False):
    scale = 10
    cam = cv2.VideoCapture(0)
    while True:
        ret_val, image = cam.read()
        if mirror:
            image = cv2.flip(image, 1)

        # get the webcam size
        height, width, channels = image.shape

        # prepare the crop
        centerX, centerY = int(height/2), int(width/2)
        radiusX, radiusY = int(scale*height/100), int(scale*width/100)
        minX, maxX = centerX-radiusX, centerX+radiusX
        minY, maxY = centerY-radiusY, centerY+radiusY

        cropped = image[minX:maxX, minY:maxY]
        resized_cropped = cv2.resize(cropped, (width, height))

        cv2.imshow('my webcam', resized_cropped)
        if cv2.waitKey(1) == 27:
            break  # esc to quit

        # add + or - 5 % to zoom
        if cv2.waitKey(1) == 0:
            scale += 5  # +5
        if cv2.waitKey(1) == 1:
            scale = 5  # +5

    cv2.destroyAllWindows()

def main():
    show_webcam(mirror=True)

if __name__ == '__main__':
    main()
Zooming is simply increasing the image size. Just increase the image size by converting the frame to a PIL Image and then using its resize function.
Sample code:
import tkinter
import cv2
from PIL import Image, ImageTk

root = tkinter.Tk()
root.geometry("1000x500+200+0")

w = 1000
h = 630
capture = tkinter.Canvas(root, bd=2, bg="blue", height=h, width=w)
capture.grid(column=0, row=0)

video = None
frame = None
img = None
show = None
begin = False

def start_capture(event):
    global begin, frame, img, root, show, capture, video
    video = cv2.VideoCapture(0)
    begin = True
    while begin:
        check, frame = video.read()
        img = Image.fromarray(frame)
        w, h = img.size
        img = img.resize((w*2, h*2))
        show = ImageTk.PhotoImage(img)
        capture.create_image(0, 0, anchor=tkinter.NW, image=show)
        root.update()

def stop_capture(event):
    global video, begin
    begin = False
    video.release()

start = tkinter.Button(root, text='Start')
start.grid(column=0, row=2)
start.bind('<Button-1>', start_capture)

stop = tkinter.Button(root, text='Stop')
stop.grid(column=1, row=2)
stop.bind('<Button-1>', stop_capture)

root.mainloop()
I am not sure if it is still useful to add this point (hope this helps all the OpenCV noobs out there at least).
I struggled a lot with this: cropped = image[minX:maxX, minY:maxY]. The crop was somehow outside the area of interest.
After researching a lot, I found out that the problem was with the syntax.
It should actually be: cropped = image[minY:maxY, minX:maxX]
because OpenCV images are NumPy arrays indexed as [rows, columns], i.e. y first and then x.
Anyway, thanks to Amara BOUDIB & JDS for the sample codes!
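To make the row/column ordering concrete, here is a tiny self-contained sketch (the sizes are made up for illustration):
import numpy as np

# NumPy images are indexed as image[row, col], i.e. [y, x],
# so a rectangular crop takes the y-range first and the x-range second.
image = np.zeros((480, 640, 3), dtype=np.uint8)  # height=480 (y), width=640 (x)
minX, maxX = 100, 300
minY, maxY = 50, 200
cropped = image[minY:maxY, minX:maxX]  # rows (y) first, then columns (x)
print(cropped.shape)  # (150, 200, 3): 150 rows tall, 200 columns wide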

Emulate keyboard pressing from one infinite loop to another in a GUI

I’m trying to do a GUI where the user is gonna control the buttons by eyeblinking. Basically, a short blink should emulate pressing the keyboard key Tab (to move from one button to another) and a long blink should emulate pressing the key Space (to enter in the selected button).
The idea is that both parts, the window and the eyeblink detection system, run at the same time. And this is where the problems start: since they are both while loops, I cannot run them at the same time.
In the code I attach, I simplify this by opening the main window first and then clicking the Start button to run the eyeblink system. With pyautogui.press() I intend to emulate the key presses in the main window. However, while the eyeblink detection system is running, the main window is no longer accessible (you cannot press anything).
I have tried to invoke the blink function once per frame instead of in an endless loop, but it is too slow and does not detect the blinks properly. I have also tried multiprocessing, and Python quits unexpectedly with no error shown, so I am not sure what is going on (the code I used to try this is commented out at the end). I also tried threading in a simple way; there is no error, but nothing appears either (again, the code I used to try this is commented out at the end).
Here I attach the link to the files (.mp3, .xml, .py):
https://drive.google.com/drive/folders/1U2uwHXzl2MtSTlAKw1L68L3xcRmelP2d?usp=sharing
I’ve just started using Python so my knowledge is not high, I’m running out of time and I’m stuck at this point… So any help would be welcome!! Thanks in advance ;)
MacOs
Python 2.7
OpenCV 3.4
Tkinter (I just chose it because it is easy to handle yet I’m open to change if it’s neccessary)
# Window + Blink
from Tkinter import *
import numpy as np
import cv2
# To emulate a keyboard pressing
import pyautogui
import time
# To play the sounds
import subprocess
# from Blink import funcion_blink
# from multiprocessing import Process
# import threading

def Onbutton_clicked():
    # while True:
    # Repeating 2 times the sound
    for x in range(0, 2):
        subprocess.call(['afplay', 'alarm2.mp3'])

def Onbutton2_clicked():
    # Repeating 1 times the sound
    for x in range(0, 1):
        subprocess.call(['afplay', 'sound.mp3'])

def execute_func1():
    print('enter\n')
    pyautogui.press('space')  # press the Space key
    for x in range(1, 2):
        subprocess.call(['afplay', 'unconvinced.mp3'])

def execute_func2():
    print('tab\n')
    pyautogui.press('tab')  # press the Tab key
    for x in range(1, 2):
        subprocess.call(['afplay', 'case-closed.mp3'])

def execute_func3():
    print('space\n')
    pyautogui.press('space')  # press the Space key
    for x in range(1, 2):
        subprocess.call(['afplay', 'cheerful.mp3'])

# ----- Eyeblink detection system -----
def funcion_blink():
    # XML classifiers should be in the folder with this file
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    video_capture = cv2.VideoCapture(0)
    det = 0
    n = 0
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(100, 100),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(
                roi_gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            face_img = gray[x:x + w, y:y + h]
            face_res = cv2.resize(face_img, (100, 100), interpolation=cv2.INTER_CUBIC)
            eye_reg = face_res[15:85, 20:50]
            cv2.rectangle(frame, (x+15*w/100, y + 2*h / 10), (x + w*85/100, y + (5 * h / 10)), (0, 0, 255), 2)
            if (det < 10):
                tmpl_eyes = eye_reg
                det = det + 1
                print('template acquired\n')
            elif (det == 10):
                # template matching
                wt, ht = tmpl_eyes.shape[::-1]
                # res_templ = cv2.matchTemplate(eye_reg, tmpl_eyes, cv2.TM_CCORR_NORMED)
                res_templ = cv2.matchTemplate(eye_reg, tmpl_eyes, cv2.TM_CCOEFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_templ)
                # print(max_val, n)
                # value 0.92 should be adapted to the conditions and camera position
                if (max_val > 0.90):
                    n = n + 1
                else:
                    if (n >= 12):
                        execute_func1()
                        # here should go the code that triggers some action when the person blinks??
                    elif (n >= 6):
                        execute_func2()
                    elif (n >= 3):
                        execute_func3()
                    n = 0
                print(max_val, n)
        # Display the resulting frame
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    cv2.destroyAllWindows()
    video_capture.release()

# ---- Main window ----
def main_window():
    root = Tk()
    root.geometry('700x700')
    # Create the buttons of the main window
    button = Button(root, text='alarm', command=Onbutton_clicked)
    button.bind('<Return>', Onbutton_clicked)
    button.pack()
    button2 = Button(root, text='extra', command=Onbutton2_clicked)
    button2.bind('<Return>', Onbutton2_clicked)
    button2.pack()
    # By pressing this button we start running the eyeblink detection system
    button3 = Button(root, text='Start', command=funcion_blink)
    button3.bind('<Button-1>', funcion_blink)
    button3.pack()
    # To maintain the window until you close it
    root.mainloop()

# Execute the main window
main_window()
# ---- Trials ----
# while True:
# main_window()
# funcion_blink()
# It just plays one function and when it finishes it plays the next one
# Multiprocessing
# if __name__ == '__main__':
# Process(target=main_window).start()
# Process(target=funcion_blink).start()
# PYTHON QUITS UNEXPECTADLY
# Threading
# p1 = threading.Thread(target=main_window, args=())
# p2 = threading.Thread(target=funcion_blink, args=())
# p1.start()
# p2.start()
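For reference, the threading variant sketched in the comments above would look roughly like this if the detection loop were started from the Start button instead. This is only a sketch of the attempt described (the names detection_loop and start_detection are placeholders), not a verified fix; on macOS, cv2.imshow()/cv2.waitKey() calls generally still need to run on the main thread, so the display part of the loop may have to be dropped or reworked:
import threading
import Tkinter as tk  # Python 2, as in the question

def detection_loop():
    # placeholder for funcion_blink(): capture frames, match the eye
    # template and call pyautogui.press('tab') / pyautogui.press('space')
    pass

def main_window():
    root = tk.Tk()
    root.geometry('700x700')

    def start_detection():
        # run the blink loop in a background thread so that root.mainloop()
        # keeps the buttons responsive in the main thread
        worker = threading.Thread(target=detection_loop)
        worker.daemon = True  # do not block program exit
        worker.start()

    tk.Button(root, text='Start', command=start_detection).pack()
    root.mainloop()

main_window()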

Closing an image window displays another window. Why?

I'm stuck. My code sucks. My sliders don't work either, but the infinite image windows are driving me nuts. When I close the namedWindow, it opens a new display window with the image (infinitely). Help?
import numpy as np
import cv2
from pylepton import Lepton

# setup the Lepton image buffer
def capture(device="/dev/spidev0.0"):
    with Lepton() as l:
        a, _ = l.capture()  # grab the buffer
    cv2.normalize(a, a, 0, 65535, cv2.NORM_MINMAX)  # extend contrast
    np.right_shift(a, 8, a)  # fit data into 8 bits
    return np.uint8(a)

# Create a window and give it features
def nothing(x):
    pass

cv2.namedWindow('flir', cv2.WINDOW_NORMAL)
cv2.moveWindow('flir', 1, 1)
cv2.createTrackbar('thresh', 'flir', 50, 100, nothing)
cv2.createTrackbar('erode', 'flir', 5, 100, nothing)
cv2.createTrackbar('dilate', 'flir', 7, 100, nothing)

# process the buffer into an image on a continuous loop
while True:
    # update the image processing variables
    thresh = cv2.getTrackbarPos('thresh', 'flir')
    erodeSize = cv2.getTrackbarPos('erode', 'flir')
    dilateSize = cv2.getTrackbarPos('dilate', 'flir')
    image = capture()

    # apply some image processing
    blurredBrightness = cv2.bilateralFilter(image, 9, 150, 150)
    thresh = 50
    edges = cv2.Canny(blurredBrightness, thresh, thresh*2, L2gradient=True)
    _, mask = cv2.threshold(blurredBrightness, 200, 1, cv2.THRESH_BINARY)
    erodeSize = 5
    dilateSize = 14
    eroded = cv2.erode(mask, np.ones((erodeSize, erodeSize)))
    mask = cv2.dilate(eroded, np.ones((dilateSize, dilateSize)))
    # the resize target was cut off in the original post; (640, 480) assumed
    adjusted_image = cv2.resize(cv2.cvtColor(mask*edges, cv2.COLOR_GRAY2RGB) | image, (640, 480))
    final_image = cv2.applyColorMap(adjusted_image, cv2.COLORMAP_HOT)

    # display the image
    cv2.imshow('flir', final_image)
    if cv2.waitKey(1) == ord('q'):
        break

cv2.waitKey()
cv2.destroyWindow('flir')
Firstly, Calm down.
Secondly, look at your code closely. Closing the window wouldn't do you any good, because of the lines:
cv2.imshow('flir', final_image)
and
cv2.destroyWindow('flir')
What these 2 are doing in tandem is that you're displaying a frame in a new window, and then destroying it, then recreating that window in imshow, then displaying the next frame and destroying it...and so on and so forth.
That should explain your flickering windows.
To stop execution of your program, you've added this code:
if cv2.waitKey(1) == ord('q'):
break
What this implies is that when you press 'q' on your keyboard while your image window is in focus, your while loop will break and your program will terminate.
So I would advise you to remove cv2.destroyWindow and use the 'q' key to quit your application rather than closing it with your mouse.
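For illustration, a minimal self-contained loop with that exit logic (a webcam stands in for the Lepton capture here; the WND_PROP_VISIBLE check is an optional extra so the window's close button also works, tying back to the question at the top of the page):
import cv2

cap = cv2.VideoCapture(0)
cv2.namedWindow('flir', cv2.WINDOW_NORMAL)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('flir', frame)
    # quit on 'q', or when the user closes the window with the mouse
    if cv2.waitKey(1) == ord('q') or cv2.getWindowProperty('flir', cv2.WND_PROP_VISIBLE) < 1:
        break
cap.release()
cv2.destroyAllWindows()  # a single cleanup call after the loop is enough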

How to give Start, stop, capture and close buttons in Opencv Cam window in Python

How can I add start, stop, capture, and close buttons to the video capture window, to start and stop the stream, take a snapshot, and close the window?
I am using the code below to open the camera for video streaming:
import cv2.cv as cv

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    if cv.WaitKey(10) == 27:
        break
Buttons aren't possible but you can use mouse clicks and key strokes to control your video. For example, use left click to toggle play/pause and implement record via key stroke:
import cv2

run = False
frame = 0
path = # some video path

def foo(event, x, y, flags, param):
    global run
    global frame
    # check which mouse button was pressed
    # e.g. play video on left mouse click
    if event == cv2.EVENT_LBUTTONDOWN:
        run = not run
        while run:
            frame += 1
            frame = cap.read()[1]
            cv2.imshow(window_name, frame)
            key = cv2.waitKey(5) & 0xFF
            if key == ord("v"):
                pass
                # do some stuff on key press
    elif event == cv2.EVENT_RBUTTONDOWN:
        pass
        # do some other stuff on right click

window_name = 'videoPlayer'
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, foo)
cap = cv2.VideoCapture(path)
I had this problem before with OpenCV. As far as I am aware there is no functionality for buttons in OpenCV.
However, I used Tkinter and created a canvas along with some buttons (in your case these will be start, stop, capture, close). Each frame that was captured using OpenCV I drew onto the Tkinter canvas.
I was using this for a frame by frame program, so I am not sure how well this method will perform in real time.
A very quick example code:
from Tkinter import *
import cv2.cv as cv

root = Tk()

w = Canvas(root, width=500, height=300, bd=10, bg='white')
w.grid(row=0, column=0, columnspan=2)

b = Button(width=10, height=2, text='Button1')
b.grid(row=1, column=0)

b2 = Button(width=10, height=2, text='Button2')
b2.grid(row=1, column=1)

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

while True:
    img = cv.QueryFrame(capture)
    w.create_image(0, 0, image=img)
    if cv.WaitKey(10) == 27:
        break

root.mainloop()
This may or may not work right off the bat as I am not in a position to test this right now. One potential change I can see would be the image format OpenCV uses. You may need to use one of the conversion functions to change the format.
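The conversion hinted at above usually means turning OpenCV's BGR NumPy array into a Tk-compatible PhotoImage. A sketch with the modern cv2 API and Pillow (assuming those packages are installed; with the legacy cv2.cv API the details differ):
import tkinter as tk
import cv2
from PIL import Image, ImageTk

root = tk.Tk()
canvas = tk.Canvas(root, width=640, height=480, bg='white')
canvas.grid(row=0, column=0)
cap = cv2.VideoCapture(0)

def update_frame():
    ret, frame = cap.read()
    if ret:
        # OpenCV delivers BGR; Tk/PIL expect RGB, so convert before displaying
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        photo = ImageTk.PhotoImage(Image.fromarray(rgb))
        canvas.create_image(0, 0, anchor=tk.NW, image=photo)
        canvas.photo = photo  # keep a reference so Tk does not garbage-collect it
    root.after(10, update_frame)  # schedule the next frame instead of a while loop

update_frame()
root.mainloop()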
