Mouse events in OpenCV - Python

I am trying to write a function which will open an image and draw a circle where the left mouse button is clicked. The circle's size can then be adjusted using the mouse wheel/keyboard. Also, every click should print a label in sequence, e.g. the 1st circle puts the label '1', the 2nd circle drawn puts the label '2', and so on. I have managed to get the circle and the label on the image, but I am unsure how to increase the radius or change the label with different clicks.
import cv2
import numpy as np

# Create a black image and a window
windowName = 'Drawing'
img = cv2.imread('000025.png', cv2.IMREAD_COLOR)
cv2.namedWindow(windowName)

# mouse callback function
def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img, (x, y), 30, (255, 0,), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, 'label', (x + 30, y + 30), font, 1, (200, 255, 155), 1, cv2.LINE_AA)

# bind the callback function to window
cv2.setMouseCallback(windowName, draw_circle)

def main():
    while True:
        cv2.imshow(windowName, img)
        if cv2.waitKey(20) == 27:
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
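For the mouse-wheel part, here is a minimal sketch of one possible approach (the names circles and redraw are illustrative, not from the code above; note that cv2.EVENT_MOUSEWHEEL is only delivered by some HighGUI backends, e.g. Windows or Qt builds):

import cv2
import numpy as np

base = cv2.imread('000025.png', cv2.IMREAD_COLOR)   # pristine copy to redraw from
img = base.copy()
circles = []                                        # list of (center, radius, label)

def redraw():
    global img
    img = base.copy()
    font = cv2.FONT_HERSHEY_SIMPLEX
    for (cx, cy), r, label in circles:
        cv2.circle(img, (cx, cy), r, (255, 0, 0), 1)
        cv2.putText(img, label, (cx + r, cy + r), font, 1, (200, 255, 155), 1, cv2.LINE_AA)

def on_mouse(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        circles.append(((x, y), 30, str(len(circles) + 1)))  # labels '1', '2', ... in click order
        redraw()
    elif event == cv2.EVENT_MOUSEWHEEL and circles:
        step = 5 if flags > 0 else -5                        # sign of flags gives the wheel direction
        (cx, cy), r, label = circles[-1]
        circles[-1] = ((cx, cy), max(1, r + step), label)    # resize the most recently drawn circle
        redraw()

windowName = 'Drawing'
cv2.namedWindow(windowName)
cv2.setMouseCallback(windowName, on_mouse)
while True:
    cv2.imshow(windowName, img)
    if cv2.waitKey(20) == 27:
        break
cv2.destroyAllWindows()

The same redraw-from-a-clean-copy pattern works if you prefer to adjust the radius from the keyboard inside the imshow loop instead.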

Using the following code you can visualize the circle while moving the mouse as well. I have supplemented the code provided by Salman by adding another condition involving the MOUSEMOVE event.
import cv2
import numpy as np
import math

drawing = False

def draw_circle(event, x, y, flags, param):
    global x1, y1, drawing, radius, num, img, img2
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        x1, y1 = x, y
        radius = int(math.hypot(x - x1, y - y1))
        cv2.circle(img, (x1, y1), radius, (255, 0, 0), 1)
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            # redraw on a clean copy so only the in-progress circle is visible
            img = img2.copy()
            radius = int(math.hypot(x - x1, y - y1))
            cv2.circle(img, (x1, y1), radius, (255, 0, 0), 1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        num += 1
        radius = int(math.hypot(x - x1, y - y1))
        cv2.circle(img, (x1, y1), radius, (255, 0, 255), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, '_'.join(['label', str(num)]), (x + 20, y + 20), font, 1, (200, 255, 155), 1, cv2.LINE_AA)
        img2 = img.copy()

if __name__ == "__main__":
    num = 0
    windowName = 'Drawing'
    img = np.zeros((500, 500, 3), np.uint8)
    img2 = img.copy()
    cv2.namedWindow(windowName)
    cv2.setMouseCallback(windowName, draw_circle)
    while True:
        cv2.imshow(windowName, img)
        if cv2.waitKey(20) == 27:
            break
    cv2.destroyAllWindows()
Sample output:

I think this may work for you:
import cv2
import numpy as np
import math

# mouse callback function
def draw_circle(event, x, y, flags, param):
    global x1, y1, radius, num
    if event == cv2.EVENT_LBUTTONDOWN:
        x1, y1 = x, y
    if event == cv2.EVENT_LBUTTONUP:
        num += 1
        radius = int(math.hypot(x - x1, y - y1))
        cv2.circle(img, (x1, y1), radius, (255, 0,), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, f'label: {num}', (x + 30, y + 30), font, 1, (200, 255, 155), 1, cv2.LINE_AA)

if __name__ == "__main__":
    num = 0
    # Create a black image and a window
    windowName = 'Drawing'
    img = cv2.imread('img.jpg', cv2.IMREAD_COLOR)
    cv2.namedWindow(windowName)
    # bind the callback function to window
    cv2.setMouseCallback(windowName, draw_circle)
    while True:
        cv2.imshow(windowName, img)
        if cv2.waitKey(20) == 27:
            break
    cv2.destroyAllWindows()
Result:
This is a simple example, and you can do a lot more with mouse events.

First, you have to keep all coordinates (or other attributes) of your drawables in a global dynamic object.
You have to give guidance to the app about whether you are drawing a circle, a label, or another drawable. This can be done by creating menu items in the OpenCV window or by key presses (I have done both). You also have to keep track of context: is the next click the x, y coordinates of the center of the circle, a point on the circle (for the radius calculation, unless you decide to use the mouse wheel/keyboard for it), the top-left corner of a rectangle, etc.
You have to store the created drawables in the said global object.
If you want to edit or delete an existing drawable, you have to write an iterator function that finds the closest drawable (by its mid- or other point) for proper selection; a sketch of this bookkeeping follows below.
All of the above is doable in OpenCV alone.
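A minimal sketch of that bookkeeping (the names drawables, mode and nearest_drawable are illustrative, not taken from any answer here):

import math

drawables = []   # each entry: {'kind': 'circle', 'center': (x, y), 'radius': r, 'label': '1'}
mode = 'circle'  # switched by key presses in the main loop, e.g. 'c' for circle, 'r' for rectangle

def add_circle(center, radius):
    drawables.append({'kind': 'circle', 'center': center, 'radius': radius,
                      'label': str(len(drawables) + 1)})

def nearest_drawable(x, y):
    # Return the drawable whose center is closest to (x, y), for selection/editing.
    if not drawables:
        return None
    return min(drawables, key=lambda d: math.hypot(d['center'][0] - x, d['center'][1] - y))

The mouse callback then only appends to or edits this list, and the main loop redraws everything from it each frame.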

A Python class implementation of getting mouse click points in an image using an OpenCV mouse callback. You can create an object of this class and use the getpt(n, img) method to select n points in an image with mouse clicks. Edit and use it for your purpose.
import cv2
import numpy as np

# events = [i for i in dir(cv2) if 'EVENT' in i]
# print(events)

class MousePts:
    def __init__(self, windowname, img):
        self.windowname = windowname
        self.img1 = img.copy()
        self.img = self.img1.copy()
        cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(windowname, img)
        self.curr_pt = []
        self.point = []

    def select_point(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.point.append([x, y])
            # print(self.point)
            cv2.circle(self.img, (x, y), 5, (0, 255, 0), -1)
        elif event == cv2.EVENT_MOUSEMOVE:
            self.curr_pt = [x, y]
            # print(self.point)

    def getpt(self, count=1, img=None):
        if img is not None:
            self.img = img
        else:
            self.img = self.img1.copy()
        cv2.namedWindow(self.windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(self.windowname, self.img)
        cv2.setMouseCallback(self.windowname, self.select_point)
        self.point = []
        while True:
            cv2.imshow(self.windowname, self.img)
            k = cv2.waitKey(20) & 0xFF
            if k == 27 or len(self.point) >= count:
                break
        # print(self.point)
        cv2.setMouseCallback(self.windowname, lambda *args: None)
        # cv2.destroyAllWindows()
        return self.point, self.img

if __name__ == '__main__':
    img = np.zeros((512, 512, 3), np.uint8)
    windowname = 'image'
    coordinateStore = MousePts(windowname, img)
    pts, img = coordinateStore.getpt(3)
    print(pts)
    pts, img = coordinateStore.getpt(3, img)
    print(pts)
    cv2.imshow(windowname, img)
    cv2.waitKey(0)

Related

Multiple overlapping rectangles being drawn over an image when using cv2 to draw with the mouse

I was trying to draw rectangles with the mouse over an image, using the OpenCV package in Python. Whenever I drew a rectangle, I got multiple rectangles overlapping one another instead of a single rectangle, like in the image below.
Here is my code. Please tell me where I went wrong and what needs to be corrected so that I get only one rectangle.
import cv2
import numpy as np

drawing = False
ix, iy = -1, -1
img = cv2.imread('drawing_over_image/dog.jpg')

def draw(event, x, y, flags, params):
    global ix, iy, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        ix, iy = x, y
        drawing = True
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            if ix < x and iy < y:
                cv2.rectangle(img=img, pt1=(ix, iy), pt2=(x, y), color=[255, 0, 0], thickness=1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        cv2.rectangle(img=img, pt1=(ix, iy), pt2=(x, y), color=[255, 0, 0], thickness=1)

if __name__ == "__main__":
    while True:
        cv2.imshow(winname='image', mat=img)
        cv2.setMouseCallback('image', draw)
        if cv2.waitKey(1) == 27:
            cv2.destroyAllWindows()
            break
It's drawing multiple rectangles because you draw a rectangle on every mouse move while the button is held down. Instead, you should draw onto the image only when the gesture is done, namely when the user releases the left button. I fixed your code and added a basic reference image so you can see your rectangle while you are drawing. Hope it helps!
import cv2
import numpy as np

drawing = False
ix, iy = -1, -1
img = cv2.imread('drawing_over_image/dog.jpg')
refimg = img.copy()

def draw(event, x, y, flags, params):
    global ix, iy, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        ix, iy = x, y
        drawing = True
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            if ix < x and iy < y:
                cv2.rectangle(img=refimg, pt1=(ix, iy), pt2=(x, y), color=[255, 0, 0], thickness=1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if ix < x and iy < y:
            cv2.rectangle(img=img, pt1=(ix, iy), pt2=(x, y), color=[255, 0, 0], thickness=1)

if __name__ == "__main__":
    while True:
        cv2.imshow(winname='image', mat=refimg)
        cv2.setMouseCallback('image', draw)
        refimg = img.copy()
        if cv2.waitKey(1) & 0xFF == 27:
            cv2.destroyAllWindows()
            break
Output:
You have two options:
use cv.selectROI()
do it all by hand
with selectROI()
rect = cv2.selectROI("image", img, False)
cv2.destroyWindow("image")
print(rect)  # (x,y,w,h) or (0,0,0,0)
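As a small usage note (not part of the original answer), the returned tuple can be unpacked to draw or crop the selection:

x, y, w, h = rect
if w and h:  # (0,0,0,0) means the selection was cancelled
    cv2.rectangle(img, (x, y), (x + w, y + h), color=[255, 0, 0], thickness=1)
    roi = img[y:y + h, x:x + w]  # the selected region, ready to save or process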
by hand
keep the original image
while dragging, from the event handler, draw on a copy, imshow() that
handle mousedown, mousemove, mouseup
call separate completion and cancelation functions to make it neat
import cv2

# "img" must hold the image to annotate (see "keep the original image" above)

sel1 = None  # first point
sel2 = None  # second point

def on_mouse(event, x, y, flags, params):
    global sel1, sel2
    if event == cv2.EVENT_LBUTTONDOWN:
        sel1 = (x, y)
        sel2 = None
    elif event == cv2.EVENT_RBUTTONDOWN:
        cancel_selection()
    elif event == cv2.EVENT_MOUSEMOVE:
        if sel1 is not None:
            canvas = img.copy()
            cv2.rectangle(canvas, sel1, (x, y), color=[255, 0, 0], thickness=2)
            cv2.imshow("image", canvas)
    elif event == cv2.EVENT_LBUTTONUP:
        if sel1 is not None:
            sel2 = (x, y)
            selection_done(sel1, sel2)
            sel1 = sel2 = None

def cancel_selection():
    global sel1
    sel1 = None
    # restore display
    cv2.imshow("image", img)

def selection_done(pt1, pt2):
    # draw on the source image?
    cv2.rectangle(img, pt1, pt2, color=[255, 255, 0], thickness=2)
    cv2.imshow("image", img)

if __name__ == "__main__":
    cv2.imshow('image', img)
    cv2.setMouseCallback('image', on_mouse)
    while True:
        key = cv2.waitKey()
        if key == 27:  # ESC cancels an ongoing selection, or exits the window
            if sel1 is None:
                break
            else:
                cancel_selection()
    cv2.destroyAllWindows()

When I try to declare a global variable it throws the error "Statement expected"

Good evening! I need a global variable set in one function to be used in another function. However, when I try to declare this variable as global, it throws the error "Statement expected, found Py:EQ" on the line global id, confidence = recognizer.predict(faceimage), specifically over the = sign (line 53 of the code below). How do I fix this error?
# install opencv "pip install opencv-python"
import cv2

# distance from camera to object(face) measured
# centimeter
Known_distance = 76.2

# width of face in the real world or Object Plane
# centimeter
Known_width = 14.3

# Colors
GREEN = (0, 255, 0)
RED = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# defining the fonts
fonts = cv2.FONT_HERSHEY_COMPLEX

# face detector object
face_detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# focal length finder function
def Focal_Length_Finder(measured_distance, real_width, width_in_rf_image):
    # finding the focal length
    focal_length = (width_in_rf_image * measured_distance) / real_width
    return focal_length

# distance estimation function
def Distance_finder(Focal_Length, real_face_width, face_width_in_frame):
    distance = (real_face_width * Focal_Length) / face_width_in_frame
    # return the distance
    return distance

def microFacialExpressions(recognizer, width, height):
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    camera = cv2.VideoCapture(0)
    recognizer = cv2.face.EigenFaceRecognizer_create()
    recognizer.read("classifierEigen.yml")
    width, height = 220, 220
    while(True):
        connected, image = camera.read()
        # Grayscale conversion
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        facesDetected = detectorFace.detectMultiScale(GrayImage, scaleFactor=1.5, minSize=(100, 100))
        for (x, y, l, a) in facesDetected:
            faceimage = cv2.resize(greyimage[y:y + a, x:x + l], (width, height))
            cv2.rectangle(image, (x, y), (x + l, y + a), (0, 0, 255), 2)
            global id, confidence = recognizer.predict(faceimage)
            # If ID is equal to 1, issue the message "Safe to exit"; if not, issue the message "Hostile area"
            if id == 1:
                warning = "Safe to exit"
            else:
                warning = "Hostile area"
            cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0, 0, 255))
    return warning

def face_data(image):
    face_width = 0  # making face width to zero
    # converting color image to gray scale image
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detecting face in the image
    faces = face_detector.detectMultiScale(gray_image, 1.3, 5)
    # looping through the faces detect in the image
    # getting coordinates x, y, width and height
    for (x, y, h, w) in faces:
        # draw the rectangle on the face
        cv2.rectangle(image, (x, y), (x + w, y + h), GREEN, 2)
        # getting face width in the pixels
        face_width = w
    # return the face width in pixel
    return face_width

# reading reference_image from directory
ref_image = cv2.imread("Ref_image.jpg")

# find the face width(pixels) in the reference_image
ref_image_face_width = face_data(ref_image)

# get the focal by calling "Focal_Length_Finder"
# face width in reference(pixels),
# Known_distance(centimeters),
# known_width(centimeters)
Focal_length_found = Focal_Length_Finder(
    Known_distance, Known_width, ref_image_face_width)

print(Focal_length_found)

# show the reference image
cv2.imshow("ref_image", ref_image)

# initialize the camera object so that we
# can get frame from it
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# looping through frame, incoming from
# camera/video
while True:
    # reading the frame from camera
    _, frame = cap.read()
    # calling face_data function to find
    # the width of face(pixels) in the frame
    face_width_in_frame = face_data(frame)
    # check if the face is zero then not
    # find the distance
    if face_width_in_frame != 0:
        # finding the distance by calling function
        # Distance finder function need
        # these arguments the Focal_Length,
        # known_width(centimeters),
        # and Known_distance(centimeters)
        Distance = Distance_finder(
            Focal_length_found, Known_width, face_width_in_frame)
        if Distance <= 50 and id:
            print("Level S Alert!")
        # draw line as background of text
        cv2.line(frame, (30, 30), (230, 30), RED, 32)
        cv2.line(frame, (30, 30), (230, 30), BLACK, 28)
        # Drawing Text on the screen
        cv2.putText(
            frame, f"Distance: {round(Distance, 2)} CM", (30, 35),
            fonts, 0.6, GREEN, 2)
    # show the frame on the screen
    cv2.imshow("frame", frame)
    # quit the program if you press 'q' on keyboard
    if cv2.waitKey(1) == ord("q"):
        break

# closing the camera
cap.release()
# closing the windows that are opened
cv2.destroyAllWindows()
The global statement does not support assigning to a name, only declaring the name to be a global variable rather than a local variable. While global statements are legal pretty much anywhere in a function, it is strongly recommended to put such declarations at the top.
def microFacialExpressions(recognizer, width, height):
    global id, confidence
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    camera = cv2.VideoCapture(0)
    recognizer = cv2.face.EigenFaceRecognizer_create()
    recognizer.read("classifierEigen.yml")
    width, height = 220, 220
    while True:
        connected, image = camera.read()
        # Grayscale conversion
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        facesDetected = detectorFace.detectMultiScale(grayimage, scaleFactor=1.5, minSize=(100, 100))
        for (x, y, l, a) in facesDetected:
            faceimage = cv2.resize(grayimage[y:y + a, x:x + l], (width, height))
            cv2.rectangle(image, (x, y), (x + l, y + a), (0, 0, 255), 2)
            id, confidence = recognizer.predict(faceimage)
            # If ID is equal to 1, issue the message "Safe to exit"; if not, issue the message "Hostile area"
            if id == 1:
                warning = "Safe to exit"
            else:
                warning = "Hostile area"
            cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0, 0, 255))
    return warning
Given that both variables are repeatedly changed in the loop, it's not clear why the last value of either is special enough to be needed in the global scope. I suspect neither variable needs to be declared global at all.
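For illustration only (classify_face is a hypothetical helper, not part of the question's code), returning the values avoids globals entirely:

def classify_face(recognizer, faceimage):
    # EigenFaceRecognizer.predict returns a (label, confidence) pair
    label, confidence = recognizer.predict(faceimage)
    warning = "Safe to exit" if label == 1 else "Hostile area"
    return warning, label, confidence

# inside the detection loop:
# warning, label, confidence = classify_face(recognizer, faceimage)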

How to select a circle automatically on an image with the mouse and crop it using Python with OpenCV

I want to select a circular part of an image using the mouse and then crop and save it. How can I do that using OpenCV?
Here is code for cropping an image using the mouse (it crops the rectangular selection).
import cv2

cropping = False
x_start, y_start, x_end, y_end = 0, 0, 0, 0

image = cv2.imread('normal_face1624.jpg')
oriImage = image.copy()

def mouse_crop(event, x, y, flags, param):
    global x_start, y_start, x_end, y_end, cropping
    if event == cv2.EVENT_LBUTTONDOWN:
        x_start, y_start, x_end, y_end = x, y, x, y
        cropping = True
    # Mouse is Moving
    elif event == cv2.EVENT_MOUSEMOVE:
        if cropping:
            x_end, y_end = x, y
    # if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates
        x_end, y_end = x, y
        cropping = False  # cropping is finished
        refPoint = [(x_start, y_start), (x_end, y_end)]
        if len(refPoint) == 2:  # when two points were found
            roi = oriImage[refPoint[0][1]:refPoint[1][1], refPoint[0][0]:refPoint[1][0]]
            cv2.imwrite('crop_image.png', roi)
            cv2.imshow("Cropped", roi)

cv2.namedWindow("image")
cv2.setMouseCallback("image", mouse_crop)

while True:
    i = image.copy()
    if not cropping:
        cv2.imshow("image", image)
    elif cropping:
        cv2.rectangle(i, (x_start, y_start), (x_end, y_end), (255, 0, 0), 2)
        cv2.imshow("image", i)
    if cv2.waitKey(1) == ord('q'):
        break

# close all open windows
cv2.destroyAllWindows()
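The code above crops a rectangle; for the circular selection asked about, one common approach (a sketch, assuming the circle is given by a center and radius, e.g. derived from the drag start and end points) is to mask out everything outside the circle before cropping its bounding box:

import cv2
import numpy as np

def crop_circle(img, center, radius):
    # Keep only the pixels inside the circle, then crop its bounding square.
    mask = np.zeros(img.shape[:2], dtype=np.uint8)
    cv2.circle(mask, center, radius, 255, -1)        # filled circle as the mask
    masked = cv2.bitwise_and(img, img, mask=mask)    # black out everything outside the circle
    x, y = center
    x0, y0 = max(x - radius, 0), max(y - radius, 0)
    return masked[y0:y + radius, x0:x + radius]

# usage (hypothetical values):
# roi = crop_circle(oriImage, (150, 120), 60)
# cv2.imwrite('crop_circle.png', roi)

Pixels outside the circle come out black; if you need transparency instead, write a 4-channel PNG with the mask as the alpha channel.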

Augmented Reality line that moves with an image in OpenCV

I want to do the following things in OpenCV. The problem statement I have is with a bottle, which needs a line on the image, and the line needs to rotate as the bottle moves.
The first image needs to have red lines as the borders and initiate a green line.
The second image needs to have the green line in the middle when the bottle gets rotated; that is, the green line has to follow the rotation of the bottle.
Finally, as per the third image, the application needs to close itself or save the picture when the green line gets aligned with the red line.
I tried doing this in OpenCV using template matching: I kept a template image and then tracked it using the template matching algorithm, but it does not seem to work properly in this case.
import cv2
from time import sleep
import numpy as np

vid = cv2.VideoCapture(0)
sleep(2)

line_show = False
save_reference = False
template_compare_method = cv2.TM_SQDIFF_NORMED
i = 0

while True:
    check, frame = vid.read()
    print(check)
    frame1 = cv2.line(frame, (500, 0), (500, 720), (255, 0, 0), 7)
    frame1 = cv2.line(frame1, (800, 0), (800, 720), (255, 0, 0), 7)
    if line_show:
        h, w = frame1.shape[:2]
        if not save_reference:
            reference = frame1[200:500, 780:790]
            cv2.imwrite("../../images/white_image.jpg", reference)
            save_reference = True
        if save_reference:
            reference_image = cv2.imread('../../images/white_image.jpg')
            result = cv2.matchTemplate(reference_image, frame1, template_compare_method)
            mn, _, mnLoc, _ = cv2.minMaxLoc(result)
            MPx, MPy = mnLoc
            trows, tcols = reference_image.shape[:2]
            frame1 = cv2.rectangle(frame1, (MPx, MPy), (MPx + tcols, MPy + trows), (0, 0, 255), 2)
    cv2.imshow("image", frame1)
    key = cv2.waitKey(1)
    if key == ord('l'):
        line_show = True
    if key == ord('k'):
        cv2.imwrite("../../images/saved_image_" + str(i) + ".jpg", frame1)
        i = i + 1
    if key == ord('s'):
        cv2.imwrite("../../images/saved_image.jpg", frame)
        vid.release()
        print("Image saved")
        break
    elif key == ord('q'):
        vid.release()
        cv2.destroyAllWindows()
        break
Can I use any other algorithms, or am I approaching this problem in the wrong way by treating it as an object-tracking task, where I save a small image and track it through template matching?
Can I use some other algorithms like Meanshift, frame differencing, etc. to achieve this?
If I were you, I would solve this problem with a line-detection algorithm. Of course, you can choose any other robust algorithm; my idea is to solve the problem as quickly as possible.
Assume I have the following image with left and right boundaries (blue), and I have the green line.
When the green line passes the left border, quit.
Tracking the green line
First, you need to find the features of the frame to track the green line efficiently.
while True:
    ret, frm = cap.read()
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
Sample output:
Second, find the approximate length of the green line:
There is no direct way to find the length, so determine it by trial and error.
Once you are sure, initialize the line detector.
lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
Third, get the coordinates and check whether the green line has reached the border.
if lns is not None:
    for ln in lns:
        x1 = int(ln[0][0])
        y1 = int(ln[0][1])
        x2 = int(ln[0][2])
        y2 = int(ln[0][3])
        if x1 <= 232:
            break
Code:
import cv2

cap = cv2.VideoCapture("sample.mp4")

while True:
    ret, frm = cap.read()
    if ret:
        rgt_bdr = cv2.line(frm, (794, 250), (794, 1250), (255, 0, 0), 7)
        lft_bdr = cv2.line(frm, (232, 250), (232, 1250), (255, 0, 0), 7)
        frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        frm_cny = cv2.Canny(frm_gry, 50, 200)
        lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
        if lns is not None:
            for ln in lns:
                x1 = int(ln[0][0])
                y1 = int(ln[0][1])
                x2 = int(ln[0][2])
                y2 = int(ln[0][3])
                cv2.line(frm,
                         pt1=(x1, y1),
                         pt2=(x2, y2),
                         color=(0, 255, 0),
                         thickness=3)
                print("({}, {})-({}, {})".format(x1, y1, x2, y2))
                if x1 <= 232:
                    break
        cv2.imshow("frm", frm)
        cv2.waitKey(1)

Dynamically draw a circle using the mouse in OpenCV

I have been trying to make an OpenCV-Python program to draw a rectangle, line, and circle on mouse click and drag. I could successfully do it for the line and rectangle, but the code for the circle is wrong and I need help with that.
import numpy as np
import cv2 as cv
import math

drawing = False  # true if mouse is pressed
ix, iy = -1, -1

# mouse callback function
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing
    if event == cv.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv.EVENT_MOUSEMOVE:
        if drawing == True:
            k = cv.waitKey(33)
            if k == ord('r'):
                cv.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
            elif k == ord('c'):
                cv.circle(img,int(((ix+x)/2,(iy+y)/2)),int(math.sqrt( ((ix-x)**2)+((iy-y)**2) )),(0,0,255),-1)
            elif k == ord('l'):
                cv.line(img, (ix, iy), (x, y), (255, 0, 0), 5)
    elif event == cv.EVENT_LBUTTONUP:
        drawing = False

img = np.zeros((512, 512, 3), np.uint8)
cv.namedWindow('image')
cv.setMouseCallback('image', draw_circle)

while True:
    cv.imshow('image', img)
    k = cv.waitKey(1) & 0xFF
    if k == 27:
        break

cv.destroyAllWindows()
ERROR: Traceback (most recent call last):
File "mouse.py", line 19, in draw_circle
cv.circle(img,int(((ix+x)/2,(iy+y)/2)),int(math.sqrt( ((ix-x)**2)+((iy-y)**2) )),(0,0,255),-1)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'tuple'
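The TypeError is raised because int() is applied to the whole center tuple; each coordinate has to be converted to an int on its own. A minimal fix for that one line (keeping the original radius formula) would be:

elif k == ord('c'):
    center = ((ix + x) // 2, (iy + y) // 2)                   # a tuple of two ints, not int() of a tuple
    radius = int(math.sqrt((ix - x) ** 2 + (iy - y) ** 2))    # same radius formula as before
    cv.circle(img, center, radius, (0, 0, 255), -1)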
To dynamically draw a circle with OpenCV,
import numpy as np
import cv2
import math

drawing = False  # true if mouse is pressed
ix, iy = -1, -1

# Create a function based on a CV2 Event (Left button click)
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        # we take note of where that mouse was located
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        # nothing to do while moving; the circle is drawn on button release
        pass
    elif event == cv2.EVENT_LBUTTONUP:
        radius = int(math.sqrt(((ix - x) ** 2) + ((iy - y) ** 2)))
        cv2.circle(img, (ix, iy), radius, (0, 0, 255), thickness=1)
        drawing = False

# Create a black image
img = np.zeros((512, 512, 3), np.uint8)

# This names the window so we can reference it
cv2.namedWindow('image')

# Connects the mouse button to our callback function
cv2.setMouseCallback('image', draw_circle)

while True:
    cv2.imshow('image', img)
    # EXPLANATION FOR THIS LINE OF CODE:
    # https://stackoverflow.com/questions/35372700/whats-0xff-for-in-cv2-waitkey1/39201163
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

# Once the script is done, it's usually good practice to call this line
# It closes all windows (just in case you have multiple windows open)
cv2.destroyAllWindows()
This is how to draw the circle dynamically.
As you drag the mouse, you can see the size of the circle change dynamically.
import math
import numpy as np
import cv2 as cv

cv.namedWindow('image', cv.WND_PROP_ASPECT_RATIO)

drawing = False  # true if mouse is pressed
# Coordinates
x1, y1, x2, y2 = -1, -1, -1, -1

def run():
    img = cv.imread('image/python.png')
    cv.imshow('image', img)
    # Create a layer to draw the circle on. The layer has the same dimensions as the image
    layer = np.zeros((img.shape[0], img.shape[1], 3), dtype="uint8")

    # mouse callback function
    def draw_circle(event, x, y, flags, param):
        global x1, y1, x2, y2, drawing
        # Manage the different button states
        if event == cv.EVENT_LBUTTONDOWN:
            drawing = True
            x1, y1 = x, y
        elif event == cv.EVENT_MOUSEMOVE:
            if drawing:
                # Fill the layer with 0 to clear it
                layer.fill(0)
                cv.circle(layer, (x1, y1), calc_radius(x1, y1, x, y), (255, 0, 0), 1)
                # Create a mask of the shape
                img2gray = cv.cvtColor(layer, cv.COLOR_BGR2GRAY)
                ret, mask = cv.threshold(img2gray, 0, 255, cv.THRESH_BINARY)
                # Create a copy of the original image
                _img = img.copy()
                # Zero out the masked pixels to avoid color overlap problems
                _img[np.where(mask)] = 0
                cv.imshow('image', np.where(layer == 0, _img, layer))
        elif event == cv.EVENT_LBUTTONUP:
            drawing = False
            layer.fill(0)
            cv.circle(layer, (x1, y1), calc_radius(x1, y1, x, y), (255, 0, 0), 1)
            # Create a mask of the shape
            img2gray = cv.cvtColor(layer, cv.COLOR_BGR2GRAY)
            ret, mask = cv.threshold(img2gray, 0, 255, cv.THRESH_BINARY)
            _img = img.copy()
            # Zero out the masked pixels to avoid color overlap problems
            _img[np.where(mask)] = 0
            # Merge the two arrays using the NumPy where function
            cv.imshow('image', np.where(layer == 0, _img, layer))

    # Assign the callback
    cv.setMouseCallback('image', draw_circle)

    # Service function to calculate the radius (Pythagorean theorem)
    def calc_radius(x1, y1, x2, y2):
        delta_x = abs(x2 - x1)
        delta_y = abs(y2 - y1)
        return int(math.sqrt((delta_x ** 2) + (delta_y ** 2)))

    while True:
        k = cv.waitKey(1)
        if k == ord("c"):  # press 'c' to terminate the program
            cv.destroyAllWindows()
            break

if __name__ == '__main__':
    run()
