I want to do the following in OpenCV. My problem involves a bottle: a line needs to be drawn on the image, and that line needs to rotate along with the bottle as it moves.
The first image needs to have red lines as the borders and an initial green line.
In the second image, the green line needs to be in the middle once the bottle has rotated; that is, the green line has to follow the rotation of the bottle.
Finally, as in the third image, the application needs to quit (or save the picture) when the green line aligns with the red line.
I tried doing this in OpenCV using template matching: I saved a template image and then tracked it with a template-matching algorithm. But it does not seem to work properly in this case.
import cv2
from time import sleep
import numpy as np

vid = cv2.VideoCapture(0)
sleep(2)

line_show = False
save_reference = False
template_compare_method = cv2.TM_SQDIFF_NORMED
i = 0

while True:
    check, frame = vid.read()
    print(check)
    frame1 = cv2.line(frame, (500, 0), (500, 720), (255, 0, 0), 7)
    frame1 = cv2.line(frame1, (800, 0), (800, 720), (255, 0, 0), 7)
    if line_show:
        h, w = frame1.shape[:2]
        if not save_reference:
            reference = frame1[200:500, 780:790]
            cv2.imwrite("../../images/white_image.jpg", reference)
            save_reference = True
        if save_reference:
            reference_image = cv2.imread('../../images/white_image.jpg')
            result = cv2.matchTemplate(reference_image, frame1, template_compare_method)
            mn, _, mnLoc, _ = cv2.minMaxLoc(result)
            MPx, MPy = mnLoc
            trows, tcols = reference_image.shape[:2]
            frame1 = cv2.rectangle(frame1, (MPx, MPy), (MPx + tcols, MPy + trows), (0, 0, 255), 2)
    cv2.imshow("image", frame1)
    key = cv2.waitKey(1)
    if key == ord('l'):
        line_show = True
    if key == ord('k'):
        cv2.imwrite("../../images/saved_image_" + str(i) + ".jpg", frame1)
        i = i + 1
    if key == ord('s'):
        cv2.imwrite("../../images/saved_image.jpg", frame)
        vid.release()
        print("Image saved")
        break
    elif key == ord('q'):
        vid.release()
        cv2.destroyAllWindows()
        break
Can I use any other algorithm, or am I approaching this problem the wrong way by treating it as an object-tracking task, where I save a small image and track it through template matching?
Can I use some other algorithm, like Meanshift or frame differencing, to achieve this?
If I were you, I would solve this problem with a line-detection algorithm. Of course, you can choose any other robust method; my idea is just to solve the problem as quickly as possible.
Assume I have the following image with left and right boundaries (blue), and the green line.
When the green line passes the left border, quit.
Tracking the green line
First, you need to find the features of the frame in order to track the green line efficiently. Convert to grayscale and run an edge detector:
while True:
    ret, frm = cap.read()
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
Sample output:
Second, find the approximate length of the green line:
There is no direct way to find the length; calibrate it by trial and error.
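As a rough calibration sketch (assuming the frm_cny edge image from above, and opencv-contrib-python for the ximgproc module; newer OpenCV releases spell the parameter length_threshold, without the underscore), you can detect with a permissive threshold and print every candidate's length, then pick a cutoff that only the green line exceeds:

import math

fld = cv2.ximgproc.createFastLineDetector(_length_threshold=50)  # permissive on purpose
lines = fld.detect(frm_cny)
if lines is not None:
    for ln in lines:
        x1, y1, x2, y2 = ln[0]
        # print each candidate's length to choose a good threshold
        print("length:", math.hypot(x2 - x1, y2 - y1))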
Once you are sure, initialize the line detector:
lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
Third, get the coordinates and check whether the green line has reached the border.
if lns is not None:
    for ln in lns:
        x1 = int(ln[0][0])
        y1 = int(ln[0][1])
        x2 = int(ln[0][2])
        y2 = int(ln[0][3])
        if x1 <= 232:
            break
Code:
import cv2

cap = cv2.VideoCapture("sample.mp4")

while True:
    ret, frm = cap.read()
    if not ret:
        break
    rgt_bdr = cv2.line(frm, (794, 250), (794, 1250), (255, 0, 0), 7)
    lft_bdr = cv2.line(frm, (232, 250), (232, 1250), (255, 0, 0), 7)
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
    # (creating the detector once, outside the loop, would be cheaper)
    lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
    aligned = False
    if lns is not None:
        for ln in lns:
            x1 = int(ln[0][0])
            y1 = int(ln[0][1])
            x2 = int(ln[0][2])
            y2 = int(ln[0][3])
            cv2.line(frm,
                     pt1=(x1, y1),
                     pt2=(x2, y2),
                     color=(0, 255, 0),
                     thickness=3)
            print("({}, {})-({}, {})".format(x1, y1, x2, y2))
            if x1 <= 232:
                aligned = True
                break  # stop checking further lines
    cv2.imshow("frm", frm)
    cv2.waitKey(1)
    if aligned:
        break  # quit once the green line reaches the left border
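To also save the picture at the moment of alignment, which the original question asked for, the quit branch can be extended before breaking; the output filename here is an assumption:

if aligned:
    cv2.imwrite("aligned_frame.jpg", frm)  # assumed output path
    break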
Related
Good evening! I need a global variable from one function to be used in another function. However, when I try to declare the variable as global, I get the error "Statement expected, found Py:EQ" on the line global id, confidence = recognizer.predict(faceimage) (line 53 of my code), specifically over the = sign. How do I fix this error?
# install opencv "pip install opencv-python"
import cv2

# distance from camera to object (face), measured in centimeters
Known_distance = 76.2

# width of face in the real world or Object Plane, in centimeters
Known_width = 14.3

# Colors
GREEN = (0, 255, 0)
RED = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# defining the fonts
fonts = cv2.FONT_HERSHEY_COMPLEX

# face detector object
face_detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# focal length finder function
def Focal_Length_Finder(measured_distance, real_width, width_in_rf_image):
    # finding the focal length
    focal_length = (width_in_rf_image * measured_distance) / real_width
    return focal_length

# distance estimation function
def Distance_finder(Focal_Length, real_face_width, face_width_in_frame):
    distance = (real_face_width * Focal_Length) / face_width_in_frame
    # return the distance
    return distance

def microFacialExpressions(recognizer, width, height):
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    camera = cv2.VideoCapture(0)
    recognizer = cv2.face.EigenFaceRecognizer_create()
    recognizer.read("classifierEigen.yml")
    width, height = 220, 220
    while(True):
        connected, image = camera.read()
        # Grayscale conversion
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        facesDetected = detectorFace.detectMultiScale(GrayImage, scaleFactor=1.5, minSize=(100, 100))
        for (x, y, l, a) in facesDetected:
            faceimage = cv2.resize(greyimage[y:y + a, x:x + l], (width, height))
            cv2.rectangle(image, (x, y), (x + l, y + a), (0, 0, 255), 2)
            global id, confidence = recognizer.predict(faceimage)
            # If ID is equal to 1, issue the message "Safe to exit"; if not, issue the message "Hostile area"
            if id == 1:
                warning = "Safe to exit"
            else:
                warning = "Hostile area"
            cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0, 0, 255))
        return warning

def face_data(image):
    face_width = 0  # making face width zero
    # converting the color image to a grayscale image
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detecting faces in the image
    faces = face_detector.detectMultiScale(gray_image, 1.3, 5)
    # looping through the faces detected in the image,
    # getting coordinates x, y, width and height
    for (x, y, h, w) in faces:
        # draw the rectangle on the face
        cv2.rectangle(image, (x, y), (x + w, y + h), GREEN, 2)
        # getting face width in pixels
        face_width = w
    # return the face width in pixels
    return face_width

# reading reference_image from directory
ref_image = cv2.imread("Ref_image.jpg")

# find the face width (pixels) in the reference_image
ref_image_face_width = face_data(ref_image)

# get the focal length by calling "Focal_Length_Finder" with
# face width in reference (pixels),
# Known_distance (centimeters),
# Known_width (centimeters)
Focal_length_found = Focal_Length_Finder(
    Known_distance, Known_width, ref_image_face_width)
print(Focal_length_found)

# show the reference image
cv2.imshow("ref_image", ref_image)

# initialize the camera object so that we can get frames from it
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# looping through frames incoming from the camera/video
while True:
    # reading the frame from camera
    _, frame = cap.read()
    # calling face_data function to find the width of face (pixels) in the frame
    face_width_in_frame = face_data(frame)
    # if the face width is zero, don't compute the distance
    if face_width_in_frame != 0:
        # finding the distance by calling Distance_finder; it needs
        # the Focal_Length, Known_width (centimeters), and Known_distance (centimeters)
        Distance = Distance_finder(
            Focal_length_found, Known_width, face_width_in_frame)
        if Distance <= 50 and id:
            print("Level S Alert!")
        # draw line as background of text
        cv2.line(frame, (30, 30), (230, 30), RED, 32)
        cv2.line(frame, (30, 30), (230, 30), BLACK, 28)
        # drawing text on the screen
        cv2.putText(
            frame, f"Distance: {round(Distance, 2)} CM", (30, 35),
            fonts, 0.6, GREEN, 2)
    # show the frame on the screen
    cv2.imshow("frame", frame)
    # quit the program if you press 'q' on the keyboard
    if cv2.waitKey(1) == ord("q"):
        break

# closing the camera
cap.release()
# closing the windows that are opened
cv2.destroyAllWindows()
The global statement does not support assigning to a name, only declaring the name to be a global variable, rather than local variable. While global statements are legal pretty much anywhere, it is strongly recommended to put such declarations at the top of the function.
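A minimal illustration of the legal pattern:

counter = 0

def bump():
    global counter           # declaration only; no assignment on this line
    counter = counter + 1    # the assignment happens as a normal statement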
def microFacialExpressions(recognizer, width, height):
    global id, confidence
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    camera = cv2.VideoCapture(0)
    recognizer = cv2.face.EigenFaceRecognizer_create()
    recognizer.read("classifierEigen.yml")
    width, height = 220, 220
    while(True):
        connected, image = camera.read()
        # Grayscale conversion
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # note: the variable is grayimage (lowercase), not GrayImage
        facesDetected = detectorFace.detectMultiScale(grayimage, scaleFactor=1.5, minSize=(100, 100))
        for (x, y, l, a) in facesDetected:
            faceimage = cv2.resize(grayimage[y:y + a, x:x + l], (width, height))
            cv2.rectangle(image, (x, y), (x + l, y + a), (0, 0, 255), 2)
            id, confidence = recognizer.predict(faceimage)  # predict() returns both values
            # If ID is equal to 1, issue the message "Safe to exit"; if not, issue "Hostile area"
            if id == 1:
                warning = "Safe to exit"
            else:
                warning = "Hostile area"
            cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0, 0, 255))
        return warning
Given that both variables are repeatedly changed in the loop, it's not clear why the last value of either is special enough to need in the global scope. I suspect neither variable needs to be declared global at all.
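If you do drop the globals, a sketch of the alternative (with a hypothetical helper name, classify) is to return the values and let the caller unpack them:

def classify(recognizer, faceimage):
    # return the prediction instead of mutating globals
    face_id, confidence = recognizer.predict(faceimage)
    warning = "Safe to exit" if face_id == 1 else "Hostile area"
    return face_id, confidence, warning

# caller side:
# face_id, confidence, warning = classify(recognizer, faceimage)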
Here is simple code that uses YOLO to display a video, or the camera feed, on a PC:
import cv2
import time

Conf_threshold = 0.4
NMS_threshold = 0.4
COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
          (255, 255, 0), (255, 0, 255), (0, 255, 255)]

class_name = []
with open(r'Resources\coco.names.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]
# print(class_name)

net = cv2.dnn.readNet(r'Resources\yolov4-tiny.weights', r'Resources\yolov4-tiny.cfg')
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA_FP16)
model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

cap = cv2.VideoCapture('test.mp4')
starting_time = time.time()
frame_counter = 0

while True:
    ret, frame = cap.read()
    frame_counter += 1
    if ret == False:
        break
    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %f" % (class_name[classid], score)
        cv2.rectangle(frame, box, color, 1)
        cv2.putText(frame, label, (box[0], box[1] - 10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)
    endingTime = time.time() - starting_time
    fps = frame_counter / endingTime
    # print(fps)
    cv2.putText(frame, f'FPS: {fps}', (5, 35),
                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break  # without this, the loop keeps reading from the released capture
But I need a screen capture instead of the test.mp4 video. To take screenshots of the screen, this code was used:
import numpy as np
import cv2
from mss import mss

def screen_record_efficiency():
    bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}
    sct = mss()
    font = cv2.FONT_HERSHEY_SIMPLEX
    while 1:
        # grab image
        sct_img = np.array(sct.grab(bbox))
        # display image
        cv2.imshow('screen', sct_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
I can't understand how to feed the array of screen images from the second piece of code into the first one in place of the video file, or how to combine the two in general. My Python knowledge is poor, so I can't solve this on my own, and I didn't find a video or article about capturing the desktop in real time.
If I pass the grabbed array directly, i.e. frame = sct_img, it displays an error that 4 are passed instead of 3 (or something similar).
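A minimal sketch of the combination, assuming the 4-versus-3 complaint comes from mss returning 4-channel BGRA frames: convert each grab to 3-channel BGR and feed it to the detector in place of cap.read(). This reuses model, class_name, Conf_threshold, NMS_threshold, and COLORS from the YOLO snippet above:

import cv2
import numpy as np
from mss import mss

# assumes `model`, `class_name`, `Conf_threshold`, `NMS_threshold` and
# `COLORS` are set up exactly as in the YOLO snippet above
bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}
sct = mss()

while True:
    # grab the screen instead of reading a video frame
    sct_img = np.array(sct.grab(bbox))
    # mss returns BGRA; the detector expects 3-channel BGR
    frame = cv2.cvtColor(sct_img, cv2.COLOR_BGRA2BGR)
    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %f" % (class_name[int(classid)], score)
        cv2.rectangle(frame, box, color, 1)
        cv2.putText(frame, label, (box[0], box[1] - 10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)
    cv2.imshow('screen', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break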
I am trying to detect the color in a specific ROI of a traffic-light video. Although the code predicts the color correctly, it doesn't restrict the prediction to the specific ROI I am looking at.
When the traffic video starts, the ROI region contains no red/green/yellow, yet the code still predicts and shows RED based on other areas. What am I doing wrong?
I have uploaded the test video here for testing: https://ufile.io/ha20buns
The Python code is below.
import cv2
import numpy as np

cap = cv2.VideoCapture(r'D:\Videos\Sample.mp4')

while True:
    ret, frame = cap.read()
    if ret == False:
        break
    frame = cv2.resize(frame, (1920, 1080))
    # Extract required section from entire frame
    roiColor = cv2.rectangle(frame.copy(), (1022, 565), (1411, 709), (255, 255, 255), 2)  # For SampleTL.mp4
    blcolor = (255, 0, 0)
    cv2.rectangle(frame, (1022, 565), (1411, 709), blcolor)
    hsv = cv2.cvtColor(roiColor, cv2.COLOR_BGR2HSV)
    # red
    lower_hsv_red = np.array([157, 177, 122])
    upper_hsv_red = np.array([179, 255, 255])
    mask_red = cv2.inRange(hsv, lowerb=lower_hsv_red, upperb=upper_hsv_red)
    red_blur = cv2.medianBlur(mask_red, 7)
    # green
    lower_hsv_green = np.array([49, 79, 137])
    upper_hsv_green = np.array([90, 255, 255])
    mask_green = cv2.inRange(hsv, lowerb=lower_hsv_green, upperb=upper_hsv_green)
    green_blur = cv2.medianBlur(mask_green, 7)
    # yellow
    lower_hsv_yellow = np.array([15, 150, 150])
    upper_hsv_yellow = np.array([35, 255, 255])
    mask_yellow = cv2.inRange(hsv, lowerb=lower_hsv_yellow, upperb=upper_hsv_yellow)
    yellow_blur = cv2.medianBlur(mask_yellow, 7)
    # The masks are binary images: any white pixel has value 255, so the maximum is 255 if the color is present
    red_color = np.max(red_blur)
    green_color = np.max(green_blur)
    yellow_color = np.max(yellow_blur)
    if red_color == 255:
        print('red')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 0, 255), 2)  # draw a rectangular frame by coordinates
        cv2.putText(frame, "red", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)  # red text information
    elif green_color == 255:
        print('green')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "green", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    elif yellow_color == 255:
        print('yellow')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "yellow", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0), 2)
    cv2.imshow('frame', frame)
    red_blur = cv2.resize(red_blur, (300, 200))
    green_blur = cv2.resize(green_blur, (300, 200))
    yellow_blur = cv2.resize(yellow_blur, (300, 200))
    # cv2.imshow('red_window', red_blur)
    # cv2.imshow('green_window', green_blur)
    # cv2.imshow('yellow_window', yellow_blur)
    c = cv2.waitKey(10)
    if c == 27:
        break

cap.release()
cv2.destroyAllWindows()  # destroy all opened windows
cv2.rectangle doesn't crop the image but returns the original image with a drawn rectangle. Try this instead:
roiColor = frame[565:709, 1022:1411]
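For instance, sliced straight from the frame (rows first, then columns, using the same coordinates the question already draws):

roiColor = frame[565:709, 1022:1411]
hsv = cv2.cvtColor(roiColor, cv2.COLOR_BGR2HSV)
# the inRange / medianBlur checks now run on the ROI only, so colors
# elsewhere in the frame can no longer trigger a detection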
I wish to alter this code to track a single large object in motion, i.e. a person. When I run the code as is, the display tracks a person as multiple objects rather than as one single object.
Ignoring the Firebase part, I want to draw the rectangle around the entire object rather than around parts of it.
Also, I wish to change the orientation of the reference lines on the display from horizontal to vertical, please.
import datetime
import math
import cv2
import numpy as np
from firebase import firebase

# global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
min_area = 3000  # Adjust this value according to your usage
_threshold = 70  # Adjust this value according to your usage
OffsetRefLines = 150  # Adjust this value according to your usage

# Check if an object is entering the monitored zone
def check_entrance_line_crossing(y, coor_y_entrance, coor_y_exit):
    abs_distance = abs(y - coor_y_entrance)
    if ((abs_distance <= 2) and (y < coor_y_exit)):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def check_exit_line_crossing(y, coor_y_entrance, coor_y_exit):
    abs_distance = abs(y - coor_y_exit)
    if ((abs_distance <= 2) and (y > coor_y_entrance)):
        return 1
    else:
        return 0

camera = cv2.VideoCapture(0)

# force 640x480 webcam resolution
camera.set(3, 640)
camera.set(4, 480)

ReferenceFrame = None

# Frames may be discarded while the camera adjusts to the light
for i in range(0, 20):
    (grabbed, Frame) = camera.read()

while True:
    (grabbed, Frame) = camera.read()
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)
    # if a frame cannot be grabbed, this program ends here
    if not grabbed:
        break
    # grayscale conversion and Gaussian blur filter
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue
    # Background subtraction and image manipulation
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, _threshold, 255, cv2.THRESH_BINARY)[1]
    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    # OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
    _, cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    qtty_of_count = 0
    # plot reference lines (entrance and exit lines)
    coor_y_entrance = (height // 2) - OffsetRefLines
    coor_y_exit = (height // 2) + OffsetRefLines
    cv2.line(Frame, (0, coor_y_entrance), (width, coor_y_entrance), (255, 0, 0), 2)
    cv2.line(Frame, (0, coor_y_exit), (width, coor_y_exit), (0, 0, 255), 2)
    # check all found contours
    for c in cnts:
        # if a contour has a small area, it'll be ignored
        if cv2.contourArea(c) < min_area:
            continue
        qtty_of_count = qtty_of_count + 1
        app = firebase.FirebaseApplication('https://finalyearproj-caa49.firebaseio.com/', None)
        ## result = app.post('/people', {'count': qtty_of_count})##
        update = app.put('/people', "count", qtty_of_count)
        print("Updated value in FB" + str(update))
        # draw a rectangle "around" the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # find object's centroid
        coor_x_centroid = (x + x + w) // 2
        coor_y_centroid = (y + y + h) // 2
        ObjectCentroid = (coor_x_centroid, coor_y_centroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)
        if (check_entrance_line_crossing(coor_y_centroid, coor_y_entrance, coor_y_exit)):
            EntranceCounter += 1
        if (check_exit_line_crossing(coor_y_centroid, coor_y_entrance, coor_y_exit)):
            ExitCounter += 1
    print("Total contours found: " + str(qtty_of_count))
    # Write entrance and exit counter values on frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Original Frame", Frame)
    cv2.waitKey(1)

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
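As a sketch of both requested changes (it assumes the separate detections really do belong to one person): merge every sufficiently large contour into a single bounding box by taking the union of their boundingRect results, and make the reference lines vertical by fixing x instead of y. The crossing checks would then compare the centroid's x coordinate instead of y.

# union of the bounding boxes of all large contours: one rectangle per person
boxes = [cv2.boundingRect(c) for c in cnts if cv2.contourArea(c) >= min_area]
if boxes:
    x_min = min(x for (x, y, w, h) in boxes)
    y_min = min(y for (x, y, w, h) in boxes)
    x_max = max(x + w for (x, y, w, h) in boxes)
    y_max = max(y + h for (x, y, w, h) in boxes)
    cv2.rectangle(Frame, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)

# vertical reference lines: fix x and span the full frame height
coor_x_entrance = (width // 2) - OffsetRefLines
coor_x_exit = (width // 2) + OffsetRefLines
cv2.line(Frame, (coor_x_entrance, 0), (coor_x_entrance, height), (255, 0, 0), 2)
cv2.line(Frame, (coor_x_exit, 0), (coor_x_exit, height), (0, 0, 255), 2)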
I am trying to write a function which opens an image and draws a circle where the left mouse button is clicked. The circle's size can then be adjusted using the mouse wheel or keyboard. Also, every click should print a label in sequence, e.g. the 1st circle drawn puts the label '1', the 2nd circle drawn puts the label '2', and so on. I have managed to get the circle and the label on the image, but I am unsure how to increase the radius or change the label with different clicks.
import cv2
import numpy as np

# Create a black image and a window
windowName = 'Drawing'
img = cv2.imread('000025.png', cv2.IMREAD_COLOR)
cv2.namedWindow(windowName)

# mouse callback function
def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img, (x, y), 30, (255, 0, 0), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, 'label', (x + 30, y + 30), font, 1, (200, 255, 155), 1, cv2.LINE_AA)

# bind the callback function to window
cv2.setMouseCallback(windowName, draw_circle)

def main():
    while (True):
        cv2.imshow(windowName, img)
        if cv2.waitKey(20) == 27:
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
Using the following code you can visualize the circle while moving the mouse as well. I have supplemented the code provided by Salman by adding another condition that handles the MOUSEMOVE event.
import cv2
import numpy as np
import math

drawing = False

def draw_circle(event, x, y, flags, param):
    global x1, y1, drawing, radius, num, img, img2
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        x1, y1 = x, y
        radius = int(math.hypot(x - x1, y - y1))  # 0 here: just marks the centre
        cv2.circle(img, (x1, y1), radius, (255, 0, 0), 1)
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            # restore the clean image and redraw the preview circle
            img = img2.copy()
            radius = int(math.hypot(x - x1, y - y1))
            cv2.circle(img, (x1, y1), radius, (255, 0, 0), 1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        num += 1
        radius = int(math.hypot(x - x1, y - y1))
        cv2.circle(img, (x1, y1), radius, (255, 0, 255), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, '_'.join(['label', str(num)]), (x + 20, y + 20), font, 1, (200, 255, 155), 1, cv2.LINE_AA)
        img2 = img.copy()

if __name__ == "__main__":
    num = 0
    windowName = 'Drawing'
    img = np.zeros((500, 500, 3), np.uint8)
    img2 = img.copy()
    cv2.namedWindow(windowName)
    cv2.setMouseCallback(windowName, draw_circle)
    while (True):
        cv2.imshow(windowName, img)
        if cv2.waitKey(20) == 27:
            break
    cv2.destroyAllWindows()
Sample output:
I think this may work for you:
import cv2
import numpy as np
import math

# mouse callback function
def draw_circle(event, x, y, flags, param):
    global x1, y1, radius, num
    if event == cv2.EVENT_LBUTTONDOWN:
        x1, y1 = x, y
    if event == cv2.EVENT_LBUTTONUP:
        num += 1
        radius = int(math.hypot(x - x1, y - y1))
        cv2.circle(img, (x1, y1), radius, (255, 0, 0), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, f'label: {num}', (x + 30, y + 30), font, 1, (200, 255, 155), 1, cv2.LINE_AA)

if __name__ == "__main__":
    num = 0
    # Create a black image and a window
    windowName = 'Drawing'
    img = cv2.imread('img.jpg', cv2.IMREAD_COLOR)
    cv2.namedWindow(windowName)
    # bind the callback function to window
    cv2.setMouseCallback(windowName, draw_circle)
    while (True):
        cv2.imshow(windowName, img)
        if cv2.waitKey(20) == 27:
            break
    cv2.destroyAllWindows()
Result:
This is a simple example, and you can do a lot of things with mouse events.
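For instance, the radius can also be adjusted with the mouse wheel, which the question originally asked about. A sketch, assuming a HighGUI backend that delivers cv2.EVENT_MOUSEWHEEL (e.g. Windows or Qt builds); the wheel delta is packed into flags, and its sign gives the scroll direction:

import cv2
import numpy as np

radius = 30    # current radius, adjusted by the wheel
center = None  # centre of the last click

def on_mouse(event, x, y, flags, param):
    global radius, center
    if event == cv2.EVENT_LBUTTONDOWN:
        center = (x, y)
    elif event == cv2.EVENT_MOUSEWHEEL:
        # sign of the packed wheel delta selects grow/shrink
        if flags > 0:
            radius += 2
        else:
            radius = max(1, radius - 2)

windowName = 'Drawing'
img = np.zeros((500, 500, 3), np.uint8)
cv2.namedWindow(windowName)
cv2.setMouseCallback(windowName, on_mouse)
while True:
    canvas = img.copy()
    if center is not None:
        cv2.circle(canvas, center, radius, (255, 0, 0), 1)
    cv2.imshow(windowName, canvas)
    if cv2.waitKey(20) == 27:  # Esc quits
        break
cv2.destroyAllWindows()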
First, you have to keep all coordinates (and other attributes) of your drawables in a global, dynamic object.
You have to give the app guidance on whether you are drawing a circle, a label, or another drawable. This can be done with menu items in the OpenCV window or with key presses (I have done both). You also have to keep track of context: is the next click the x, y center of a circle, a point on the circle (for the radius calculation, unless you use the mouse wheel or keyboard for that), the top-left corner of a rectangle, and so on.
You have to store the created drawables in that global object.
If you want to edit or delete an existing drawable, you have to write an iterator function that finds the closest drawable (by its mid- or another point) for proper selection.
All of the above is doable in OpenCV alone; a minimal sketch of the bookkeeping follows.
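A minimal sketch, with hypothetical names (drawables, mode, closest_drawable) and assuming circles and numbered labels as the two drawable kinds; the keys 'c' and 't' switch the drawing mode:

import cv2
import numpy as np

# hypothetical global registry of drawables and the current drawing mode
drawables = []   # e.g. {'kind': 'circle', 'center': (x, y), 'radius': r}
mode = 'circle'  # switched by key presses in the main loop

def on_mouse(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        if mode == 'circle':
            drawables.append({'kind': 'circle', 'center': (x, y), 'radius': 30})
        else:
            drawables.append({'kind': 'label', 'pos': (x, y), 'text': str(len(drawables) + 1)})

def closest_drawable(x, y):
    # nearest drawable to a click, for later edit/delete selection
    def dist(d):
        px, py = d.get('center', d.get('pos'))
        return (px - x) ** 2 + (py - y) ** 2
    return min(drawables, key=dist) if drawables else None

windowName = 'Drawing'
img = np.zeros((500, 500, 3), np.uint8)
cv2.namedWindow(windowName)
cv2.setMouseCallback(windowName, on_mouse)
while True:
    canvas = img.copy()
    # redraw every stored drawable each frame
    for d in drawables:
        if d['kind'] == 'circle':
            cv2.circle(canvas, d['center'], d['radius'], (255, 0, 0), 1)
        else:
            cv2.putText(canvas, d['text'], d['pos'], cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 255, 155), 1)
    cv2.imshow(windowName, canvas)
    key = cv2.waitKey(20)
    if key == 27:
        break
    elif key == ord('c'):
        mode = 'circle'
    elif key == ord('t'):
        mode = 'label'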
Here is a Python class implementation for collecting mouse-click points in an image, using an OpenCV mouse callback. You can make an object of this class and use the getpt(n, img) method to select n points in an image with mouse clicks. Edit and use it for your purpose.
import cv2
import numpy as np

#events = [i for i in dir(cv2) if 'EVENT' in i]
#print (events)

class MousePts:
    def __init__(self, windowname, img):
        self.windowname = windowname
        self.img1 = img.copy()
        self.img = self.img1.copy()
        cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(windowname, img)
        self.curr_pt = []
        self.point = []

    def select_point(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.point.append([x, y])
            #print(self.point)
            cv2.circle(self.img, (x, y), 5, (0, 255, 0), -1)
        elif event == cv2.EVENT_MOUSEMOVE:
            self.curr_pt = [x, y]
            #print(self.point)

    def getpt(self, count=1, img=None):
        if img is not None:
            self.img = img
        else:
            self.img = self.img1.copy()
        cv2.namedWindow(self.windowname, cv2.WINDOW_NORMAL)
        cv2.imshow(self.windowname, self.img)
        cv2.setMouseCallback(self.windowname, self.select_point)
        self.point = []
        while(1):
            cv2.imshow(self.windowname, self.img)
            k = cv2.waitKey(20) & 0xFF
            if k == 27 or len(self.point) >= count:
                break
        #print(self.point)
        cv2.setMouseCallback(self.windowname, lambda *args: None)
        #cv2.destroyAllWindows()
        return self.point, self.img

if __name__ == '__main__':
    img = np.zeros((512, 512, 3), np.uint8)
    windowname = 'image'
    coordinateStore = MousePts(windowname, img)
    pts, img = coordinateStore.getpt(3)
    print(pts)
    pts, img = coordinateStore.getpt(3, img)
    print(pts)
    cv2.imshow(windowname, img)
    cv2.waitKey(0)