Object tracking in Python using OpenCV

I'm trying to modify this code to allow tracking of multiple objects of the same color and to draw a path showing where each object travels. Currently the code only tracks the largest object of the given color, and the travel path disappears as the object moves around the video. Finally, I could use some guidance on how to create a new video file capturing the paths. This is my first post, so I'm not sure if the code was posted correctly. Go easy on me ;)
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# object in the HSV color space, then initialize the
# list of tracked points
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # update the points queue
    pts.appendleft(center)

    # loop over the set of tracked points
    for i in range(1, len(pts)):  # xrange is Python 2 only
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue

        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
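To sketch the three things being asked for (this is a rough sketch under stated assumptions, not a drop-in fix): loop over every contour above a minimum area instead of only the largest, draw the centroids onto a persistent overlay so the paths never fade, and hand each annotated frame to cv2.VideoWriter. The output file name, the FPS, and the minimum contour area are assumptions to tune:

# Sketch: track every green blob, keep the paths on a persistent
# overlay, and save the annotated frames to output.avi
# (file name, FPS and minimum blob area are assumptions).
import cv2
import numpy as np

greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)

camera = cv2.VideoCapture(0)
writer = None    # created lazily once the frame size is known
overlay = None   # persistent canvas that the paths accumulate on

while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    if overlay is None:
        overlay = np.zeros_like(frame)
    # mark the centroid of every blob above a minimum area,
    # not just the largest one
    for c in cnts:
        if cv2.contourArea(c) < 100:  # assumed minimum, tune as needed
            continue
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        cv2.circle(overlay, center, 2, (0, 0, 255), -1)
    # paths persist because the overlay is never cleared
    out = cv2.add(frame, overlay)
    if writer is None:
        (h, w) = out.shape[:2]
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter("output.avi", fourcc, 20.0, (w, h))
    writer.write(out)
    cv2.imshow("Frame", out)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
if writer is not None:
    writer.release()
cv2.destroyAllWindows()

Drawing onto an overlay sidesteps per-object identity; if you need a separate deque of points per object, you would have to match centroids between consecutive frames (for example by nearest distance).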

Related

Decode barcodes & Qr codes using deep learning

Objective:
I have a drone and it has to detect and decode all barcodes and QR codes (small or big) from a distance of 3 meters. Even though I have been able to achieve this with a hand-held camera, the same does not work on the drone, as it's very hard to keep the drone steady.
Research Done:
I surfed the internet and found a program by pyimagesearch that draws bounding boxes on the detected barcode. The program consists of two files:
1. simple_barcode_detection.py: processes the received frame and returns the bounding box location of the barcode.
2. detect_barcode.py: sends the input frames to simple_barcode_detection.py and displays the returned frames. Note: this program successfully drew bounding boxes on the barcode.
I am attaching the current state of the code that I have built from this.
simple_barcode_detection.py:
# import the necessary packages
import numpy as np
import cv2
import imutils
from pyzbar import pyzbar

global text, box

def detect(image):
    # convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # compute the Scharr gradient magnitude representation of the image
    # in both the x and y direction using OpenCV 2.4
    ddepth = cv2.cv.CV_32F if imutils.is_cv2() else cv2.CV_32F
    gradX = cv2.Sobel(gray, ddepth=ddepth, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=ddepth, dx=0, dy=1, ksize=-1)

    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)

    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)

    # find the contours in the thresholded image
    cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # if no contours were found, return None
    if len(cnts) == 0:
        return None

    # otherwise, sort the contours by area and compute the rotated
    # bounding box of the largest contour
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
    rect = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(rect) if imutils.is_cv2() else cv2.boxPoints(rect)
    box = np.int0(box)
    return box

# draw a bounding box around the detected barcode and display the frame
def process(image, box):
    min_y = int(np.min(box[:, -1]))
    max_y = int(np.max(box[:, -1]))
    min_x = int(np.min(box[:, 0]))
    max_x = int(np.max(box[:, 0]))
    image = image[min_y:max_y, min_x:max_x]
    barcodes = pyzbar.decode(image)

    # process the barcode
    for barcode in barcodes:
        (x, y, w, h) = barcode.rect
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type
        text = "{} ({})".format(barcodeData, barcodeType)
        return text
detect_barcode.py:
# import the necessary packages
import simple_barcode_detection
from imutils.video import VideoStream
import argparse
import time
import cv2
from pyzbar import pyzbar
import imutils

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
args = vars(ap.parse_args())

# if the video path was not supplied, grab the reference to the
# camera
if not args.get("video", False):
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
# otherwise, load the video
else:
    vs = cv2.VideoCapture(args["video"])

# keep looping over the frames
while True:
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    if frame is None:
        break
    box = simple_barcode_detection.detect(frame)
    if box is not None:
        cv2.putText(frame, text, (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Frame", frame)
    # cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera pointer
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
Error:
line 30, in <module>
    cv2.putText(frame, text, (x, y - 10),
NameError: name 'text' is not defined
Adding to this, I also found a research paper where YOLO and pyzbar are used to identify and decode the barcode.
Question:
Is there any possibility that we can take the returned bounding box locations and send them to pyzbar so that it scans only that region for barcodes? (See the sketch below.)
How does pyzbar work, and could we create a similar library like pyzbar?
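On the first question: yes; process() above already crops to the box before calling pyzbar, and the main loop just never calls it. A minimal sketch of that glue, with an assumed padding of 20 px around the box (this also avoids the NameError, since the text is only drawn when a decode succeeded):

import numpy as np
import cv2
from pyzbar import pyzbar

def decode_region(frame, box, pad=20):
    # crop the detected box (plus an assumed 20 px of padding) out of
    # the frame and let pyzbar scan only that region
    (h, w) = frame.shape[:2]
    min_x = max(int(np.min(box[:, 0])) - pad, 0)
    max_x = min(int(np.max(box[:, 0])) + pad, w)
    min_y = max(int(np.min(box[:, 1])) - pad, 0)
    max_y = min(int(np.max(box[:, 1])) + pad, h)
    roi = frame[min_y:max_y, min_x:max_x]
    for barcode in pyzbar.decode(roi):
        data = barcode.data.decode("utf-8")
        text = "{} ({})".format(data, barcode.type)
        # text exists only when a decode succeeded, so no NameError
        cv2.putText(frame, text, (min_x, min_y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    return frame

# in the main loop of detect_barcode.py:
# box = simple_barcode_detection.detect(frame)
# if box is not None:
#     frame = decode_region(frame, box)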
Reference images:
(image) Drone not able to scan barcode
(image) Drone not able to scan barcodes of this size
Links:
https://www.pyimagesearch.com/2014/12/15/real-time-barcode-detection-video-python-opencv/
https://d1wqtxts1xzle7.cloudfront.net/63796937/rahman-2019-ijais-451835_IJAIS20200701-113597-137d8us-with-cover-page-v2.pdf?Expires=1637915838&Signature=geCHJHKsaMnDCJMzNAz-OHHjxXSdX~rLtTf-MO0gGuNutOnHl5x33q8-Xiab4haQGnhuE8ektKO4Ah2REj9nebwfwnO3GYxBGRqZgqMsK63375AUQV7YsTjJz9Qwp~OwUa9st2h4a6~ep3eSwvCjCX-Dl-g612osElU6ATgdI4DGqqaaat-QuLAmjQqywXTZrTWs0~nLvVeZBLEJRnNbcphtlJPv1yM35Ix2AhiwKhax4X4qCqLR7Wzt3XR5IaW33X3zSPNoo0QhDLEZrEFG0m-Hi156fsq488mC4n6Udyoi8KNhaUxqQ316b7Zru7XF1z1UaBbGU44O4nuI5AtflA__&Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA

How to get only the water level contour with OpenCV Python on Raspberry Pi

I am using a Raspberry Pi 4 (8 GB) with a Pi camera to detect the water level. I have defined a line from (0, 375) to (800, 375). If the topmost point of the water level contour goes above this line, then I want to call a function. Here is my code and an attached image of the setup. How do I get the water level contour only? Does it require Canny edge detection over the contours to get a clear water level? First I am getting the largest contour and then defining its topmost point.
import numpy as np
import cv2
import time
from datetime import datetime

#color=(255,0,0)
color = (0, 255, 0)
thickness = 2
kernel = np.ones((2, 2), np.uint8)  # added 01/07/2021
picflag = 0  # set value to 1 once picture is taken

# function to take a still picture when water level goes beyond threshold
def takepicture(frame):
    currentTime = datetime.now()
    picTime = currentTime.strftime("%d.%m.%Y-%H%M%S")  # create file name for our picture
    text = currentTime.strftime("%d.%m.%Y-%H:%M:%S")
    font = cv2.FONT_HERSHEY_SIMPLEX  # font
    org = (5, 20)  # org (a leading zero like 05 is a syntax error in Python 3)
    fontScale = 0.5  # fontScale
    color = (0, 0, 255)  # red color in BGR
    thickness = 1  # line thickness of 1 px
    picName = picTime + '.png'
    image = cv2.putText(frame, text, org, font, fontScale, color, thickness, cv2.LINE_AA, False)
    cv2.imwrite(picName, image)
    picflag = 1
    return

cap = cv2.VideoCapture(0)
while(True):
    # capture frame-by-frame
    ret, frame = cap.read()  # ret = 1 if the video is captured; frame is the image

    # our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #blur = cv2.GaussianBlur(gray,(21,21),0)
    gray = cv2.medianBlur(gray, 3)  # to remove salt-and-pepper noise
    #ret,thresh = cv2.threshold(gray,10,20,cv2.THRESH_BINARY_INV)
    ret, thresh = cv2.threshold(gray, 127, 127, cv2.THRESH_BINARY_INV)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)  # get outer boundaries only, added 01/07/2021
    thresh = cv2.dilate(thresh, kernel, iterations=5)  # strengthen weak pixels, added 01/07/2021
    img1, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #img1,contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # added 01/07/2021
    cv2.line(frame, pt1=(0, 375), pt2=(800, 375), color=(0, 0, 255), thickness=2)  # added 01/07/2021
    if len(contours) != 0:
        c = max(contours, key=cv2.contourArea)  # find the largest contour
        #x,y,w,h = cv2.boundingRect(c) # get bounding box of largest contour
        img2 = cv2.drawContours(frame, c, -1, color, thickness)  # draw largest contour
        #img2=cv2.drawContours(frame, contours, -1, color, thickness) # draw all contours
        #img3 = cv2.rectangle(img2,(x,y),(x+w,y+h),(0,0,255),2) # draw red bounding box in img
        #center = (x, y)
        #print(center)
        left = tuple(c[c[:, :, 0].argmin()][0])
        right = tuple(c[c[:, :, 0].argmax()][0])
        top = tuple(c[c[:, :, 1].argmin()][0])
        bottom = tuple(c[c[:, :, 1].argmax()][0])

        # draw dots onto frame
        cv2.drawContours(frame, [c], -1, (36, 255, 12), 2)
        cv2.circle(frame, left, 8, (0, 50, 255), -1)
        cv2.circle(frame, right, 8, (0, 255, 255), -1)
        cv2.circle(frame, top, 8, (255, 50, 0), -1)
        cv2.circle(frame, bottom, 8, (255, 255, 0), -1)

        #print('left: {}'.format(left))
        #print('right: {}'.format(right))
        #print(format(top))
        top_countour_point = top[1]
        print(top_countour_point)
        #print('bottom: {}'.format(bottom))
        #if ((top_countour_point <= 375) and (picflag == 0)): # checking if contour top point is above line
            #takepicture(frame)
            #continue
        #if ((top_countour_point > 375) and (picflag == 0)):
            #picflag = 0
            #continue

    # display the resulting image
    # cv2.line(frame, pt1=(0,375), pt2=(800,375), color=(0,0,255), thickness=2) # added 01/07/2021
    #cv2.imshow('Contour',img3)
    #cv2.imshow('thresh' ,thresh)
    cv2.imshow('Contour', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

# when everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
Caveats
As was pointed out in the comments, there is very little to work with based on your post. In general, I agree with s0mbre that you'd be better off with a water level sensor, and with kavko that if you do want to use a camera, you need to better control your environment: lighting, camera angles, background, and so on.
However, that is not to say that it is not possible with your current setup, assuming that it is a static setup except for the water level. As such, there are some assumptions that we can make.
Here are the steps that I took to get an approximate approach:
Gather image data
You have only provided one image, with lines already drawn on it, so that's not a lot to go on. What I did was remove the line that you added; fortunately there wasn't too much of a line left afterwards.
Image processing:
I have loaded the image (this would come from the code you have posted).
Using the assumptions above, I decided to focus on a narrow slice of the image, selecting only the middle 60 pixels (1):
slc = frame[:, 300:360]
Next, I have converted it to greyscale (2)
gray_slc = cv2.cvtColor(slc, cv2.COLOR_BGR2GRAY)
I have used Canny edge detection (see docs here) to find the edges in the image (3)
edges = cv2.Canny(gray_slc, 50, 90)
After that, I applied a Hough transform to find all the line segments (related Stack Overflow answer) (4):
rho = 1
theta = np.pi / 180
threshold = 15
min_line_length = 50
max_line_gap = 20
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
Given that I can assume that all of the top lines are the edge of the container, I averaged the y coordinates of each line's endpoints and picked the lowermost value as the water level:
y_avgs = [(line[0, 1] + line[0, 3]) / 2 for line in lines]
water_level = max(y_avgs)
Having this, I just checked whether it is over the threshold you selected:
trigger_threshold = 375
if water_level > trigger_threshold:
    print("Water level is under the selected line")
Now, keep in mind, I only had the one image to go on. Considering lighting conditions, your results may vary.
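For reference, here are the steps above collected into one function; the slice bounds, the Canny thresholds, and the Hough parameters are the ones used above and will likely need tuning for your own frames:

import cv2
import numpy as np

def water_level_y(frame, x0=300, x1=360):
    # estimate the water level's y coordinate in a narrow vertical
    # slice of the frame; returns None if no lines were found
    slc = frame[:, x0:x1]
    gray_slc = cv2.cvtColor(slc, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray_slc, 50, 90)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 15,
        np.array([]), minLineLength=50, maxLineGap=20)
    if lines is None:
        return None
    # average each segment's endpoint y values, take the lowermost
    y_avgs = [(line[0, 1] + line[0, 3]) / 2 for line in lines]
    return max(y_avgs)

# usage inside the capture loop (y grows downward, so "above the
# line" means a smaller y value):
# level = water_level_y(frame)
# if level is not None and level <= 375:
#     takepicture(frame)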

How to confirm if a point is in a certain region

I'm making a program in OpenCV-Python that tracks an object of a certain color as it moves around the frame. I want the frame to be divided into four equal parts, with each part representing a different letter. For example, if the object is in quadrant 1, output A. My question is, how do I set up regions on the frame so that, when the object is in a region, the program displays that region's letter on the frame? Like, how do I set up rectangles of regions using coordinate points?
Here's the code I have so far, and any help is appreciated.
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=32,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "orange"
# fish in the HSV color space
orangeLower = (5, 50, 50)
orangeUpper = (15, 255, 255)

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()

    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "orange", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, orangeLower, orangeUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            pts.appendleft(center)

    # loop over the set of tracked points
    for i in np.arange(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue

        # check to see if enough points have been accumulated in
        # the buffer
        if counter >= 10 and i == 10 and pts[i - 10] is not None:
            # compute the difference between the x and y
            # coordinates and re-initialize the direction
            # text variables
            dX = pts[i - 10][0] - pts[i][0]
            dY = pts[i - 10][1] - pts[i][1]
            (dirX, dirY) = ("", "")

            # ensure there is significant movement in the
            # x-direction
            if np.abs(dX) > 20:
                dirX = "East" if np.sign(dX) == 1 else "West"

            # ensure there is significant movement in the
            # y-direction
            if np.abs(dY) > 20:
                dirY = "South" if np.sign(dY) == 1 else "North"

            # handle when both directions are non-empty
            if dirX != "" and dirY != "":
                direction = "{}-{}".format(dirY, dirX)
            # otherwise, only one direction is non-empty
            else:
                direction = dirX if dirX != "" else dirY

        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the movement deltas and the direction of movement on
    # the frame
    cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (0, 0, 255), 3)
    cv2.putText(frame, "dx: {}, dy: {}".format(dX, dY),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)

    # draw the four quadrant rectangles, then show the frame to the
    # screen and increment the frame counter
    cv2.rectangle(img=frame, pt1=(0, 0), pt2=(300, 225), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(300, 1), pt2=(600, 225), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(0, 225), pt2=(300, 550), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(300, 225), pt2=(600, 550), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    counter += 1

    # Set up contours
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
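As for the region test itself: since each quadrant is an axis-aligned box, checking which one contains the tracked center is just two comparisons against the frame midlines. A small sketch (the letters A-D and their placement are assumptions; remap as needed):

def quadrant_letter(center, frame):
    # return the letter of the quadrant containing center = (x, y);
    # A-D placement is an assumption, remap as needed
    (h, w) = frame.shape[:2]
    (mid_x, mid_y) = (w // 2, h // 2)
    if center[0] < mid_x:
        return "A" if center[1] < mid_y else "C"
    return "B" if center[1] < mid_y else "D"

# inside the loop, after center is computed:
# if center is not None:
#     cv2.putText(frame, quadrant_letter(center, frame), (10, 60),
#         cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)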

Self-changing array size in OpenCV tracking program

So, for a school project I have to create an app that makes a tattoo appear on your arm.
At the moment, OpenCV uses the color of the skin to detect which part of the image is skin.
My problem is this: in the last step of the code, where the mask with the tattoo is merged into the video feed, the size of the array changes.
#attempt to save the ROI coordinates
fy1 = y1
fy2 = y1 + tatHeight
fx1 = x1
fx2 = x1 + tatWidth
#create a ROI mask
roi = frame[fy1:fy2, fx1:fx2]
#merge the ROI mask with the tattoo and the inverted tattoo masks
roi_bg = cv2.bitwise_and(roi, roi, mask=mask2inv)
roi_fg = cv2.bitwise_and(tatoo, tatoo, mask=mask2)
#merge the background and foreground ROI masks
dst = cv2.add(roi_bg, roi_fg)
# add the merged mask to the video feed
roiColor[fy1:fy2, fx1:fx2] = dst  # the problem is here
I get this error
ValueError: could not broadcast input array from shape (33,2,3) into shape (0,0,3)
Could someone help me figure out why the values of fx1/fx2 and fy1/fy2 change?
You can find the repo with the full code here
Thanks to anyone who can help.
EDIT: This is the website where I found some inspiration for my code.
EDIT 2: Here is the code:
# USAGE
# python ball_tracking.py --video ball_tracking_example.mp4
# python ball_tracking.py

# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2

# load tattoo image
imgTatoo = cv2.imread('mustache.png', -1)
tatMask = imgTatoo[:, :, 3]
# create a mask from the image
invTatMask = cv2.bitwise_not(tatMask)
imgTatoo = imgTatoo[:, :, 0:3]
# define original sizes for the tattoo
tatOrigHeight, tatOrigWidth = imgTatoo.shape[:2]

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (0, 0, 73)
greenUpper = (35, 93, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    frame = cv2.bilateralFilter(frame, 11, 17, 17)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw contour of desired shape
            cv2.drawContours(frame, c, -1, (239, 0, 0), 6)
            # create the smallest box containing that contour
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # draw the box
            cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
            # save the box parameters (center, height, width and angle)
            areaCenter = rect[0]
            areaX, areaY = int(areaCenter[0]), int(areaCenter[1])
            areaSize = rect[1]
            areaHeight = int(areaSize[0])
            areaWidth = int(areaSize[1])
            areaAngle = rect[2]
            # define the tattoo size
            tatWidth = int(0.2 * areaWidth)
            tatHeight = tatWidth * tatOrigHeight // tatOrigWidth
            #face = cv2.rectangle(frame,(areaX-areaWidth//4,areaY-areaHeight//4),(areaX+areaWidth//4,areaY+areaHeight//4),(255,0,0),2)
            #roiGray=gray[areaY-areaHeight//2:areaY+areaHeight//2, areaX-areaWidth//2:areaX+areaWidth//2]
            # create a mask from the video feed with the size of the region of interest (box created before)
            roiColor = frame[areaY-areaHeight//2:areaY+areaHeight//2, areaX-areaWidth//2:areaX+areaWidth//2]
            # print(areaX,areaY,areaWidth,areaHeight)
            # print(tatWidth,tatHeight)
            # save the center of the region of interest (ROI)
            x1 = areaX - (tatWidth//2)
            x2 = areaX + (tatWidth//2)
            y1 = areaY - (tatHeight//2)
            y2 = areaY + (tatHeight//2)
            # protect from weird center coordinates (outside of the frame)
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 > areaWidth:
                x2 = areaWidth
            if y2 > areaHeight:
                y2 = areaHeight
            print(x1, x2, y1, y2)
            # resize the tattoo to match the ROI size
            tatHeight = tatWidth * tatOrigHeight // tatOrigWidth
            tatWidth = x2 - x1
            # protect from weird (negative) tattoo sizes
            if tatHeight <= 0:
                tatHeight = 1
            if tatWidth <= 0:
                tatWidth = 2
            print(tatHeight)
            print(tatWidth)
            # resize all the masks to the same size in order to merge them
            tatoo = cv2.resize(imgTatoo, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            mask2 = cv2.resize(tatMask, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            mask2inv = cv2.resize(invTatMask, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            print(mask2inv.shape)
            # attempt to save the ROI coordinates
            fy1 = y1
            fy2 = y1 + tatHeight
            fx1 = x1
            fx2 = x1 + tatWidth
            # create a ROI mask
            roi = frame[fy1:fy2, fx1:fx2]
            print(roi.shape)
            # merge the ROI mask with the tattoo and the inverted tattoo masks
            roi_bg = cv2.bitwise_and(roi, roi, mask=mask2inv)
            roi_fg = cv2.bitwise_and(tatoo, tatoo, mask=mask2)
            print(roi_bg.shape, roi_fg.shape)
            # merge the background and foreground ROI masks
            dst = cv2.add(roi_bg, roi_fg)
            print("dst: ", dst.shape)
            print("roi: ", roiColor.shape)
            print(fy1, fy2, fy2 - fy1)
            print(fx1, fx2, fx2 - fx1)
            # add the merged mask to the video feed
            roiColor[fy1:fy2, fx1:fx2] = dst

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
OK, so thanks to Dan Masek, I solved the problem by bypassing roiColor (which was useless) with the following line:
frame[fy1:fy2, fx1:fx2] = dst
This writes dst straight into the original image, so the target area is guaranteed to lie inside the frame itself.
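For completeness, a sketch of why the shapes disagreed: x2 and y2 were clamped against areaWidth/areaHeight (the detected box) rather than the frame, so the slice being assigned to could end up empty. Clamping against the frame keeps dst and the target slice the same shape (this is a sketch of the idea, not the exact repo code):

# clamp the tattoo ROI against the frame, not the detected box
(frameH, frameW) = frame.shape[:2]
fx1, fy1 = max(x1, 0), max(y1, 0)
fx2 = min(x1 + tatWidth, frameW)
fy2 = min(y1 + tatHeight, frameH)
(roiW, roiH) = (fx2 - fx1, fy2 - fy1)
if roiW > 0 and roiH > 0:
    # resize the tattoo and its masks to the (possibly cropped) ROI
    tatoo = cv2.resize(imgTatoo, (roiW, roiH), interpolation=cv2.INTER_AREA)
    mask2 = cv2.resize(tatMask, (roiW, roiH), interpolation=cv2.INTER_AREA)
    mask2inv = cv2.resize(invTatMask, (roiW, roiH), interpolation=cv2.INTER_AREA)
    roi = frame[fy1:fy2, fx1:fx2]
    dst = cv2.add(cv2.bitwise_and(roi, roi, mask=mask2inv),
                  cv2.bitwise_and(tatoo, tatoo, mask=mask2))
    frame[fy1:fy2, fx1:fx2] = dst  # shapes now always agree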

Trying to draw circle on Laser pointer, OpenCV

So I have taken the code from the GitHub of #bradmontgomer and am trying to modify it. The code first converts the frame into the HSV color space, splits the video frame into color channels, and then performs an AND on the HSV components to identify the laser. I am having trouble finding the contours of the detected laser point. Here's my code:
def threshold_image(self, channel):
    if channel == "hue":
        minimum = self.hue_min
        maximum = self.hue_max
    elif channel == "saturation":
        minimum = self.sat_min
        maximum = self.sat_max
    elif channel == "value":
        minimum = self.val_min
        maximum = self.val_max

    (t, tmp) = cv2.threshold(
        self.channels[channel],  # src
        maximum,  # threshold value
        0,  # we don't care because of the selected type
        cv2.THRESH_TOZERO_INV  # threshold type
    )
    (t, self.channels[channel]) = cv2.threshold(
        tmp,  # src
        minimum,  # threshold value
        255,  # max value
        cv2.THRESH_BINARY  # type
    )
    if channel == 'hue':
        # only works for filtering red color because the range for the hue is split
        self.channels['hue'] = cv2.bitwise_not(self.channels['hue'])

def detect(self, frame):
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # split the video frame into color channels
    h, s, v = cv2.split(hsv_img)
    self.channels['hue'] = h
    self.channels['saturation'] = s
    self.channels['value'] = v

    # threshold ranges of HSV components; storing the results in place
    self.threshold_image("hue")
    self.threshold_image("saturation")
    self.threshold_image("value")

    # perform an AND on HSV components to identify the laser!
    self.channels['laser'] = cv2.bitwise_and(
        self.channels['hue'],
        self.channels['value']
    )
    self.channels['laser'] = cv2.bitwise_and(
        self.channels['saturation'],
        self.channels['laser']
    )

    # merge the HSV components back together
    hsv_image = cv2.merge([
        self.channels['hue'],
        self.channels['saturation'],
        self.channels['value'],
    ])

    thresh = cv2.threshold(self.channels['laser'], 25, 255, cv2.THRESH_BINARY)[1]

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    #cnts = cv2.findContours(self.channels['laser'].copy(), cv2.RETR_EXTERNAL,
    #    cv2.CHAIN_APPROX_SIMPLE)
    (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    cv2.imshow('LaserPointer', self.channels['laser'])
    ################################################
    return hsv_image
I am getting cnts greater than 0 in the line "if len(cnts) > 0:", but I can't see a circle drawn on the laser pointer.
There was another function (display()) that was displaying the laser frame (self.channels['laser']):
def display(self, img, frame):
    """Display the combined image and (optionally) all other image channels
    NOTE: default color space in OpenCV is BGR.
    """
    cv2.imshow('RGB_VideoFrame', frame)
    cv2.imshow('LaserPointer', self.channels['laser'])
I commented out these cv2.imshow lines in that function, and then I was able to see the circle around the laser pointer. This is because the cv2.imshow line inside the detect(self, frame) function was now the one that took effect (both functions were writing to the same 'LaserPointer' window). I then applied further processing to the pointer to detect its location.
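If you'd rather keep display() than comment it out, one option (a sketch; the window names are my own) is to give each view a distinct title, since two cv2.imshow calls with the same 'LaserPointer' name share one window and the last call wins:

def display(self, img, frame):
    # show each view under its own title so the 'LaserPointer' window
    # drawn in detect() is not overwritten here
    cv2.imshow('RGB_VideoFrame', frame)
    cv2.imshow('LaserChannel', self.channels['laser'])  # renamed window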
