Unable to load OpenCV detection inside a video stream (Flask) - Python

I'm trying to run this Python script inside a Flask application, but the detections don't load: only the camera feed is visible. I've tried face and hand detection with Flask, and both worked perfectly, but with this script the detections never appear. The script below measures object sizes in centimeters.
This is the Python script I am trying to load inside the Flask app:
import cv2
import numpy as np
import imutils
from imutils import perspective
from scipy.spatial import distance as dist

def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

cap = cv2.VideoCapture(0)

while True:
    # read once per iteration; the original `while (cap.read()):` discarded
    # every other frame and never checked for a failed read
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, None, fx=1, fy=1, interpolation=cv2.INTER_AREA)
    orig = frame[:1080, 0:1920]
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (15, 15), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, 11, 2)
    kernel = np.ones((3, 3), np.uint8)
    closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)
    result_img = closing.copy()
    contours, hierarchy = cv2.findContours(result_img, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    hitung_objek = 0  # object count
    pixelsPerMetric = None
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 1000 or area > 120000:
            continue
        # note: this resets the drawing canvas for every contour, so only the
        # last detected object's annotations survive to the imshow below
        orig = frame.copy()
        box = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 64), 2)
        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 5, (0, 255, 64), -1)
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)
        cv2.circle(orig, (int(tltrX), int(tltrY)), 0, (0, 255, 64), 5)
        cv2.circle(orig, (int(blbrX), int(blbrY)), 0, (0, 255, 64), 5)
        cv2.circle(orig, (int(tlblX), int(tlblY)), 0, (0, 255, 64), 5)
        cv2.circle(orig, (int(trbrX), int(trbrY)), 0, (0, 255, 64), 5)
        cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                 (255, 0, 255), 2)
        cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                 (255, 0, 255), 2)
        lebar_pixel = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))    # width in pixels
        panjang_pixel = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))  # length in pixels
        if pixelsPerMetric is None:
            pixelsPerMetric = lebar_pixel
            pixelsPerMetric = panjang_pixel  # overwrites the assignment above
        lebar = lebar_pixel
        panjang = panjang_pixel
        cv2.putText(orig, "L: {:.1f}CM".format(lebar_pixel / 25.5),
                    (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 255), 2)
        cv2.putText(orig, "B: {:.1f}CM".format(panjang_pixel / 25.5),
                    (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 255), 2)
        hitung_objek += 1
    cv2.putText(orig, "OBJECTS: {}".format(hitung_objek), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.imshow('Camera', orig)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
[Output image]
PS: I have also checked the console; there are no errors.
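A likely culprit (an assumption, since the Flask wiring isn't shown): inside Flask there is no cv2.imshow window, so the processed frame has to be JPEG-encoded and yielded to the browser; if the generator yields the raw frame instead of the annotated one, only the camera feed appears. A minimal sketch of the usual MJPEG streaming pattern, where process() is a hypothetical wrapper around the measurement code above that returns the annotated orig:

import cv2
from flask import Flask, Response

app = Flask(__name__)
cap = cv2.VideoCapture(0)

def process(frame):
    # hypothetical: run the contour/measurement drawing above, return `orig`
    return frame

def gen_frames():
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        annotated = process(frame)
        ok, buf = cv2.imencode('.jpg', annotated)  # encode the annotated frame
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buf.tobytes() + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')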

Related

OpenCV code I tried cannot detect some ArUco markers

As I learned from tutorial videos, I tried this code:
import numpy as np
import cv2

ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
}
# In fact there are more dictionary keys than written above (including the
# DICT_4X4_250 used below). I deleted them to shorten the question.

def aruco_display(corners, ids, rejected, image):
    if len(corners) > 0:
        ids = ids.flatten()
        for (markerCorner, markerID) in zip(corners, ids):
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
            cv2.putText(image, str(markerID), (topLeft[0], topLeft[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            print("[Inference] ArUco marker ID: {}".format(markerID))
    return image

img = cv2.imread('markers.jpg', 1)
# the first parameter will change according to the name of the photo

aruco_type = ["DICT_4X4_50",
              "DICT_4X4_100",
              "DICT_4X4_250",
              ]

for i in aruco_type:
    arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[i])
    arucoParams = cv2.aruco.DetectorParameters()
    corners, ids, rejected = cv2.aruco.ArucoDetector(arucoDict, arucoParams).detectMarkers(img)
    detected_markers = aruco_display(corners, ids, rejected, img)
    cv2.imshow("Image", detected_markers)
    cv2.waitKey(0)

cv2.destroyAllWindows()
This code detects most of the markers, but it still fails on some ArUco markers, like these:
[Image: ArUco1]
[Image: ArUco2]
How can I solve this issue? Since it detects some of them, I don't understand why it cannot detect the other ArUco markers in the same image.
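Two things worth checking (assumptions, since the failing markers aren't reproduced here): a marker generated from a different dictionary (for example a 5x5 grid) will never be matched by a 4x4 dictionary, and the default detector parameters can miss small, blurred, or low-contrast markers. A sketch of loosening cv2.aruco.DetectorParameters; the specific values are starting points to tune, not known-good settings:

import cv2

arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
params = cv2.aruco.DetectorParameters()
params.adaptiveThreshWinSizeMin = 3     # try more adaptive-threshold window sizes
params.adaptiveThreshWinSizeMax = 53
params.adaptiveThreshWinSizeStep = 4
params.minMarkerPerimeterRate = 0.01    # allow smaller markers in the frame
params.errorCorrectionRate = 0.8        # tolerate more bit errors when decoding

detector = cv2.aruco.ArucoDetector(arucoDict, params)
img = cv2.imread('markers.jpg')
corners, ids, rejected = detector.detectMarkers(img)
print('detected:', 0 if ids is None else len(ids))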

Real-Time Measurement Tool with OpenCV and Python

I'm trying to build a real-time measurement algorithm. My problem is visible in the picture below:
[image]
I need a diagonal measurement, but so far I have only managed to measure the two edges and one diagonal. When I try to compute the other diagonal, I get an error: every calculation starts from the same corner (index 0), and when I try to start from another corner, it fails. I don't understand why I can't start from another corner.
Measurement code
import cv2
import utlis

###################################
webcam = True
path = '1.jpg'
cap = cv2.VideoCapture(0)
cap.set(10, 160)
cap.set(3, 1920)
cap.set(4, 1080)
scale = 3
wP = 210 * scale
hP = 297 * scale
###################################

while True:
    if webcam:
        success, img = cap.read()
    else:
        img = cv2.imread(path)
    imgContours, conts = utlis.getContours(img, minArea=50000, filter=4)
    if len(conts) != 0:
        biggest = conts[0][2]
        print(biggest)
        imgWarp = utlis.warpImg(img, biggest, wP, hP)
        imgContours2, conts2 = utlis.getContours(imgWarp,
                                                 minArea=2000, filter=4,
                                                 cThr=[50, 50], draw=False)
        if len(conts) != 0:
            for obj in conts2:
                cv2.polylines(imgContours2, [obj[2]], True, (0, 255, 0), 2)
                nPoints = utlis.reorder(obj[2])
                nW = round((utlis.findDis(nPoints[0][0] // scale, nPoints[1][0] // scale) / 10), 1)
                nH = round((utlis.findDis(nPoints[0][0] // scale, nPoints[2][0] // scale) / 10), 1)
                nQ = round((utlis.findDis(nPoints[0][0] // scale, nPoints[3][0] // scale) / 10), 1)
                # nPoints has shape (4, 1, 2), so indices like nPoints[2][2] or
                # nPoints[3][3] are out of range and raise the error described
                # above; the second diagonal is corner 1 -> corner 2:
                nW2 = round((utlis.findDis(nPoints[1][0] // scale, nPoints[2][0] // scale) / 10), 1)
                #nH2 = round((utlis.findDis(nPoints[3][3] // scale, nPoints[2][0] // scale) / 10), 1)
                #nZ = round((utlis.findDis(nPoints[3][3] // scale, nPoints[3][0] // scale) / 10), 1)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[1][0][0], nPoints[1][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[2][0][0], nPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[3][0][0], nPoints[3][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                #cv2.arrowedLine(imgContours2, (nPoints[3][3][3], nPoints[3][3][1]),
                #                (nPoints[1][0][0], nPoints[1][0][1]),
                #                (255, 0, 255), 3, 8, 0, 0.05)
                x, y, w, h = obj[3]
                cv2.putText(imgContours2, '{}cm'.format(nW), (x + 30, y - 10),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nH), (x - 70, y + h // 2),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nQ), (x + 200, y + 200),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
        cv2.imshow('A4', imgContours2)
    img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
    cv2.imshow('Original', img)
    cv2.waitKey(1)
utlis.py
import cv2
import numpy as np

def getContours(img, cThr=[100, 100], showCanny=False, minArea=1000, filter=0, draw=False):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=3)
    imgThre = cv2.erode(imgDial, kernel, iterations=2)
    if showCanny:
        cv2.imshow('Canny', imgThre)
    contours, hierarchy = cv2.findContours(imgThre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    finalCountours = []
    for i in contours:
        area = cv2.contourArea(i)
        if area > minArea:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.02 * peri, True)
            bbox = cv2.boundingRect(approx)
            if filter > 0:
                if len(approx) == filter:
                    finalCountours.append([len(approx), area, approx, bbox, i])
            else:
                finalCountours.append([len(approx), area, approx, bbox, i])
    finalCountours = sorted(finalCountours, key=lambda x: x[1], reverse=True)
    if draw:
        for con in finalCountours:
            cv2.drawContours(img, con[4], -1, (0, 0, 255), 3)
    return img, finalCountours

def reorder(myPoints):
    # print(myPoints.shape)
    myPointsNew = np.zeros_like(myPoints)
    myPoints = myPoints.reshape((4, 2))
    add = myPoints.sum(1)
    myPointsNew[0] = myPoints[np.argmin(add)]
    myPointsNew[3] = myPoints[np.argmax(add)]
    diff = np.diff(myPoints, axis=1)
    myPointsNew[1] = myPoints[np.argmin(diff)]
    myPointsNew[2] = myPoints[np.argmax(diff)]
    return myPointsNew

def warpImg(img, points, w, h, pad=20):
    # print(points)
    points = reorder(points)
    pts1 = np.float32(points)
    pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgWarp = cv2.warpPerspective(img, matrix, (w, h))
    imgWarp = imgWarp[pad:imgWarp.shape[0] - pad, pad:imgWarp.shape[1] - pad]
    return imgWarp

def findDis(pts1, pts2):
    return ((pts2[0] - pts1[0]) ** 2 + (pts2[1] - pts1[1]) ** 2) ** 0.5
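On the diagonal question: the error most likely comes from the indexing rather than from "starting at another corner". A sketch, reusing obj and scale from the measurement loop above and assuming the corner ordering that reorder() produces (index 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right):

nPoints = utlis.reorder(obj[2])   # shape (4, 1, 2): each corner is nPoints[i][0]
tl, tr, bl, br = (nPoints[i][0] for i in range(4))

# both diagonals in cm; nPoints[3][3] fails because the middle axis has length 1
diag1 = round(utlis.findDis(tl // scale, br // scale) / 10, 1)  # top-left  -> bottom-right
diag2 = round(utlis.findDis(tr // scale, bl // scale) / 10, 1)  # top-right -> bottom-left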

How to remove the shadow of a moving object from an image using OpenCV (Python)?

I am trying to do background subtraction using MOG2. It was working fine, but when a moving object casts a deep shadow, the shadow is treated as part of the foreground, which I don't want (I'm running MOG2 over 13 images). How can I remove these shadows so that they don't end up in the foreground?
Here is a sample image:
[original image]
[image after applying MOG2]
Here is my sample code:
import os
import numpy as np
import cv2
import glob
import imutils

i = 0
bg_flag = 0
image_list = []
bgs_list = []

#bgs final function
def detection(image_list):
    global i
    global bg_flag
    bgs3_img = None
    # signature: createBackgroundSubtractorMOG2(history, varThreshold, detectShadows);
    # note that cv2.THRESH_BINARY (= 0) is being passed as varThreshold here
    backsub = cv2.createBackgroundSubtractorMOG2(128, cv2.THRESH_BINARY, 1)
    print("start2")
    for k in range(len(image_list)):
        frame = image_list[k]
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imwrite('./gray/' + str(k) + '.jpg', frame)
        #blur = cv2.medianBlur(frame, 21)
        blur = frame
        bgs_list.append(blur)
    for bg in range(len(bgs_list)):
        rects = []
        #start_time = time.time()
        frame_blur = bgs_list[bg]
        img = image_list[bg].copy()
        s_frame = image_list[bg]
        new_frame = s_frame.copy()
        fgmask = backsub.apply(frame_blur)
        cv2.imwrite("./bgs/" + str(i) + ".jpg", fgmask)
        fgmask[fgmask == 127] = 0   # shadow pixels are marked 127 by MOG2
        cv2.imwrite("./dilate/" + str(i) + ".jpg", fgmask)
        thresh = cv2.threshold(fgmask, 128, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=1)
        thresh = cv2.dilate(thresh, None, iterations=1)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        for c in cnts:
            #M = cv2.moments(c)
            A = cv2.contourArea(c)
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(new_frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
            cv2.putText(new_frame, str(A), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            cv2.imwrite("./area/" + str(i) + ".jpg", new_frame)
            cv2.rectangle(thresh, (x, y), (x + w, y + h), (255, 255, 255), 1)
            cv2.putText(thresh, str(A), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imwrite("./area_bgs/" + str(i) + ".jpg", thresh)
        i += 1
    print("Done!")

#this folder contains 13 continuous images
images = glob.glob('./inci4/*.jpg')
for j in range(len(images)):
    img = cv2.imread(images[j])
    img = cv2.resize(img, (360, 640))
    image_list.append(img)
detection(image_list)
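One thing to note: createBackgroundSubtractorMOG2(128, cv2.THRESH_BINARY, 1) passes cv2.THRESH_BINARY (which equals 0) as varThreshold, which is almost certainly not intended. A minimal sketch with named arguments, assuming the default shadow value of 127; the history and varThreshold values are starting points to tune against the 13 images:

import cv2

# with detectShadows=True, MOG2 marks foreground as 255 and shadows as 127
backsub = cv2.createBackgroundSubtractorMOG2(history=128, varThreshold=16,
                                             detectShadows=True)

def foreground_without_shadow(frame):
    fgmask = backsub.apply(frame)
    # keep only strict foreground: thresholding above 127 drops shadow pixels
    _, fg = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
    return fg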

Object Detection Using Raspberry Pi and Android IP Camera with Python and OpenCV

Here is the code I have used for object detection with a Raspberry Pi and an Android IP camera. I'm not getting any output, and the code does not raise any errors. Can someone figure out what the problem is?
import urllib.request
import cv2
import numpy as np
import datetime
import math

#global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
MinCountourArea = 3000        #Adjust this value according to your usage
BinarizationThreshold = 70    #Adjust this value according to your usage
OffsetRefLines = 150          #Adjust this value according to your usage

#Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

#Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0
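For reference, a quick sanity check of how the crossing functions behave (the y values below are hypothetical):

# with an entrance line at y=170 and an exit line at y=470, a centroid within
# 2 px of the entrance line (and above the exit line) counts as entering
print(CheckEntranceLineCrossing(171, 170, 470))  # 1
print(CheckEntranceLineCrossing(200, 170, 470))  # 0 (more than 2 px away)
print(CheckExitLineCrossing(469, 170, 470))      # 1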
This is the code I have used to obtain the video stream from my IP camera:
ReferenceFrame = None
while True:
    # note: this creates a new VideoCapture on every iteration; shot.jpg
    # returns a single still image per request
    camera = cv2.VideoCapture("http://192.168.1.6:8080/shot.jpg")
    camera.set(3, 640)
    camera.set(4, 480)
    (ret, Frame) = camera.read()
    #if we cannot grab a frame, the program ends here (checked before the
    #frame is used, otherwise a failed read would crash on None)
    if not ret:
        break
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)
This is the part of the code I have used to draw the reference lines and display the frame for object detection and counting:
    #gray-scale conversion and Gaussian blur filter
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue
    #Background subtraction and image binarization
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]
    #Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    # OpenCV 3 returns (image, contours, hierarchy); on OpenCV 4 findContours
    # returns only (contours, hierarchy), so this unpacking fails there
    _, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    QttyOfContours = 0
    #plot reference lines (entrance and exit lines); cv2.line needs ints
    CoorYEntranceLine = int((height / 2) - OffsetRefLines)
    CoorYExitLine = int((height / 2) + OffsetRefLines)
    cv2.line(Frame, (0, CoorYEntranceLine), (width, CoorYEntranceLine), (255, 0, 0), 2)
    cv2.line(Frame, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)
    #check all found contours
    for c in cnts:
        #a contour with a small area is ignored
        if cv2.contourArea(c) < MinCountourArea:
            continue
        QttyOfContours = QttyOfContours + 1
        #draw a rectangle around the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        #find the object's centroid (integer division keeps coordinates ints)
        CoordXCentroid = (x + x + w) // 2
        CoordYCentroid = (y + y + h) // 2
        ObjectCentroid = (CoordXCentroid, CoordYCentroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)
        if (CheckEntranceLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            EntranceCounter += 1
        if (CheckExitLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            ExitCounter += 1
    print("Total contours found: " + str(QttyOfContours))
    #Write the entrance and exit counter values on the frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow('Salida', Frame)
    cv2.waitKey(1)

# When everything is done, release the capture (the variable is camera, not cap)
camera.release()
cv2.destroyAllWindows()
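A sketch of the usual pattern for this kind of endpoint (names assumed from the Android IP Webcam app): shot.jpg serves a single still per HTTP request, so it is typically fetched with urllib and decoded, while the continuous /video stream can be opened once outside the loop:

import urllib.request
import numpy as np
import cv2

def grab_shot(url="http://192.168.1.6:8080/shot.jpg"):
    # fetch one JPEG over HTTP and decode it into a BGR frame (None on failure)
    resp = urllib.request.urlopen(url)
    data = np.frombuffer(resp.read(), dtype=np.uint8)
    return cv2.imdecode(data, cv2.IMREAD_COLOR)

frame = grab_shot()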
The correct code (the key change: the continuous /video stream is opened once with cap.open, instead of re-creating a VideoCapture from shot.jpg on every loop iteration):
import cv2
import numpy as np
import math

def nothing(x):
    pass

width = 0
height = 0
EntranceCounter = 0
OffsetRefLines = 150
ExitCounter = 0
BinarizationThreshold = 70
MinCountourArea = 3000

cap = cv2.VideoCapture(0)
path = "http://192.168.1.6:8080/video"
cap.open(path)
ReferenceFrame = None

#Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

#Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0

#cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)

while True:
    #frame = cv2.imread('smarties.png')
    if cap.isOpened():
        rval, frame = cap.read()
        while rval:
            rval, frame = cap.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # grayscale, despite the name
            hsv = cv2.GaussianBlur(hsv, (21, 21), 0)
            if ReferenceFrame is None:
                ReferenceFrame = hsv
                continue
            #Background subtraction and image binarization
            FrameDelta = cv2.absdiff(ReferenceFrame, hsv)
            FrameThresh = cv2.threshold(FrameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            #Dilate image and find all the contours
            FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
            cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            QttyOfContours = 0
            #plot reference lines (entrance and exit lines)
            cv2.line(frame, (0, 170), (2000, 170), (255, 0, 0), 5)
            cv2.line(frame, (0, 470), (2000, 470), (0, 0, 255), 5)
            #check all found contours
            for c in cnts:
                #a contour with a small area is ignored
                if cv2.contourArea(c) < MinCountourArea:
                    continue
                QttyOfContours = QttyOfContours + 1
                #draw a rectangle around the object
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                #find the object's centroid
                CoordXCentroid = int(x + x + w) / 2
                CoordYCentroid = int(y + y + h) / 2
                ObjectCentroid = (x, y)
                cv2.circle(frame, ObjectCentroid, 2, (0, 255, 0), 5)
                if (CheckEntranceLineCrossing(CoordYCentroid, 170, 470)):
                    EntranceCounter += 1
                if (CheckExitLineCrossing(CoordYCentroid, 170, 470)):
                    ExitCounter += 1
            print("Total contours found: " + str(QttyOfContours))
            #Write the entrance and exit counter values on the frame and show it
            cv2.putText(frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (250, 0, 1), 2)
            cv2.putText(frame, "Exits: {}".format(str(ExitCounter)), (10, 110),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
            imS = cv2.resize(frame, (400, 400))  # Resize image
            #imSS = cv2.resize(mask, (200, 200))
            #imSSS = cv2.resize(frame, (200, 200))
            cv2.imshow("frame", imS)
            #cv2.imshow("mask", imSS)
            #cv2.imshow("res", imSSS)
            key = cv2.waitKey(1)
            if key == 27:
                break

cap.release()
cv2.destroyAllWindows()

What is the best way to detect the color of circles detected using cv2.HoughCircles?

I am fairly new to image processing and OpenCV. I need to detect the colors of some balls. I started by detecting the balls with cv2.HoughCircles, but after that I got stuck.
Most answers online suggest finding contours and matching them against color ranges, or computing the distance to the closest known color. But I couldn't find a way to transform the output of cv2.HoughCircles into contours (I am not sure this is even possible).
while reader.isOpened():
    ret, frame = reader.getCapture()
    if ret:
        workingFrame = imutils.resize(frame, width=600)
        copy = frame.copy()
        grayMask = cv2.cvtColor(copy, cv2.COLOR_BGR2GRAY)
        circlesArr = cv2.HoughCircles(grayMask, cv2.HOUGH_GRADIENT, 1.3, 20,
                                      param1=40, param2=30, minRadius=5, maxRadius=20)
        if circlesArr is not None:
            for circle in circlesArr[0, :]:
                # detect colors
                cv2.circle(frame, (circle[0], circle[1]), circle[2], GREEN, 1)
                cv2.circle(frame, (circle[0], circle[1]), 2, GREEN, 2)
        cv2.imshow("frame", frame)
    else:
        break
    if quitKeyPressed():
        break
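One way to sample a circle's color without converting it to a contour (a sketch, assuming BGR frames): draw the detected circle into a blank mask and average the pixels under it with cv2.mean.

import cv2
import numpy as np

def mean_color_of_circle(frame, x, y, r):
    # filled circle as a mask, then the mean B, G, R under that mask
    mask = np.zeros(frame.shape[:2], dtype=np.uint8)
    cv2.circle(mask, (int(x), int(y)), int(r), 255, -1)
    return cv2.mean(frame, mask=mask)[:3]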
I managed to find the dominant color for each circle using the following steps.
First, I detected the circles with cv2.HoughCircles:
def detectCirclesWithDp(frame, dp=1):
    blurred = cv2.medianBlur(frame, 25)
    grayMask = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    # cannyMask = cv2.Canny(grayMask, 50, 240)
    return cv2.HoughCircles(grayMask, cv2.HOUGH_GRADIENT, dp, 40,
                            param1=10, param2=30, minRadius=20, maxRadius=70)
Then, for each circle, I selected an ROI around it:
def getROI(frame, x, y, r):
    return frame[int(y - r / 2):int(y + r / 2), int(x - r / 2):int(x + r / 2)]
After that, I calculated the dominant color in the region using k-means and color masks:
# RED .. RED_2 are string constants defined elsewhere, matching the keys below
COLOR_NAMES = [RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, PURPLE, RED_2]

COLOR_RANGES_HSV = {
    "red": [(0, 50, 10), (10, 255, 255)],
    "orange": [(10, 50, 10), (25, 255, 255)],
    "yellow": [(25, 50, 10), (35, 255, 255)],
    "green": [(35, 50, 10), (80, 255, 255)],
    "cyan": [(80, 50, 10), (100, 255, 255)],
    "blue": [(100, 50, 10), (130, 255, 255)],
    "purple": [(130, 50, 10), (170, 255, 255)],
    "red ": [(170, 50, 10), (180, 255, 255)]
}

def getMask(frame, color):
    blurredFrame = cv2.GaussianBlur(frame, (3, 3), 0)
    hsvFrame = cv2.cvtColor(blurredFrame, cv2.COLOR_BGR2HSV)
    colorRange = COLOR_RANGES_HSV[color]
    lower = np.array(colorRange[0])
    upper = np.array(colorRange[1])
    colorMask = cv2.inRange(hsvFrame, lower, upper)
    colorMask = cv2.bitwise_and(blurredFrame, blurredFrame, mask=colorMask)
    return colorMask

def getDominantColor(roi):
    # cv2.kmeans expects one sample per row, so flatten the ROI to a list of
    # BGR pixels before clustering
    pixels = np.float32(roi).reshape((-1, 3))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    K = 4
    ret, label, center = cv2.kmeans(pixels, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape(roi.shape)
    pixelsPerColor = []
    for color in COLOR_NAMES:
        mask = getMask(res2, color)
        greyMask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        count = cv2.countNonZero(greyMask)
        pixelsPerColor.append(count)
    return COLOR_NAMES[pixelsPerColor.index(max(pixelsPerColor))]
Everything put together:
def detect():
    circles = detector.detectCirclesWithDp(imgCopy)
    if circles is not None:
        for circle in circles[0, :]:
            if imageUtils.inFrame(img, circle[0], circle[1]):
                roi = imageUtils.getROI(imgCopy, circle[0], circle[1], circle[2])
                color = colorlabeler.getDominantColor(roi)
                cv2.circle(img, (circle[0], circle[1]), circle[2], colorlabeler.COLORS_RGB["green"], 1)
                cv2.circle(img, (circle[0], circle[1]), 2, colorlabeler.COLORS_RGB["green"], 2)
                cv2.putText(img, color, (int(circle[0] + 40), int(circle[1] + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, colorlabeler.COLORS_RGB["green"])
    while True:
        cv2.imshow("frame", img)
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
And finally, the result:
[result image]
