I am attempting to count the dark objects in the first image. I've been working with the supplied code using SimpleBlobDetector (found here on SO), but I can't work out how to count each object reliably. Because the images can be taken from varying heights, the appropriate min/max area values vary greatly. The backgrounds, however, are always white.
Before
After
#!/usr/bin/env python3
import cv2
import numpy as np

def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    dim = None
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation=inter)

# Read image
im = cv2.imread("image7-1.jpg", cv2.IMREAD_GRAYSCALE)

params = cv2.SimpleBlobDetector_Params()

# Change thresholds
params.minThreshold = 10
params.maxThreshold = 200

# Filter by Area.
params.filterByArea = True
params.minArea = 80
params.maxArea = 450

# Filter by Circularity
params.filterByCircularity = False
params.minCircularity = 0.1

# Filter by Convexity
params.filterByConvexity = False
params.minConvexity = 0.87

# Filter by Inertia
params.filterByInertia = False
params.minInertiaRatio = 0.01

# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
    detector = cv2.SimpleBlobDetector(params)
else:
    detector = cv2.SimpleBlobDetector_create(params)

# Detect blobs.
keypoints = detector.detect(im)
print(len(keypoints))

im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite("result.png", im_with_keypoints)
resize = ResizeWithAspectRatio(im_with_keypoints, width=900)
cv2.imshow("Result", resize)
cv2.waitKey(0)
I can do this extremely reliably in MATLAB with ~98% accuracy, but I really want to port it to Python. Any ideas on what I can do to produce better/more accurate results? The MATLAB code is:
Rmin = 6;
Rmax = 15;
color = imread('image7.jpg');
grayImage=rgb2gray(color);
imshow(color), title('Image');
[centersNew, radiiNew] = imfindcircles(grayImage,[Rmin Rmax],'ObjectPolarity','dark','Sensitivity',0.965);
[centersNew,radiiNew] = RemoveOverLap(centersNew,radiiNew,5,1);
viscircles(centersNew,radiiNew, 'EdgeColor','r');
numel(radiiNew)
Result
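For the Python port, the closest OpenCV analogue of imfindcircles is cv2.HoughCircles. Below is a minimal sketch, reusing the 6-15 px radius band from the MATLAB snippet; note that HOUGH_GRADIENT has no direct equivalent of 'ObjectPolarity' or 'Sensitivity', so param2 (the accumulator threshold) is the main knob to tune, and the radius band may need to scale with image height since the photos are taken from varying heights.

import cv2
import numpy as np

# A rough Python sketch of the MATLAB imfindcircles approach using
# cv2.HoughCircles. The radius range comes from the MATLAB snippet;
# param2 plays a role loosely similar to MATLAB's 'Sensitivity'.
gray = cv2.imread("image7.jpg", cv2.IMREAD_GRAYSCALE)
gray = cv2.medianBlur(gray, 5)  # suppress noise before the Hough transform

circles = cv2.HoughCircles(
    gray,
    cv2.HOUGH_GRADIENT,
    dp=1,          # accumulator resolution equal to the image resolution
    minDist=10,    # minimum distance between circle centres (overlap control)
    param1=100,    # upper Canny threshold used internally
    param2=20,     # accumulator threshold: lower = more (possibly false) circles
    minRadius=6,
    maxRadius=15,
)

if circles is not None:
    circles = np.uint16(np.around(circles))
    print(len(circles[0]))  # circle count, analogous to numel(radiiNew)
    out = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    for x, y, r in circles[0]:
        cv2.circle(out, (x, y), r, (0, 0, 255), 2)
    cv2.imwrite("hough_result.png", out)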
I was trying to predict defects on a metal plate using YOLOv5 pre-trained weights. It was throwing this error:
File "C:\Users\acer.spyder-py3\metallic surface defect detection\untitled3.py", line 59, in post_process
    if confidence >= CONFIDENCE_THRESHOLD:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
import cv2
import numpy as np

# Constants.
INPUT_WIDTH = 640
INPUT_HEIGHT = 640
SCORE_THRESHOLD = 0.5
NMS_THRESHOLD = 0.45
CONFIDENCE_THRESHOLD = 0.45

# Text parameters.
FONT_FACE = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 0.7
THICKNESS = 1

# Colors.
BLACK = (0, 0, 0)
BLUE = (255, 178, 50)
YELLOW = (0, 255, 255)

def draw_label(im, label, x, y):
    """Draw text onto image at location."""
    # Get text size.
    text_size = cv2.getTextSize(label, FONT_FACE, FONT_SCALE, THICKNESS)
    dim, baseline = text_size[0], text_size[1]
    # Use text size to create a black rectangle.
    cv2.rectangle(im, (x, y), (x + dim[0], y + dim[1] + baseline), BLACK, cv2.FILLED)
    # Display text inside the rectangle.
    cv2.putText(im, label, (x, y + dim[1]), FONT_FACE, FONT_SCALE, YELLOW, THICKNESS, cv2.LINE_AA)

def pre_process(input_image, net):
    # Create a 4D blob from a frame.
    blob = cv2.dnn.blobFromImage(input_image, 1/255, (INPUT_WIDTH, INPUT_HEIGHT), [0, 0, 0], 1, crop=False)
    # Set the input to the network.
    net.setInput(blob)
    # Run the forward pass to get output of the output layers.
    outputs = net.forward(net.getUnconnectedOutLayersNames())
    return outputs

def post_process(input_image, outputs):
    # Lists to hold respective values while unwrapping.
    class_ids = []
    confidences = []
    boxes = []
    # Rows.
    rows = outputs[0].shape[1]
    image_height, image_width = input_image.shape[:2]
    # Resizing factor.
    x_factor = image_width / INPUT_WIDTH
    y_factor = image_height / INPUT_HEIGHT
    # Iterate through detections.
    for r in range(rows):
        row = outputs[0][0][r]
        confidence = row[4]
        # Discard bad detections and continue.
        if confidence >= CONFIDENCE_THRESHOLD:
            classes_scores = row[5:]
            # Get the index of the max class score.
            class_id = np.argmax(classes_scores)
            # Continue if the class score is above threshold.
            if classes_scores[class_id] > SCORE_THRESHOLD:
                confidences.append(confidence)
                class_ids.append(class_id)
                cx, cy, w, h = row[0], row[1], row[2], row[3]
                left = int((cx - w/2) * x_factor)
                top = int((cy - h/2) * y_factor)
                width = int(w * x_factor)
                height = int(h * y_factor)
                box = np.array([left, top, width, height])
                boxes.append(box)
    # Perform non-maximum suppression to eliminate redundant, overlapping boxes with lower confidences.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)
    for i in indices:
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        # Draw bounding box.
        cv2.rectangle(input_image, (left, top), (left + width, top + height), BLUE, 3 * THICKNESS)
        # Class label.
        label = "{}:{:.2f}".format(classes[class_ids[i]], confidences[i])
        # Draw label.
        draw_label(input_image, label, left, top)
    return input_image

if __name__ == '__main__':
    # Load class names.
    classesFile = "defects.names"
    classes = None
    with open(classesFile, 'rt') as f:
        classes = f.read().rstrip('\n').split('\n')
    # Load image.
    frame = cv2.imread('img_02_3436787300_00007_jpg.rf.e9923d3a70d1aeb92e45896b9c12cfa3.jpg')
    # Give the weight files to the model and load the network using them.
    modelWeights = "models_train/best.onnx"
    net = cv2.dnn.readNet(modelWeights)
    # Process image.
    detections = pre_process(frame, net)
    img = post_process(frame.copy(), detections)
    """
    Put efficiency information. The function getPerfProfile returns the overall time for inference (t)
    and the timings for each of the layers (in layersTimes).
    """
    t, _ = net.getPerfProfile()
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
    print(label)
    cv2.putText(img, label, (20, 40), FONT_FACE, FONT_SCALE, (0, 0, 255), THICKNESS, cv2.LINE_AA)
    cv2.imshow('Output', img)
    cv2.waitKey(0)
I have only a little idea of how to deploy models for commercial use. If you find any other errors, please let me know as well. Thanks in advance.
A simple search led me to this SO post, which highlights a recently common issue. Following this blog got me close, but I ran into the issue above.
net.getUnconnectedOutLayers() returns an array of index values. The output layers are obtained from net.getLayerNames() based on these index values.
In the following case net.getUnconnectedOutLayers() returns:
array([200, 227, 254])
We get the output layers from output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()], which returns:
['yolo_82', 'yolo_94', 'yolo_106']
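Note that the shape returned by net.getUnconnectedOutLayers() differs across OpenCV versions: some 4.x builds return a nested array like [[200], [227], [254]], while 4.5.4+ returns a flat one. If you need to support both, a small version-agnostic sketch of the helper is:

import numpy as np

def get_output_layers(net):
    # getUnconnectedOutLayers() may return [[200], [227], [254]] or
    # [200, 227, 254] depending on the OpenCV version; ravel() handles both.
    layer_names = net.getLayerNames()
    return [layer_names[i - 1] for i in np.asarray(net.getUnconnectedOutLayers()).ravel()]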
Code:
The following is the complete working code for OpenCV version 4.5.5 (CPU):
import os
import cv2
import numpy as np

# 'path' points to the directory containing the model, class and image files.
image = cv2.imread(os.path.join(path, 'horse.jpg'))

Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392

classes = None
with open(os.path.join(path, 'coco.names'), 'r') as f:
    classes = [line.strip() for line in f.readlines()]

COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

net = cv2.dnn.readNet(os.path.join(path, 'yolov3.weights'), os.path.join(path, 'yolov3.cfg'))
blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)

def get_output_layers(net):
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
    return output_layers

def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
    label = str(classes[class_id])
    color = COLORS[class_id]
    img = cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
    img = cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

# Run inference through the network
# and gather predictions from the output layers.
outs = net.forward(get_output_layers(net))

# Initialization.
class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4

image2 = image.copy()
for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.5:
            center_x = int(detection[0] * Width)
            center_y = int(detection[1] * Height)
            w = int(detection[2] * Width)
            h = int(detection[3] * Height)
            x = center_x - w / 2
            y = center_y - h / 2
            class_ids.append(class_id)
            confidences.append(float(confidence))
            boxes.append([x, y, w, h])

# Apply non-max suppression.
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
for i in indices:
    # On OpenCV builds older than 4.5.4, NMSBoxes returns nested indices; use i = i[0] there.
    box = boxes[i]
    x = box[0]
    y = box[1]
    w = box[2]
    h = box[3]
    draw_bounding_box(image2, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))
Result:
Sample output:
The problem may be related to incompatible versions of your installed modules.
Download the .pt model:
wget https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt
And export to ONNX using your machine:
git clone https://github.com/ultralytics/yolov5
cd yolov5
pip install -r requirements.txt
pip install onnx
python3 export.py --weights models/yolov5s.pt --include onnx
Using the newly converted .onnx file should solve the problem.
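As a quick sanity check that the exported model is readable by OpenCV before wiring it into the full pipeline, something like the sketch below can help. The yolov5s.onnx path is an assumption from the export step above, and the (1, 25200, 85) shape applies to an 80-class COCO model; a custom defect model will have 5 + num_classes columns instead.

import cv2
import numpy as np

# Load the exported model and run a dummy 640x640 forward pass,
# then print the output shapes to confirm the export worked.
net = cv2.dnn.readNet("yolov5s.onnx")  # path assumed from the export step
blob = cv2.dnn.blobFromImage(np.zeros((640, 640, 3), np.uint8), 1 / 255,
                             (640, 640), swapRB=True, crop=False)
net.setInput(blob)
outputs = net.forward(net.getUnconnectedOutLayersNames())
print([o.shape for o in outputs])  # expect something like [(1, 25200, 85)]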
Output
Input
I'm trying to isolate the iris, but the pupil gets isolated in a circle instead. How can I change it so the circle surrounds the iris and not the pupil? I am using stock JPEG files. I have tried a lot of things, but I'm really new to OpenCV and image processing, so any help would be appreciated. Also, in some images it draws a circle in very strange places, which makes me think something else is wrong with the code.
import cv2
import numpy as np

class pupil_detection():
    def __init__(self, image_path):
        '''
        Initialize the class and set the class attributes.
        '''
        self._img = None
        self._img_path = image_path
        self._pupil = None
        self._centroid = None

    def load_image(self):
        '''
        Load the image based on the path passed to the class.
        It should use the method cv2.imread to load the image.
        It should also detect if the file exists.
        '''
        self._img = cv2.imread(self._img_path)
        #self._img = cv2.resize(self._img, (300,300))
        # If the image doesn't exist or is not valid then imread returns None.
        if self._img is None:  # note: `type(self._img) == None` is never true; compare to None directly
            return False
        else:
            return True

    def show_image(self, img):
        cv2.imshow("Result", img)
        cv2.waitKey(0)

    def centroid(self):
        # Convert image to grayscale.
        gray_image = cv2.cvtColor(self._img, cv2.COLOR_BGR2GRAY)
        # Convert the grayscale image to a binary image.
        ret, thresh = cv2.threshold(gray_image, 127, 255, 0)
        # Calculate moments of the binary image.
        M = cv2.moments(thresh)
        # Calculate the x,y coordinates of the center.
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        self._centroid = (cX, cY)
        cv2.circle(self._img, (cX, cY), 5, (255, 255, 255), -1)

    def detect_pupil(self):
        dst = cv2.fastNlMeansDenoisingColored(self._img, None, 10, 10, 7, 21)
        blur = cv2.GaussianBlur(dst, (5, 5), 0)
        inv = cv2.bitwise_not(blur)
        thresh = cv2.cvtColor(inv, cv2.COLOR_BGR2GRAY)
        kernel = np.ones((2, 2), np.uint8)
        erosion = cv2.erode(thresh, kernel, iterations=1)
        ret, thresh1 = cv2.threshold(erosion, 210, 255, cv2.THRESH_BINARY)
        cnts, hierarchy = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        flag = 10000
        final_cnt = None
        for cnt in cnts:
            (x, y), radius = cv2.minEnclosingCircle(cnt)
            distance = abs(self._centroid[0] - x) + abs(self._centroid[1] - y)
            if distance < flag:
                flag = distance
                final_cnt = cnt
            else:
                continue
        (x, y), radius = cv2.minEnclosingCircle(final_cnt)
        center = (int(x), int(y))
        radius = int(radius)
        cv2.circle(self._img, center, radius, (255, 0, 0), 2)
        self._pupil = (center[0], center[1], radius)
        self.show_image(self._img)

    def start_detection(self):
        if self.load_image():
            self.centroid()
            self.detect_pupil()
        else:
            print('Image file "' + self._img_path + '" could not be loaded.')

detector = pupil_detection(r'rightlook2.jpg')
detector.start_detection()
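One way to bias the search toward the iris rather than the pupil is to look for circles in an explicitly larger radius band, since the iris always encloses the pupil. Here is a rough sketch using cv2.HoughCircles rather than the contour approach above; the file name comes from the question, and the radius bounds are placeholder assumptions to tune per image set.

import cv2
import numpy as np

# Rough sketch: find the iris with a Hough circle search restricted to
# larger radii, so the smaller pupil circle cannot win.
img = cv2.imread("rightlook2.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)

h = gray.shape[0]
circles = cv2.HoughCircles(
    gray, cv2.HOUGH_GRADIENT, dp=1, minDist=h // 2,
    param1=100, param2=30,
    minRadius=h // 8,  # lower bound chosen to exclude the pupil
    maxRadius=h // 3,  # upper bound: iris should not exceed this
)
if circles is not None:
    x, y, r = np.uint16(np.around(circles))[0][0]
    cv2.circle(img, (x, y), r, (255, 0, 0), 2)
    cv2.imshow("Iris candidate", img)
    cv2.waitKey(0)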
I am creating a dataset for UNET, where the image I want to get as Y is a binarized image. I have written OpenCV code that takes user input from sliding trackbars and saves the image after a given key is pressed.
Could someone please help me apply a perspective transformation to different images? I can get the respective values from the trackbars if needed (see the sketch after the code below).
My images look something like this:
Below is the code.
import numpy as np
import cv2
import skimage.filters as filters
from os import listdir
from os.path import isfile, join

class InteractiveBinarization():
    def __init__(self, path='./images/', out='./out/'):
        '''
        args:
            path: Path of the directory which has all the images
            out: Path of the directory where your binarized images will be saved
        '''
        self.path = path
        self.images = [f for f in listdir(path) if isfile(join(path, f))]
        self.N = len(self.images)
        self.out = out

    def dummy(self, x=None) -> None:
        '''
        Does nothing. Used as the callback that createTrackbar requires.
        '''
        pass

    def binarize(self, window_width: int = 350, window_height: int = 350) -> None:
        '''
        Binarize the image based on the sliding values from the bars. It accepts Gaussian kernel,
        sharpen amount, sharpen radius and rotation angle.
        Press 'esc' or 'q' to quit, 's' to save the binarized image, 't' to print the current bar
        values onto the image, 'p' for the previous image and 'n' for the next image.
        args:
            window_width: Width of the window which has the sliding bars
            window_height: Height of the window for the sliding bars
        '''
        cv2.namedWindow('Tracking Window', cv2.WINDOW_FULLSCREEN)
        cv2.resizeWindow('Tracking Window', window_width, window_height)
        cv2.createTrackbar('kernel', 'Tracking Window', 3, 513, self.dummy)   # Gaussian kernel size
        cv2.createTrackbar('x_sigma', 'Tracking Window', 0, 100, self.dummy)  # Gaussian X sigma
        cv2.createTrackbar('y_sigma', 'Tracking Window', 0, 100, self.dummy)  # Gaussian Y sigma
        cv2.createTrackbar('amount1', 'Tracking Window', 0, 7, self.dummy)    # sharpen amount, integer part
        cv2.createTrackbar('amount2', 'Tracking Window', 1, 100, self.dummy)  # sharpen amount, decimal part
        cv2.createTrackbar('radius1', 'Tracking Window', 0, 7, self.dummy)    # sharpen radius, integer part
        cv2.createTrackbar('radius2', 'Tracking Window', 1, 100, self.dummy)  # sharpen radius, decimal part
        cv2.createTrackbar('angle', 'Tracking Window', 0, 360, self.dummy)    # rotation angle

        QUIT = False
        put_text = False
        read_image = True
        counter = 0
        while not QUIT:
            if read_image:
                img_name = self.images[counter]
                img = cv2.imread(self.path + img_name)
                read_image = False

            g_k = cv2.getTrackbarPos('kernel', 'Tracking Window')
            if g_k % 2 == 0:  # GaussianBlur needs an odd kernel size
                g_k += 1
            g_x_sigma = cv2.getTrackbarPos('x_sigma', 'Tracking Window')
            g_y_sigma = cv2.getTrackbarPos('y_sigma', 'Tracking Window')
            s_a1 = cv2.getTrackbarPos('amount1', 'Tracking Window')  # 1, 2, 3, 4
            s_a2 = cv2.getTrackbarPos('amount2', 'Tracking Window')  # 0.01 ... 0.99
            s_r1 = cv2.getTrackbarPos('radius1', 'Tracking Window')  # same as above
            s_r2 = cv2.getTrackbarPos('radius2', 'Tracking Window')
            s_a = round(s_a1 + s_a2 / 100, 2)  # 1.01 ... 7.99
            s_r = round(s_r1 + s_r2 / 100, 2)  # same as above
            angle = cv2.getTrackbarPos('angle', 'Tracking Window')

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            smooth = cv2.GaussianBlur(gray, (g_k, g_k), g_x_sigma, sigmaY=g_y_sigma)
            division = cv2.divide(gray, smooth, scale=255)
            sharp = filters.unsharp_mask(division, radius=s_r, amount=s_a, multichannel=False, preserve_range=False)
            sharp = (255 * sharp).clip(0, 255).astype(np.uint8)
            kernel = np.ones((5, 5), np.uint8)
            opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
            closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
            thresh = cv2.threshold(sharp, 0, 255, cv2.THRESH_OTSU)[1]

            # Rotate.
            (h, w) = thresh.shape[:2]
            center = (w // 2, h // 2)
            M = cv2.getRotationMatrix2D(center, angle, 1)
            thresh = cv2.warpAffine(thresh, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=255)

            if put_text:
                text = f"g_k: {g_k} , g_x_sigma: {g_x_sigma} , g_y_sigma: {g_y_sigma} , s_a: {s_a} , s_r: {s_r} , angle: {angle}"
                cv2.putText(thresh, text, org=(30, 30), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 128, 0), thickness=1)

            cv2.imshow('Image', thresh)
            key = cv2.waitKey(1)  # poll for 1 millisecond; the loop keeps refreshing the result
            if key == 27 or key == ord('q'):  # press Esc / q to close all windows
                QUIT = True
                break
            elif key == ord('s'):  # save binary image
                cv2.imwrite(self.out + 'binary_' + img_name, thresh)
            elif key == ord('t'):  # show or hide text on image
                put_text = not put_text
            elif key == ord('n'):
                if counter < self.N - 1:
                    read_image = True
                    counter += 1
            elif key == ord('p'):
                if counter > 0:
                    read_image = True
                    counter -= 1
        cv2.destroyAllWindows()
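On the perspective-transformation part of the question: once four source corners are known (they could be read from eight additional trackbars, just like the other parameters in this tool), cv2.getPerspectiveTransform and cv2.warpPerspective do the work. A minimal sketch with hypothetical corner coordinates and a placeholder image path:

import cv2
import numpy as np

# Minimal perspective-warp sketch. The four source corners are hypothetical
# values; in this tool they could come from extra trackbars (x and y per corner).
img = cv2.imread("./images/sample.jpg")  # placeholder path
assert img is not None, "update the placeholder path"
h, w = img.shape[:2]

# Source corners in the order TL, TR, BR, BL; destination is the full frame.
src = np.float32([[40, 60], [w - 30, 20], [w - 10, h - 40], [20, h - 20]])
dst = np.float32([[0, 0], [w, 0], [w, h], [0, h]])

M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))
cv2.imshow("Warped", warped)
cv2.waitKey(0)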
I am a beginner with OpenCV, and I am trying to detect faces with the following function:
def faceDetection(test_img):
    gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)  # convert color image to grayscale
    face_haar_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')  # load Haar classifier
    faces = face_haar_cascade.detectMultiScale(gray_img, scaleFactor=1.32, minNeighbors=5)  # detectMultiScale returns rectangles
    return faces, gray_img
However, faces are detected in some of the photos and not in others. For example, it detected the face in this photo:
However, it didn't detect the face in this photo:
I do not know what went wrong with the second picture, as I believe it is of good quality and the face appears almost the same as in the first photo. Any idea?
My reference is here.
Here is the code and output:
import cv2
import sys

def detectFaceOpenCVHaar(faceCascade, frame, inHeight=300, inWidth=0):
    frameOpenCVHaar = frame.copy()
    frameHeight = frameOpenCVHaar.shape[0]
    frameWidth = frameOpenCVHaar.shape[1]
    if not inWidth:
        inWidth = int((frameWidth / frameHeight) * inHeight)
    scaleHeight = frameHeight / inHeight
    scaleWidth = frameWidth / inWidth

    frameOpenCVHaarSmall = cv2.resize(frameOpenCVHaar, (inWidth, inHeight))
    frameGray = cv2.cvtColor(frameOpenCVHaarSmall, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(frameGray)
    bboxes = []
    for (x, y, w, h) in faces:
        x1 = x
        y1 = y
        x2 = x + w
        y2 = y + h
        cvRect = [int(x1 * scaleWidth), int(y1 * scaleHeight),
                  int(x2 * scaleWidth), int(y2 * scaleHeight)]
        bboxes.append(cvRect)
        cv2.rectangle(frameOpenCVHaar, (cvRect[0], cvRect[1]), (cvRect[2], cvRect[3]), (0, 255, 0),
                      int(round(frameHeight / 150)), 4)
    return frameOpenCVHaar, bboxes

if __name__ == "__main__":
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    frame = cv2.imread("/ur/image/directory/to/face.jpg")
    outOpencvHaar, bboxes = detectFaceOpenCVHaar(faceCascade, frame)
    cv2.imshow("Face Detection Comparison", outOpencvHaar)
    key = cv2.waitKey(0)
    cv2.destroyAllWindows()
Output
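If one photo works and a visually similar one does not, it is also worth sweeping the detectMultiScale parameters before changing approach, since Haar detection is quite sensitive to scaleFactor and minNeighbors. An illustrative sketch (the file name is a placeholder; the parameter grid is just a starting point):

import cv2

# Try progressively more permissive Haar settings: a face missed at
# (1.32, 5) is often found at a smaller scaleFactor or fewer minNeighbors.
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(cv2.imread('second_photo.jpg'), cv2.COLOR_BGR2GRAY)  # placeholder name

for scale_factor in (1.05, 1.1, 1.2, 1.3):
    for min_neighbors in (3, 4, 5):
        faces = cascade.detectMultiScale(gray, scaleFactor=scale_factor, minNeighbors=min_neighbors)
        print(scale_factor, min_neighbors, len(faces))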
I am looking for pupil detection from an image using Python with the OpenCV package. In my test images, I am able to detect the pupil in part (a), but whenever there is reflection/glare I am unable to accurately detect the blob of pupil pixels in part (b) of the image. Can anybody help me out? Here is the code I am trying.
import numpy as np
import cv2

name = 'two_eyes1.png'

# Read an image.
img = cv2.imread(name, cv2.IMREAD_COLOR)
# Invert the image.
img_inv = cv2.bitwise_not(img)
gray = cv2.cvtColor(img_inv, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY)

# ----- Blob detector parameter initialization
params = cv2.SimpleBlobDetector_Params()

# Change thresholds
params.minThreshold = 0
params.maxThreshold = 255

# Filter by area
params.filterByArea = True
params.minArea = 70

# Filter by circularity
params.filterByCircularity = True
params.minCircularity = 0.1

# Filter by convexity
params.filterByConvexity = True
params.minConvexity = 0.87

# Filter by inertia
params.filterByInertia = True
params.minInertiaRatio = 0.01

det = cv2.SimpleBlobDetector_create(params)
keypoints = det.detect(img)
im_with_key = cv2.drawKeypoints(img, keypoints, np.array([]),
                                (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# ----------
titles = ['Input', 'Inverted', 'Grayscaled', 'Thresholded', 'blobpart']
images = [img, img_inv, gray, threshold, im_with_key]

for i in range(5):
    cv2.imshow(titles[i], images[i])

cv2.waitKey(0)
cv2.destroyAllWindows()
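For the glare case in part (b), one common workaround is to mask the small, near-saturated specular highlights and inpaint over them before inverting and thresholding. Below is a sketch under the assumption that the reflections are close to pure white; the 240 threshold, the dilation kernel size, and the inpainting radius are all starting points to tune.

import cv2
import numpy as np

# Sketch: suppress specular glare before running blob detection.
img = cv2.imread('two_eyes1.png', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Mask of very bright pixels (the glare), slightly grown so inpainting
# covers the full highlight including its soft edge.
glare = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)[1]
glare = cv2.dilate(glare, np.ones((5, 5), np.uint8), iterations=1)

# Fill the masked region from its surroundings, then feed the result
# into the existing invert / threshold / blob-detect pipeline.
deglared = cv2.inpaint(img, glare, inpaintRadius=5, flags=cv2.INPAINT_TELEA)
cv2.imshow('Glare removed', deglared)
cv2.waitKey(0)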