How can I number circles in a certain order using Python?

I want to get the shade value of each circle from an image.
1. I try to detect circles using HoughCircles.
2. I get the center of each circle.
3. I put the text (the circle number) in each circle.
4. I set the pixel subset to obtain the shading values and calculate the averaged shading values.
5. I want to get the results (circle number, the coordinates of the center, and averaged shading value) in CSV format.
But in the 3rd step, the circle numbers were assigned randomly, so it is hard to tell which number belongs to which circle.
How can I number circles in sequence?
# USAGE
# python detect_circles.py --image images/simple.png

# import the necessary packages
import numpy as np
import argparse
import cv2
import csv

# define a function ROI that calculates the average value in a specified sample size
def ROI(img, x, y, sample_size):
    Each_circle = img[y-sample_size:y+sample_size, x-sample_size:x+sample_size]
    average_values = np.mean(Each_circle)
    return average_values

# open the csv file named circles_value
circles_values = open('circles_value.csv', 'w')

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# load the image, clone it for output, and then convert it to grayscale
image = cv2.imread(args["image"])
output = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect circles in the image
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 50, 100, 1, 1, 20, 30)

# ensure at least some circles were found
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
    number = 1
    font = cv2.FONT_HERSHEY_SIMPLEX
    # loop over the (x, y) coordinates and radius of the circles
    for (x, y, r) in circles:
        # draw the circle in the output image, then draw a rectangle
        # corresponding to the center of the circle
        number = str(number)
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
        cv2.rectangle(output, (x - 10, y - 10), (x + 10, y + 10), (0, 128, 255), -1)
        # number each circle, but its result shows an irregular pattern
        cv2.putText(output, number, (x, y), font, 0.5, (0, 0, 0), 2, cv2.LINE_AA)
        # get the average value in the specified sample size (20 x 20)
        sample_average_value = ROI(output, x, y, 20)
        # write the csv file with number, (x, y), and average pixel value
        circles_values.write(number + ',' + str(x) + ',' + str(y) + ',' + str(sample_average_value) + '\n')
        number = int(number)
        number += 1

# show the output image
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
cv2.imshow("image", output)
cv2.waitKey(0)

# close the csv file
circles_values.close()

You could sort your circles based on their x, y values, the width of the image and a rough line height, for example:
import numpy as np
import argparse
import cv2
import csv

# define a function ROI that calculates the average value in a specified sample size
def ROI(img, x, y, sample_size):
    Each_circle = img[y-sample_size:y+sample_size, x-sample_size:x+sample_size]
    average_values = np.mean(Each_circle)
    return average_values

# open the csv file named circles_value
with open('circles_value.csv', 'w', newline='') as circles_values:
    csv_output = csv.writer(circles_values)

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())

    # load the image, clone it for output, and then convert it to grayscale
    image = cv2.imread(args["image"])
    output = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect circles in the image
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 50, 100, 1, 1, 20, 30)

    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")
        font = cv2.FONT_HERSHEY_SIMPLEX
        height = 40

        # loop over the (x, y) coordinates and radius of the circles,
        # sorted left to right, top to bottom in bands of `height` pixels
        for number, (x, y, r) in enumerate(sorted(circles, key=lambda v: v[0] + (v[1] // height) * image.shape[1]), start=1):
            text = str(number)
            (tw, th), bl = cv2.getTextSize(text, font, 0.5, 2)  # so the text can be centred in the circle
            tw //= 2
            th = th // 2 + 2

            # draw the circle in the output image, then draw a rectangle
            # corresponding to the center of the circle
            cv2.circle(output, (x, y), r, (0, 255, 0), 3)
            cv2.rectangle(output, (x - tw, y - th), (x + tw, y + th), (0, 128, 255), -1)
            # number each circle, centred in the rectangle
            cv2.putText(output, text, (x - tw, y + bl), font, 0.5, (0, 0, 0), 2, cv2.LINE_AA)
            # get the average value in the specified sample size (20 x 20)
            sample_average_value = ROI(output, x, y, 20)
            # write the csv file with number, (x, y), and average pixel value
            csv_output.writerow([number, x, y, sample_average_value])

# show the output image
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
cv2.imshow("image", output)
cv2.waitKey(0)
Also, it is easier to use Python's csv library to write entries to your output file; this way you don't need to convert each entry to a string and add commas between them. enumerate() can be used to count each circle automatically, and getTextSize() can be used to determine the dimensions of the text to be printed, enabling you to centre it in the rectangle.
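To see what the row-major sort key does, here is a toy run with made-up centre coordinates (width stands in for image.shape[1]):
height = 40   # rough row height
width = 400   # stand-in for image.shape[1]

# made-up (x, y) centres: two rows with two circles each
centres = [(300, 20), (50, 25), (310, 70), (60, 65)]

# circles within the same 40 px band sort by x; lower bands sort after upper ones
ordered = sorted(centres, key=lambda v: v[0] + (v[1] // height) * width)
print(ordered)  # [(50, 25), (300, 20), (60, 65), (310, 70)]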
This would give you an output image with the circles numbered left to right, top to bottom, and a CSV starting as:
1,2,29,nan
2,51,19,nan
3,107,22,100.72437499999999
4,173,23,102.33291666666666
5,233,26,88.244791666666671
6,295,22,92.953541666666666
7,358,28,142.51625000000001
8,418,26,155.12875
9,484,31,127.02541666666667
10,547,25,112.57958333333333

The mistake in your code is that your number depends on the order of the circles in the list returned by cv2.HoughCircles, which can be random. So what I would do in this situation is devise a formula that converts the center (x, y) of each circle into an ID; the same circle would then yield the same ID as long as its center position stays the same:
def get_id_from_center(x, y):
    return x + y * 50

for (x, y, r) in circles:
    number = str(get_id_from_center(x, y))
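Note that a one-pixel jitter in the detected centre already changes this ID. A variant (my suggestion, not part of the original answer) snaps the centre to a coarse grid first, so small detection noise maps to the same ID:
def get_id_from_center(x, y, grid=25):
    # quantise the centre so small jitter in the detection still yields the same ID;
    # grid=25 and the row stride of 50 are illustrative values
    return (x // grid) + (y // grid) * 50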


How to detect color of balls in the given image using python-opencv?
Introduction
I will break the question down into the following three sections:
1. Obtain the English name of a color from an RGB or hex value
2. Locate the circles in the image
3. Obtain the English name per circle
Obtain color name from RGB or Hex
Using the following answer:
Convert RGB color to English color name, like 'green' with Python
We are almost done, except for one small change: cv2 loads images as BGR instead of RGB, so rgb[2] is actually the red channel; the code below therefore compares rgb[2] with the red component of the webcolors entries (and rgb[0] with the blue one).
def color_rgb_to_name(rgb: tuple[int, int, int]) -> str:
    """
    Translates an rgb value to the closest English color name known.

    Args:
        rgb: The rgb value that has to be translated to the color name.

    Returns:
        The name of the color that most closely defines the rgb value in CSS3.
    """
    min_colours = {}
    for key, name in webcolors.CSS3_HEX_TO_NAMES.items():
        r_c, g_c, b_c = webcolors.hex_to_rgb(key)
        rd = (r_c - rgb[2]) ** 2
        gd = (g_c - rgb[1]) ** 2
        bd = (b_c - rgb[0]) ** 2
        min_colours[(rd + gd + bd)] = name
    return min_colours[min(min_colours.keys())]
This is already enough to solve the question if you only care about the colors that are used in the image.
image = cv2.imread('image.jpg')
colors = set([color_rgb_to_name(val) for val in np.unique(image.reshape(-1, 3), axis=0)])
Colors:
{'firebrick', 'cadetblue', 'peru', 'indianred', 'darkturquoise', 'cyan', 'darkviolet', 'darkorange', 'midnightblue', 'indigo', 'lightseagreen', 'mediumturquoise', 'blue', 'brown', 'chocolate', 'saddlebrown', 'mediumblue', 'darkslateblue', 'turquoise', 'blueviolet', 'sienna', 'black', 'orangered', 'slateblue'}
Notes:
This uses the webcolors package, but you can create your own dictionary, which gives you more control over the colors that you allow / disallow.
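A minimal sketch of such a custom palette (the names and RGB values here are just an example):
# a hand-picked palette: name -> (R, G, B); extend as needed
CUSTOM_COLORS = {
    'red': (255, 0, 0),
    'green': (0, 128, 0),
    'blue': (0, 0, 255),
    'black': (0, 0, 0),
    'white': (255, 255, 255),
}

def closest_custom_name(rgb):
    # nearest palette entry by squared distance in RGB space
    return min(CUSTOM_COLORS,
               key=lambda n: sum((c - v) ** 2 for c, v in zip(CUSTOM_COLORS[n], rgb)))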
Locate the Circles
The colors that we found above are all the unique colors that are contained in the image. This is often not really what we want. Instead we want to find the color that is most commonly used inside the circle.
In order to define the color in a circle there are several sources that we can use:
https://www.tutorialspoint.com/find-circles-in-an-image-using-opencv-in-python
https://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
How to find the circle in the given images using opencv python (hough circles )?
These sources combine into the following code:
def locate_circles(img: np.ndarray, vmin=10, vmax=30) -> np.ndarray:
    """
    Locates circles on a gray image.

    Args:
        img: a gray image with black background.
        vmin: The minimum radius value of the circles.
        vmax: The maximum radius value of the circles.

    Returns:
        A numpy array containing the center location of the circles and the radius.
    """
    img = cv2.medianBlur(img, 5)
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=20, minRadius=vmin, maxRadius=vmax)
    circles = np.round(circles[0, :]).astype("int")
    return circles
I added the medianBlur to increase the consistency in locating the circles; alternatively you could play a bit more with the param values or radius sizes.
Test code:
image = cv2.imread('image.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

for (x, y, r) in locate_circles(gray, vmin=10, vmax=30):
    print(x, y, r)
Answers:
262 66 12
186 74 12
136 60 12
Obtain the English name per circle
Now that we know where the circles are located, we can get the average color per circle and combine this with the above code to obtain the final result.
The following code locates all x and y values that are inside the circle.
def coordinates(x: int, y: int, r: int, width: int, height: int) -> np.ndarray:
    """
    Locates all valid x and y coordinates inside a circle.

    Args:
        x: Center column position.
        y: Center row position.
        r: Radius of the circle.
        width: the maximum width value that is still valid (in bounds)
        height: the maximum height value that is still valid (in bounds)

    Returns:
        A numpy array with all valid x and y coordinates that fall within the circle.
    """
    indices = []
    for dx in range(-r, r):
        for dy in range(-r, r):
            # keep the point only if it lies inside the circle and inside the image bounds
            if dx * dx + dy * dy <= r * r and 0 <= x + dx < width and 0 <= y + dy < height:
                indices.append([x + dx, y + dy])
    return np.array(indices).T.reshape(2, -1)
Which can then be used to obtain the average color value per circle.
image = cv2.imread('image.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

for (x, y, r) in locate_circles(gray, vmin=10, vmax=30):
    columns, rows = coordinates(x, y, r, *gray.shape[:2])
    color = np.average(image[rows, columns], axis=0).astype(np.uint8)
    name = color_rgb_to_name(color)

    # Draw the information on the screen
    cv2.putText(image, name, (x - 20, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 1)
Answer:
indigo
firebrick
darkturquoise
TL;DR
import cv2
import numpy as np
import webcolors


def imshow(img, delay=0):
    cv2.imshow('Test', img)
    cv2.waitKey(delay)


def locate_circles(img: np.ndarray, vmin=10, vmax=30) -> np.ndarray:
    """
    https://www.tutorialspoint.com/find-circles-in-an-image-using-opencv-in-python
    https://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
    https://stackoverflow.com/questions/67764821/how-to-find-the-circle-in-the-given-images-using-opencv-python-hough-circles

    Locates circles on a gray image.

    Args:
        img: a gray image with black background.
        vmin: The minimum radius value of the circles.
        vmax: The maximum radius value of the circles.

    Returns:
        A numpy array containing the center location of the circles and the radius.
    """
    img = cv2.medianBlur(img, 5)
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=20, minRadius=vmin, maxRadius=vmax)
    circles = np.round(circles[0, :]).astype("int")
    return circles


def coordinates(x: int, y: int, r: int, width: int, height: int) -> np.ndarray:
    """
    Locates all valid x and y coordinates inside a circle.

    Args:
        x: Center column position.
        y: Center row position.
        r: Radius of the circle.
        width: the maximum width value that is still valid (in bounds)
        height: the maximum height value that is still valid (in bounds)

    Returns:
        A numpy array with all valid x and y coordinates that fall within the circle.
    """
    indices = []
    for dx in range(-r, r):
        for dy in range(-r, r):
            # keep the point only if it lies inside the circle and inside the image bounds
            if dx * dx + dy * dy <= r * r and 0 <= x + dx < width and 0 <= y + dy < height:
                indices.append([x + dx, y + dy])
    return np.array(indices).T.reshape(2, -1)


def draw_circles(img: np.ndarray, x: int, y: int, r: int):
    """
    Draws the circle in the output image, then draws a rectangle corresponding to the center of the circle.

    Args:
        img: Image on which to draw the circle location and center.
        x: Center column position.
        y: Center row position.
        r: Radius of the circle.

    Modifies:
        The input image by drawing a circle and a rectangle on it.
    """
    cv2.circle(img, (x, y), r, (0, 255, 0), 4)
    cv2.rectangle(img, (x - 2, y - 2), (x + 2, y + 2), (0, 128, 255), -1)


def color_rgb_to_name(rgb: tuple[int, int, int]) -> str:
    """
    https://stackoverflow.com/questions/9694165/convert-rgb-color-to-english-color-name-like-green-with-python

    Translates an rgb value to the closest English color name known.

    Args:
        rgb: The rgb value that has to be translated to the color name.

    Returns:
        The name of the color that most closely defines the rgb value in CSS3.
    """
    min_colours = {}
    for key, name in webcolors.CSS3_HEX_TO_NAMES.items():
        r_c, g_c, b_c = webcolors.hex_to_rgb(key)
        rd = (r_c - rgb[2]) ** 2
        gd = (g_c - rgb[1]) ** 2
        bd = (b_c - rgb[0]) ** 2
        min_colours[(rd + gd + bd)] = name
    return min_colours[min(min_colours.keys())]


if __name__ == '__main__':
    image = cv2.imread('image.jpg')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    for (x, y, r) in locate_circles(gray, vmin=10, vmax=30):
        columns, rows = coordinates(x, y, r, *gray.shape[:2])
        color = np.average(image[rows, columns], axis=0).astype(np.uint8)
        name = color_rgb_to_name(color)
        print(name)

        # Draw extra information on the screen
        # draw_circles(image, x, y, r)
        cv2.putText(image, name, (x - 20, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 1)

    # show the output image
    imshow(image)

How to detect a grainy line?

I am trying to detect a grainy printed line on a paper with cv2. I need the angle of the line. I don't have much knowledge of image processing and I only need to detect the line. I tried to play with the parameters but the angle is always detected wrong. Could someone help me? This is my code:
import cv2
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pyplot import figure

img = cv2.imread('CamXY1_1.bmp')
crop_img = img[100:800, 300:900]

blur = cv2.GaussianBlur(crop_img, (1, 1), 0)
ret, thresh = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY)
gray = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 60, 150)

figure(figsize=(15, 15), dpi=150)
plt.imshow(edges, 'gray')

lines = cv2.HoughLines(edges, 1, np.pi/180, 200)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 3000 * (-b))
    y1 = int(y0 + 3000 * (a))
    x2 = int(x0 - 3000 * (-b))
    y2 = int(y0 - 3000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
Here's a possible solution to estimate the line (and its angle) without using the Hough line transform. The idea is to locate the start and end points of the line using the reduce function, which can reduce an image to a single column or row. As we reduce the image we can also get the total SUM of all the pixels across the reduced image. Using this info we can estimate the extreme points of the line and calculate its angle. These are the steps:
1. Resize your image because it is way too big
2. Get a binary image via adaptive thresholding
3. Define two extreme regions of the image and crop them
4. Reduce the ROIs to a column using the SUM mode, which is the sum of all rows
5. Accumulate the total values above a threshold value
6. Estimate the starting and ending points of the line
7. Get the angle of the line
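As a quick illustration of what reduce does, here is a toy example with made-up values (not part of the original solution):
import cv2
import numpy as np

# a tiny 3 x 4 "image" with a bright streak in the first two rows
toy = np.array([[0, 255, 0,   0],
                [0, 255, 255, 0],
                [0, 0,   0,   0]], dtype=np.uint8)

# dim=1 collapses each row to a single value; REDUCE_SUM adds the pixels up
rowSums = cv2.reduce(toy, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
print(rowSums.ravel())  # [255 510 0] - the rows crossed by the streak light up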
Here's the code:
# imports:
import cv2
import numpy as np
import math
# image path
path = "D://opencvImages//"
fileName = "mmCAb.jpg"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Scale your BIG image into a small one:
scalePercent = 0.3
# Calculate the new dimensions
width = int(inputImage.shape[1] * scalePercent)
height = int(inputImage.shape[0] * scalePercent)
newSize = (width, height)
# Resize the image:
inputImage = cv2.resize(inputImage, newSize, None, None, None, cv2.INTER_AREA)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Convert BGR to grayscale:
grayInput = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Adaptive Thresholding:
windowSize = 51
windowConstant = 11
binaryImage = cv2.adaptiveThreshold(grayInput, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, windowSize, windowConstant)
The first step is to get the binary image. Note that I previously downscaled your input because it is too big and we don't need all that info. This is the binary mask:
Now, we don't need most of the image. In fact, since the line runs across the whole image, we need only "trim" the first and last columns and check where the white pixels begin. I'll crop a column a little wider, though, so we can ensure we have enough data and as little noise as possible. I'll define two Regions of Interest (ROIs) and crop them. Then, I'll reduce each ROI to a column using the SUM mode, which gives me the summation of all intensities across each row. After that, I can accumulate the locations where the sum exceeds a certain threshold and approximate the location of the line, like this:
# Define the regions that will be cropped
# from the original image:
lineWidth = 5
cropPoints = [(0, 0, lineWidth, height), (width - lineWidth, 0, lineWidth, height)]

# Store the line points here:
linePoints = []

# Loop through the crop points and
# crop the ROI:
for p in range(len(cropPoints)):
    # Get the ROI:
    (x, y, w, h) = cropPoints[p]

    # Crop the ROI:
    imageROI = binaryImage[y:y+h, x:x+w]

    # Reduce the ROI to a n rows x 1 column matrix:
    reducedImg = cv2.reduce(imageROI, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)

    # Get the height (or length) of the array:
    reducedHeight = reducedImg.shape[0]

    # Define a threshold and accumulate
    # the coordinates of the points:
    threshValue = 100
    pointSum = 0
    pointCount = 0

    for i in range(reducedHeight):
        currentValue = reducedImg[i]

        if currentValue > threshValue:
            pointSum = pointSum + i
            pointCount = pointCount + 1

    # Get average coordinate of the line:
    y = int(pointSum / pointCount)

    # Store in list:
    linePoints.append((x, y))
The red rectangles show the regions I cropped from the input image:
Note that I've stored both points in the linePoints list. Let's check out our approximation by drawing a line that connects both points:
# Get the two points:
p0 = linePoints[0]
p1 = linePoints[1]
# Draw the line:
cv2.line(inputImageCopy, (p0[0], p0[1]), (p1[0], p1[1]), (255, 0, 0), 1)
cv2.imshow("Line", inputImageCopy)
cv2.waitKey(0)
Which yields:
Not bad, huh? Now that we have both points, we can estimate the angle of this line:
# Get angle:
adjacentSide = p1[0] - p0[0]
oppositeSide = p0[1] - p1[1]
# Compute the angle alpha:
alpha = math.degrees(math.atan(oppositeSide / adjacentSide))
print("Angle: "+str(alpha))
This prints:
Angle: 0.534210901840831
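A small caveat (my addition, not from the original answer): math.atan divides by adjacentSide, so a perfectly vertical line would raise a ZeroDivisionError; math.atan2 handles that case:
# math.atan2 copes with adjacentSide == 0 (a vertical line)
alpha = math.degrees(math.atan2(oppositeSide, adjacentSide))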

Detect a certain Object in a video stream

I am trying to detect a white object on a black/white road so an autonomous RC car can drive around it. I am detecting everything but the white box on the road.
What I tried can be seen in my code example:
# input = one video stream frame 320x240
frame = copy.deepcopy(input)
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
threshGray = cv2.adaptiveThreshold(
    grayFrame,
    255,
    cv2.ADAPTIVE_THRESH_MEAN_C,
    cv2.THRESH_BINARY,
    blockSize=123,
    C=-19,
)
contours, _ = cv2.findContours(threshGray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    # some filtering needs to be done
    #
    # after filtering append contour
    filteredContours.append(cnt)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (3, 244, 244), 1)

cv2.drawContours(frame, filteredContours, -1, (255, 0, 255), 1)
cv2.imshow("with contours", frame)
cv2.imshow("adaptiveThreshhold", threshGray)
cv2.imshow("input", input)
I'm looking for a way to draw a bounding box around the obstacle. The problem is that I don't know how to extract this box from the rest. That's probably because the contour of the box and the lines on the right are connected, which is why the bounding box is so big. It would be great if someone knows a way to do that.
First: input image
Second: after adaptiveThreshold
Third: with contours (pink) and bounding boxes (yellow)
At this point you have several candidates of white color value. You need to add code in the '# some filtering needs to be done' section to remove the candidates that are not the bounding box you want to find. I suggest comparing each candidate against a square box of roughly the expected size, because all of the contours except the BOX (the one you want to find on the road) fail that condition.
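A minimal sketch of such a filter (my illustration of the suggestion above; the aspect-ratio and area thresholds are made-up values that would need tuning):
filteredContours = []
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    aspect = w / float(h)
    # keep contours that are roughly square and big enough to be the box
    if 0.7 < aspect < 1.3 and cv2.contourArea(cnt) > 400:
        filteredContours.append(cnt)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (3, 244, 244), 1)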
I think what you are looking for is triangular masking, since the input image contains lane markings as well. Try using a lane detector: all the areas outside the lane can be masked out, and only the space within the lane is processed.
Below I have used a lane detector based on HoughLinesP and added contours as well. Try this; I did not test the code, but I see no issues.
#!/usr/bin/env python3
"""
Lane detector using the Hough transform method
"""
import cv2 as cv
import numpy as np
# import matplotlib.pyplot as plt
import random as rng

rng.seed(369)

def do_canny(frame):
    # Converts frame to grayscale because we only need the luminance channel for detecting edges - less computationally expensive
    gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
    # Applies a 5x5 gaussian blur with deviation of 0 to frame - not mandatory since Canny will do this for us
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    # Applies Canny edge detector with minVal of 50 and maxVal of 150
    canny = cv.Canny(blur, 50, 150)
    return canny

def do_segment(frame):
    # Since an image is a multi-directional array containing the relative intensities of each pixel in the image, we can use frame.shape to return a tuple: [number of rows, number of columns, number of channels] of the dimensions of the frame
    # frame.shape[0] gives us the number of rows of pixels the frame has. Since height begins from 0 at the top, the y-coordinate of the bottom of the frame is its height
    height = frame.shape[0]
    # Creates a triangular polygon for the mask defined by three (x, y) coordinates
    polygons = np.array([
        [(0, height), (800, height), (380, 290)]
    ])
    # Creates an image filled with zero intensities with the same dimensions as the frame
    mask = np.zeros_like(frame)
    # Allows the mask to be filled with values of 1 and the other areas to be filled with values of 0
    cv.fillPoly(mask, polygons, 255)
    # A bitwise and operation between the mask and frame keeps only the triangular area of the frame
    segment = cv.bitwise_and(frame, mask)
    return segment

def calculate_lines(frame, lines):
    # Empty arrays to store the coordinates of the left and right lines
    left = []
    right = []
    # Loops through every detected line
    for line in lines:
        # Reshapes line from 2D array to 1D array
        x1, y1, x2, y2 = line.reshape(4)
        # Fits a linear polynomial to the x and y coordinates and returns a vector of coefficients which describe the slope and y-intercept
        parameters = np.polyfit((x1, x2), (y1, y2), 1)
        slope = parameters[0]
        y_intercept = parameters[1]
        # If slope is negative, the line is to the left of the lane, and otherwise, the line is to the right of the lane
        if slope < 0:
            left.append((slope, y_intercept))
        else:
            right.append((slope, y_intercept))
    # Averages out all the values for left and right into a single slope and y-intercept value for each line
    left_avg = np.average(left, axis=0)
    right_avg = np.average(right, axis=0)
    # Calculates the x1, y1, x2, y2 coordinates for the left and right lines
    left_line = calculate_coordinates(frame, left_avg)
    right_line = calculate_coordinates(frame, right_avg)
    return np.array([left_line, right_line])

def calculate_coordinates(frame, parameters):
    slope, intercept = parameters
    # Sets initial y-coordinate as height from top down (bottom of the frame)
    y1 = frame.shape[0]
    # Sets final y-coordinate as 150 above the bottom of the frame
    y2 = int(y1 - 150)
    # Sets initial x-coordinate as (y1 - b) / m since y1 = mx1 + b
    x1 = int((y1 - intercept) / slope)
    # Sets final x-coordinate as (y2 - b) / m since y2 = mx2 + b
    x2 = int((y2 - intercept) / slope)
    return np.array([x1, y1, x2, y2])

def visualize_lines(frame, lines):
    # Creates an image filled with zero intensities with the same dimensions as the frame
    lines_visualize = np.zeros_like(frame)
    # Checks if any lines are detected
    if lines is not None:
        for x1, y1, x2, y2 in lines:
            # Draws lines between two coordinates with green color and 5 thickness
            cv.line(lines_visualize, (x1, y1), (x2, y2), (0, 255, 0), 5)
    return lines_visualize

# The video feed is read in as a VideoCapture object
cap = cv.VideoCapture(1)
while cap.isOpened():
    # ret = a boolean return value from getting the frame, frame = the current frame being projected in the video
    ret, frame = cap.read()
    canny = do_canny(frame)
    cv.imshow("canny", canny)
    # plt.imshow(frame)
    # plt.show()
    segment = do_segment(canny)
    hough = cv.HoughLinesP(segment, 2, np.pi / 180, 100, np.array([]), minLineLength=100, maxLineGap=50)
    # Averages multiple detected lines from hough into one line for left border of lane and one line for right border of lane
    lines = calculate_lines(frame, hough)
    # Visualizes the lines
    lines_visualize = visualize_lines(frame, lines)
    cv.imshow("hough", lines_visualize)
    # Overlays lines on frame by taking their weighted sums and adding an arbitrary scalar value of 1 as the gamma argument
    output = cv.addWeighted(frame, 0.9, lines_visualize, 1, 1)
    contours, _ = cv.findContours(output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    contours_poly = [None] * len(contours)
    boundRect = [None] * len(contours)
    centers = [None] * len(contours)
    radius = [None] * len(contours)
    for i, c in enumerate(contours):
        contours_poly[i] = cv.approxPolyDP(c, 3, True)
        boundRect[i] = cv.boundingRect(contours_poly[i])
        centers[i], radius[i] = cv.minEnclosingCircle(contours_poly[i])
    ## [allthework]

    ## [zeroMat]
    drawing = np.zeros((output.shape[0], output.shape[1], 3), dtype=np.uint8)
    ## [zeroMat]

    ## [forContour]
    # Draw polygonal contour + bounding rects + circles
    for i in range(len(contours)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv.drawContours(drawing, contours_poly, i, color)
        cv.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])),
                     (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3])), color, 2)

    # Opens a new window and displays the output frame
    cv.imshow('Contours', drawing)

    # Frames are read at intervals of 10 milliseconds. The program breaks out of the while loop when the user presses the 'q' key
    if cv.waitKey(10) & 0xFF == ord('q'):
        break

# The following frees up resources and closes all windows
cap.release()
cv.destroyAllWindows()
Try different values in the thresholds for Canny.

How can I extract image segment with specific color in OpenCV?

I work with logos and other simple graphics in which there are no gradients or complex patterns. My task is to extract segments with letters and other elements from the logo.
To do this, I define the background color and then walk through the picture in order to segment the images. Here is my code for more understanding:
import sys

import cv2 as cv
import numpy as np

MAXIMUM_COLOR_TRANSITION_DELTA = 100  # 0 - 765

def expand_segment_recursive(image, unexplored_foreground, segment, point, color):
    height, width, _ = image.shape

    # Unpack coordinates from point
    py, px = point

    # Create list of pixels to check
    neighbourhood_pixels = [(py, px + 1), (py, px - 1), (py + 1, px), (py - 1, px)]

    allowed_zone = unexplored_foreground & np.invert(segment)

    for y, x in neighbourhood_pixels:
        # Add pixel to segment if its coordinates are within the image shape and its color
        # differs from the segment color by no more than MAXIMUM_COLOR_TRANSITION_DELTA
        if y in range(height) and x in range(width) and allowed_zone[y, x]:
            color_delta = np.sum(np.abs(image[y, x].astype(int) - color.astype(int)))
            print(color_delta)
            if color_delta <= MAXIMUM_COLOR_TRANSITION_DELTA:
                segment[y, x] = True
                segment = expand_segment_recursive(image, unexplored_foreground, segment, (y, x), color)
                allowed_zone = unexplored_foreground & np.invert(segment)

    return segment

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Pass image as the argument to use the tool")
        exit(-1)

    IMAGE_FILENAME = sys.argv[1]
    print(IMAGE_FILENAME)

    image = cv.imread(IMAGE_FILENAME)
    height, width, _ = image.shape

    # To filter the background I use the median value of the image,
    # as the background in most cases takes > 50% of the image area.
    background_color = np.median(image, axis=(0, 1))
    print("Background color: ", background_color)

    # Create foreground mask to find segments in it (TODO: Optimize this part)
    foreground = np.zeros(shape=(height, width, 1), dtype=bool)
    for y in range(height):
        for x in range(width):
            if not np.array_equal(image[y, x], background_color):
                foreground[y, x] = True

    unexplored_foreground = foreground

    for y in range(height):
        for x in range(width):
            if unexplored_foreground[y, x]:
                segment = np.zeros(foreground.shape, foreground.dtype)
                segment[y, x] = True
                segment = expand_segment_recursive(image, unexplored_foreground, segment, (y, x), image[y, x])

                cv.imshow("segment", segment.astype(np.uint8) * 255)
                while cv.waitKey(0) != 27:
                    continue
Here is the desired result:
At the end of the run I expect 13 separate extracted segments (for this particular image). But instead I get "RecursionError: maximum recursion depth exceeded", which is not surprising as expand_segment_recursive() can be called for every pixel of the image. Even with a small image resolution of 600x500 I get up to 300K calls.
My question is how can I get rid of recursion in this case and possibly optimize the algorithm with Numpy or OpenCV algorithms?
You can actually use a thresholded image (binary) and connectedComponents to do this job in a couple of steps. Also, you may use findContours or other methods.
Here is the code:
import numpy as np
import cv2
# load image as greyscale
img = cv2.imread("hp.png", 0)
# puts 0 to the white (background) and 255 in other places (greyscale value < 250)
_, thresholded = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY_INV)
# gets the labels and the amount of labels, label 0 is the background
amount, labels = cv2.connectedComponents(thresholded)
# let's draw it for visualization purposes
preview = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
print(amount)  # should be 3 -> two components + background
# draw label 1 blue and label 2 green
preview[labels == 1] = (255, 0, 0)
preview[labels == 2] = (0, 255, 0)
cv2.imshow("frame", preview)
cv2.waitKey(0)
At the end, the thresholded image will look like this:
and the preview image (the one with the colored segments) will look like this:
With the mask you can always use numpy functions to get things like the coordinates of the segments you want, or to color them (as I did with preview).
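For example, a minimal sketch of pulling one segment's pixel coordinates out of the labels array from the code above:
# (row, col) coordinates of every pixel belonging to label 1
coords = np.argwhere(labels == 1)

# bounding box of that segment
y0, x0 = coords.min(axis=0)
y1, x1 = coords.max(axis=0)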
UPDATE
To get differently colored segments, you may try to create a "border" between the segments. Since they are plain colors and not gradients, you can try an edge detector like Canny and then paint the detected edges black in the image:
import numpy as np
import cv2
img = cv2.imread("total.png", 0)
# background to black
img[img>=200] = 0
# get edges
canny = cv2.Canny(img, 60, 180)
# make them thicker
kernel = np.ones((3,3),np.uint8)
canny = cv2.morphologyEx(canny, cv2.MORPH_DILATE, kernel)
# apply edges as border in the image
img[canny==255] = 0
# same as before
amount, labels = cv2.connectedComponents(img)
preview = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
print (amount) #should be 14 -> 13 components + background
# color them randomly
for i in range(1, amount):
    preview[labels == i] = np.random.randint(0, 255, size=3, dtype=np.uint8)
cv2.imshow("frame", preview )
cv2.waitKey(0)
The result is:

Harris corner detection and localization in OpenCV with Python

I'm using the following code to try to detect corners of polylines in order to 'measure' the lines. The code is based on a snippet I found somewhere on SO and uses cv2.cornerHarris():
cornerimg = cv2.cornerHarris( gray,  # src
                              2,     # blockSize
                              3,     # ksize / aperture
                              0.04   # k
                              # dst
                              # borderType
                              )

# ?
cornerimg = cv2.normalize( cornerimg,        # src
                           None,             # dst
                           0,                # alpha
                           255,              # beta
                           cv2.NORM_MINMAX,  # norm type
                           cv2.CV_32FC1,     # dtype
                           None              # mask
                           )

# ?
cornerimg = cv2.convertScaleAbs( cornerimg )
cornershow = cornerimg.copy()

# iterate over pixels to get corner positions
w, h = gray.shape
for y in range(0, h):
    for x in range(0, w):
        #harris = cv2.cv.Get2D( cv2.cv.fromarray(cornerimg), y, x)
        #if harris[0] > 10e-06:
        if cornerimg[x, y] > 64:
            print("corner at ", x, y)
            cv2.circle( cornershow,   # dest
                        (x, y),       # pos
                        4,            # radius
                        (115, 0, 25)  # color
                        )

cv2.imshow('harris cornerimg', cornershow)
The original code results in white spots at the corner locations, and the level seems to be an indicator of "cornerness".
The snippet (updated to use cv2) iterates over the resulting image and scans for values larger than 10e-06 for some reason; I have replaced this with a comparison of what I think should be the brightness in the image.
However, the circles drawn at those locations are nowhere near the actual hot-spots found in the normalized Harris output.
What am I doing wrong?
Alternatively, cv2.goodFeaturesToTrack() can be set to use Harris (useHarrisDetector=True) but my attempt to use it does not result in what cornerHarris appears to detect properly:
cv2.goodFeaturesToTrack( blurred,                 # img
                         500,                     # maxCorners
                         0.03,                    # qualityLevel
                         10,                      # minDistance
                         None,                    # corners,
                         None,                    # mask,
                         2,                       # blockSize,
                         useHarrisDetector=True,  # useHarrisDetector,
                         k=0.04                   # k
                         )
What would be the equivalent function call to cv2.cornerHarris()?
The output seems to be transposed, swapping x and y indices on a square image fixes it (circles are on corner maxima).
Try the following:
cv2.circle( cornershow,   # dest
            (y, x),       # pos
            4,            # radius
            (115, 0, 25)  # color
            )
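In other words (my restatement of the same fix): numpy indexes an image as [row, col], i.e. [y, x], so you can equally keep (x, y) for drawing and swap the shape unpacking and the array index instead:
h, w = gray.shape  # shape is (rows, cols) = (height, width)
for y in range(h):
    for x in range(w):
        if cornerimg[y, x] > 64:  # index as [row, col]
            cv2.circle(cornershow, (x, y), 4, (115, 0, 25))  # draw at (x, y)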
