In my image processing project, I have already obtained a masked image (black-and-white image) and its contours using the cv.findContours function. My goal now is to create an algorithm that can draw a middle line for this contour. The masked image and its contour are shown in the following images.
Masked image:
Contour:
For that contour, I would like to create a middle line that is nearly horizontal. I have manually marked my ideal middle line in red. Please check the following image for the red middle line I have mentioned.
Contour with the middle line:
It is noticeable that my ultimate goal is to find the tip point that I have marked in yellow. If you have other ideas that can directly find the yellow tip point, please also let me know. For finding the yellow tip point, I have tried two approaches, cv.convexHull and cv.minAreaRect, but the issue is robustness. I made these two approaches work for some images, but for other images in my dataset they do not work very well. Therefore, I think finding the middle line might be a good approach to try.
I believe you're trying to determine the contour's center of gravity and orientation, which can be computed from the contour's central moments.
The code below generates this plot. Is this the result you wanted?
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Determine contour
img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
img_bin = (img > 128).astype(np.uint8)
contours, _ = cv2.findContours(img_bin, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)

# Determine center of gravity and orientation using moments
M = cv2.moments(contours[0])
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
theta = 0.5 * np.arctan2(2 * M["mu11"], M["mu20"] - M["mu02"])
endx = 600 * np.cos(theta) + center[0]  # line length 600
endy = 600 * np.sin(theta) + center[1]

# Display results
plt.imshow(img_bin, cmap='gray')
plt.scatter(center[0], center[1], marker="X")
plt.plot([center[0], endx], [center[1], endy])
plt.show()
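Since the ultimate goal is the yellow tip point, one possible follow-up (my own sketch, not guaranteed robust on every image) is to project all contour points onto the orientation axis found above and take the extreme point:

# Sketch: estimate the tip as the contour point farthest from the center
# along the orientation axis (reuses contours, center and theta from above)
pts = contours[0].reshape(-1, 2).astype(np.float64)
direction = np.array([np.cos(theta), np.sin(theta)])
proj = (pts - np.array(center)) @ direction   # signed distance along the axis
tip = pts[np.argmax(np.abs(proj))]            # extreme point = tip candidate
plt.scatter(tip[0], tip[1], c="y")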
My goal right now is to create an algorithm that can draw a middle line for this contour.
If you detect the upper and lower bounds of your horizontal lines, then you can calculate the middle line's coordinates.
For instance:
The middle line will be:
If you extend the line to the width of the image:
Code:
import cv2

img = cv2.imread("contour.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

(h, w) = img.shape[:2]

x1_upper = h
x1_lower = 0
x2_upper = h
x2_lower = 0
y1_upper = h
y1_lower = 0
y2_upper = h
y2_lower = 0

lines = cv2.ximgproc.createFastLineDetector().detect(gray)

for cur in lines:
    # line endpoints (cast to int so they can be used with cv2.line)
    x1 = int(cur[0][0])
    y1 = int(cur[0][1])
    x2 = int(cur[0][2])
    y2 = int(cur[0][3])

    # upper-bound coords
    if y1 < y1_upper and y2 < y2_upper:
        y1_upper = y1
        y2_upper = y2
        x1_upper = x1
        x2_upper = x2
    elif y1 > y1_lower and y2 > y2_lower:
        y1_lower = y1
        y2_lower = y2
        x1_lower = x1
        x2_lower = x2

print("\n\n-lower-bound-\n")
print("({}, {}) - ({}, {})".format(x1_lower, y1_lower, x2_lower, y2_lower))
print("\n\n-upper-bound-\n")
print("({}, {}) - ({}, {})".format(x1_upper, y1_upper, x2_upper, y2_upper))

cv2.line(img, (x1_lower, y1_lower), (x2_lower, y2_lower), (0, 255, 0), 5)
cv2.line(img, (x1_upper, y1_upper), (x2_upper, y2_upper), (0, 0, 255), 5)

x1_avg = int((x1_lower + x1_upper) / 2)
y1_avg = int((y1_lower + y1_upper) / 2)
x2_avg = int((x2_lower + x2_upper) / 2)
y2_avg = int((y2_lower + y2_upper) / 2)

cv2.line(img, (0, y1_avg), (w, y2_avg), (255, 0, 0), 5)

cv2.imshow("result", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
I believe a skeleton is what you are looking for.
import cv2
import timeit
img = cv2.imread('Ggh8d - Copy.jpg',0)
s = timeit.default_timer()
thinned = cv2.ximgproc.thinning(img, thinningType = cv2.ximgproc.THINNING_ZHANGSUEN)
e = timeit.default_timer()
print(e-s)
cv2.imwrite("thinned1.png", thinned)
If you smooth the edge a little bit:
Note that the line will not touch the yellow point, since the algorithm has to keep a distance from the edges, and the yellow point is located on the edge.
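For reference, a minimal sketch of what "smoothing the edge" could look like before thinning (my assumption: a Gaussian blur followed by re-thresholding; the kernel size would need tuning):

import cv2

img = cv2.imread('Ggh8d - Copy.jpg', 0)
# soften jagged contour edges, then restore a clean binary mask
blurred = cv2.GaussianBlur(img, (15, 15), 0)
_, smooth = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
thinned = cv2.ximgproc.thinning(smooth, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)
cv2.imwrite("thinned_smoothed.png", thinned)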
Here is another way to do that by computing the centerline of the rotated bounding box about your object in Python/OpenCV.
Input:
import cv2
import numpy as np
# load image
img = cv2.imread("blob_mask.jpg")
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold the grayscale image
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# get coordinates of all non-zero pixels
# NOTE: must transpose since numpy coords are y,x and opencv uses x,y
coords = np.column_stack(np.where(thresh.transpose() > 0))
# get rotated rectangle from the non-zero pixel coordinates
rotrect = cv2.minAreaRect(coords)
box = cv2.boxPoints(rotrect)
box = np.int0(box)
print (box)
# get center line from box
# note points are clockwise from bottom right
x1 = (box[0][0] + box[3][0]) // 2
y1 = (box[0][1] + box[3][1]) // 2
x2 = (box[1][0] + box[2][0]) // 2
y2 = (box[1][1] + box[2][1]) // 2
# draw rotated rectangle on copy of img as result
result = img.copy()
cv2.drawContours(result, [box], 0, (0,0,255), 2)
cv2.line(result, (x1,y1), (x2,y2), (255,0,0), 2)
# write result to disk
cv2.imwrite("blob_mask_rotrect.png", result)
# display results
cv2.imshow("THRESH", thresh)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
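If the tip point is still needed, a hedged extension of this answer (an assumption on my part, not part of the original code) is to sample along the centerline and keep the last sample that still lies on the blob:

# Sketch: walk along the centerline and keep samples inside the mask;
# the extreme surviving sample approximates the tip of the blob
import numpy as np

num = 200
xs = np.linspace(x1, x2, num).astype(int)
ys = np.linspace(y1, y2, num).astype(int)
inside = [(px, py) for px, py in zip(xs, ys)
          if 0 <= py < thresh.shape[0] and 0 <= px < thresh.shape[1]
          and thresh[py, px] > 0]
if inside:
    tip = inside[-1]   # or inside[0], depending on which end the tip is on
    print("tip estimate:", tip)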
I have the following image containing a dartboard
After processing the image looks as follows:
In addition, I have a function that creates a theoretical dartboard:
import cv2
import numpy as np
def draw_dartboard():
    IMG = np.ones((400, 400), 'uint8') * 255
    center = (int(IMG.shape[0] // 2), int(IMG.shape[1] // 2))
    size_dartboard = int(340)
    r_board = int(170)
    r_db = int(6.35)
    r_sb = int(15.9)
    r_doubles = int(162)
    r_triples = int(99)
    width_rings = int(8)

    cv2.circle(IMG, center, r_doubles + width_rings, (0,0,0), -1)
    cv2.circle(IMG, center, r_doubles, (255,255,255), -1)
    cv2.circle(IMG, center, r_triples + width_rings, (0,0,0), -1)
    cv2.circle(IMG, center, r_triples, (255,255,255), -1)

    thetas_min = np.radians([(18 * t - 9) for t in range(20)])
    thetas_max = np.radians([(18 * t + 9) for t in range(20)])
    for idx, (theta_min, theta_max) in enumerate(zip(thetas_min, thetas_max)):
        if (idx % 2) == 0:
            x_min = int(center[0] + r_board * np.cos(theta_min))
            y_min = int(center[1] + r_board * np.sin(theta_min))
            x_max = int(center[0] + r_board * np.cos(theta_max))
            y_max = int(center[1] + r_board * np.sin(theta_max))
            cv2.fillPoly(IMG, np.array([(center, (x_min,y_min), (x_max,y_max))]), (0,0,0))

    cv2.circle(IMG, center, r_sb, (0,0,0), -1)
    return IMG
The output of this image looks as follows:
How can I “fit” the theoretical dartboard in the real image? Clearly, there is a mismatch in orientation and scale. What's the best way to do this?
You can register your dartboard image (i.e. source image) to the one you processed (i.e. destination image) by using affine transformations.
Here is my approach, and the outcome.
import cv2
import matplotlib.pyplot as plt
import numpy as np
# read images and remove matplotlib axes
src = cv2.imread('source.png',0)
src = src[20:-30,40:-20]
dest = cv2.imread('dest.png',0)
dest = dest[40:-40,40:-40]
# find matching points manually
dest_pts = np.array([[103,29],[215,13],[236,125]]).astype(np.float32) # x,y
src_pts = np.array([[19,175],[145,158],[176,284]]).astype(np.float32) #x,y
# calculate the affine transformation matrix
warp_mat = cv2.getAffineTransform(src_pts, dest_pts)
# get the registered source image
warp_dst = cv2.warpAffine(src, warp_mat, (dest.shape[1], dest.shape[0]))
fig,ax = plt.subplots(1,3)
ax[0].imshow(src,'gray')
ax[0].scatter(src_pts[:,0],src_pts[:,1],s=1,c='r')
ax[0].set_title('src')
ax[1].imshow(dest,'gray')
ax[1].scatter(dest_pts[:,0],dest_pts[:,1],s=1,c='r')
ax[1].set_title('dest')
ax[2].imshow(warp_dst,'gray')
ax[2].set_title('registered src')
plt.savefig('result.png')
fig, ax = plt.subplots(1)
ax.imshow(dest,'gray')
ax.imshow(warp_dst,cmap='jet',alpha=0.5)
plt.savefig('overlayed_result.png')
# plt.show()
To calculate the affine transformation matrix, you need 3 matching points in both images. I have highlighted the points I chose in both images. FYI, you can also develop a way to automate finding matching points; a rough sketch of one option follows.
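For example, the matching step could be automated with ORB features and a RANSAC-estimated affine transform (my own suggestion; the detector choice and parameters are assumptions and may need tuning on these near-binary images):

import cv2
import numpy as np

orb = cv2.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(src, None)
kp2, des2 = orb.detectAndCompute(dest, None)
# match descriptors and keep the strongest matches
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)[:30]
src_auto = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_auto = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
# robustly estimate the affine matrix, rejecting outlier matches
warp_mat, inliers = cv2.estimateAffine2D(src_auto, dst_auto, method=cv2.RANSAC)
warp_dst = cv2.warpAffine(src, warp_mat, (dest.shape[1], dest.shape[0]))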
As you have already done the image processing, I will take it from there. So just to be clear, this is the image I will be working with (I cropped out the matplotlib axes, as I'm sure they aren't present in your actual image):
The concept is really simple:
Find the bounding box of the contour of the target.
With the bounding box, we can find the radius of the target by selecting the greatest among the dimensions (width and height) of the bounding box, and dividing it by 2.
With the radius of the target and the top-left corner coordinates of the target (returned when finding the bounding box of the target), we can find the center of the target with the expressions x + r and y + h - r.
With the radius of the target, you can scale your theoretical target accordingly, and with the center of the target, you can draw your theoretical target at the right coordinates.
Here is how the code goes, where Image.png is the above image. Note that I only draw one circle onto the image; the rest can be drawn in the same way, with just some added scaling:
import cv2
import numpy as np
img = cv2.imread("Image.png")
img_processed = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
contours, _ = cv2.findContours(img_processed, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
cnt = sorted(contours, key=cv2.contourArea)[-2]
x, y, w, h = cv2.boundingRect(cnt)
r = max(w, h) // 2
center_x = x + r
center_y = y + h - r
cv2.circle(img, (center_x, center_y), r, (0, 255, 0), 5)
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
Note that at this line:
cnt = sorted(contours, key=cv2.contourArea)[-2]
I am getting the contour with the second-greatest area, as the one with the greatest area would be the border of the image.
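If relying on the index feels fragile, an alternative sketch (my own suggestion) is to exclude any contour whose bounding box spans the whole image and then take the largest remaining one:

# Sketch: drop image-border contours instead of assuming they sort last
candidates = [c for c in contours
              if cv2.boundingRect(c)[2:] != (img.shape[1], img.shape[0])]
cnt = max(candidates, key=cv2.contourArea)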
I am new to OpenCV and I am not even sure how to tackle this problem. I have this image of 500x500 pixels with red dots and white lines in it.
Considering each red dot as a center, could I draw a fixed bounding box of 25x25 size around it? I need to identify every red dot in the image.
Note: the condition is that the bounding box must be of fixed size (25x25) and the red dot must be at the center of the bounding box.
Any help would be appreciated. Thank you in advance.
Another solution: use numpy slicing to get the red channel, create a mask of the red dots from it, and use cv2.findContours to get the bounding rectangles of the dots. We can use this info to draw the new 25 x 25 rectangles:
# Imports
import cv2
import numpy as np

# Read image
imagePath = "C://opencvImages//"
inputImage = cv2.imread(imagePath + "oHk9s.png")

# Deep copy for results:
inputImageCopy = inputImage.copy()

# Slice the Red channel from the image:
r = inputImage[:, :, 2]

# Convert type to unsigned integer (8 bit):
r = np.where(r == 237, 255, 0).astype("uint8")

# Extract blobs (the red dots are all the white pixels in this mask):
contours, _ = cv2.findContours(r, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

# Store bounding rectangles here:
boundingRectangles = []

# Loop through the blobs and draw a 25 x 25 green rectangle around them:
for c in contours:
    # Get dot bounding box:
    x, y, w, h = cv2.boundingRect(c)

    # Set new bounding box dimensions:
    boxWidth = 25
    boxHeight = 25

    # Center rectangle around blob:
    boxX = int(x + 0.5 * (w - boxWidth))
    boxY = int(y + 0.5 * (h - boxHeight))

    # Store data:
    boundingRectangles.append((boxX, boxY, boxWidth, boxHeight))

    # Draw and show new bounding rectangles:
    color = (0, 255, 0)
    cv2.rectangle(inputImageCopy, (boxX, boxY), (boxX + boxWidth, boxY + boxHeight), color, 2)

cv2.imshow("Boxes", inputImageCopy)
cv2.waitKey(0)
Additionally, I've stored the top left coordinate, width and height of the rectangles in the boundingRectangles list. This is the output:
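For example, the stored rectangles can later be used to crop each dot out of the original image:

# Crop each 25 x 25 region using the stored rectangle data
for (bx, by, bw, bh) in boundingRectangles:
    dotCrop = inputImage[by:by + bh, bx:bx + bw]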
Here is how you can use an HSV mask to mask out everything in your image except for the red pixels:
import cv2
import numpy as np

def draw_box(img, cnt):
    x, y, w, h = cv2.boundingRect(cnt)
    half_w = w // 2
    half_h = h // 2
    # note: the horizontal offset uses half the width, the vertical half the height
    x1 = x + half_w - 12
    x2 = x + half_w + 13
    y1 = y + half_h - 12
    y2 = y + half_h + 13
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0))

img = cv2.imread("red_dots.png")
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ranges = np.array([[100, 0, 0], [179, 255, 255]])
mask = cv2.inRange(img_hsv, *ranges)
img_masked = cv2.bitwise_and(img, img, mask=mask)
img_gray = cv2.cvtColor(img_masked, cv2.COLOR_BGR2GRAY)
contours, _ = cv2.findContours(img_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    draw_box(img, cnt)

cv2.imshow("Image", img)
cv2.waitKey(0)
Output:
Notice this part of the draw_box() function:
x1 = x + half_w - 12
x2 = x + half_w + 13
y1 = y + half_h - 12
y2 = y + half_h + 13
Ideally, instead of - 12 and + 13, it would be - 12.5 and + 12.5, but there are no half pixels in OpenCV, or an error would be thrown.
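A slightly different sketch (my own variant) avoids the bounding-box arithmetic entirely by centering the fixed box on the blob centroid from cv2.moments:

import cv2

def draw_box_centered(img, cnt, size=25):
    # centroid of the blob from image moments
    m = cv2.moments(cnt)
    cx = int(m["m10"] / m["m00"])
    cy = int(m["m01"] / m["m00"])
    half = size // 2   # 12 for size 25; the box spans [c - 12, c + 12] inclusive
    cv2.rectangle(img, (cx - half, cy - half), (cx + half, cy + half), (0, 255, 0))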
I'm working on a project using Python(3.7) and OpenCV in which I have an Image(captured using the camera) of a document with a QR code placed on it.
This QR code has 6 variables respectively as:
Size of QR code image
Top
Right
Bottom
Left
Unit
Latest Update:
Here are the steps I need to perform in the same order:
Detect the QR code and decode it to read the size values.
If the size of the QR code in the image is not equal to the size mentioned inside it, scale the image so that the two sizes match.
Then crop the image on all sides of the QR code according to the values mentioned inside it.
I have tried this code:
import cv2
import math

def decodeAndCrop(inputImage):
    print(str(inputImage))
    image = cv2.imread(str(inputImage))
    qrCodeDetector = cv2.QRCodeDetector()
    decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
    qr_data = decodedText.split(",")
    print("qr data from function: {}".format(qr_data))
    if points is not None:
        pts = len(points)
        # print(pts)
        for i in range(pts):
            nextPointIndex = (i + 1) % pts
            if str(inputImage) == "scaled_img.jpg":
                cv2.line(
                    image,
                    tuple(points[i][0]),
                    tuple(points[nextPointIndex][0]),
                    (255, 0, 0),
                    5,
                )
            print(points[i][0])
        width = int(
            math.sqrt(
                (points[0][0][0] - points[1][0][0]) ** 2
                + (points[0][0][1] - points[1][0][1]) ** 2
            )
        )
        height = int(
            math.sqrt(
                (points[1][0][0] - points[2][0][0]) ** 2
                + (points[1][0][1] - points[2][0][1]) ** 2
            )
        )
        print("height and width after scaling: {} {}".format(height, width))
        if not str(inputImage) == "scaled_img.jpg":
            scaled_img = None
            if width == qr_data[0] and height == qr_data[0]:
                print("Sizes are equal")
                # Add the extension values to points and crop
                y = int(points[0][0][1]) - int(qr_data[1])
                x = int(points[0][0][0]) - int(qr_data[4])
                roi = image[
                    y : y + height + int(qr_data[3]), x : x + width + int(qr_data[2])
                ]
                scaled_img = cv2.imwrite("scaled_img.jpg", roi)
                return scaled_img
            else:
                print(
                    "Width and height "
                    + str(width)
                    + "x"
                    + str(height)
                    + " not equal to "
                    + str(qr_data[0])
                    + "x"
                    + str(qr_data[0])
                )
                if height > int(qr_data[0]):
                    scale_width = int(width) - int(qr_data[0])
                    scale_height = int(height) - int(qr_data[0])
                    print(f"scaled width: {scale_width} scaled height: {scale_height}")
                    dimension = (scale_width, scale_height)
                    scaled_img = cv2.resize(
                        image, dimension, interpolation=cv2.INTER_AREA
                    )
                    print("new img dims: {}".format(scaled_img.shape))
                    cv2.imshow("scaled image:", scaled_img)
                    cv2.imwrite("scaled_img.jpg", scaled_img)
                elif height < int(qr_data[0]):
                    scale_width = int(qr_data[0]) - width
                    scale_height = int(qr_data[0] - height)
                    print(f"scaled width: {scale_width} scaled height: {scale_height}")
                    dimension = (scale_width, scale_height)
                    scaled_img = cv2.resize(
                        image, dimension, interpolation=cv2.INTER_AREA
                    )
                    print("new img dims: {}".format(scaled_img.shape))
                    cv2.imshow("scaled image:", scaled_img)
                    cv2.imwrite("scaled_img.jpg", scaled_img)
                cv2.imshow("final output:", roi)
                return scaled_img
        else:
            y = int(points[0][0][1]) - int(qr_data[1])
            x = int(points[0][0][0]) - int(qr_data[4])
            print(" x and y")
            print(x)
            print(y)
            roi = image[
                y : y + height + int(qr_data[3]), x : x + width + int(qr_data[2])
            ]
            final_img = cv2.imwrite("finalized_image.jpg", roi)
            cv2.imshow("finalized image:", final_img)
            return final_img


if __name__ == "__main__":
    image_to_crop = decodeAndCrop("example_input_1.jpg")
    final_image = decodeAndCrop("scaled_img.jpg")

    cv2.imshow("Cropped:", image_to_crop)
    # cv2.imshow("Final: ", final_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
The code above gives the following error:
final_img = cv2.imwrite("finalized_image.jpg", roi)
cv2.error: OpenCV(4.2.0) /Users/travis/build/skvark/opencv-python/opencv/modules/imgcodecs/src/loadsave.cpp:715: error: (-215:Assertion failed) !_img.empty() in function 'imwrite'
End of Latest Update:
An example of the decoded information of a QR code is: 100, 20, 40, 60, 20, px
Now, I need to detect the QR code in this document image. As a first step, I need to compare the size of the QR code in the captured image with the size mentioned in the decoded information. For example, if the size of the QR code in the captured image is 90x90px and the size from the decoded info is 100x100px, we need to compare them.
Then, in the second step, I have to crop the complete image using the Top, Right, Bottom & Left variables accordingly. According to the above example, we need to crop the image from the position of the detected QR code to 20px Top, 40px Right, 60px Bottom and 20px Left. I have added an example image below.
I have managed to decode the QR code information, but how can I take the detected QR code area as a separate image, compare its size with the mentioned size, and then crop the image accordingly?
Here's what I have tried so far:
import cv2

image = cv2.imread('/Users/abdul/PycharmProjects/QScanner/images/second.jpg')

qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = qr_data[0]
top = qr_data[1]
right = qr_data[2]
bottom = qr_data[3]
left = qr_data[4]

print(f'Size: {qr_size}' + str(qr_data[5]))
print(f'Top: {top}')
print(f'Right: {right}')
print(f'Bottom: {bottom}')
print(f'Left: {left}')

if points is not None:
    pts = len(points)
    print(pts)
    for i in range(pts):
        nextPointIndex = (i+1) % pts
        cv2.line(image, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255,0,0), 5)
        print(points[i][0])

    print(decodedText)
    cv2.imshow("Image", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
Here's an example Image:
And here's a sample input image:
Here's a simple approach using thresholding, morphological operations, and contour filtering.
Obtain a binary image. Load the image, convert to grayscale, apply a Gaussian blur, then Otsu's threshold.
Connect individual QR contours. Create a rectangular structuring kernel with cv2.getStructuringElement(), then perform morphological operations with cv2.MORPH_CLOSE.
Filter for the QR code. Find contours and filter using contour approximation, contour area, and aspect ratio.
Detected QR code
Extracted QR code
From here you can compare the QR code with your reference information
Code
import cv2
import numpy as np

# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (9,9), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Morph close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)

# Find contours and filter for QR code
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    x,y,w,h = cv2.boundingRect(approx)
    area = cv2.contourArea(c)
    ar = w / float(h)
    if len(approx) == 4 and area > 1000 and (ar > .85 and ar < 1.3):
        cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 3)
        ROI = original[y:y+h, x:x+w]
        cv2.imwrite('ROI.png', ROI)

cv2.imshow('thresh', thresh)
cv2.imshow('close', close)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
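As noted above, you can then compare the extracted QR code with your reference information. A hedged sketch of that step (my own addition, assuming the decoded text has the same comma-separated format as in the question):

# Decode the extracted QR code and compare its measured pixel size
# with the size value encoded inside it
detector = cv2.QRCodeDetector()
data, points, _ = detector.detectAndDecode(ROI)
if points is not None and data:
    qr_fields = data.split(',')            # e.g. "100, 20, 40, 60, 20, px"
    declared_size = int(qr_fields[0])
    measured_size = max(ROI.shape[:2])     # the ROI is roughly square
    print("declared:", declared_size, "measured:", measured_size)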
I got the width and height using the corner points and compared them with the qr_data size, then cropped the QR code as needed.
import cv2
import math

image = cv2.imread('/ur/image/directory/qr.jpg')

qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = qr_data[0]
top = qr_data[1]
right = qr_data[2]
bottom = qr_data[3]
left = qr_data[4]

if points is not None:
    pts = len(points)
    print(pts)
    for i in range(pts):
        nextPointIndex = (i+1) % pts
        cv2.line(image, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255,0,0), 5)
        print(points[i][0])

    width = int(math.sqrt((points[0][0][0]-points[1][0][0])**2 + (points[0][0][1]-points[1][0][1])**2))
    height = int(math.sqrt((points[1][0][0]-points[2][0][0])**2 + (points[1][0][1]-points[2][0][1])**2))

    # Compare the size (qr_data values are strings, so convert to int)
    if width == int(qr_data[0]) and height == int(qr_data[0]):
        print("Sizes are equal")
    else:
        print("Width and height " + str(width) + "x" + str(height) + " not equal to "
              + str(qr_data[0]) + "x" + str(qr_data[0]))

    # Add the extension values to points and crop
    y = int(points[0][0][1]) - int(qr_data[1])
    x = int(points[0][0][0]) - int(qr_data[4])
    roi = image[y:y+height + int(qr_data[3]), x:x+width + int(qr_data[2])]

    print(decodedText)
    cv2.imshow("Image", image)
    cv2.imshow("Crop", roi)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
Result:
So, you mainly have 3 problems here.
If the image is rotated by an angle θ.
If the sheet is not perfectly flat (in the images, the upper line doesn't seem to be linear, but it should not be a big deal).
The black borders. Will you always have those, or could the background differ? This is important because without cropping them out, you won't be able to get a reasonable result.
I improved your code a little bit and removed the border pixels:
import cv2
import matplotlib.pyplot as plt
import math
import numpy as np

image = cv2.imread('/Users/samettaspinar/Public/im.jpg')

qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = int(qr_data[0])
top = int(qr_data[1])
right = int(qr_data[2])
bottom = int(qr_data[3])
left = int(qr_data[4])

print(f'Size: {qr_size}' + str(qr_data[5]))
print(f'Top: {top}')
print(f'Right: {right}')
print(f'Bottom: {bottom}')
print(f'Left: {left}')

plt.imshow(image)
plt.show()

dists = []  # This is for estimating distances between corner points.
            # I will average them to find the ratio of pixels in the image vs qr_size.
            # In the optimal case, all dists should be equal.
if points is not None:
    pts = len(points)
    for i in range(pts):
        p1 = points[i][0]
        p2 = points[(i+1) % pts][0]
        dists.append(math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2))
        print('line', tuple(p1), tuple(p2))
        image = cv2.line(image, tuple(p1), tuple(p2), (255,0,0), 5)
else:
    print("QR code not detected")

print('distances: ', dists)

# Remove the black border pixels. I had a simple idea for this:
# get the average intensity of the gray image, then count the column
# averages in the first half that are less than intensity/2. That
# approximately gives the number of black border pixels on the left, etc.
row, col, dim = image.shape  # needed below for the half-image splits

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
inten = np.mean(gray)

x = np.mean(gray, axis=0)  # finds the vertical average
y = np.mean(gray, axis=1)  # finds the horizontal average

bl_left = np.sum([x[:int(col/2)] < inten/2])
bl_right = np.sum([x[int(col/2)+1:] < inten/2])
bl_top = np.sum([y[:int(row/2)] < inten/2])
bl_bottom = np.sum([y[int(row/2)+1:] < inten/2])

print('black margins: ', bl_left, bl_right, bl_top, bl_bottom)

# Estimate how many pixels you will crop out
ratio = np.mean(dists) / int(qr_size)
print('actual px / qr_size in px: ', ratio)

top, left, right, bottom = int(top*ratio), int(left*ratio), int(right*ratio), int(bottom*ratio)

top += bl_top
left += bl_left
right += bl_right
bottom += bl_bottom

print('num pixels to be cropped: ', top, left, right, bottom)

image2 = image[top:row-bottom, left:col-right, :]
plt.imshow(image2)
plt.show()
Notice that I ignored the rotation issue. If there is rotation, you can find the angle by calculating the tangents/arctan where I calculated the distances.
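For instance, a small sketch of that idea (my addition, reusing the corner points from the code above): the rotation angle can be estimated from one edge of the QR code with atan2:

# Estimate the rotation from the top edge of the QR code
p1, p2 = points[0][0], points[1][0]
angle_deg = math.degrees(math.atan2(p2[1] - p1[1], p2[0] - p1[0]))
print('estimated rotation (degrees):', angle_deg)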
For QR detection and parsing
import cv2
import sys

filename = sys.argv[1]

# read the QRCODE image
# if the QR code is not black/white, it is better to convert it to grayscale
img = cv2.imread(filename, 0)  # zero means grayscale
img_origin = cv2.imread(filename)

# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()

# detect and decode
data, bbox, straight_qrcode = detector.detectAndDecode(img)

# if there is a QR code
if bbox is not None:
    print(f"QRCode data:\n{data}")

    # display the image with lines
    # length of bounding box
    n_lines = len(bbox[0])  # since bbox has shape [[[float, float], ...]], convert the floats to int and loop over the first element of the array
    bbox1 = bbox.astype(int)  # float to int conversion
    for i in range(n_lines):
        # draw all lines
        point1 = tuple(bbox1[0, i])
        point2 = tuple(bbox1[0, (i+1) % n_lines])
        cv2.line(img_origin, point1, point2, color=(255, 0, 0), thickness=2)

    # display the result
    cv2.imshow("img", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
I'm putting together an image processing tool to follow the deformation of a part using images. The part has rectangular markers that get detected with image segmentation and the cv2.findContours function. The contour centers are then used to calculate distances and bend radii. Everything seems to work fine, but when reviewing results I found out that the contours aren't sorted the way I would like.
The part is repeatedly bent, and the contours are positioned in a circle.
I found this article that describes the sorting horizontally and vertically:
https://www.pyimagesearch.com/2015/04/20/sorting-contours-using-python-and-opencv/
Does anyone have any idea how to sort the contours in a clockwise direction?
The code is below.
import os
import exifread
import cv2
import numpy as np
import scipy
from matplotlib import pyplot as plt
import imutils
import pandas as pd

#---------- INPUT ----------

# Define the image filename
img_filename = 'frame397.jpg'
img_path = img_filename

# Define values for cropping
x = 0
y = 200
w = 1200
h = 800

# Define color values for segmentation
# the values can be probed with GIMP
h1 = 0
s1 = 70
v1 = 120
h2 = 255
s2 = 255
v2 = 255
red_lower = np.array([h1,s1,v1])
red_upper = np.array([h2,s2,v2])

# Define desired area size
# desired area size is pixel count - use GIMP to probe
s1 = 500
s2 = 10000

#---------- PROCESS IMAGES ----------

# Create an empty dataframe for storing results
# in shape of (image_name,time,angle,angle_smooth,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11)
# Define the results dataframe shape and column names
results_df = pd.DataFrame(columns=['image_name','alpha','r1','r2','r3','r4','r5','r6','r7','r8','r9','r10','r11',
                                   'center_dist1', 'center_dist2','center_dist3','center_dist4',
                                   'center_dist5','center_dist6','center_dist7','center_dist8',
                                   'center_dist9','center_dist10','center_dist11'])

# Open image, make it black and white and find contours
img = cv2.imread(img_path)
crop = img[y:y+h, x:x+w]
blur = cv2.blur(crop,(2,2))
hsv = cv2.cvtColor(blur,cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, red_lower, red_upper)
mask_copy = mask.copy()
cnts = cv2.findContours(mask_copy,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
#print(cnts)

x = []
y = []

# Loop through contours, calculate the centers and prepare the
# contours and contour centers display
# define the font for the text on the image
font = cv2.FONT_HERSHEY_SIMPLEX
for cnt in cnts:
    area = cv2.contourArea(cnt)
    moment = cv2.moments(cnt)
    if s1 < area < s2:
        print(area)
        c_x = int(moment["m10"]/moment["m00"])
        c_y = int(moment["m01"]/moment["m00"])
        # draw contours
        cv2.drawContours(crop, cnt, -1, (0,255,0), 3)
        # draw a circle in the center of every contour; -1 is for thickness,
        # which means the circle will be filled in
        cv2.circle(crop, (c_x,c_y), 10, (0,255,0), -1)
        # display center coordinates on the image
        string = str(c_x) + ',' + str(c_y)
        cv2.putText(crop, string, (c_x,c_y), font, 0.5, (255,255,255), 2)
        x.append(float(c_x))
        y.append(float(c_y))
        print(c_x, c_y)

print(x)
print(y)

# Display image
cv2.namedWindow('Contours', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Contours', 1200, 900)
cv2.imshow('Contours', crop)

# Wait for windows closing
cv2.waitKey() & 0xFF
cv2.destroyAllWindows()
Image is here:
I used OpenCV's minEnclosingCircle to "fit" a circle to the points (it's not actually a fit, but it's good enough for finding a point inside the curvature of the markers). Marking each contour with the angle from its centroid to the circle's center gave me a set of angles to sort by.
import cv2
import numpy as np
import math

# 2d distance
def dist2D(one, two):
    dx = one[0] - two[0];
    dy = one[1] - two[1];
    return math.sqrt(dx*dx + dy*dy);

# angle between three points (the last point is the middle)
def angle3P(p1, p2, p3):
    # get distances
    a = dist2D(p3, p1);
    b = dist2D(p3, p2);
    c = dist2D(p1, p2);

    # calculate angle // assume a and b are nonzero
    # (law of cosines)
    numer = c**2 - a**2 - b**2;
    denom = -2 * a * b;
    if denom == 0:
        denom = 0.000001;
    rads = math.acos(numer / denom);
    degs = math.degrees(rads);

    # check if past 180 degrees
    if p1[1] > p3[1]:
        degs = 360 - degs;
    return degs;

# load image
img = cv2.imread("slinky.jpg");

# rescale
scale = 0.5;
h, w = img.shape[:2];
h = int(h * scale);
w = int(w * scale);
img = cv2.resize(img, (w,h));

# change color space
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB);
l,a,b = cv2.split(lab);

# threshold
thresh = cv2.inRange(a, 140, 255);

# get rid of little dots
kernel = np.ones((3,3),np.uint8)
thresh = cv2.erode(thresh,kernel,iterations = 1);
thresh = cv2.dilate(thresh,kernel, iterations = 1);

# contours (OpenCV 3 signature; in OpenCV 4 use: contours, _ = cv2.findContours(...))
_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);

# get centroids
centroids = [];
centers = [];
for con in contours:
    m = cv2.moments(con);
    cx = int(m['m10'] / m['m00']);
    cy = int(m['m01'] / m['m00']);
    centers.append([cx, cy]);
    centroids.append([[cx, cy], con]);
    img = cv2.circle(img, (cx, cy), 10, (0,0,255), -1);

# find circle around points
# NOTE: this doesn't "fit" a circle to the points
# I'm just using this to find a "good enough" center
# that's in the direction of the curve
numped = np.array(centers);
(x, y), radius = cv2.minEnclosingCircle(numped);
img = cv2.circle(img, (int(x), int(y)), int(radius), (255,0,0), 2);
middle = [x,y];
offshoot = [x + 100, y];

# get angles
angles = [];
for cen in centroids:
    center, contour = cen;
    angle = angle3P(center, offshoot, middle);
    angles.append([angle, center, contour]);

# sort by angle
final = sorted(angles, key = lambda a: a[0], reverse = True);

# pull out just the contours
contours = [clump[2] for clump in final];

# draw contours in order
marked = img.copy();
counter = 0;
for con in contours:
    cv2.drawContours(marked, [con], -1, (0, 255, 0), 2);
    cv2.imshow("marked", marked);
    cv2.imwrite("marking_seq/" + str(counter) + ".png", marked);
    counter += 1;
    cv2.waitKey(0);

# show
cv2.imshow("orig", img);
cv2.imshow("a", a);
cv2.imshow("thresh", thresh);
cv2.waitKey(0);
I am facing a contour problem. To read the license plate correctly, it is best to extract the plate's contour and then perhaps apply some OCR.
For example, with this photo, you can see that the plate has a white outline along its edges. How could I eliminate those white outlines in a generic way, so the method can be used on more license plates?
I am thinking of applying a threshold along the two axes (horizontal and vertical) to clean up possible white borders. Any ideas?
Here is a little of what I have done so far:
# Creating copies of the original images
output_cp = output.copy()
img_cp = straightened.copy()
# threshold
ret,thresh = cv2.threshold(output_cp, 215, 255, cv2.THRESH_BINARY_INV)
imshow(thresh)
But then, when it is supposed to work, it doesn't, because it is a binary image.
Here is my pseudo-code, where xxx marks the contour I don't know how to choose:
mask = np.zeros(image.shape, np.uint8)
mask_cnt = cv2.drawContours(mask.copy(), [xxx], 0, (255,255,255,255), -1)
removed = cv2.subtract(mask_cnt, image)
plt.figure()
plt.imshow(removed, cmap='gray')
Any help is welcome!
To remove the white margin around the plate, you can:
find the external contours of the image with findContours
take its rotated bounding box with minAreaRect
compute a transformation to correct the orientation with getPerspectiveTransform
then apply it with warpPerspective
This removes the margin and also corrects the orientation of the plate, which should make it much easier to read by any OCR.
Here is a python implementation of my solution:
#!/usr/bin/env python3

import numpy as np
import cv2

img = cv2.imread("plate.png")
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

# invert image and detect contours
inverted = cv2.bitwise_not(gray)
contours, hierarchy = cv2.findContours(inverted,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

# get the biggest contour
biggest_index = -1
biggest_area = -1
i = 0
for c in contours:
    area = cv2.contourArea(c)
    if area > biggest_area:
        biggest_area = area
        biggest_index = i
    i = i + 1

print("biggest area: " + str(biggest_area) + " index: " + str(biggest_index))

cv2.drawContours(img, contours, biggest_index, [0,0,255])

center, size, angle = cv2.minAreaRect(contours[biggest_index])
rot_mat = cv2.getRotationMatrix2D(center, angle, 1.)

#cv2.warpPerspective()
print(size)
dst = cv2.warpAffine(gray, rot_mat, (int(size[0]), int(size[1])))
mask = dst * 0

x1 = max([int(center[0] - size[0] / 2)+1, 0])
y1 = max([int(center[1] - size[1] / 2)+1, 0])
x2 = int(center[0] + size[0] / 2)-1
y2 = int(center[1] + size[1] / 2)-1

point1 = (x1, y1)
point2 = (x2, y2)

print(point1)
print(point2)

cv2.rectangle(dst, point1, point2, [0,0,0])
cv2.rectangle(mask, point1, point2, [255,255,255], cv2.FILLED)

masked = cv2.bitwise_and(dst, mask)

cv2.imshow("img", img)
cv2.imshow("dst", dst)
cv2.imshow("masked", masked)
cv2.imshow("mask", mask)

key = -1
while key != 27:
    key = cv2.waitKey(1)
And the resulting image :
This is not perfect, but I think it's a good start; it's a slightly different approach than thresholding.
You might also try applying some morphological operators to close gaps or remove dirty parts.
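For completeness, here is a minimal sketch of the perspective variant listed in the steps above (getPerspectiveTransform + warpPerspective), reusing the minAreaRect result; note that the corner ordering returned by boxPoints can vary, so the destination mapping may need adjusting:

rect = cv2.minAreaRect(contours[biggest_index])
box = cv2.boxPoints(rect).astype(np.float32)   # 4 corners of the rotated box
w, h = int(rect[1][0]), int(rect[1][1])
dst_pts = np.float32([[0, h - 1], [0, 0], [w - 1, 0], [w - 1, h - 1]])
M = cv2.getPerspectiveTransform(box, dst_pts)
plate = cv2.warpPerspective(gray, M, (w, h))
cv2.imshow("plate", plate)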