Image rectification on pressure gauge, rotate it vertically - Python

I'm currently trying to implement a paper on reading pressure gauges, and there is a step where I don't know what to do. It says: "The display contour is then rotated to vertically align the long axis of the ellipse and inscribed in a rectangle used to crop the gauge image." I'm not sure how I can rotate the image this way, so for better understanding I will show an example of what the current image is and what it needs to be.
I'm currently at step C and need to get the image into the position of step D. The text detection in the example is not important for now.
At the moment I have the correct contours and the ellipse for the display.
(cnts, boundingBoxes) = sort_contours(cnts)

# find the correct contours and fit an ellipse to each candidate
if len(cnts) != 0:
    for i in range(len(cnts)):
        if len(cnts[i]) >= 5:  # fitEllipse needs at least 5 points
            # cv2.drawContours(image, cnts[0], -1, (150, 10, 255), 3)
            ellipse = cv2.fitEllipse(cnts[i])
            finalElps.append(ellipse)  # ((centx, centy), (width, height), angle)

for i in range(len(finalElps)):
    centx = finalElps[i][0][0]
    centy = finalElps[i][0][1]
    eWidth = finalElps[i][1][0]
    eHeight = finalElps[i][1][1]
    sfRes = Sf(eWidth, eHeight)
    cfRes = Cf(centx, imgCenterX, centy, imgCenterY)
    afRes = Af(eWidth, eHeight, imgWidth, imgHeight)
    print("SF: " + str(sfRes) + " | CF: " + str(cfRes) + " | AF: " + str(afRes))
    if sfRes < 0.4 and cfRes < 6 and afRes < 0.9:
        print(finalElps[i])
        cv2.ellipse(image, finalElps[i], (255, 0, 0), 2)

plt.imshow(image)
sfRes, cfRes and afRes are just calculated scores used to select the right ellipse.
What should my next step be to reach the vertical rotation? I think the correct name for it is "image rectification", but I'm not 100% sure about it.

Here is how to get image D from image C:
1. Find the binary mask.
2. Find an ellipse (center, width, height, angle).
3. Find 4 points on opposite sides of the ellipse.
4. Use these 4 points to get a perspective transform that can be used to warp the rotated gauge image to a rectangular image. The final result:
Code:
import cv2
import numpy as np
image = cv2.imread("gauge.png")
# find the parameters of the ellipse
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)[1]
points = np.stack(np.nonzero(mask.T)).T
hull = cv2.convexHull(points)
(cx, cy), (width, height), angle = cv2.fitEllipse(hull)
# for visualization
# cv2.ellipse(image, (int(cx), int(cy)), (int(width/2), int(height/2)), angle, 0, 360, (0, 0, 255), 2)
# find the points on the opposite sides of the ellipse
# define the axis endpoints in homogeneous coordinates and rotate them using a rotation matrix
mat = cv2.getRotationMatrix2D((cx, cy), -angle, 1)
mat = np.vstack((mat, [0, 0, 1]))
coords = np.array(
    [
        [cx + width // 2, cy, 1],
        [cx - width // 2, cy, 1],
        [cx, cy + height // 2, 1],
        [cx, cy - height // 2, 1],
    ]
)
points = (mat @ coords.T)[:2].T  # drop the homogeneous part
# for visualization
# for px, py in points.astype(int)[:2]:
# cv2.circle(image, (px, py), 10, (0, 0, 255), -1)
# for px, py in points.astype(int)[2:]:
# cv2.circle(image, (px, py), 10, (255, 0, 0), -1)
# define points on the target image to which the ellipse points should be mapped
size = 300
target = np.float32(
    [
        [size, size // 2],
        [0, size // 2],
        [size // 2, size],
        [size // 2, 0],
    ]
)
mat = cv2.getPerspectiveTransform(points.astype(np.float32), target)
rect_image = cv2.warpPerspective(image, mat, (size, size))
cv2.imwrite("rect_gauge.png", rect_image)

Error : Invalid number of channels in input image: 'VScn::contains(scn)' using opencv?

I am trying to unshear an image, just like CamScanner does. It works for some images, but if I give it an arbitrary image it does not work: the image named new_image.jpeg is not working while the image named 1111.jpeg is working, although the pictures are essentially the same.
Code:
import numpy as np
import cv2
import re
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# ## **Use Gaussian Blurring combined with Adaptive Threshold**
def blur_and_threshold(gray):
    gray = cv2.GaussianBlur(gray, (3, 3), 2)
    threshold = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    threshold = cv2.fastNlMeansDenoising(threshold, 11, 31, 9)
    return threshold
# ## **Find the Biggest Contour**
# **Note: We made sure the minimum contour is bigger than 1/10 size of the whole picture. This helps in removing very small contours (noise) from our dataset**
def biggest_contour(contours, min_area):
    biggest = None
    max_area = 0
    biggest_n = 0
    approx_contour = None
    for n, i in enumerate(contours):
        area = cv2.contourArea(i)
        if area > min_area / 10:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.02 * peri, True)
            if area > max_area and len(approx) == 4:
                biggest = approx
                max_area = area
                biggest_n = n
                approx_contour = approx
    return biggest_n, approx_contour
def order_points(pts):
    # initialize a list of coordinates that will be ordered
    # such that the first entry in the list is the top-left,
    # the second entry is the top-right, the third is the
    # bottom-right, and the fourth is the bottom-left
    pts = pts.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")
    # the top-left point will have the smallest sum, whereas
    # the bottom-right point will have the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    # now, compute the difference between the points; the
    # top-right point will have the smallest difference,
    # whereas the bottom-left will have the largest difference
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    # return the ordered coordinates
    return rect
### Find the exact (x,y) coordinates of the biggest contour and crop it out
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view"
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    # return the warped image
    return warped
# # Transforming the image
# **1. Convert the image to grayscale**
# **2. Remove noise and smoothen out the image by applying blurring and thresholding techniques**
# **3. Use Canny Edge Detection to find the edges**
# **4. Find the biggest contour and crop it out**
def transformation(image):
    image = image.copy()
    height, width, channels = image.shape
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image_size = gray.size
    threshold = blur_and_threshold(gray)
    # We need two threshold values, minVal and maxVal. Any edges with intensity gradient more than maxVal
    # are sure to be edges and those below minVal are sure to be non-edges, so discarded.
    # Those that lie between these two thresholds are classified as edges or non-edges based on their connectivity.
    # If they are connected to "sure-edge" pixels, they are considered to be part of edges.
    # Otherwise, they are also discarded.
    edges = cv2.Canny(threshold, 50, 150, apertureSize=7)
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    simplified_contours = []
    for cnt in contours:
        hull = cv2.convexHull(cnt)
        simplified_contours.append(cv2.approxPolyDP(hull, 0.001 * cv2.arcLength(hull, True), True))
    simplified_contours = np.array(simplified_contours)
    biggest_n, approx_contour = biggest_contour(simplified_contours, image_size)
    threshold = cv2.drawContours(image, simplified_contours, biggest_n, (0, 255, 0), 1)
    dst = 0
    if approx_contour is not None and len(approx_contour) == 4:
        approx_contour = np.float32(approx_contour)
        dst = four_point_transform(threshold, approx_contour)
    croppedImage = dst
    return croppedImage
# **Increase the brightness of the image by playing with the "V" value (from HSV)**
def increase_brightness(img, value=30):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    lim = 255 - value
    v[v > lim] = 255
    v[v <= lim] += value
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
# **Sharpen the image using Kernel Sharpening Technique**
def final_image(rotated):
    # create our sharpening kernel; its values must sum to one
    kernel_sharpening = np.array([[0, -1, 0],
                                  [-1, 5, -1],
                                  [0, -1, 0]])
    # apply the sharpening kernel to the input image
    sharpened = cv2.filter2D(rotated, -1, kernel_sharpening)
    sharpened = increase_brightness(sharpened, 30)
    return sharpened
# ## 1. Pass the image through the transformation function to crop out the biggest contour
# ## 2. Brighten & Sharpen the image to get a final cleaned image
path = "/home/hamza/Desktop/"
image = cv2.imread("path of image")
blurred_threshold = transformation(image)
cleaned_image = final_image(blurred_threshold)
cv2.imwrite(path + "Final_Image4.jpg", cleaned_image)
Pictures: [first pic], [second image]
Edit 1: [test image 1], [test image 2], [test image 3], [test image 4]
Edit 2:
If I pass in only a black-and-white image, can it still remove the shear? You may try it; if it works, please share it with me.
Pic: [black and white image]
Note: if an image is already unsheared, that particular image should be output exactly as it is; the code should not touch it. I hope you get the point.
Nothing is wrong with your images. I tested your code, and indeed the first image works as you wrote it; for the second image, some quick debugging shows that no contour is found in your transformation function - you have the block:
if approx_contour is not None and len(approx_contour) == 4:
    approx_contour = np.float32(approx_contour)
    dst = four_point_transform(threshold, approx_contour)
Just add:
else:
    print("no contour found")
to see for yourself.
The problem is with your Canny filter. With apertureSize = 7 your first image works but not the second one; with apertureSize = 3 your second image works but not the first one.
So both of your images work, but not with the same parameters. If processing time is not an issue for your task, you could iterate over several parameter values, or else steer away from the Canny method. In both of your images the paper is a lot brighter than the background, so a convexHull on the thresholded image would work, as sketched below.
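A minimal sketch of that threshold + convexHull idea (my own, untested on your exact images; the filename "im2.jpg" and the Otsu threshold are assumptions):
import cv2
import numpy as np

img = cv2.imread("im2.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# the paper is much brighter than the background, so a global Otsu
# threshold should isolate it without relying on Canny
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
biggest = max(contours, key=cv2.contourArea)
hull = cv2.convexHull(biggest)
# reduce the hull to 4 corners for the perspective transform
peri = cv2.arcLength(hull, True)
corners = cv2.approxPolyDP(hull, 0.02 * peri, True)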
Since your initial image is in color, another approach is to use the H (hue) plane to detect the white color of the paper. I have made some adjustments to the code and it now works for both of your images.
import numpy as np
import cv2
import imutils
def order_points(pts):
    """ Return sorted list of corners, from top-left then clockwise """
    pts = np.reshape(pts, (6, 2))
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    return rect
def four_point_transform(image, pts):
    (tl, tr, br, bl) = pts
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(pts, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
image1 = cv2.imread('im1.jpg')
mask = np.zeros(image1.shape, np.uint8)
gray = cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
# Hue thresholding for the white paper
HSV = cv2.cvtColor(image1, cv2.COLOR_BGR2HSV)
lo_H = 100
hi_H = 200
thresh = cv2.inRange(HSV, (lo_H, 0, 0), (hi_H, 255, 255))
# Find the contour of the paper sheet - use convexhull
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnt = sorted(contours, key=cv2.contourArea, reverse=True)
hull = []
for i in range(len(cnt)):
    hull.append(cv2.convexHull(cnt[i], False))
cv2.drawContours(mask, hull, 0, (255, 255, 255), thickness=cv2.FILLED)
mask = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_and(gray, gray, mask=mask)
# Corner detection - keep 6 best corners
corners = cv2.goodFeaturesToTrack(mask, 6, 0.01, 50)
corners = np.int0(corners)
# Order the corners and keep 4
rect = order_points(corners)
wrap = four_point_transform(gray, rect)
# Display results
cv2.drawContours(image1, hull, 0, (0,255,125), 3)
cv2.imshow("image", image1)
#cv2.imshow('thresh', thresh)
#cv2.imshow("mask", mask)
cv2.imshow("gray", gray)
cv2.imshow("wrap", wrap)

OpenCV Find a middle line of a contour [Python]

In my image processing project, I have already obtained a masked image (black-and-white image) and its contours using the cv.findContours function. My goal now is to create an algorithm that can draw a middle line for this contour. The masked image and its contour are shown in the following images.
Masked image:
Contour:
In my imagination, for that contour, I would like to create a middle line which is near horizontal. I have manually marked my ideal middle line in red. Please check the following image for the red middle line that I have mentioned.
Contour with the middle line:
It is noticeable that my ultimate goal is to find the tip point, which I have marked in yellow. If you have other ideas that can directly find the yellow tip point, please also let me know. For finding the yellow tip point I have tried two approaches, cv.convexHull and cv.minAreaRect, but the issue is robustness: I made these two approaches work for some images, but for some other images in my dataset they do not work very well. Therefore, I think finding the middle line might be a good approach to try.
I believe you're trying to determine the contour's center of gravity and orientation. We can easily do this using Central Moments. More info on that here.
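For reference, the orientation computed below is the angle of the shape's principal axis, derived from the second-order central moments: theta = 0.5 * atan2(2*mu11, mu20 - mu02).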
The code below generates this plot. Is this the result you wanted?
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Determine contour
img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
img_bin = (img > 128).astype(np.uint8)
contours, _ = cv2.findContours(img_bin, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)

# Determine center of gravity and orientation using moments
M = cv2.moments(contours[0])
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
theta = 0.5 * np.arctan2(2 * M["mu11"], M["mu20"] - M["mu02"])
endx = 600 * np.cos(theta) + center[0]  # line length 600
endy = 600 * np.sin(theta) + center[1]

# Display results
plt.imshow(img_bin, cmap='gray')
plt.scatter(center[0], center[1], marker="X")
plt.plot([center[0], endx], [center[1], endy])
plt.show()
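If you also need the yellow tip point directly, one option (my own extension of this answer, not tested on your dataset) is to take the contour point with the largest projection onto the orientation vector; flip the sign of direction if the tip lies on the other side of the centroid:
# project every contour point onto the principal-axis direction
# and keep the extreme one
pts = contours[0].reshape(-1, 2)
direction = np.array([np.cos(theta), np.sin(theta)])
tip = pts[np.argmax((pts - center) @ direction)]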
My goal right now is to create an algorithm that can draw a middle line for this contour.
If you detect the upper and lower bounds of your horizontal-lines, then you can calculate the middle-line coordinates.
For instance:
The middle line will be:
If you extend the line to the width of the image:
Code:
import cv2
img = cv2.imread("contour.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(h, w) = img.shape[:2]
x1_upper = h
x1_lower = 0
x2_upper = h
x2_lower = 0
y1_upper = h
y1_lower = 0
y2_upper = h
y2_lower = 0
lines = cv2.ximgproc.createFastLineDetector().detect(gray)
for cur in lines:
    x1 = cur[0][0]
    y1 = cur[0][1]
    x2 = cur[0][2]
    y2 = cur[0][3]
    # upper-bound coords
    if y1 < y1_upper and y2 < y2_upper:
        y1_upper = y1
        y2_upper = y2
        x1_upper = x1
        x2_upper = x2
    elif y1 > y1_lower and y2 > y2_lower:
        y1_lower = y1
        y2_lower = y2
        x1_lower = x1
        x2_lower = x2
print("\n\n-lower-bound-\n")
print("({}, {}) - ({}, {})".format(x1_lower, y1_lower, x2_lower, y2_lower))
print("\n\n-upper-bound-\n")
print("({}, {}) - ({}, {})".format(x1_upper, y1_upper, x2_upper, y2_upper))
cv2.line(img, (x1_lower, y1_lower), (x2_lower, y2_lower), (0, 255, 0), 5)
cv2.line(img, (x1_upper, y1_upper), (x2_upper, y2_upper), (0, 0, 255), 5)
x1_avg = int((x1_lower + x1_upper) / 2)
y1_avg = int((y1_lower + y1_upper) / 2)
x2_avg = int((x2_lower + x2_upper) / 2)
y2_avg = int((y2_lower + y2_upper) / 2)
cv2.line(img, (0, y1_avg), (w, y2_avg), (255, 0, 0), 5)
cv2.imshow("result", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
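Note that cv2.ximgproc (used here and in the thinning answer below) ships with the opencv-contrib-python package, not the base opencv-python install.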
I believe a skeleton is what you are looking for.
import cv2
import timeit
img = cv2.imread('Ggh8d - Copy.jpg',0)
s = timeit.default_timer()
thinned = cv2.ximgproc.thinning(img, thinningType = cv2.ximgproc.THINNING_ZHANGSUEN)
e = timeit.default_timer()
print(e-s)
cv2.imwrite("thinned1.png", thinned)
If you smooth the edge a little bit first, the skeleton comes out cleaner; a sketch of such a smoothing step follows below. Note that the line will not touch the yellow point, since the algorithm has to keep a distance from the edges, and the yellow point lies on an edge.
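A minimal sketch of that pre-smoothing (my guess at parameters; adjust the kernel sizes to your images):
import cv2

img = cv2.imread('Ggh8d - Copy.jpg', 0)
# median blur plus a morphological open to smooth the jagged
# edge pixels before computing the skeleton
img = cv2.medianBlur(img, 5)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
thinned = cv2.ximgproc.thinning(img, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)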
Here is another way to do that by computing the centerline of the rotated bounding box about your object in Python/OpenCV.
Input:
import cv2
import numpy as np
# load image
img = cv2.imread("blob_mask.jpg")
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold the grayscale image
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# get coordinates of all non-zero pixels
# NOTE: must transpose since numpy coords are y,x and opencv uses x,y
coords = np.column_stack(np.where(thresh.transpose() > 0))
# get rotated rectangle from the non-zero pixel coordinates
rotrect = cv2.minAreaRect(coords)
box = cv2.boxPoints(rotrect)
box = np.int0(box)
print (box)
# get center line from box
# note points are clockwise from bottom right
x1 = (box[0][0] + box[3][0]) // 2
y1 = (box[0][1] + box[3][1]) // 2
x2 = (box[1][0] + box[2][0]) // 2
y2 = (box[1][1] + box[2][1]) // 2
# draw rotated rectangle on copy of img as result
result = img.copy()
cv2.drawContours(result, [box], 0, (0,0,255), 2)
cv2.line(result, (x1,y1), (x2,y2), (255,0,0), 2)
# write result to disk
cv2.imwrite("blob_mask_rotrect.png", result)
# display results
cv2.imshow("THRESH", thresh)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

How to find and draw largest rectangle inside contour area?

I want to compare an image with a specific region using
(score, diff) = compare_ssim(grayA[y:y+h, x:x+w], grayB[y:y+h, x:x+w], full=True)
but that function only supports a rectangular ROI, and my ROI is a contour.
To compare them, I need the largest rectangle inside the contour. How do I find the largest rectangle inside the contour area?
Sample image
Based on your OP, I suggest using warpAffine to rotate the ROI to a rectangular shape, because the ROI is already rectangle-shaped, just rotated. Here is a simple sample:
import cv2
import numpy as np
img = cv2.imread("1.png")
(H,W,c) = img.shape
print("shape = {},{}".format(H,W))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_,thresh = cv2.threshold(gray,128,255,cv2.THRESH_BINARY_INV)
_,contours,_ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
res = np.zeros_like(img)
c = np.squeeze(contours[0])
# find the rectangle's corner points
x = sorted(c, key=lambda a:a[0])
left = x[0]
right = x[-1]
y= sorted(c, key=lambda a:a[1])
top = y[0]
bottom = y[-1]
cv2.circle(img, (left[0],left[1]), 4, (0, 0, 255), -1)
cv2.circle(img, (right[0],right[1]), 4, (0, 0, 255), -1)
cv2.circle(img, (top[0],top[1]), 4, (0, 0, 255), -1)
cv2.circle(img, (bottom[0],bottom[1]), 4, (0, 0, 255), -1)
#calculate rectangle's shape
roi_w = int(np.sqrt((top[0]-right[0])*(top[0]-right[0])+(top[1]-right[1])*(top[1]-right[1])))
roi_h = int(np.sqrt((top[0]-left[0])*(top[0]-left[0])+(top[1]-left[1])*(top[1]-left[1])))
pts1 = np.float32([top,right,left])
# keep the top coords and calculate new coords for left and right
new_top = top
new_right = [top[0] + roi_w, top[1]]
new_left = [top[0], top[1] + roi_h]
pts2 = np.float32([new_top,new_right,new_left])
#rotate
matrix = cv2.getAffineTransform(pts1, pts2)
result = cv2.warpAffine(img, matrix, (W,H))
cv2.drawContours(res, [contours[0]], 0, (0,255,0), 3)
# extract roi
roi = result[new_top[1]:new_left[1],new_top[0]:new_right[0]]
cv2.imshow("img",img)
cv2.imshow("result",result)
cv2.waitKey(0)
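If the corner extraction above (sorting points by x and y) turns out to be fragile for your images, a hedged alternative is cv2.minAreaRect, which returns the rotated rectangle directly:
# alternative corner extraction via the minimum-area rotated rectangle
rect = cv2.minAreaRect(c)            # ((cx, cy), (w, h), angle)
box = np.int0(cv2.boxPoints(rect))   # the 4 corners of the rotated rect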

Blur a region shaped like a rounded rectangle inside an Image

I want to blur a rectangle (with rounded corners) in an image using Python Pillow. I have already found a way to blur only a certain part of a picture.
img = Image.open('assets/images/image.png')
x, y = 300, 1600
cropped_img = img.crop((x, y, 1000, 2600))
blurred_img = cropped_img.filter(ImageFilter.GaussianBlur(20))
img.paste(blurred_img, (x, y))
img.save('assets/images/new.png')
img.show()
Furthermore, I found a method to add rounded corners to a rectangle (Transparency issues drawing a rectangle with rounded corners):
def round_corner(radius):
    corner = Image.new('RGBA', (radius, radius), (0, 0, 0, 0))
    draw = ImageDraw.Draw(corner)
    draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270)
    return corner

def round_rectangle(rectangle, radius):
    corner = round_corner(radius)
    rectangle.paste(corner, (0, 0))
    rectangle.paste(corner.rotate(90), (0, rectangle.size[1] - radius))
    rectangle.paste(corner.rotate(180), (rectangle.size[0] - radius, rectangle.size[1] - radius))
    rectangle.paste(corner.rotate(270), (rectangle.size[0] - radius, 0))
    return rectangle
Unfortunately, I can't find a way to combine these two pieces of code so that they work together.
My Example Image:
What you need to do is essentially create a mask for Image.paste() that only pastes those parts of the blurred image that lie inside the rounded rectangle.
import PIL
from PIL import Image
from PIL import ImageFilter
from PIL import ImageDraw
# when using an image as mask only the alpha channel is important
solid_fill = (50,50,50,255)
def create_rounded_rectangle_mask(rectangle, radius):
    # create mask image, all pixels set to translucent
    i = Image.new("RGBA", rectangle.size, (0, 0, 0, 0))
    # create corner
    corner = Image.new('RGBA', (radius, radius), (0, 0, 0, 0))
    draw = ImageDraw.Draw(corner)
    # added the fill=...; you only drew an outline, no fill
    draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=solid_fill)
    # max_x, max_y
    mx, my = rectangle.size
    # paste the corner, rotated as needed, using the
    # corner's alpha channel as the paste mask
    i.paste(corner, (0, 0), corner)
    i.paste(corner.rotate(90), (0, my - radius), corner.rotate(90))
    i.paste(corner.rotate(180), (mx - radius, my - radius), corner.rotate(180))
    i.paste(corner.rotate(270), (mx - radius, 0), corner.rotate(270))
    # draw both inner rectangles
    draw = ImageDraw.Draw(i)
    draw.rectangle([(radius, 0), (mx - radius, my)], fill=solid_fill)
    draw.rectangle([(0, radius), (mx, my - radius)], fill=solid_fill)
    return i
Mask:
Apply the mask to your image:
img = Image.open('pic.jpg')
x, y = 300, 160
radius = 75
cropped_img = img.crop((x, y, 600, 600))
# the filter removes the alpha, you need to add it again by converting to RGBA
blurred_img = cropped_img.filter(ImageFilter.GaussianBlur(20),).convert("RGBA")
# paste blurred, uses alphachannel of create_rounded_rectangle_mask() as mask
# only those parts of the mask that have a non-zero alpha gets pasted
img.paste(blurred_img, (x, y), create_rounded_rectangle_mask(cropped_img,radius))
img.save('new2.png')
img.show()
I changed some dimensions and paths. Your code lacked the imports; I completed it to a minimal verifiable complete example.

Python OpenCV - finding the circle (Sun) and the coordinates of the circle's center in a picture

I am new here and a bit of a newbie in programming.
I have one question. I have a picture of the Sun as a 16-bit bmp file. The picture looks like a white circle on a black background.
I want to find the circle and identify its center in x,y coordinates.
I have this script:
import cv
import numpy as np
orig = cv.LoadImage('sun0016.bmp')
grey_scale = cv.CreateImage(cv.GetSize(orig), 8, 1)
processed = cv.CreateImage(cv.GetSize(orig), 8, 1)
cv.Smooth(orig, orig, cv.CV_GAUSSIAN, 5, 5)
cv.CvtColor(orig, grey_scale, cv.CV_RGB2GRAY)
cv.Erode(grey_scale, processed, None, 10)
cv.Dilate(processed, processed, None, 10)
cv.Canny(processed, processed, 5, 70, 3)
cv.Smooth(processed, processed, cv.CV_GAUSSIAN, 15, 15)
storage = cv.CreateMat(orig.width, 1, cv.CV_32FC3)
cv.HoughCircles(processed, storage, cv.CV_HOUGH_GRADIENT, 1, 16.0, 10, 140)
for i in range(0, len(np.asarray(storage))):
    print "circle #%d" % i
    Radius = int(np.asarray(storage)[i][0][2])
    x = int(np.asarray(storage)[i][0][0])
    y = int(np.asarray(storage)[i][0][1])
    center = (x, y)
    print x, y
    cv.Circle(orig, center, 1, cv.CV_RGB(0, 255, 0), 1, 8, 0)
    cv.Circle(orig, center, Radius, cv.CV_RGB(255, 0, 0), 1, 8, 0)
    cv.Circle(processed, center, 1, cv.CV_RGB(0, 0, 0), -1, 8, 0)
    cv.Circle(processed, center, Radius, cv.CV_RGB(255, 0, 0), 3, 8, 0)
cv.ShowImage("sun0016", orig)
cv.ShowImage("processed", processed)
cv_key = cv.WaitKey(0)
When I run this I find the edge of the Sun, which is a circle with a center, but very inaccurately.
Do you know parameter settings for the HoughCircles module that give a precise circle search?
Thanks
The main problem here is finding a good range for your radius.
You may have a look at your picture and guess the radius. From the picture you have given, I would guess 180-220 would be a good range.
In the old cv API the radius bounds are the last two positional arguments, after param1 and param2, so your call would look like:
cv.HoughCircles(processed, storage, cv.CV_HOUGH_GRADIENT, 1, 16.0, 10, 140, 180, 220)
Just try to find good values for min_radius and max_radius and this should work fine.
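For anyone on the modern cv2 API, the equivalent call exposes minRadius and maxRadius as named parameters (the param1/param2 values here are guesses to be tuned):
import cv2
import numpy as np

img = cv2.imread("sun0016.bmp", cv2.IMREAD_GRAYSCALE)
img = cv2.GaussianBlur(img, (5, 5), 0)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1, minDist=16,
                           param1=100, param2=30, minRadius=180, maxRadius=220)
if circles is not None:
    x, y, r = circles[0, 0].astype(int)  # strongest circle: center and radius
    print(x, y, r)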
Here is the solution to my problem:
import numpy as np
import cv2

im = cv2.imread('sun0016.bmp')
height, width, depth = im.shape
print(height, width, depth)
thresh = 132
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(imgray, (5, 5), 0)
edges = cv2.Canny(blur, thresh, thresh * 2)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
cv2.drawContours(im, contours, -1, (0, 255, 0), -1)
# centroid_x = M10/M00 and centroid_y = M01/M00
M = cv2.moments(cnt)
x = int(M['m10'] / M['m00'])
y = int(M['m01'] / M['m00'])
print(x, y)
print(width / 2.0, height / 2.0)
print(width / 2 - x, height / 2 - y)
cv2.circle(im, (x, y), 1, (0, 0, 255), 2)
cv2.putText(im, "center of Sun contour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
cv2.circle(im, (width // 2, height // 2), 1, (255, 0, 0), 2)
cv2.putText(im, "center of image", (width // 2, height // 2), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0))
cv2.imshow('contour', im)
cv2.waitKey(0)
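One caveat (my note): cnt = contours[0] is not guaranteed to be the Sun's outline if noise produces extra contours; a safer pick is cnt = max(contours, key=cv2.contourArea).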
Thought I would chime in with an alternative solution in case anyone stumbles upon this question in the future.
The following function uses cv2.inRange instead of cv2.Canny, and cv2.minEnclosingCircle instead of cv2.moments. It selects the largest contour found by cv2.findContours by measuring the radius of candidates' minimum enclosing circle. This filtering helps reject false positives from e.g. watermarks or dust, but depending on your requirements you might want to go about this step differently or omit it entirely.
The function returns both the x,y coordinates as well as the radius of the detected disk, a requirement for the project I was working on.
import cv2

def find_disk(img, threshold=10):
    """Finds the center and radius of a single solar disk present in the supplied image.

    Uses cv2.inRange, cv2.findContours and cv2.minEnclosingCircle to determine the centre and
    radius of the solar disk present in the supplied image.

    Args:
        img (numpy.ndarray): greyscale image containing a solar disk against a background that is below `threshold`.
        threshold (int): threshold of min pixel value to consider as part of the solar disk

    Returns:
        tuple: center coordinates in x,y form (int)
        int: radius
    """
    if img is None:
        raise TypeError("img argument is None - check that the path of the loaded image is correct.")
    if len(img.shape) > 2:
        raise TypeError("Expected single channel (grayscale) image.")
    blurred = cv2.GaussianBlur(img, (5, 5), 0)
    mask = cv2.inRange(blurred, threshold, 255)
    img_mod, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Find and use the biggest contour
    x = y = None
    r = 0
    for cnt in contours:
        (c_x, c_y), c_r = cv2.minEnclosingCircle(cnt)
        # cv2.circle(img, (round(c_x), round(c_y)), round(c_r), (255, 255, 255), 2)
        if c_r > r:
            x = c_x
            y = c_y
            r = c_r
    # print("Number of contours found: {}".format(len(contours)))
    # cv2.imwrite("mask.jpg", mask)
    # cv2.imwrite("circled_contours.jpg", img)
    if x is None:
        raise RuntimeError("No disks detected in the image.")
    return (round(x), round(y)), round(r)

if __name__ == "__main__":
    image = cv2.imread("path/to/your/image.jpg")
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    center, radius = find_disk(img=gray, threshold=20)
    print("circle x,y: {},{}".format(center[0], center[1]))
    print("circle radius: {}".format(radius))
    # Output the original image with the detected disk superimposed
    cv2.circle(image, center, radius, (0, 0, 255), 1)
    cv2.rectangle(image, (center[0] - 2, center[1] - 2), (center[0] + 2, center[1] + 2), (0, 0, 255), -1)
    cv2.imwrite("disk_superimposed.jpg", image)
I have left in some commented debug statements that may come in handy if you find the need to tinker with this further.
You might want to use a higher threshold if your images include a lot of glare.
