Remove Contours OpenCV - python

My image:
I want to get this result: https://ibb.co/t8hNkM2
I could only get this far.
I was able to find the maximum contour:
import cv2

def img_counter_max(image_file: str):
    img = cv2.imread(image_file)
    # grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert the color model from BGR to grayscale
    cv2.waitKey(0)
    # binarize
    ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
    cv2.waitKey(0)
    # find contours
    ctrs, hier = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # sort contours
    sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
    # sorted_ctrs sorted(ctrs, key=cv2.contourArea, reverse=True)[0]
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in sorted_ctrs]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
    x, y, w, h = cv2.boundingRect(biggest_contour)
    roi = img[y:y + h, x:x + w]
    cv2.imwrite("C:\\Users\\dennn\\PycharmProjects\\untitled2\\imag\\roi1.jpg", roi)
    cv2.rectangle(img, (x, y), (x + w, y + h), (90, 255, 0), 2)
    resize_img = cv2.resize(img, (512, 512))
    cv2.namedWindow("Display frame", cv2.WINDOW_AUTOSIZE)
    cv2.imshow('Display frame', resize_img)
    cv2.waitKey(0)
How do I get the image I need?

I found that sorting by contourArea() gives the wrong result. It probably measures the area enclosed by the contour itself, not the area of the bounding rectangle the contour occupies - and that rectangle can be bigger.
I use boundingRect() to get the rectangle occupied by each contour and calculate its size as w*h, and sorting by that value puts the contours in the correct order.
I use a for-loop to display the image with the different rectangles and see which contour gives the expected region. This way I see that the third contour gives the expected region, so I can use [2] to get it and save it.
Eventually I would use the size to select the region whose w*h falls in some range (see the sketch after the output below):
expected_region_size - range < w*h < expected_region_size + range
Eventually I would use a for-loop which displays the image with the different rectangles to manually select which rectangle to save to a file.
import cv2

img = cv2.imread('image.jpg')

# grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert the color model from BGR to grayscale

# binarize
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

# find contours
ctrs, hier = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# find rect and area - and create items [contour, rect, area] - but sorting by area gives wrong results
#items = [[ctr, cv2.boundingRect(ctr), cv2.contourArea(ctr)] for ctr in ctrs]

# find rect - and create items [contour, rect]
items = [[ctr, cv2.boundingRect(ctr)] for ctr in ctrs]

# find rect's size and create items [contour, rect, size]
items = [[ctr, rect, rect[2]*rect[3]] for ctr, rect in items]

# sort by size
items = sorted(items, key=lambda x: x[2], reverse=True)

for index, item in enumerate(items[:5]):
    contour = item[0]
    x, y, w, h = item[1]
    size = item[2]
    print(index, '->', size, '(', x, y, w, h, ')')

    img_copy = img.copy()
    cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 0, 255), 15)

    resize_img = cv2.resize(img_copy, (512, 512))
    cv2.imshow('frame', resize_img)
    cv2.waitKey(0)

cv2.destroyAllWindows()

# --- save image ---

item = items[2]

contour = item[0]
x, y, w, h = item[1]
size = item[2]

img = img[y:y+h, x:x+w]
cv2.imwrite('output.jpg', img)
Preview:
Output:
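The size-range selection mentioned above would replace the hard-coded items[2] in the save section. A minimal sketch, assuming img still holds the full image at that point and that expected_region_size and tolerance are hypothetical values you would tune for your own image:

# pick the first rectangle whose w*h is close to the expected size
expected_region_size = 100000  # hypothetical value - measure it once on a known-good image
tolerance = 20000              # hypothetical value

selected = None
for contour, rect, size in items:
    if expected_region_size - tolerance < size < expected_region_size + tolerance:
        selected = rect
        break

if selected is not None:
    x, y, w, h = selected
    cv2.imwrite('output.jpg', img[y:y+h, x:x+w])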

The code finds characters well, but outputs them out of order.
I found a piece of code that should solve this problem, but I can't get it to work -
after finding the contours using contours = cv2.findContours(), use -
boundary = []
for c, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    boundary.append((x, y, w, h))
count = np.asarray(boundary)
max_width = np.sum(count[::, (0, 2)], axis=1).max()
max_height = np.max(count[::, 3])
nearest = max_height * 1.4
ind_list = np.lexsort((count[:, 0], count[:, 1]))
c = count[ind_list]
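For reference, here is a minimal self-contained sketch of that reading-order idea: snap each bounding box to a row using a row-height tolerance, then sort by (row, x). It assumes OpenCV 4.x (findContours() returns two values) and reuses the output.jpg saved above; the 1.4 factor plays the same role as nearest in the snippet.

import cv2
import numpy as np

img = cv2.imread('output.jpg', 0)
_, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# bounding boxes of all character contours
rects = [cv2.boundingRect(cnt) for cnt in contours]

# rows are about one character tall, so use 1.4 * max height as the row size
row_height = max(h for (x, y, w, h) in rects) * 1.4

# sort by row index first, then left to right within the row
rects.sort(key=lambda r: (int(r[1] / row_height), r[0]))

for i, (x, y, w, h) in enumerate(rects):
    print(i, '->', (x, y, w, h))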
Find symbols
img = "C:\\Users\\dennn\\PycharmProjects\\untitled2\\output.jpg" dir = os.curdir
path = os.path.join(dir,img)
raw_image = cv2.imread(path,0)
cv2.imshow("original",raw_image)
plt.subplot(2,3,1)
plt.title("Original")
plt.imshow(raw_image,'gray')
plt.xticks([]),plt.yticks([]);
sm_image = cv2.blur(raw_image,(8,8))
cv2.imshow("smoothed",sm_image)
plt.subplot(2,3,2)
plt.title("Smoothed")
plt.imshow(sm_image,'gray')
plt.xticks([]),plt.yticks([]);
#cv2.imshow("smoothed",sm_image)
ret,bw_image = cv2.threshold(sm_image,160,255,cv2.THRESH_BINARY_INV)
cv2.imshow("thresholded",bw_image)
plt.subplot(2,3,3)
plt.title("Thresholded")
plt.imshow(bw_image,'gray')
plt.xticks([]),plt.yticks([]);
kernel = np.ones((4,4),np.uint8)
er_image = cv2.erode(bw_image,kernel)
cv2.imshow("eroded",er_image)
plt.subplot(2,3,4)
plt.title("Eroded")
plt.imshow(er_image,'gray')
plt.xticks([]),plt.yticks([]);
kernel = np.ones((2,2),np.uint8)
di_image = cv2.dilate(er_image,kernel)
cv2.imshow("dilated",di_image)
plt.title("Dilated")
plt.subplot(2,3,5)
plt.imshow(di_image,'gray')
plt.xticks([]),plt.yticks([]);
mo_image = di_image.copy()
contour0 =
cv2.findContours(mo_image.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt,3,True) for cnt in contour0[0]]
maxArea = 0
rect = []
for ctr in contours:
maxArea = max(maxArea, cv2.contourArea(ctr))
if img == "C:\\Users\\dennn\\PycharmProjects\\untitled2\\output.jpg":
areaRatio = 0.05
for ctr in contours:
if cv2.contourArea(ctr) > maxArea * areaRatio:
rect.append(cv2.boundingRect(cv2.approxPolyDP(ctr, 1, True)))
symbols = []
for i in rect:
x = i[0]
y = i[1]
w = i[2]
h = i[3]
p1 = (x, y)
p2 = (x + w, y + h)
cv2.rectangle(mo_image, p1, p2, 255, 2)
image = cv2.resize(mo_image[y:y + h, x:x + w], (32, 32))
symbols.append(image.reshape(1024, ).astype("uint8"))
testset_data = np.array(symbols)
cv2.imshow("segmented", mo_image)
plt.subplot(2, 3, 6)
plt.title("Segmented")
plt.imshow(mo_image, 'gray')
plt.xticks([]), plt.yticks([]);
# plt.show()
# garbage collection
cv2.destroyAllWindows()
plt.close()
# show glyphs
for i in range(len(symbols)):
image = np.zeros(shape=(64,64))
image[15:47,15:47] = symbols[i].reshape((32,32))
cv2.imshow("sym",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
plt.close()

Related

Draw bounding box around a sequence of contours

I am trying to put a bounding box around a sequence of contours like the following: a top contour and a bottom contour.
image1
I wrote the following basic code and this was the result (image2):
import cv2
import numpy as np

img = cv2.imread('light2.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 0, 46), (179, 255, 255))

kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
try: hierarchy = hierarchy[0]
except: hierarchy = []

height, width, _ = img.shape
min_x, min_y = width, height
max_x = max_y = 0

for contour, hier in zip(contours, hierarchy):
    (x, y, w, h) = cv2.boundingRect(contour)
    min_x, max_x = min(x, min_x), max(x + w, max_x)
    min_y, max_y = min(y, min_y), max(y + h, max_y)
    if w > 80 and h > 80:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

if max_x - min_x > 0 and max_y - min_y > 0:
    cv2.rectangle(img, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)
I am kind of struggling with the logic when there are other contours in the environment, like in image3, and I still want to put a bounding box around only the top and bottom contours (something like image4). But with the current code, it puts the bounding box like this: image5. Any help is appreciated.
You need to explain to the computer what you want using the tools that you have. I suggest: threshold -> connectedComponents -> filter out wrong bboxes -> find the 2 bboxes with the same X position and roughly the same area (not implemented here, see the sketch after the code) -> union of the bboxes.
Code example:
import cv2
import numpy as np

def drawStats(img: np.array, arr: np.array):
    for i in range(arr.shape[0]):
        w = arr[i, cv2.CC_STAT_WIDTH]
        h = arr[i, cv2.CC_STAT_HEIGHT]
        l = arr[i, cv2.CC_STAT_LEFT]
        t = arr[i, cv2.CC_STAT_TOP]
        cv2.rectangle(img, (l, t), (l + w, t + h), (20, 0, 255), 3)

def filterStats(arr: np.array) -> np.array:
    result = []
    for i in range(arr.shape[0]):
        w = arr[i, cv2.CC_STAT_WIDTH]
        h = arr[i, cv2.CC_STAT_HEIGHT]
        if w > h * 4:
            result.append(arr[i])
    result = np.array(result)
    return result

img = cv2.imread("/Users/alex/Downloads/exo7R.jpg", cv2.IMREAD_GRAYSCALE)
_, img2 = cv2.threshold(img, 230, 255, cv2.THRESH_BINARY)
comp = cv2.connectedComponentsWithStats(img2, connectivity=8)

debugImg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
stats = filterStats(comp[2])
drawStats(debugImg, stats)

cv2.imshow("threshold", img2)
cv2.imshow("found components", debugImg)
cv2.waitKey()
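The "find the 2 bboxes with the same X position and roughly the same area, then union them" step is not implemented above. A minimal sketch of how it might look, reusing stats and debugImg from the example; max_dx and max_area_ratio are assumed thresholds you would tune:

def unionMatchingPair(arr: np.array, max_dx=20, max_area_ratio=1.5):
    # return the union of the first pair of components whose left edges are
    # close together and whose areas are similar
    for i in range(arr.shape[0]):
        for j in range(i + 1, arr.shape[0]):
            dx = abs(int(arr[i, cv2.CC_STAT_LEFT]) - int(arr[j, cv2.CC_STAT_LEFT]))
            a1 = int(arr[i, cv2.CC_STAT_AREA])
            a2 = int(arr[j, cv2.CC_STAT_AREA])
            if dx <= max_dx and max(a1, a2) <= max_area_ratio * min(a1, a2):
                l = min(arr[i, cv2.CC_STAT_LEFT], arr[j, cv2.CC_STAT_LEFT])
                t = min(arr[i, cv2.CC_STAT_TOP], arr[j, cv2.CC_STAT_TOP])
                r = max(arr[i, cv2.CC_STAT_LEFT] + arr[i, cv2.CC_STAT_WIDTH],
                        arr[j, cv2.CC_STAT_LEFT] + arr[j, cv2.CC_STAT_WIDTH])
                b = max(arr[i, cv2.CC_STAT_TOP] + arr[i, cv2.CC_STAT_HEIGHT],
                        arr[j, cv2.CC_STAT_TOP] + arr[j, cv2.CC_STAT_HEIGHT])
                return l, t, r - l, b - t
    return None

box = unionMatchingPair(stats)
if box is not None:
    x, y, w, h = box
    cv2.rectangle(debugImg, (x, y), (x + w, y + h), (255, 0, 0), 3)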

Python - How to process a binary image to align sparse letters in a row

I'm trying to align letters from an image in order to obtain the full word with tesseract OCR:
import cv2
import numpy as np

img = cv2.imread("captcha.png", 0)
h1, w1 = img.shape
img = cv2.resize(img, (w1*5, h1*5))

# Threshold the image and find the contours
_, thresh = cv2.threshold(img, 123, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(
    thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Create a white background image to paste the letters on
bg = np.zeros((200, 200), np.uint8)
bg[:] = 255

left = 5
# Iterate through the contours
for contour, h in zip(contours, hierarchy[0]):
    # Ignore inside parts (circle in a 'p' or 'b')
    if h[3] == -1:
        # Get the bounding rectangle
        x, y, w, h = cv2.boundingRect(contour)
        # Paste it onto the background
        bg[5:5+h, left:left+w] = img[y:y+h, x:x+w]
        left += (w + 5)

cv2.imshow('thresh', bg)
cv2.waitKey()
And the image that I want to process is this one
However, I got this message:
Traceback (most recent call last):
File ".\img.py", line 24, in <module>
bg[5:5+h, left:left+w] = img[y:y+h, x:x+w]
ValueError: could not broadcast input array from shape (72,750) into shape (72,195)
Just with tesseract OCR I got "acba" without the zero and four so I need to reorder the letters to obtain it. Any suggestions?
You are trying to put a bigger image into a smaller area - but the shapes have to match.
You can get the shapes of both regions, take the min() of the widths and heights, and use those:
h1, w1 = bg[5:5+h, left:left+w].shape
h2, w2 = img[y:y+h, x:x+w].shape
min_h = min(h1, h2)
min_w = min(w1, w2)
bg[5:5+min_h, left:left+min_w] = img[y:y+min_h, x:x+min_w]
EDIT:
OR maybe you should use x,y instead of 5 and left (also 5)
bg[y:y+h, x:x+w] = img[y:y+h, x:x+w]
And maybe you should create bg with the same size as img (after resizing)
h1, w1 = img.shape
bg = np.zeros((h1, w1), np.uint8)
EDIT:
Full working code with other changes.
I read the image in color to see what contours it found, because it seems it finds something different than you may expect.
import cv2
import numpy as np

print('CV:', cv2.__version__)

img_color = cv2.imread("ZzSgt.png", cv2.IMREAD_UNCHANGED)

h, w = img_color.shape[:2]
print('original shape (W,H):', w, h)

img_color = cv2.resize(img_color, (w*5, h*5))

h, w = img_color.shape[:2]
print('resized shape (W,H) :', w, h)

img = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)

# Threshold the image and find the contours
_, thresh = cv2.threshold(img, 123, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Create a white background image to paste the letters on
bg = np.full((h, w), 255, np.uint8)

# Create image to display contours
img_contours = np.full((h, w, 3), 255, np.uint8)

left = 5

# Iterate through the contours
for contour, h in zip(contours, hierarchy[0]):
    # Ignore inside parts (circle in a 'p' or 'b')
    if h[3] == -1:
        # Get the bounding rectangle
        x, y, w, h = cv2.boundingRect(contour)
        print('contour (X,Y,W,H):', x, y, w, h)

        # Paste it onto the background
        h1, w1 = bg[5:5+h, left:left+w].shape
        h2, w2 = img[y:y+h, x:x+w].shape
        min_h = min(h1, h2)
        min_w = min(w1, w2)
        bg[5:5+min_h, left:left+min_w] = img[y:y+min_h, x:x+min_w]
        left += (w + 5)

        # Copy color regions and draw contours
        img_contours[y:y+h, x:x+w] = img_color[y:y+h, x:x+w]
        img_contours = cv2.drawContours(img_contours, [contour], 0, (0, 0, 255))

cv2.imshow('contours', img_contours)
cv2.imshow('background', bg)
cv2.waitKey()
cv2.destroyAllWindows()
contours
background
EDIT:
I get a better result if I invert the image with img = ~img and change the threshold from 123 to 30.
thresh
contours
background (and now I see it could even have a size of (75, 255), or (100, 300) to be safer)
import cv2
import numpy as np

print('CV:', cv2.__version__)

#img_color = cv2.imread("captcha.png", cv2.IMREAD_UNCHANGED)
img_color = cv2.imread("ZzSgt.png", cv2.IMREAD_UNCHANGED)

h, w = img_color.shape[:2]
print('original shape (W,H):', w, h)

img_color = cv2.resize(img_color, (w*5, h*5))

h, w = img_color.shape[:2]
print('resized shape (W,H) :', w, h)

img = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)
img = ~img

# Threshold the image and find the contours
_, thresh = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY_INV)

cv2.imshow('thresh', thresh)

contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Create a white background image to paste the letters on
bg = np.full((h, w), 255, np.uint8)

# Create image to display contours
img_contours = np.full((h, w, 3), 255, np.uint8)

left = 5

# Iterate through the contours
for contour, h in zip(contours, hierarchy[0]):
    # Ignore inside parts (circle in a 'p' or 'b')
    if h[3] == -1:
        # Get the bounding rectangle
        x, y, w, h = cv2.boundingRect(contour)
        print('contour (X,Y,W,H):', x, y, w, h)

        # Paste it onto the background
        h1, w1 = bg[5:5+h, left:left+w].shape
        h2, w2 = img[y:y+h, x:x+w].shape
        min_h = min(h1, h2)
        min_w = min(w1, w2)
        bg[5:5+min_h, left:left+min_w] = img[y:y+min_h, x:x+min_w]
        left += (w + 5)

        # Copy (color) region and draw contour
        img_contours[y:y+h, x:x+w] = img_color[y:y+h, x:x+w]
        img_contours = cv2.drawContours(img_contours, [contour], 0, (0, 0, 255))

cv2.imshow('contours', img_contours)
cv2.imshow('background', bg)
cv2.waitKey()
cv2.destroyAllWindows()

How to extract diagram from an image?

I've used a contour-based approach but it's detecting so many contours. How can I extract my ROI contour?
import cv2
import numpy as np

image = cv2.imread('ULTI.png')
original = image.copy()
#cv2.imwrite("bg.png", bg)  # bg is not defined at this point
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
canny = cv2.Canny(blurred, 120, 255, 1)
kernel = np.ones((5, 5), np.uint8)
dilate = cv2.dilate(canny, kernel, iterations=1)

# Find contours
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

# Iterate through contours and filter for ROI
image_number = 0
cnts = max(cnts, key=cv2.contourArea)
print("no ", len(cnts))
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)
    ROI = original[y:y+h, x:x+w]
    #cv2.imwrite("ROI_{}.png".format(image_number), ROI)
    image_number += 1
You can get your ROI by specifying that you only want to use the contour that has the greatest area, that is, assuming your diagram produces a contour with an area greater than the rest of the components in your image.
Here is an example:
import cv2

def preprocess(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.GaussianBlur(img_gray, (5, 5), 1)
    img_canny = cv2.Canny(img_blur, 50, 50)
    return img_canny

def get_roi(img, pad=3):
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    max_area = 0
    for cnt in contours:
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        rect_area = w * h
        if rect_area > max_area:
            max_area = rect_area
            dim = x, y, w, h
    if max_area:
        x, y, w, h = dim
        return x - pad, y - pad, w + pad * 2, h + pad * 2

img = cv2.imread("ULTI.png")
img_processed = preprocess(img)
x, y, w, h = get_roi(img_processed)

cv2.imshow("Image", img[y:y + h, x:x + w])
cv2.waitKey(0)
Output:
Explanation:
Import the necessary module(s):
import cv2
Define a function that will take in an image, and return a processed version of the image that will allow python to properly detect the necessary contours (you can tweak the values to further meet your needs):
def preprocess(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.GaussianBlur(img_gray, (5, 5), 1)
    img_canny = cv2.Canny(img_blur, 50, 50)
    return img_canny
Now, let's see how we can define a function that will retrieve the ROI of the image. First, define it so that a processed image array and a pad amount (optional) can be passed in as parameters:
def get_roi(img, pad=3):
Find the contours of the processed image, and define a variable to store the greatest area of the contours:
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
max_area = 0
Use a for loop to loop through the contours, and find the area of the contour of each iteration of the loop:
for cnt in contours:
    peri = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
    area = cv2.contourArea(approx)
Use an if statement to check whether the area is greater than the variable defined to store the greatest area. If the area of the contour in that iteration is greater, update the variable to the new area, and also save the contour of that iteration to a variable:
if area > max_area:
    max_area = area
    max_cnt = approx
After the for loop, if the max_area variable doesn't equal 0, then max_cnt has also been defined. Use cv2.boundingRect() to get the x, y, w and h properties:
if max_area:
    x, y, w, h = cv2.boundingRect(max_cnt)
    return x - pad, y - pad, w + pad * 2, h + pad * 2
Finally, after reading the image into a variable, you can utilize the 2 functions we defined, and display the resulting image:
img = cv2.imread("ULTI.png")
img_processed = preprocess(img)
x, y, w, h = get_roi(img_processed)
cv2.imshow("Image", img[y:y + h, x:x + w])
cv2.waitKey(0)
Note: The code likely will not work for all diagrams. But again, you can tweak the values in the preprocess function to meet your needs.

How could I crop an image at the exact location where the numbers are situated?

I have my code prototype:
import cv2
import numpy as np

img = cv2.imread('/home/follia/Pictures/scan.jpg')

h, w, k = img.shape
M = cv2.getRotationMatrix2D((w / 2, h / 2), 15.5, 1)
img = cv2.warpAffine(img, M, (w, h))

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 200, apertureSize=3)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 80)
for x in range(0, len(lines)):
    for x1, y1, x2, y2 in lines[x]:
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv2.imshow("origin", img)
cv2.waitKey(0)
Original image:
and it returns this image:
And I need this image to be cropped to show only the numbers:
Could you please help me out: how could I cut out this region?
And then, how could I recognize the numbers and extract them from the image as text?
Try this:
The basic idea of this solution is to get the contours of the image after performing threshold(), and then detect the biggest contour among them.
INPUT:
CODE:
import cv2

image = cv2.imread("test.jpg", 1)

h, w, k = image.shape
M = cv2.getRotationMatrix2D((w / 2, h / 2), 15.5, 1)
image = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_LINEAR,
                       borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))

img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
threshold = 80
cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY, img)
cv2.bitwise_not(img, img)

cv2.imshow("Result", img)
cv2.waitKey(0)

im2, contours, hier = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

if len(contours) != 0:
    # find the biggest area
    c = max(contours, key=cv2.contourArea)

    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    crop_img = image[y:y + h, x:x + w]
    cv2.imshow("Result", crop_img)
    cv2.waitKey(0)

cv2.imshow("Result", image)
cv2.waitKey(0)
OUTPUT:
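The question also asks how to read the numbers out as text; the code above does not cover that part. A minimal sketch using pytesseract on the cropped region, assuming the Tesseract binary and the pytesseract package are installed and crop_img comes from the code above:

import pytesseract

# treat the crop as a single text line and allow digits only
config = '--psm 7 -c tessedit_char_whitelist=0123456789'
text = pytesseract.image_to_string(crop_img, config=config)
print(text.strip())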

Find the center line between object's edges

Today I am trying to identify the edge of an object.
I got a great result by doing this.
import cv2
img = cv2.imread("0.png")
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img2 = cv2.equalizeHist(img2)
img2 = cv2.GaussianBlur(img2, (7, 7), 0)
edges = cv2.Canny(img2, 180, 300)
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
and the image looks like this (it's an x-ray of a weld):
My ultimate goal is to find the center line between the edges
(the collection of (MaxY+MinY)/2 for each X).
The ideal result should look like this: (sorry for the bad hand drawing)
Could anyone let me know how should I do this?
Thank you very much.
First of all you should prepare your image so that you can find your single contour (threshold, histogram equalization, etc.). The contour gives you a set of (x, y) coordinates that represent it, so the first step is to separate the upper edge from the bottom one (split it in half). In my example I did it using the moments of the contour (its centroid), but note that this will not work for curved lines! You would have to write an algorithm to divide the upper side from the lower side. Once you have done this you can make two lists, each containing one element per x coordinate. Then simply calculate the middle and draw a point on the image.
Example code:
import cv2
import numpy as np

img = cv2.imread('centerline.png')
mask = np.zeros((img.shape[:2]), np.uint8)
h2, w2 = img.shape[:2]

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
equ = cv2.equalizeHist(gray)
_, thresh = cv2.threshold(equ, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

_, contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    print(h, w)
    if h < 30 and w > 270:
        cv2.drawContours(mask, [cnt], 0, (255, 255, 255), -1)

res = cv2.bitwise_and(img, img, mask=mask)
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
blur = cv2.GaussianBlur(thresh, (5, 5), 0)

_, contours, hierarchy = cv2.findContours(blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

cnt = max(contours, key=cv2.contourArea)
M = cv2.moments(cnt)
cy = int(M['m01'] / M['m00'])

mask = np.zeros((img.shape[:2]), np.uint8)
cv2.drawContours(mask, [cnt], 0, (255, 255, 255), -1)

up = []
down = []

for i in cnt:
    x = i[0][0]
    y = i[0][1]
    if x == 0:
        pass
    elif x == w2:
        pass
    else:
        if y > cy:
            down.append(tuple([x, y]))
        elif y < cy:
            up.append(tuple([x, y]))
        else:
            pass

up.sort(key=lambda x: x[0])
down.sort(key=lambda x: x[0])

up_1 = []
down_1 = []

for i in range(0, len(up)-1):
    if up[i][0] != up[i+1][0]:
        up_1.append(up[i])
    else:
        pass

for i in range(0, len(down)-1):
    if down[i][0] != down[i+1][0]:
        down_1.append(down[i])
    else:
        pass

lines = zip(up_1, down_1)

for i in lines:
    x1 = i[0][0]
    y1 = i[0][1]
    x2 = i[1][0]
    y2 = i[1][1]
    middle = np.sqrt(((x2-x1)**2) + ((y2-y1)**2))
    cv2.circle(img, (x1, y1 + int(middle/2)), 1, (0, 0, 255), -1)

cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
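A shorter, column-wise sketch of the same (MaxY+MinY)/2 idea, working directly on the binary mask instead of the contour points. It reuses mask and img from the code above and assumes the weld seam is the only white region left in mask:

# for every column that contains mask pixels, mark the midpoint between
# the topmost and bottommost white pixel
ys, xs = np.where(mask > 0)
for x in np.unique(xs):
    col = ys[xs == x]
    mid = int((col.min() + col.max()) / 2)
    cv2.circle(img, (int(x), mid), 1, (0, 0, 255), -1)

cv2.imshow('centerline', img)
cv2.waitKey(0)
cv2.destroyAllWindows()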
