Extracting polygons from superimposed images - python

I have 2 images composed of triangles. When I add them together, new polygons are formed.
Is it possible to determine the polygons when these two images are superimposed?
Should I aim for processing the resultant image or can I determine it from the locations of input triangles?
Note: I know the exact (x, y) locations of the vertices and triangles of the 1st and 2nd images.
Clockwise coordinates of the triangles (rectangle width 512 pixels, height 256 pixels):
triangle a1 = [0,0] [512,128] [0,256]
triangle a2 = [0,0] [512,0] [512,128]
triangle a3 = [0,256] [512,128] [512,256]
triangle b1 = [0,0] [200,256] [0,256]
triangle b2 = [0,0] [150,0] [200,256]
triangle b3 = [150,0] [512,0] [200,256]
triangle b4 = [512,0] [512,256] [200,256]

I went for a visual rather than analytical approach:
draw the "a" triangles in your left picture filled with 1, 2, 3
draw the "b" triangles in your right picture filled with 100, 200, 300
add the left and right pictures
find the unique colours in the result, each will correspond to a polygon and its value will tell you which two initial triangles intersect there
This code is all just set-up for the left image:
#!/usr/bin/env python3
# https://stackoverflow.com/q/68938410/2836621
import cv2
import numpy as np
# Make black canvas for left image and right image
left = np.zeros((256,512),np.uint16)
right = np.zeros((256,512),np.uint16)
# Draw "a" triangles filled with 1, 2, 3 onto left image
a1 = np.array([[0,0],[512,128],[0,256]], np.int32).reshape((-1,1,2))
cv2.fillPoly(left,[a1],(1),8)
a2 = np.array([[0,0],[512,0],[512,128]], np.int32).reshape((-1,1,2))
cv2.fillPoly(left,[a2],(2),8)
a3 = np.array([[0,256],[512,128],[512,256]], np.int32).reshape((-1,1,2))
cv2.fillPoly(left,[a3],(3),8)
cv2.imwrite('left.png', left)
Note that I contrast-stretched the left image below so you can see it:
This code is all just set-up for the right image:
# Draw "b" triangles filled with 100, 200, 300 onto right image
b1 = np.array([[0,0],[200,256],[0,256]], np.int32).reshape((-1,1,2))
cv2.fillPoly(right,[b1],(100),8)
b2 = np.array([[0,0],[150,0],[200,256]], np.int32).reshape((-1,1,2))
cv2.fillPoly(right,[b2],(200),8)
b3 = np.array([[150,0],[512,0],[200,256]], np.int32).reshape((-1,1,2))
cv2.fillPoly(right,[b3],(300),8)
b4 = np.array([[512,0],[512,256],[200,256]], np.int32).reshape((-1,1,2))
cv2.fillPoly(right,[b4],(400),8)
cv2.imwrite('right.png', right)
Note that I contrast-stretched the right image below so you can see it:
And the following code is the actual answer:
# Add the two images
result = left + right
cv2.imwrite('result.png', result)
# Find the unique colours in the image - that is the number of polygons
colours = np.unique(result)
print(f'Colours in result: {colours}')
# Iterate over the polygons, making one at a time black on a grey background
for c in colours:
    masked = np.where(result==c, 0, 128).astype(np.uint8)   # uint8 so imwrite accepts it
    cv2.imwrite(f'result-{c}.png', masked)
Sample Output
Colours in result: [101 103 201 202 203 301 302 303 401 402 403]
Output Images
Hopefully you can see that, for example, colour 402 in the output image is where the triangle filled with 2 intersects the triangle filled with 400, and so on.
Note that you can run findContours() on each masked polygon to get its vertices and area, if you want to.
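For example, a minimal sketch of that step, assuming result and colours from the code above are still in scope:
# Sketch: recover the vertices and area of each polygon from its mask
for c in colours:
    mask = 255 * np.uint8(result == c)
    cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in cnts:
        approx = cv2.approxPolyDP(cnt, 1, True)   # merge near-collinear points
        print(f'colour {c}: area={cv2.contourArea(cnt)}, vertices={approx.reshape(-1,2).tolist()}')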

For each pair of triangles, you can use the Sutherland-Hodgman algorithm to find the polygon formed by their intersection.
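For example, here is a minimal pure-Python sketch of Sutherland-Hodgman clipping, assuming convex polygons listed in the question's winding order; vertices lying exactly on a clip edge may be emitted twice, so deduplicate if that matters:
def clip_polygon(subject, clip):
    # Clip convex polygon `subject` against convex polygon `clip`;
    # both are lists of (x, y) tuples in consistent winding order.
    def inside(p, a, b):
        # p is on the inner side of the directed edge a->b
        return (b[0]-a[0])*(p[1]-a[1]) - (b[1]-a[1])*(p[0]-a[0]) >= 0
    def intersect(p, q, a, b):
        # Intersection of segment p-q with the infinite line through a-b
        dx1, dy1 = q[0]-p[0], q[1]-p[1]
        dx2, dy2 = b[0]-a[0], b[1]-a[1]
        t = ((a[0]-p[0])*dy2 - (a[1]-p[1])*dx2) / (dx1*dy2 - dy1*dx2)
        return (p[0]+t*dx1, p[1]+t*dy1)
    output = list(subject)
    for i in range(len(clip)):
        a, b = clip[i], clip[(i+1) % len(clip)]
        inputs, output = output, []
        for j in range(len(inputs)):
            p, q = inputs[j], inputs[(j+1) % len(inputs)]
            if inside(q, a, b):
                if not inside(p, a, b):
                    output.append(intersect(p, q, a, b))
                output.append(q)
            elif inside(p, a, b):
                output.append(intersect(p, q, a, b))
    return output
# e.g. the polygon where triangles a1 and b1 overlap:
a1 = [(0, 0), (512, 128), (0, 256)]
b1 = [(0, 0), (200, 256), (0, 256)]
print(clip_polygon(a1, b1))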

If you can calculate the constructed polygons with a mathematical model, you can probably achieve the desired output with better accuracy.
The method I suggest is not very accurate, but it may help you.
Below is an algorithm that helps you extract and store the polygons separately. From this point on, you need to find and arrange the corners of each polygon (this part is not included in the algorithm).
import sys
import cv2
import numpy as np
# Load images
i1 = cv2.imread(sys.path[0]+'/rect1.jpg', cv2.IMREAD_GRAYSCALE)
i2 = cv2.imread(sys.path[0]+'/rect2.jpg', cv2.IMREAD_GRAYSCALE)
# Make a copy of images
r1 = i1.copy()
r2 = i2.copy()
# Get size of image
H, W = i1.shape[:2]
# Convert images to black/white
i1 = cv2.threshold(i1, 90, 255, cv2.THRESH_BINARY)[1]
i2 = cv2.threshold(i2, 90, 255, cv2.THRESH_BINARY)[1]
# Mix images together and make a copy
i1[np.where(i2 != 255)] = 0
mix = i1.copy()
# Try to clean up the dividing lines
mix = cv2.GaussianBlur(mix, (3, 3), 2)
mix = cv2.threshold(mix, 225, 255, cv2.THRESH_BINARY)[1]
# Make a mask to find the center of each polygon
msk = i1.copy()
msk = cv2.erode(msk, np.ones((6, 6)))
msk = cv2.medianBlur(msk, 3)
# Fill the mask area with black color
cv2.floodFill(msk, np.zeros((H+2, W+2), np.uint8), (0, 0), 0)
# Find the position of each polygon
pos = msk.copy()
cnts, _ = cv2.findContours(pos, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
pos = cv2.cvtColor(pos, cv2.COLOR_GRAY2BGR)
c, i = 0, 0
for cnt in cnts:
    c += 25
    i += 1
    x, y, w, h = cv2.boundingRect(cnt)
    center = (x+w//2, y+h//2)
    cv2.rectangle(pos, (x, y), (x+w, y+h), (c, 220, 255-c), 1)
    # Extract each polygon in a separate image
    cur = mix.copy()
    cv2.floodFill(cur, np.zeros((H+2, W+2), np.uint8), (0, 0), 0)
    cv2.floodFill(cur, np.zeros((H+2, W+2), np.uint8), center, 127)
    cur[np.where(cur == 255)] = 30
    cur[np.where(cur == 127)] = 255
    cv2.imwrite(sys.path[0]+f'/tri_{i}.jpg', cur)
    if c >= 255:
        c = 0
# Print the number of polygons
print(len(cnts))
# Change type of images
i1 = cv2.cvtColor(i1, cv2.COLOR_GRAY2BGR)
r1 = cv2.cvtColor(r1, cv2.COLOR_GRAY2BGR)
r2 = cv2.cvtColor(r2, cv2.COLOR_GRAY2BGR)
msk = cv2.cvtColor(msk, cv2.COLOR_GRAY2BGR)
mix = cv2.cvtColor(mix, cv2.COLOR_GRAY2BGR)
# Save the output
top = np.hstack((r1, r2, i1))
btm = np.hstack((mix, msk, pos))
cv2.imwrite(sys.path[0]+'/rect_out.jpg', np.vstack((top, btm)))
Steps of making masks to find the coordinates and center of each polygon.
As indicated, each polygon is stored as a separate image. From here you have to think about the next step: finding and arranging the corners of each polygon in each image.
I emphasize: in my opinion, this method is not rigorous and is not accurate enough. But if you do not find a better solution, it may be useful to you.
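A rough, hedged sketch of that missing corner step, assuming one of the tri_*.jpg files saved above (the target polygon is filled white, everything else dark):
# Sketch: find and order the corners of one extracted polygon image
tri = cv2.imread(sys.path[0]+'/tri_1.jpg', cv2.IMREAD_GRAYSCALE)
tri = cv2.threshold(tri, 200, 255, cv2.THRESH_BINARY)[1]
cnts, _ = cv2.findContours(tri, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = max(cnts, key=cv2.contourArea)               # the polygon blob
peri = cv2.arcLength(cnt, True)
corners = cv2.approxPolyDP(cnt, 0.01 * peri, True).reshape(-1, 2)
print(corners)                                     # vertices in boundary order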
Update
I drew this hypothetical image with graphics software and updated the code. I think it works well. You can adjust the parameters according to your needs. The final image was not supposed to be in color; I just wanted to show that it works properly.
import sys
import cv2
import numpy as np
from tqdm import tqdm
import random
# Load images
mix = cv2.imread(sys.path[0]+'/im.png', cv2.IMREAD_GRAYSCALE)
im = mix.copy()
H, W = mix.shape[:2]
# Try to clean up the dividing lines
mix = cv2.GaussianBlur(mix, (3, 3), 2)
mix = cv2.threshold(mix, 225, 255, cv2.THRESH_BINARY)[1]
# Make a mask to find the center of each polygon
msk = mix.copy()
msk = cv2.erode(msk, np.ones((3, 3)))
msk = cv2.medianBlur(msk, 3)
# Fill the mask area with black color
cv2.floodFill(msk, np.zeros((H+2, W+2), np.uint8), (0, 0), 0)
# Find the position of each polygon
pos = msk.copy()
out = msk.copy()
out[:] = 0
out = cv2.cvtColor(out, cv2.COLOR_GRAY2BGR)
cnts, _ = cv2.findContours(pos, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
pos = cv2.cvtColor(pos, cv2.COLOR_GRAY2BGR)
c, i = 0, 0
for cnt in tqdm(cnts):
    c += 25
    i += 1
    x, y, w, h = cv2.boundingRect(cnt)
    center = (x+w//2, y+h//2)
    cv2.rectangle(pos, (x, y), (x+w, y+h), (c, 220, 255-c), 1)
    # Extract each polygon in a separate image
    cur = mix.copy()
    cv2.floodFill(cur, np.zeros((H+2, W+2), np.uint8), (0, 0), 0)
    cv2.floodFill(cur, np.zeros((H+2, W+2), np.uint8), center, 127)
    cur[np.where(cur == 255)] = 30
    cur[np.where(cur == 127)] = 255
    out[np.where(cur == 255)] = (random.randint(50, 255),
                                 random.randint(50, 255),
                                 random.randint(50, 255))
    #cv2.imwrite(sys.path[0]+f'/tri_{i}.jpg', cur)
    if c >= 255:
        c = 0
# Print the number of polygons
print(len(cnts))
# Change type of images
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
msk = cv2.cvtColor(msk, cv2.COLOR_GRAY2BGR)
mix = cv2.cvtColor(mix, cv2.COLOR_GRAY2BGR)
# Save the output
top = np.hstack((im, mix))
btm = np.hstack((msk, pos))
cv2.imwrite(sys.path[0]+'/rect_out.jpg', np.vstack((top, btm)))
cv2.imwrite(sys.path[0]+'/rect_out2.jpg', np.vstack((im, out)))

Related

How to deal with DICOM by OpenCV FindContours with ambiguous edge

I want to segment the left auricle in a DICOM image for the calculation of cv2.minAreaRect.
I use cv2.GaussianBlur to filter noise first, then use cv2.kmeans to segment the image into a mask.
Afterwards, I use cv2.Canny and cv2.findContours to find the edge I want.
But after I apply cv2.minAreaRect to the contours, I get many small rectangles.
This isn't what I expect; I want to find the whole minimum bounding rectangle, as below.
The following is my code.
import cv2
import numpy as np
# read input and smooth it
image = cv2.imread('1.jpg')
image = cv2.GaussianBlur(image, (15, 15), 0)
h, w, c = image.shape
# reshape to 1D array
image_2d = image.reshape(h*w, c).astype(np.float32)
# set number of colors
numcolors = 2
numiters = 10
epsilon = 1
attempts = 10
# do kmeans processing
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, numiters, epsilon)
ret, labels, centers = cv2.kmeans(image_2d, numcolors, None, criteria, attempts, cv2.KMEANS_RANDOM_CENTERS)
# reconstitute 2D image of results
centers = np.uint8(centers)
newimage = centers[labels.flatten()]
newimage = newimage.reshape(image.shape)
cv2.imshow('new image', newimage)
cv2.waitKey(0)
k = 0
for center in centers:
    # select color and create mask
    layer = newimage.copy()
    mask = cv2.inRange(layer, center, center)
    #print(mask[203][130])
    # apply mask to layer
    layer[mask == 0] = [0,0,100]
    #cv2.imshow('layer', layer)
    #cv2.waitKey(0)
    num = 0
    # save kmeans clustered image and layer
    if k == 0:
        edges = cv2.Canny(mask, 1, 10)
        contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
        raw_img3 = cv2.drawContours(image.copy(), contours, -1, (0, 0, 255), 3)
        cnts = contours
        cv2.imshow('Canny', raw_img3)
        cv2.waitKey(0)
        for cnt in cnts:
            # find minimum bounding rectangle
            rect = cv2.minAreaRect(cnt)
            box2 = cv2.boxPoints(rect)
            box2 = np.int32(box2)  # np.int0 was removed in newer NumPy
            aa = cv2.drawContours(image.copy(), [box2], 0, (255, 0, 0), 4)
            cv2.imshow('Canny', aa)
            cv2.waitKey(0)
    k = k + 1
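One possible direction (a hedged sketch, untested on this data): merge all the contour points before calling cv2.minAreaRect, so a single rectangle covers every detected edge fragment instead of one rectangle per contour:
# Sketch: one minAreaRect over all Canny contours combined
all_points = np.vstack([cnt.reshape(-1, 2) for cnt in cnts])
rect = cv2.minAreaRect(all_points)
box = cv2.boxPoints(rect).astype(np.int32)
merged = cv2.drawContours(image.copy(), [box], 0, (255, 0, 0), 4)
cv2.imshow('merged rect', merged)
cv2.waitKey(0)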

Approximating a quadrilateral from a given mask

Goal:
I'd like to estimate a 4-corner quadrilateral (not only a rectangle) around a given masked object, as shown in the image, without losing any pixel of the masked object.
Trials:
I tried using cv2 but couldn't arrive at a solution.
cv2.boundingRect: returns the coordinates of the bounding rectangle (while the estimated quadrilateral does not always need to be a perfect rectangle).
cv2.findContours + cv2.approxPolyDP: isn't that accurate and returns an estimate of the object's extreme points (it needs more work to estimate the 4 quadrilateral coordinates, and there might be an easier and faster solution).
Code Snippets:
Trying cv2.boundingRect:
#mask = grayed image with only a specific object being masked
#image = the original rgb image
x,y,x_width,y_height = cv2.boundingRect(mask)
image=np.array(im[0])
cv2.rectangle(image,(x,y),(x+x_width,y+y_height),(0,255,0),2)
plt.imshow(image)
Trying cv2.findContours + cv2.approxPolyDP:
#mask = grayed image with only a specific object being masked
#image = the original rgb image
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
selected_contour = max(contours, key=lambda x: cv2.contourArea(x))
approx = cv2.approxPolyDP(selected_contour, 0.0035 * cv2.arcLength(selected_contour, True), True)
cv2.drawContours(image, [approx], 0, (0, 0, 255), 5)
plt.imshow(image)
I am not sure if there is a better or built-in version, but I have a simple idea based on random numbers:
I only did this for the top, but you can do the same for the other sides. The idea is to find the bounding box of the object first, and then divide the object into equal ranges so that we can find the highest peaks.
In each range, you can pick points randomly; but for best results, it is best to check all the top points of the shape to find the highest peaks correctly.
After finding the two highest peaks, we calculate the line equation through those 2 points so that we can draw a global line based on it.
import sys
import cv2
import random
import numpy as np
from tqdm import tqdm
def rndPt(l, t, r, b):
    # Generate a random point in the given ROI
    return (random.randint(int(l), int(r)), random.randint(int(t), int(b)))
def intArr(arr):
    # Cast each item of a 1D array to integer
    return [int(x) for x in arr]
# Load our image
pth = sys.path[0]
org = cv2.imread(pth+'/bound.png')
im = org.copy()
H, W = im.shape[:2]
# Make mask and copy from that image
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
bw = cv2.threshold(im, 127, 255, cv2.THRESH_BINARY)[1]
im = bw.copy()
# Find the ROI of object
cnts, _ = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = sorted(cnts, key=lambda x: cv2.boundingRect(x)[0])  # sorted(), since findContours may return a tuple
ROI = None
for cnt in cnts:
    x, y, w, h = cv2.boundingRect(cnt)
    if w < W-1 and h < H-1:
        cv2.rectangle(bw, (x, y), (x+w, y+h), 127, 2)
        ROI = {'x': x, 'y': y, 'w': w, 'h': h, 'h2': y+h}
# We have to find the peaks; so we have to
# divide the bounding-box of shape into several
# ranges.
spaces = 5
sw = ROI['w']//spaces
# Each range can have a peak as a candidate point
candidates = [(ROI['x']+(sw*x)+sw//2, ROI['h']//2) for x in range(0, spaces)]
# Divide the object and find the highest point in
# each range
for s in tqdm(range(0, spaces)):
    l = ROI['x']+(sw*s)
    cv2.line(im, pt1=(l, ROI['y']), pt2=(l, ROI['h2']),
             color=127, thickness=2)
    for x in range(0, sw):
        for i in range(0, 200):
            pt = rndPt(l, ROI['y'], l+sw, ROI['h2']//4)
            if pt[1] < candidates[s][1] and bw[pt[1], pt[0]] == 0:
                candidates[s] = pt
l = ROI['x']+(sw*spaces)
cv2.line(im, pt1=(l, ROI['y']), pt2=(l, ROI['h2']), color=127, thickness=2)
print(candidates)
# We remove duplicate points and also sort the points
# according to the peak
candidates = list(set(candidates))
candidates.sort(key=lambda p: p[1])
print(candidates)
c = candidates
# Now that we have found two of the highest points, we can
# write a line equation for these two points
xA, xB = ROI['x'], ROI['x']+ROI['w']
x1, y1 = c[0][0], c[0][1]
x2, y2 = c[1][0], c[1][1]
m = (y2-y1)/(x2-x1)
# y=mx+b -> y-mx=b
b = y1-m*x1
yA = m*xA+b
yB = m*xB+b
# Convert images to BGR
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
bw = cv2.cvtColor(bw, cv2.COLOR_GRAY2BGR)
# Make a copy of image to draw candidate points
marker = im.copy()
for p in candidates:
    cv2.circle(marker, (p[0], p[1]),
               h//25, color=(50, 100, 200), thickness=4)
# Draw lines
cv2.line(im, pt1=intArr((xA, yA)), pt2=intArr((xB, yB)),
         color=(255, 0, 100), thickness=4, lineType=cv2.LINE_AA)
cv2.line(bw, pt1=intArr(c[0]), pt2=intArr(c[1]),
         color=(100, 0, 255), thickness=4, lineType=cv2.LINE_AA)
# Save final output
top = np.hstack((org, marker))
btm = np.hstack((bw, im))
cv2.imwrite(pth+'/out.png', np.vstack((top, btm)))

split on each edge in an image

I'm new to OpenCV. What I want to do is split on every edge that I detected with Canny.
Can someone help me with how I can do this?
Please check the image: I point to where I want to split with two red arrows.
Split at the first position of the next edge, i.e. where I showed in the image.
I think this is what you want:
# Import preprocessors
import os
import cv2
import numpy as np
# Read image
dir = os.path.abspath(os.path.dirname(__file__))
org = cv2.imread(dir+'/im.png')
# Make a copy from that image
im = org.copy()
imH, imW = im.shape[:2]
# Gray version of that image
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# Remove red arrows
im[np.where(im < 130)] = 0
im[np.where(im >= 130)] = 255
# Keep a copy of image without arrow
org = im.copy()
org = cv2.cvtColor(org, cv2.COLOR_GRAY2BGR)
cv2.imwrite(dir+'/out_1_no_arrow.png', im)
# Dim the horizontal lines
im = cv2.GaussianBlur(im, (1, 11), 20)
cv2.imwrite(dir+'/out_2_dim.png', im)
# Remove the horizontal lines
im[np.where(im < 190)] = 0
im[np.where(im > 190)] = 255
cv2.imwrite(dir+'/out_3_ptrs.png', im)
# Find contours and sort them by position
cnts, _ = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = sorted(cnts, key=lambda x: cv2.boundingRect(x)[0], reverse=True)  # sorted(), since findContours may return a tuple
# Find and save blocks
x2, i, off = imW, 0, imW/5
lastX = None
for cnt in cnts:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(org, (x, y), (x+w, y+h), (0, 255, 0), 2)
    if abs(x2-x) >= off:
        i += 1
        cv2.imwrite(dir+'/out_block_'+str(i)+".jpg", org[0:imH, x:x2])
        x2 = x
    lastX = x
i += 1
cv2.imwrite(dir+'/out_block_'+str(i)+".jpg", org[0:imH, 0:lastX])
# Save the processed images
cv2.imwrite(dir+'/out_4_cut_positions.png', org)
Removed Red Arrows from original image:
Blur to remove horizontal lines:
Remove horizontal lines and keep candidate places:
Show candidate locations on the original image:
Final result and isolated letters:

Detecting tick marks with python opencv

So I am given an image of a box, and within the box are many tick marks of various sizes, just like a ruler, as depicted below:
This is the input picture
So far, using edge detection, I am only able to detect the outer rectangle, but not any of the tick marks within it. Code shown below:
import numpy as np
import cv2
image = cv2.imread('images/Ruler.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3, 3), 0)
edges = cv2.Canny(blur, 50, 200)
cnts, hierarchy = cv2.findContours(edges, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)
corner_points = []
for index, cnt_points in enumerate(cnts):
    perimeter = cv2.arcLength(cnts[index], True)
    approx = cv2.approxPolyDP(cnts[index], 0.02 * perimeter, True)
    corner_points.append(approx)
    x, y, w, h = cv2.boundingRect(corner_points[index])
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
print(corner_points)
cv2.imshow("Contour", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
I would like the resulting image to look something like this image represented below:
Ideal result
As you can see, both the tick marks (in red outline) and the outer rectangle (in green outline) are not only detected, but you can also distinguish a tick mark from the outer rectangle. I am also trying to obtain the pixel locations of the corner points of the tick marks, as seen in my code where I store them in corner_points = [].
Also, I am not sure whether tick marks count as thick lines or as rectangles, so the corner points could be either just the 2 end points of the tick mark "line" or the 4 vertices of the tick mark "rectangle".
import cv2
img = cv2.imread('images/Ruler.png', cv2.IMREAD_GRAYSCALE)
h, w = img.shape          # grayscale image: only two dimensions
bw = img > 128
corner_points = []
# if the pixel length of a horizontal run is above this threshold,
# add its start and end points to corner_points
accepted_length = 10
for i in range(0, h):
    start = -1  # column of the first True pixel in the row
    for j in range(0, w):
        if bw[i, j] and start == -1:
            start = j
        if start != -1 and not bw[i, j]:
            stop = j  # the first False pixel after start
            # the upper bound of 50 avoids adding the long floor and ceiling lines
            if accepted_length < stop - start < 50:
                corner_points.append([(start, i), (stop, i)])
            start = -1  # keep scanning the rest of the row
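A quick hedged way to sanity-check the result (assuming the corner_points format above, where each entry holds the (x, y) end points of one detected run):
# Sketch: draw the detected tick runs back onto the image
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for (x1, y1), (x2, y2) in corner_points:
    cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
cv2.imwrite('ticks_check.png', vis)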

Separate rooms in a floor plan using OpenCV

Input floor plan image
The images above are my input floor plans. I need to identify each room separately and then crop those rooms; after that, I can use those images for the next steps. So far I have been able to remove small items from the input floor plans using cv2.connectedComponentsWithStats, which I think will make it easier to identify the walls. After that, my input image looks like this.
output image after removing small objects
Then I applied a morphological transform to remove text and other symbols from the image, leaving only the walls. After that, my input image looks like this.
after MorphologicalTransform
So I was able to identify the walls. How can I use those walls to crop the rooms from the original input floor plan? Can someone help me? You can find my Python code in this link. Download My Code
or
#Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
IMAGE_NAME = 'floorplan2.jpg'
#Remove Small Items
im_gray = cv2.imread(IMAGE_NAME, cv2.IMREAD_GRAYSCALE)
(thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
thresh = 127
im_bw = cv2.threshold(im_gray, thresh, 255, cv2.THRESH_BINARY)[1]
#find all your connected components
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(im_bw, connectivity=8)
#connectedComponentsWithStats yields every separated component with information on each of them, such as size
#the following part just removes the background, which is also considered a component, since most of the time we don't want it
sizes = stats[1:, -1]; nb_components = nb_components - 1
# minimum size of particles we want to keep (number of pixels)
#here it's a fixed value, but you can set it as you want, e.g. the mean of the sizes
min_size = 150
#your answer image
img2 = np.zeros(output.shape, np.uint8)  # uint8 so imshow/imwrite handle it correctly
#for every component in the image, keep it only if it's above min_size
for i in range(0, nb_components):
    if sizes[i] >= min_size:
        img2[output == i + 1] = 255
cv2.imshow('room detector', img2)
#MorphologicalTransform
kernel = np.ones((5, 5), np.uint8)
dilation = cv2.dilate(img2, kernel)
erosion = cv2.erode(img2, kernel, iterations=6)
#cv2.imshow("img2", img2)
cv2.imshow("Dilation", dilation)
cv2.imwrite("Dilation.jpg", dilation)
#cv2.imshow("Erosion", erosion)
# Press any key to close the image
cv2.waitKey(0)
# Clean up
cv2.destroyAllWindows()
Here is something that I've come up with. It is not perfect (I left some comments on what you might want to try), and it will work better if you improve the input image quality.
import cv2
import numpy as np
def find_rooms(img, noise_removal_threshold=25, corners_threshold=0.1,
               room_closing_max_length=100, gap_in_wall_threshold=500):
    """
    :param img: grey scale image of rooms, already eroded and doors removed etc.
    :param noise_removal_threshold: Minimal area of blobs to be kept.
    :param corners_threshold: Threshold to allow corners. Higher removes more of the house.
    :param room_closing_max_length: Maximum line length to add to close off open doors.
    :param gap_in_wall_threshold: Minimum number of pixels to identify component as room instead of hole in the wall.
    :return: rooms: list of numpy arrays containing boolean masks for each detected room
             colored_house: A colored version of the input image, where each room has a random color.
    """
    assert 0 <= corners_threshold <= 1
    # Remove noise left from door removal
    img[img < 128] = 0
    img[img > 128] = 255
    # OpenCV 4 returns (contours, hierarchy); on OpenCV 3 use the 3-value form
    contours, _ = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    mask = np.zeros_like(img)
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > noise_removal_threshold:
            cv2.fillPoly(mask, [contour], 255)
    img = ~mask
    # Detect corners (you can play with the parameters here)
    dst = cv2.cornerHarris(img, 2, 3, 0.04)
    dst = cv2.dilate(dst, None)
    corners = dst > corners_threshold * dst.max()
    # Draw lines to close the rooms off by adding a line between corners on the same x or y coordinate
    # This gets some false positives.
    # You could try to disallow drawing through other existing lines for example.
    for y, row in enumerate(corners):
        x_same_y = np.argwhere(row)
        for x1, x2 in zip(x_same_y[:-1], x_same_y[1:]):
            if x2[0] - x1[0] < room_closing_max_length:
                color = 0
                cv2.line(img, (int(x1[0]), y), (int(x2[0]), y), color, 1)
    for x, col in enumerate(corners.T):
        y_same_x = np.argwhere(col)
        for y1, y2 in zip(y_same_x[:-1], y_same_x[1:]):
            if y2[0] - y1[0] < room_closing_max_length:
                color = 0
                cv2.line(img, (x, int(y1[0])), (x, int(y2[0])), color, 1)
    # Mark the outside of the house as black
    contours, _ = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
    mask = np.zeros_like(mask)
    cv2.fillPoly(mask, [biggest_contour], 255)
    img[mask == 0] = 0
    # Find the connected components in the house
    ret, labels = cv2.connectedComponents(img)
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    unique = np.unique(labels)
    rooms = []
    for label in unique:
        component = labels == label
        if img[component].sum() == 0 or np.count_nonzero(component) < gap_in_wall_threshold:
            color = 0
        else:
            rooms.append(component)
            color = np.random.randint(0, 255, size=3)
        img[component] = color
    return rooms, img
#Read gray image
img = cv2.imread("/home/veith/Pictures/room.png", 0)
rooms, colored_house = find_rooms(img.copy())
cv2.imshow('result', colored_house)
cv2.waitKey()
cv2.destroyAllWindows()
This will show an image like this, where each room has a random color:
You can see that it sometimes finds a room where there is none, but I think this is a decent starting point for you.
I've used a screenshot of the image in your question for this.
You can use the returned masks of each room to index the original image and crop that.
To crop just use something like (untested, but should work for the most part):
for room in rooms:
    crop = np.zeros_like(room).astype(np.uint8)
    crop[room] = original_img[room] # Get the original image from somewhere
    # if you need to crop the image into smaller parts as big as each room
    r, c = np.nonzero(room)
    min_r, max_r = r.min(), r.max()   # min()/max(), not argmin()/argmax(), to get pixel coordinates
    min_c, max_c = c.min(), c.max()
    crop = crop[min_r:max_r, min_c:max_c]
    cv2.imshow("cropped room", crop)
    cv2.waitKey()
cv2.destroyAllWindows()
I used three for loops to crop each room.
height, width = img.shape[:2]
rooms, colored_house = find_rooms(img.copy())
roomId = 0
images = []
for room in rooms:
    x = 0
    image = np.zeros((height, width, 3), np.uint8)
    image[np.where((image == [0, 0, 0]).all(axis=2))] = [0, 33, 166]
    roomId = roomId + 1
    for raw in room:
        y = 0
        for value in raw:
            if value == True:
                image[x, y] = img[x, y]
            y = y + 1
            #print (value)
            #print (img[x,y])
        x = x + 1
    cv2.imwrite('result' + str(roomId) + '.jpg', image)
