Separate rooms in a floor plan using OpenCV - python

Input floor plan image
The image above is my input floor plan. I need to identify each room separately and then crop those rooms, so I can use the crops in the next steps. So far I have been able to remove small items from the input floor plan using cv2.connectedComponentsWithStats, which I think will make the walls easier to identify. After that step my input image looks like this.
output image after removing small objects
Then I applied a morphological transform to remove text and other symbols from the image, leaving only the walls. After that my input image looks like this.
after MorphologicalTransform
So I was able to identify the walls. How can I use those walls to crop the rooms from the original input floor plan? Can someone help me? My Python code is below.
# Import packages
import cv2
import numpy as np

IMAGE_NAME = 'floorplan2.jpg'

# Remove small items
im_gray = cv2.imread(IMAGE_NAME, cv2.IMREAD_GRAYSCALE)
# Binarize with Otsu's method (the threshold value is chosen automatically)
thresh, im_bw = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

# Find all connected components.
# connectedComponentsWithStats yields every separated component together
# with information on each of them, such as its size in pixels.
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(im_bw, connectivity=8)

# Take out the background, which is also considered a component,
# but most of the time we don't want it.
sizes = stats[1:, -1]
nb_components = nb_components - 1

# Minimum size (in pixels) of the components we want to keep.
# Here it's a fixed value, but you can set it as you want, e.g. the mean of the sizes.
min_size = 150

# Result image: keep each component only if it's above min_size
img2 = np.zeros(output.shape, dtype=np.uint8)
for i in range(0, nb_components):
    if sizes[i] >= min_size:
        img2[output == i + 1] = 255
cv2.imshow('room detector', img2)

# Morphological transform
kernel = np.ones((5, 5), np.uint8)
dilation = cv2.dilate(img2, kernel)
erosion = cv2.erode(img2, kernel, iterations=6)
#cv2.imshow("img2", img2)
cv2.imshow("Dilation", dilation)
cv2.imwrite("Dilation.jpg", dilation)
#cv2.imshow("Erosion", erosion)

# Press any key to close the windows
cv2.waitKey(0)

# Clean up
cv2.destroyAllWindows()

Here is something I've come up with. It is not perfect (I added some comments about what you might want to try), and it will work better if you improve the input image quality.
import cv2
import numpy as np

def find_rooms(img, noise_removal_threshold=25, corners_threshold=0.1,
               room_closing_max_length=100, gap_in_wall_threshold=500):
    """
    :param img: grey scale image of rooms, already eroded and doors removed etc.
    :param noise_removal_threshold: Minimal area of blobs to be kept.
    :param corners_threshold: Threshold to allow corners. Higher removes more of the house.
    :param room_closing_max_length: Maximum line length to add to close off open doors.
    :param gap_in_wall_threshold: Minimum number of pixels to identify component as room instead of hole in the wall.
    :return: rooms: list of numpy arrays containing boolean masks for each detected room
             colored_house: A colored version of the input image, where each room has a random color.
    """
    assert 0 <= corners_threshold <= 1

    # Remove noise left from door removal
    img[img < 128] = 0
    img[img > 128] = 255
    # OpenCV 4.x returns (contours, hierarchy); in 3.x use `_, contours, _ = ...`
    contours, _ = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    mask = np.zeros_like(img)
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > noise_removal_threshold:
            cv2.fillPoly(mask, [contour], 255)
    img = ~mask

    # Detect corners (you can play with the parameters here)
    dst = cv2.cornerHarris(img, 2, 3, 0.04)
    dst = cv2.dilate(dst, None)
    corners = dst > corners_threshold * dst.max()

    # Draw lines to close the rooms off by adding a line between corners on the same x or y coordinate.
    # This gets some false positives.
    # You could try to disallow drawing through other existing lines for example.
    for y, row in enumerate(corners):
        x_same_y = np.argwhere(row)
        for x1, x2 in zip(x_same_y[:-1], x_same_y[1:]):
            if x2[0] - x1[0] < room_closing_max_length:
                color = 0
                cv2.line(img, (int(x1[0]), y), (int(x2[0]), y), color, 1)

    for x, col in enumerate(corners.T):
        y_same_x = np.argwhere(col)
        for y1, y2 in zip(y_same_x[:-1], y_same_x[1:]):
            if y2[0] - y1[0] < room_closing_max_length:
                color = 0
                cv2.line(img, (x, int(y1[0])), (x, int(y2[0])), color, 1)

    # Mark the outside of the house as black
    contours, _ = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
    mask = np.zeros_like(mask)
    cv2.fillPoly(mask, [biggest_contour], 255)
    img[mask == 0] = 0

    # Find the connected components in the house
    ret, labels = cv2.connectedComponents(img)
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    unique = np.unique(labels)
    rooms = []
    for label in unique:
        component = labels == label
        if img[component].sum() == 0 or np.count_nonzero(component) < gap_in_wall_threshold:
            color = 0
        else:
            rooms.append(component)
            color = np.random.randint(0, 255, size=3)
        img[component] = color
    return rooms, img

# Read gray image
img = cv2.imread("/home/veith/Pictures/room.png", 0)
rooms, colored_house = find_rooms(img.copy())
cv2.imshow('result', colored_house)
cv2.waitKey()
cv2.destroyAllWindows()
This will show an image like this, where each room has a random color:
You can see that it sometimes finds a room where there is none, but I think this is a decent starting point for you.
I've used a screenshot of the image in your question for this.
You can use the returned masks of each room to index the original image and crop that.
To crop just use something like (untested, but should work for the most part):
for room in rooms:
    crop = np.zeros_like(room).astype(np.uint8)
    crop[room] = original_img[room]  # Get the original image from somewhere

    # If you need to crop the image into smaller parts as big as each room:
    r, c = np.nonzero(room)
    min_r, max_r = r.min(), r.max()
    min_c, max_c = c.min(), c.max()
    crop = crop[min_r:max_r, min_c:max_c]
    cv2.imshow("cropped room", crop)
    cv2.waitKey()
    cv2.destroyAllWindows()

I used three for loops to crop each room.
height, width = img.shape[:2]
rooms, colored_house = find_rooms(img.copy())
roomId = 0
images = []
for room in rooms:
    x = 0
    # Start from a canvas filled with a background color (BGR [0, 33, 166])
    image = np.zeros((height, width, 3), np.uint8)
    image[np.where((image == [0, 0, 0]).all(axis=2))] = [0, 33, 166]
    roomId = roomId + 1
    for row in room:
        y = 0
        for value in row:
            if value == True:
                image[x, y] = img[x, y]
            y = y + 1
        x = x + 1
    cv2.imwrite('result' + str(roomId) + '.jpg', image)
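As a side note, the nested loops can be slow on large plans. NumPy boolean indexing does the same masking in one vectorized step; here is a minimal sketch, assuming img is the grayscale plan and rooms comes from find_rooms above:

import cv2
import numpy as np

# Assumes `img` (grayscale floor plan) and `rooms` (boolean masks) from above.
img_bgr = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for room_id, room in enumerate(rooms, start=1):
    # Canvas pre-filled with the same background color (BGR [0, 33, 166])
    image = np.full(img_bgr.shape, (0, 33, 166), np.uint8)
    # Copy the room's pixels from the original image in one step
    image[room] = img_bgr[room]
    cv2.imwrite('result' + str(room_id) + '.jpg', image)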

Related

split on each edge in an image

I'm new to OpenCV. What I want to do is split the image at every edge that I detected with Canny. Can someone help me with how to do this?
Please check the image: the two red arrows point to where I want to split, i.e. at the first position of the next edge.
I think this is what you want:
# Import preprocessors
import os
import cv2
import numpy as np
# Read image
dir = os.path.abspath(os.path.dirname(__file__))
org = cv2.imread(dir+'/im.png')
# Make a copy from that image
im = org.copy()
imH, imW = im.shape[:2]
# Gray version of that image
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# Remove red arrows
im[np.where(im < 130)] = 0
im[np.where(im >= 130)] = 255
# Keep a copy of image without arrow
org = im.copy()
org = cv2.cvtColor(org, cv2.COLOR_GRAY2BGR)
cv2.imwrite(dir+'/out_1_no_arrow.png', im)
# Dim the horizontal lines
im = cv2.GaussianBlur(im, (1, 11), 20)
cv2.imwrite(dir+'/out_2_dim.png', im)
# Remove the horizontal lines
im[np.where(im < 190)] = 0
im[np.where(im > 190)] = 255
cv2.imwrite(dir+'/out_3_ptrs.png', im)
# Find contours and sort them by position
cnts, _ = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts.sort(key=lambda x: cv2.boundingRect(x)[0], reverse=True)
# Find and save blocks
x2, i, off = imW, 0, imW/5
lastX = None
for cnt in cnts:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(org, (x, y), (x+w, y+h), (0, 255, 0), 2)
    if abs(x2-x) >= off:
        i += 1
        cv2.imwrite(dir+'/out_block_'+str(i)+".jpg", org[0:imH, x:x2])
        x2 = x
    lastX = x
i += 1
cv2.imwrite(dir+'/out_block_'+str(i)+".jpg", org[0:imH, 0:lastX])
# Save the processed images
cv2.imwrite(dir+'/out_4_cut_positions.png', org)
Removed Red Arrows from original image:
Blur to remove horizontal lines:
Remove horizontal lines and keep candidate places:
Show candidate locations on the original image:
Final result and isolated letters:

How to count different grains in an image using cv2?

I have an image with cereal items in it, shown below:
The image has:
3 walnuts
3 raisins
3 pumpkin seeds
27 similar looking cereal
I wish to count them separately using OpenCV; I do not want to recognize them. So far, I have tailored the adaptive threshold method to count all the seeds, but I am not sure how to count them separately. This is my script:
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('/Users/vaibhavsaxena/Desktop/Screen Shot 2021-04-27 at 12.22.46.png', 0)
#img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
windowSize = 31
windowConstant = 40
mask = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, windowSize, windowConstant)
plt.imshow(mask)
stats = cv2.connectedComponentsWithStats(mask, 8)[2]
label_area = stats[1:, cv2.CC_STAT_AREA]
min_area, max_area = 345, max(list(label_area)) # min/max for a single circle
singular_mask = (min_area < label_area) & (label_area <= max_area)
circle_area = np.mean(label_area[singular_mask])
n_circles = int(np.sum(np.round(label_area / circle_area)))
print('Total circles:', n_circles)
36
But this seems extremely hard-coded. For example, if I zoom in or out of the image, it yields a different count.
Can anyone please help?
Your lighting is not good; as HansHirse suggested, try to normalize the conditions under which you take your photos. There is, however, a method that can somewhat normalize the lighting and make it as uniform as possible. The method is called gain division. The idea is to build a model of the background and then weight each input pixel by that model. The output gain should be relatively constant over most of the image. Let's give it a try:
# imports:
import cv2
import numpy as np
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Get local maximum:
kernelSize = 30
maxKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
localMax = cv2.morphologyEx(inputImage, cv2.MORPH_CLOSE, maxKernel, None, None, 1, cv2.BORDER_REFLECT101)
# Perform gain division
gainDivision = np.where(localMax == 0, 0, (inputImage/localMax))
# Clip the values to [0,255]
gainDivision = np.clip((255 * gainDivision), 0, 255)
# Convert the mat type from float to uint8:
gainDivision = gainDivision.astype("uint8")
Gotta be careful with those data types and their conversions. This is the result:
As you can see, most of the background is now uniform, that's pretty cool, because now we can apply a simple thresholding method. Let's try Otsu's Thresholding to get a nice binary mask of the elements:
# Convert RGB to grayscale:
grayscaleImage = cv2.cvtColor(gainDivision, cv2.COLOR_BGR2GRAY)
# Get binary image via Otsu:
_, binaryImage = cv2.threshold(grayscaleImage, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
Which yields this binary mask:
The mask can be improved by applying morphology; let's try to join those blobs with a gentle closing operation:
# Set kernel (structuring element) size:
kernelSize = 3
# Set morph operation iterations:
opIterations = 2
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform closing:
binaryImage = cv2.morphologyEx( binaryImage, cv2.MORPH_CLOSE, morphKernel, None, None, opIterations, cv2.BORDER_REFLECT101 )
This is the result:
Alright, now, just for completeness, let's try to compute the bounding rectangles of all the elements. We can also filter blobs of small area and store each bounding rectangle in a list:
# Find the blobs on the binary image:
contours, hierarchy = cv2.findContours(binaryImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Store the bounding rectangles here:
rectanglesList = []
# Look for the outer bounding boxes (no children):
for _, c in enumerate(contours):
    # Get blob area:
    currentArea = cv2.contourArea(c)
    # Set a min area threshold:
    minArea = 100
    if currentArea > minArea:
        # Approximate the contour to a polygon:
        contoursPoly = cv2.approxPolyDP(c, 3, True)
        # Get the polygon's bounding rectangle:
        boundRect = cv2.boundingRect(contoursPoly)
        # Store rectangles in list:
        rectanglesList.append(boundRect)
        # Get the dimensions of the bounding rect:
        rectX = boundRect[0]
        rectY = boundRect[1]
        rectWidth = boundRect[2]
        rectHeight = boundRect[3]
        # Set bounding rect:
        color = (0, 0, 255)
        cv2.rectangle(inputImageCopy, (int(rectX), int(rectY)),
                      (int(rectX + rectWidth), int(rectY + rectHeight)), color, 2)

cv2.imshow("Rectangles", inputImageCopy)
cv2.waitKey(0)
The final image is this:
This is the total of detected elements:
print("Elements found: "+str(len(rectanglesList)))
Elements found: 37
As you can see, there's a false positive. A bit of the shadow of a grain gets detected as an actual grain. Maybe adjusting the minimum area will get rid of the problem. Or maybe, if you are classifying each grain anyway, you could filter this kind of noise.
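As a rough sketch of that suggestion (the cutoff values here are assumptions, not from the original answer), you could raise the minimum area, and express it as a fraction of the image size so the filter is less sensitive to zooming:

import cv2

# Assumes `contours` and `inputImage` from the code above.
# Hypothetical cutoff: 0.05% of the image area; tune on your data.
minArea = 0.0005 * inputImage.shape[0] * inputImage.shape[1]
rectanglesList = [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) > minArea]
print("Elements found: " + str(len(rectanglesList)))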

Object Detection with OpenCV-Python

I am trying to detect all of the overlapping circle/ellipse shapes in this image, all of which have digits on them. I have tried different image processing techniques with OpenCV, but I cannot detect the shapes that overlap the tree. I have tried erosion and dilation, but they have not helped.
Any pointers on how to go about this would be great. I have attached my code below.
original = frame.copy()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
canny = cv2.Canny(blurred, 120, 255, 1)
kernel = np.ones((5, 5), np.uint8)
dilate = cv2.dilate(canny, kernel, iterations=1)
# Find contours
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
image_number = 0
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (36, 255, 12), 2)
    ROI = original[y:y + h, x:x + w]
    cv2.imwrite("ROI_{}.png".format(image_number), ROI)
    image_number += 1
cv2.imshow('canny', canny)
cv2.imshow('image', frame)
cv2.waitKey(0)
Here's a possible solution. I'm assuming that the target blobs (the saucer-like things) are always labeled - that is, they always have a white number inside them. The idea is to create a digits mask, because their size and color seem to be constant. I use the digits as a guide to obtain sample pixels of the ellipses. Then, I convert these BGR pixels to HSV, create a binary mask and use that info to threshold and locate the ellipses. Let's check out the code:
# imports:
import cv2
import numpy as np
# image path
path = "D://opencvImages//"
fileName = "4dzfr.png"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Convert RGB to grayscale:
grayscaleImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Get binary image via a fixed threshold (pixels >= 200 become white):
binaryImage = np.where(grayscaleImage >= 200, 255, 0)
# The above operation produced an integer (not 8-bit) array,
# convert back to 8-bit uint:
binaryImage = binaryImage.astype(np.uint8)
The first step is to make a mask of the digits. I also created a deep copy of the BGR image. The digits are close to white (That is, an intensity close to 255). I use 200 as threshold and obtain this result:
Now, let's locate these contours on this binary mask. I'm filtering based on aspect ratio, as the digits have a distinct aspect ratio close to 0.70. I'm also filtering contours based on hierarchy, as I'm only interested in external contours (the ones that have no parent). That's because I really don't need contours like the "holes" inside the digit 8:
# Find the contours on the binary image:
contours, hierarchy = cv2.findContours(binaryImage, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# Store the sampled pixels here:
sampledPixels = []
# Look for the outer bounding boxes (no children):
for i, c in enumerate(contours):
    # Get the contour bounding rectangle:
    boundRect = cv2.boundingRect(c)
    # Get the dimensions of the bounding rect:
    rectX = boundRect[0]
    rectY = boundRect[1]
    rectWidth = boundRect[2]
    rectHeight = boundRect[3]
    # Compute the aspect ratio:
    aspectRatio = rectWidth / rectHeight
    # Create the filtering threshold value:
    delta = abs(0.7 - aspectRatio)
    epsilon = 0.1
    # Get the hierarchy:
    currentHierarchy = hierarchy[0][i][3]
    # Prepare the list of sampling points (one for the ellipse, one for the circle):
    samplingPoints = [(rectX - rectWidth, rectY), (rectX, rectY - rectHeight)]
    # Look for the target contours:
    if delta < epsilon and currentHierarchy == -1:
        # This list will hold both sampling pixels:
        pixelList = []
        # Get sampling pixels from the two locations:
        for s in range(2):
            # Get sampling point:
            sampleX = samplingPoints[s][0]
            sampleY = samplingPoints[s][1]
            # Get sample BGR pixel:
            samplePixel = inputImageCopy[sampleY, sampleX]
            # Store into temp list:
            pixelList.append(samplePixel)
        # Convert list to tuple:
        pixelList = tuple(pixelList)
        # Save pixel value:
        sampledPixels.append(pixelList)
OK, there are a couple of things happening in the last snippet of code. We want to sample pixels from both the ellipse and the circle. We will use two sampling locations that are a function of each digit's original position. These positions are defined in the samplingPoints list. For the ellipse, I'm sampling a little before the top-right position of the digit. For the circle, I'm sampling directly above the top-right position. We end up with two pixels for each figure.
You'll notice I'm doing a little bit of data type juggling, converting lists to tuples. I want these pixels stored as a tuple for convenience. If I draw bounding rectangles of the digits, this would be the resulting image:
Now, let's loop through the pixel list, convert them to HSV and create a HSV mask over the original BGR image. The final bounding rectangles of the ellipses are stored in boundingRectangles, additionally I draw results on the deep copy of the original input:
# Final bounding rectangles are stored here:
boundingRectangles = []
# Loop through sampled pixels:
for p in range(len(sampledPixels)):
    # Get current pixel tuple:
    currentPixelTuple = sampledPixels[p]
    # Prepare the HSV mask:
    imageHeight, imageWidth = binaryImage.shape[:2]
    hsvMask = np.zeros((imageHeight, imageWidth), np.uint8)
    # Process the two sampling pixels:
    for m in range(len(currentPixelTuple)):
        # Get current pixel in the list:
        currentPixel = currentPixelTuple[m]
        # Create BGR Mat:
        pixelMat = np.zeros([1, 1, 3], dtype=np.uint8)
        pixelMat[0, 0] = currentPixel
        # Convert the BGR pixel to HSV:
        hsvPixel = cv2.cvtColor(pixelMat, cv2.COLOR_BGR2HSV)
        H = hsvPixel[0][0][0]
        S = hsvPixel[0][0][1]
        V = hsvPixel[0][0][2]
        # Create HSV range for this pixel:
        rangeThreshold = 5
        lowerValues = np.array([H - rangeThreshold, S - rangeThreshold, V - rangeThreshold])
        upperValues = np.array([H + rangeThreshold, S + rangeThreshold, V + rangeThreshold])
        # Create HSV mask:
        hsvImage = cv2.cvtColor(inputImage.copy(), cv2.COLOR_BGR2HSV)
        tempMask = cv2.inRange(hsvImage, lowerValues, upperValues)
        hsvMask = hsvMask + tempMask
First, I create a 1 x 1 Matrix (or Numpy Array) with just a BGR pixel value - the first of two I previously sampled. In this way, I can use cv2.cvtColor to get the corresponding HSV values. Then, I create lower and upper threshold values for the HSV mask. However, the image seems synthetic, and a range-based thresholding could be reduced to a unique tuple. After that, I create the HSV mask using cv2.inRange.
This will yield the HSV mask for the ellipse. After applying the method for the circle we will end up with two HSV masks. Well, I just added the two arrays to combine both masks. At the end you will have something like this, this is the "composite" HSV mask created for the first saucer-like figure:
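One caveat (my note, not part of the original answer): adding two uint8 masks wraps around wherever they overlap, since 255 + 255 overflows an 8-bit value. A bitwise OR combines binary masks without that risk:

# Safer combination of the two binary masks: OR never overflows
hsvMask = cv2.bitwise_or(hsvMask, tempMask)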
We can apply a little bit of morphology to join both shapes, just a little closing will do:
# Set kernel (structuring element) size:
kernelSize = 3
# Set morph operation iterations:
opIterations = 2
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform closing:
hsvMask = cv2.morphologyEx(hsvMask, cv2.MORPH_CLOSE, morphKernel, None, None, opIterations,cv2.BORDER_REFLECT101)
This is the result:
Nice. Let's continue and get the bounding rectangles of all the shapes. I'm using the boundingRectangles list to store each bounding rectangle, like this:
# Process current contour:
currentContour, _ = cv2.findContours(hsvMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for _, c in enumerate(currentContour):
    # Get the contour's bounding rectangle:
    boundRect = cv2.boundingRect(c)
    # Get the dimensions of the bounding rect:
    rectX = boundRect[0]
    rectY = boundRect[1]
    rectWidth = boundRect[2]
    rectHeight = boundRect[3]
    # Store and set bounding rect:
    boundingRectangles.append(boundRect)
    color = (0, 0, 255)
    cv2.rectangle(inputImageCopy, (int(rectX), int(rectY)),
                  (int(rectX + rectWidth), int(rectY + rectHeight)), color, 2)
cv2.imshow("Objects", inputImageCopy)
cv2.waitKey(0)
This is the image of the located rectangles once every sampled pixel is processed:

Image Differencing: Ignore translational movement

I'm looking for some insight into what the best approach might be to my problem.
I'm comparing two separate images for differences, but I'm running into a problem with small translational movements.
I have a "gospel" image which is the "gold standard" per se:
gospel image
Then I have multiple different taken images to compare against.
Here's an example: example image
Here's an example difference image showing my problem: difference image
As you can see, they are quite small. The way that I am differencing the images now is by first resizing the images to 32x32, manually decreasing the contrast by 100 and then applying a blur using OpenCV.
Afterwards, I use skimage's structural_similarity function to subtract and quantify the differences between the images. The rest is purely for viewing.
import cv2
import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity
def change_contrast(img, level):
    img = Image.fromarray(img)
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c):
        return 128 + factor * (c - 128)
    return np.asarray(img.point(contrast))

# Open and preprocess the images
image_orig = cv2.imread(IMAGE_PATH)
image = cv2.resize(image_orig, (32, 32))
image = change_contrast(image, -100)
image = cv2.blur(image, (5, 5))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

gospel_orig = cv2.imread(GOSPEL_PATH)
gospel = cv2.resize(gospel_orig, (32, 32))
gospel = change_contrast(gospel, -100)
gospel = cv2.blur(gospel, (5, 5))
gospel = cv2.cvtColor(gospel, cv2.COLOR_BGR2GRAY)
# Get image similarities and an output difference image
(score, diff) = structural_similarity(image, gospel, full=True)
print("Image similarity", score)
diff = (diff * 255).astype("uint8")
# Viewing stuff below
thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
filled_gospel = cv2.cvtColor(gospel, cv2.COLOR_GRAY2BGR)
for c in contours:
    area = cv2.contourArea(c)
    if area > 40:
        x, y, w, h = cv2.boundingRect(c)
        cv2.drawContours(filled_gospel, [c], 0, (0, 255, 0), -1)
cv2.imshow('image', image)
cv2.imshow('gospel', gospel)
cv2.imshow('diff',diff)
cv2.imshow('filled gospel',filled_gospel)
cv2.waitKey(0)
After the above steps, you can see some translational differences between the 'gospel' and the taken image. What would be the best way to combat this, given that I only want the differences in the black of the lettering, not how well the images are aligned?
Here is how I would do template matching and differencing in Python/OpenCV.
Read the reference and example images
Pad the example image to twice its dimensions with its background gray color.
Do template matching with the reference to find the best match location and match score.
Crop the padded example image with its top left corner at the match location, but the size of the reference image
Get the absolute difference image
Save results
Reference:
Example:
import cv2
import numpy as np
# read reference and convert to gray
ref = cv2.imread('reference.png')
ref_gray = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
hr, wr = ref_gray.shape
# read example and convert to gray
ex = cv2.imread('example.png')
ex_gray = cv2.cvtColor(ex, cv2.COLOR_BGR2GRAY)
he, we = ex_gray.shape
# pad the example to double its dimensions with gray=190
color=190
wp = we // 2
hp = he // 2
ex_gray = cv2.copyMakeBorder(ex_gray, hp,hp,wp,wp, cv2.BORDER_CONSTANT, value=color)
# do template matching (search for the reference inside the padded example;
# the first argument is the image searched, the second the template)
corrimg = cv2.matchTemplate(ex_gray, ref_gray, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(corrimg)
max_val_corr = '{:.3f}'.format(max_val)
print("correlation: " + max_val_corr)
xx = max_loc[0]
yy = max_loc[1]
print('x_match_loc =',xx,'y_match_loc =',yy)
# crop the padded example image at top left corner of xx,yy and size hr x wr
ex_gray_crop = ex_gray[yy:yy+hr, xx:xx+wr]
# get absolute difference image (use the float copies to avoid uint8 wrap-around)
ref_grayf = ref_gray.astype(np.float32)
ex_gray_cropf = ex_gray_crop.astype(np.float32)
diff = (255 - np.abs(ref_grayf - ex_gray_cropf)).astype(np.uint8)
# compute mean of the absolute difference, scaled to the range 0 to 100
mean = 100 * cv2.mean(255 - diff)[0] / 255
print("mean of diff in range 0 to 100 =", mean)
cv2.imshow('ref_gray', ref_gray)
cv2.imshow('ex_gray', ex_gray)
cv2.imshow('ex_gray_crop', ex_gray_crop)
cv2.imshow('correlation image', corrimg)
cv2.imshow('diff', diff)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('reference_gray.jpg', ref_gray)
cv2.imwrite('example_gray_padded.jpg', ex_gray)
cv2.imwrite('reference_example_correlation.jpg', (255*corrimg).clip(0,255).astype(np.uint8))
cv2.imwrite('example_gray_padded_cropped.jpg', ex_gray_crop)
cv2.imwrite('reference_example_diff.jpg', diff)
Example padded:
Correlation Image showing locations of best match:
Match Results:
correlation: 0.969
x_match_loc = 10 y_match_loc = 9
mean of diff in range 0 to 100 = 1.3956887102667155
Example cropped to align with reference:
Diff image (white is where they differ):
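If you still want an SSIM score like in your original script, you can run it on the aligned crop; a minimal sketch (assuming ref_gray and ex_gray_crop from the code above), so the score no longer penalizes translation:

from skimage.metrics import structural_similarity

# SSIM on the aligned pair: the translation has already been removed by template matching
score, ssim_diff = structural_similarity(ref_gray, ex_gray_crop, full=True)
print("Image similarity after alignment:", score)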

OpenCV - using cv2.canny and cv2.findContours to find area with data on the heterogeneous images

This is my first question here, so I ask for your understanding. I have to process hundreds of satellite images.
I am trying to find the contour of the area of useful data in each image - only the largest one.
Then I want to save the coordinates of the few points (x, y) corresponding to this contour. In the simplest case the area is a square and can be represented by 4 points, but for more complicated shapes the contour will be approximated by somewhat more points (preferably no more than ~fifteen). However, I am still not able to find the areas in my images. Sometimes the area touches the edge of the image, so in this script I enlarge the pictures and add additional borders filled with the background color. Examples of pictures can be found here: satellite1, satellite2, satellite3
As you can see, the images can have different background colors, and in addition they contain country borders and a legend. I have tried to use Aidenhjj's tips from OpenCV - using cv2.approxPolyDP() correctly and prepared my script. I have tried many approaches, filters, and parameter tunings, but I still can't succeed with my data. I am asking for your help.
import numpy as np
import cv2
import matplotlib.pyplot as plt

image = cv2.imread('image1.jpg')
image = cv2.resize(image, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_CUBIC)
ysize, xsize, channels = image.shape
print("Image size: {} x {}".format(xsize, ysize))

# Calculate the histograms of the b, g, r channels to measure the background color:
# the histogram peak is the most frequent intensity, i.e. the background level.
# Note: cv2.split on a BGR image returns the channels in B, G, R order.
b, g, r = cv2.split(image)
image_data = image
B = int(np.argmax(cv2.calcHist([b], [0], None, [256], [0, 256])))
G = int(np.argmax(cv2.calcHist([g], [0], None, [256], [0, 256])))
R = int(np.argmax(cv2.calcHist([r], [0], None, [256], [0, 256])))
color = (B, G, R)

# Add borders around the image colorized as the background.
# This will allow me to find a closed contour around the area with data.
bordersize = 100
new_xsize = xsize + bordersize * 2
new_ysize = ysize + bordersize * 2
image_border = cv2.copyMakeBorder(image, top=bordersize, bottom=bordersize,
                                  left=bordersize, right=bordersize,
                                  borderType=cv2.BORDER_CONSTANT, value=[B, G, R])

# Get a blank canvas for drawing the contour on, and convert the image to grayscale
canvas = np.zeros(image_border.shape, np.uint8)
#imgc = cv2.medianBlur(img, 21)
img2gray = cv2.cvtColor(image_border, cv2.COLOR_BGR2GRAY)

# Filter out country borders
kernel = np.ones((5, 5), np.float32) / 25
img2gray = cv2.filter2D(img2gray, -1, kernel)

# Threshold the image and extract contours
thresh = cv2.adaptiveThreshold(img2gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 11)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
plt.subplot(111), plt.imshow(thresh, 'gray')
plt.show()

# Find the biggest area
cnt = contours[0]
max_area = cv2.contourArea(cnt)
for cont in contours:
    if cv2.contourArea(cont) > max_area:
        cnt = cont
        max_area = cv2.contourArea(cont)

perimeter = cv2.arcLength(cnt, True)
epsilon = 0.01 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
hull = cv2.convexHull(cnt)
# cv2.isContourConvex(cnt)
cv2.drawContours(canvas, cnt, -1, (0, 255, 0), 3)
cv2.drawContours(canvas, approx, -1, (0, 0, 255), 3)
#cv2.drawContours(canvas, [hull], -1, (0, 0, 255), 3)
cv2.imshow("Contour", canvas)
k = cv2.waitKey(0)
if k == 27:  # wait for ESC key to exit
    cv2.destroyAllWindows()
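To keep the approximated contour under the ~fifteen points mentioned above, one option (my suggestion, not something from the original post) is to grow the approxPolyDP epsilon until the polygon is small enough:

import cv2

def approx_with_max_points(cnt, max_points=15):
    # Increase epsilon (as a fraction of the perimeter) until few enough vertices remain
    perimeter = cv2.arcLength(cnt, True)
    fraction = 0.01
    approx = cv2.approxPolyDP(cnt, fraction * perimeter, True)
    while len(approx) > max_points and fraction < 0.2:
        fraction += 0.01
        approx = cv2.approxPolyDP(cnt, fraction * perimeter, True)
    return approx

# Usage with the biggest contour found above:
# approx = approx_with_max_points(cnt)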
