I have a project where people can add data about their utility bills, and it includes an OCR service: people from my city can have the data extracted from a bill just by uploading a photo of it. The trouble is that I can't fully reach this goal.
I have 4 high-quality bill templates (for heating, water, gas and so on). An example is below:
A user can upload a picture like this:
And after alignment I get this result:
It's obvious that I can't get good recognition from such an image.
The code I use for image alignment:
import os
import cv2
import numpy as np
from config import folder_path_aligned_images
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
class OpenCV:
@classmethod
def match_img(cls, im1, im2):
# Convert images to grayscale
im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints_1, descriptors_1 = orb.detectAndCompute(im1_gray, None)
keypoints_2, descriptors_2 = orb.detectAndCompute(im2_gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors_1, descriptors_2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:num_good_matches]
# Draw top matches
im_matches = cv2.drawMatches(im1, keypoints_1, im2, keypoints_2, matches, None)
cv2.imwrite(os.path.join(folder_path_aligned_images, "matches.jpg"), im_matches)
# Extract location of good matches
points_1 = np.zeros((len(matches), 2), dtype=np.float32)
points_2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points_1[i, :] = keypoints_1[match.queryIdx].pt
points_2[i, :] = keypoints_2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points_1, points_2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1_reg = cv2.warpPerspective(im1, h, (width, height))
return im1_reg, h
@classmethod
def align_img(cls, template_path, raw_img_path, result_img_path):
# Read reference image
ref_filename = template_path
print("Reading reference image: ", ref_filename)
im_reference = cv2.imread(ref_filename, cv2.IMREAD_COLOR)
# Read image to be aligned
im_filename = raw_img_path
print("Reading image to align: ", im_filename)
im = cv2.imread(raw_img_path, cv2.IMREAD_COLOR)
print("Aligning images ...")
# Registered image will be stored in im_reg.
im_reg, h = OpenCV.match_img(im, im_reference)
# Write aligned image to disk.
print("Saving aligned image : ", result_img_path)
cv2.imwrite(result_img_path, im_reg)
return result_img_path
How can I improve this?
EDIT: image with matches:
I don't know if this helps almost a year on, but I used code similar to yours, and what worked for me was to increase MAX_FEATURES (I use 80000, but you might not even need that many) and to decrease GOOD_MATCH_PERCENT to around 0.05. Try playing with the numbers!
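For reference, that only means changing the two constants at the top of the script above; the exact values are just what worked for this answer and are worth tuning:
# Tuned starting points suggested above, not universal values
MAX_FEATURES = 80000        # let ORB detect many more keypoints
GOOD_MATCH_PERCENT = 0.05   # keep only the best 5% of matches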
The scenario is: let's suppose I take random images from my gallery and put them in a folder, and also print these images out in hard copy. Now I turn on my laptop camera and place a printed image in front of the camera; my code needs to tell me the name of the matching image from the folder. The code I have added in the question works fine, but when I increase the number of images in the folder, identification starts taking more time. I want to know: is there any method I can try to improve the speed? Here is the working demo of my scenario.
import cv2
import os
from pathlib import Path
import time
# Define path
pathImages = 'ImagesQuery'
# Set the Number of detected Features
orb = cv2.ORB.create(nfeatures=1000)
# Set the threshold of minimum Features detected to give a positive, around 20 to 30
thres = 27
# List Images and Print out their Names and how many there are in the Folder
images = []
classNamesImages = []
myListImages = os.listdir(pathImages)
print(myListImages)
print('Total Classes Detected', len(myListImages))
# this will read in the images
for cl in myListImages:
imgCur = cv2.imread(f'{pathImages}/{cl}', 0)
images.append(imgCur)
# delete the file extension
classNamesImages.append(os.path.splitext(cl)[0])
print(classNamesImages)
# this will find the matching points in the images
def findDes(images):
desList = []
for img in images:
kp, des = orb.detectAndCompute(img, None)
desList.append(des)
return desList
# this will compare the matches and find the corresponding image
def findID(img, desList):
image_matching_time = time.time()
kp2, des2 = orb.detectAndCompute(img, None)
bf = cv2.BFMatcher()
matchList = []
finalVal = -1
try:
for des in desList:
matches = bf.knnMatch(des, des2, k=2)
good = []
for m, n in matches:
# the distance of the matches in comparison to each other
if m.distance < 0.75 * n.distance:
good.append([m])
matchList.append(len(good))
except:
pass
# uncomment to see how many positive matches, according to this the thres is set
# print(matchList)
if len(matchList) != 0:
if max(matchList) > thres:
finalVal = matchList.index(max(matchList))
print("matching time --- %s seconds ---" % (time.time() - image_matching_time))
return finalVal
desList = findDes(images)
print(len(desList))
# open Webcam
cap = cv2.VideoCapture(1)
while True:
success, img2 = cap.read()
imgOriginal = img2.copy()
# convert Camera to Grayscale
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# if Matching with Image in List, send the respective Name
id = findID(img2, desList)
if id != -1:
# put text for the found Image
cv2.putText(imgOriginal, classNamesImages[id], (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
# show the final Image
cv2.imshow('img2', imgOriginal)
cv2.waitKey(1)
I have tried a deep-learning-based solution but it didn't work. Second, I have tried the FLANN feature matcher, but it shows the same time delay of about 0.4 s as the code shown above. I am looking for any fast solution. Here is a sample of an original image from the folder and a query image from the camera.
[Edit]:
The number of query images (the images in the folder) is around 1000, and I want to compare camera frames against all of them at 20 fps.
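Not from the original post, but one direction sometimes used for this kind of per-frame lookup is to load all folder descriptors into a single matcher once, so every camera frame needs only one knnMatch call instead of a Python loop over roughly 1000 images. A rough sketch using a FLANN LSH index (which works with ORB's binary descriptors); it reuses desList, orb and thres from the code above, and findID_batched is a hypothetical replacement for findID:
import cv2
import numpy as np

FLANN_INDEX_LSH = 6
flann = cv2.FlannBasedMatcher(
    dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1),
    dict(checks=50))
for des in desList:            # descriptors computed once by findDes(images)
    if des is not None:
        flann.add([des])
flann.train()                  # build the index once, up front

def findID_batched(img, thres=27):
    kp2, des2 = orb.detectAndCompute(img, None)
    if des2 is None:
        return -1
    votes = [0] * len(desList)
    # one knnMatch call against the whole trained collection;
    # match.imgIdx says which folder image the match came from
    for pair in flann.knnMatch(des2, k=2):
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
            votes[pair[0].imgIdx] += 1
    best = int(np.argmax(votes))
    return best if votes[best] > thres else -1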
I'm a novice at OpenCV. I'm currently following this tutorial on image alignment, and I have the following image and template for testing:
Scanned image (test_image.jpg):
Template image (template.jpg):
and the following Python code:
from __future__ import print_function
import cv2
import numpy as np
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2):
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(
cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = list(matcher.match(descriptors1, descriptors2, None))
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw top matches
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
cv2.imwrite("matches.jpg", imMatches)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
return im1Reg, h
if __name__ == '__main__':
# Read reference image
refFilename = "template.jpg"
print("Reading reference image : ", refFilename)
imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)
# Read image to be aligned
imFilename = "test_image.jpg"
print("Reading image to align : ", imFilename)
im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
print("Aligning images ...")
# Registered image will be stored in imReg.
# The estimated homography will be stored in h.
imReg, h = alignImages(im, imReference)
# Write aligned image to disk.
outFilename = "aligned.jpg"
print("Saving aligned image : ", outFilename)
cv2.imwrite(outFilename, imReg)
# Print estimated homography
print("Estimated homography : \n", h)
I get the following results after running the script:
matches.jpg:
UPDATE:
I was able to get the image when I increased the number of ORB features to 2000:
aligned.jpg
But the homography is still not rotating the image; how can I rotate the image into the same position as the template?
There are two forms of homography (forward and backward), but if you have already found the homography, applying it can be done without using OpenCV as follows:
import numpy as np
from scipy.interpolate import griddata
# creating the homogeneous coordinates
src_h, src_w, _ = src_image.shape
values = np.reshape(src_image, (-1, 3), order='F')
yy, xx = np.meshgrid(np.arange(src_h), np.arange(src_w))
input_flat = np.concatenate((xx.reshape((1, -1)), yy.reshape((1, -1)), np.ones_like(xx.reshape((1, -1)))), axis=0)
# applying the homography and converting back from homogeneous coordinates
points = np.matmul(homography, input_flat)
points_homogeneous = points[0:2, :] / points[2, :]
# interpolating the result to nicely fit the grid coordinates
dst_image_shape = [400, 400] # could be any number here
yy, xx = np.meshgrid(np.arange(dst_image_shape[1]), np.arange(dst_image_shape[0]))
src_image_warp = griddata(np.transpose(points_homogeneous), values, (yy, xx), method='linear')
# numerical rounding and clipping
src_image_warp[np.isnan(src_image_warp)] = 0
src_image_warp[src_image_warp > 255] = 255
src_image_warp = np.uint8(src_image_warp)
Note that this is done for a single-channel image; for an RGB image it has to be done for each channel separately (see the sketch below). In addition, this could be made to run faster by interpolating only the relevant coordinates, since the interpolation is the most time-consuming operation.
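A minimal sketch of the per-channel variant, reusing values, points_homogeneous, yy and xx from the snippet above:
# Interpolate each color channel separately, then stack the results
warp_channels = []
for c in range(3):
    channel = griddata(np.transpose(points_homogeneous),
                       values[:, c],          # flattened pixels of one channel
                       (yy, xx), method='linear')
    channel[np.isnan(channel)] = 0
    warp_channels.append(channel)
src_image_warp = np.uint8(np.clip(np.dstack(warp_channels), 0, 255))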
With OpenCV this can be done with:
import cv2
image_dst = cv2.warpPerspective(image_src, homography, size) # size is a tuple (width, height) of the destination image
Read more on homographies and the OpenCV implementation here.
Finding the homography
The homography can be found without using OpenCV, but that requires some knowledge of linear algebra and the explanation is a bit lengthy; if needed, I will post it as an edit. For any practical case, however, the homography can be found using OpenCV as follows:
homography, status = cv2.findHomography(pts_src, pts_dst)
where pts_src are coordinates in the original image and pts_dst are their matching locations in the destination image. Since you have already found the point pairs, this will yield the homography (OpenCV optimizes the homography for minimal distortion in the backward operation, which is the correct way to perform homography computations).
You have a homography h calculated from findHomography and you can use warpPerspective to transform the template to have the same perspective as the photo.
Now you just need to invert the homography, and apply it to the photo instead of the template.
Either use np.linalg.inv for that, or pass the WARP_INVERSE_MAP flag to warpPerspective instead.
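A minimal sketch of both options; photo, template and h here are placeholders standing for the photo, the template and the homography described above:
import cv2
import numpy as np

height, width = template.shape[:2]

# Option 1: invert the homography explicitly and warp the photo
h_inv = np.linalg.inv(h)
aligned = cv2.warpPerspective(photo, h_inv, (width, height))

# Option 2: let warpPerspective apply the inverse mapping itself
aligned = cv2.warpPerspective(photo, h, (width, height),
                              flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)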
I am trying to find the closest match of an image within a large list of other images (10,000+). The background color is all white, the camera angle is the same, and the shape of the image content is close across images (see image below). I tried using OpenCV with ORB and BFMatcher with knnMatch to find the closest match, but I am not even close to finding the match I want.
To my understanding the images need to be grayscale, but in my case I think color would be a very important descriptor?
I am new to both OpenCV and image matching, so can you tell me whether I need to use another approach?
import cv2
import os
orb = cv2.ORB_create(nfeatures=1000) # Find 1000 features to match from
bf = cv2.BFMatcher()
# Image to match
findImg = 'captainA.png'
imgCur = cv2.imread(f'{"Images"}/{findImg}', 0)
kp1,des1 = orb.detectAndCompute(imgCur,None)
# Loop through all superheroe images and find closest match
images = ["img1.png","img2.png","img3.png","img4.png","img5.png","img6.png","img7.png","img8.png","img9.png","img10.png","img11.png","img12.png"]
matchList = []
names = []
for img in images:
imgCur = cv2.imread(f'{"Superheroes"}/{img}', 0)
kp2,des2 = orb.detectAndCompute(imgCur,None)
matches = bf.knnMatch(des1,des2,k=2)
goodMatches = []
for m, n in matches:
if m.distance < 0.75 * n.distance: # Lowe's ratio test: 0.75 as the threshold defining a good match
goodMatches.append([m])
matchList.append(len(goodMatches))
names.append(img)
matchIdx = matchList.index(max(matchList))
# Name of matched image
print(names[matchIdx])
What I want to find:
Here is a small piece of code that should do the job.
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Model
import numpy as np
from PIL import Image
base_model = VGG16(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
def extract(img):
img = img.resize((224, 224)) # Resize the image
img = img.convert('RGB') # Convert the image color space
x = image.img_to_array(img) # Reformat the image
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
feature = model.predict(x)[0] # Extract Features
return feature / np.linalg.norm(feature)
# Iterate through images and extract Features
images = ["img1.png","img2.png","img3.png","img4.png","img5.png"...+2000 more]
all_features = np.zeros(shape=(len(images),4096))
for i in range(len(images)):
feature = extract(img=Image.open(images[i]))
all_features[i] = np.array(feature)
# Match image
query = extract(img=Image.open("image_to_match.png")) # Extract its features
dists = np.linalg.norm(all_features - query, axis=1) # Calculate the similarity (distance) between images
ids = np.argsort(dists)[:5] # Extract 5 images that have lowest distance
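As a usage example, reusing the names from the snippet above, the five closest file names and their distances can then be printed with:
for i in ids:
    print(images[i], dists[i])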
I am trying to learn OpenCV in order to improve a script I wrote for comparing engineering drawings. I am using the code (see below) found on this tutorial but I am having zero success with it. In the tutorial the author uses the example of a blank form for the reference image and a photo of the completed form as the image to align. My situation is very similar because I am attempting to use a blank drawing title block as my reference image and a scanned image of a drawing as my image to align.
My goal is to use OpenCV to clean up the scanned engineering drawings so that they are aligned properly but no matter what I try in the MAX_FEATURES and GOOD_MATCH_PERCENT parameters, I get an image that looks like a black and white star burst. Also, when I review the "matches.jpg" file generated by the script, it appears that there are no correct matches. I have tried multiple drawings and I get the same results.
Can anyone see a reason why this script would not work in the way I am trying to use it?
from __future__ import print_function
import cv2
import numpy as np
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2):
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw top matches
imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
cv2.imwrite("matches.jpg", imMatches)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
return im1Reg, h
if __name__ == '__main__':
# Read reference image
refFilename = "form.jpg"
print("Reading reference image : ", refFilename)
imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)
# Read image to be aligned
imFilename = "scanned-form.jpg"
print("Reading image to align : ", imFilename);
im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
print("Aligning images ...")
# Registered image will be stored in imReg.
# The estimated homography will be stored in h.
imReg, h = alignImages(im, imReference)
# Write aligned image to disk.
outFilename = "aligned.jpg"
print("Saving aligned image : ", outFilename);
cv2.imwrite(outFilename, imReg)
# Print estimated homography
print("Estimated homography : \n", h)
Template Image:
Image to Align:
Expected output Image:
Here is one way in Python/OpenCV using a Rigid Affine Transformation (scale, rotation and translation only - no skew or perspective) to warp one image to match the other. It uses findTransformECC() (Enhanced Correlation Coefficient Maximization) to get the rotation matrix and then uses warpAffine to do the rigid warping.
Template:
Image to be warped:
import cv2
import numpy as np
import math
import sys
# Get the image files from the command line arguments
# These are full paths to the images
# image2 will be warped to match image1
# argv[0] is name of script
image1 = sys.argv[1]
image2 = sys.argv[2]
outfile = sys.argv[3]
# Read the images to be aligned
# im2 is to be warped to match im1
im1 = cv2.imread(image1);
im2 = cv2.imread(image2);
# Convert images to grayscale for computing the rotation via ECC method
im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
# Find size of image1
sz = im1.shape
# Define the motion model - euclidean is rigid (SRT)
warp_mode = cv2.MOTION_EUCLIDEAN
# Define 2x3 matrix and initialize the matrix to identity matrix I (eye)
warp_matrix = np.eye(2, 3, dtype=np.float32)
# Specify the number of iterations.
number_of_iterations = 5000;
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-3;
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC (im1_gray, im2_gray, warp_matrix, warp_mode, criteria, None, 1)
# Warp im2 using affine
im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP);
# write output
cv2.imwrite(outfile, im2_aligned)
# Print rotation angle
row1_col0 = warp_matrix[0,1]
angle = math.degrees(math.asin(row1_col0))
print(angle)
Result:
Resulting Angle of Rotation (in deg):
-0.3102187026194794
Note: you can change the background color in the warpAffine call to white if desired.
Also, making the termination epsilon smaller by an order of magnitude or two gives more accuracy, but longer processing times.
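Concretely, those two tweaks (using the same variable names as the script above; nothing else changes) could look like:
# Tighter convergence threshold: more accurate, but longer processing time
termination_eps = 1e-5
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)

# Fill the uncovered border with white instead of black when warping
im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1], sz[0]),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP,
                             borderMode=cv2.BORDER_CONSTANT,
                             borderValue=(255, 255, 255))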
The other Rigid Affine approach that I mentioned in my comments earlier is to use ORB feature matching, filter the key points, then use estimateAffinePartial2D() to get the rigid affine matrix. Then use that to warp the image. For large angles this seems to me to be more reliable than the ECC method. But the ECC method seems more accurate for small rotations.
import cv2
import numpy as np
import math
import sys
MAX_FEATURES = 10000
GOOD_MATCH_PERCENT = 0.15
DIFFY_THRESH = 2
# Get the image files from the command line arguments
# These are full paths to the images
# image[2] will be warped to match image[1]
# argv[0] is name of script
file1 = sys.argv[1]
file2 = sys.argv[2]
outFile = sys.argv[3]
# Read image1
image1 = cv2.imread(file1, cv2.IMREAD_COLOR)
# Read image2 to be warped to match image1
image2 = cv2.imread(file2, cv2.IMREAD_COLOR)
# Convert images to grayscale
image1Gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
image2Gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(image1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(image2Gray, None)
# Match features.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
#print('numgood',numGoodMatches)
# Extract location of good matches and filter by diffy if rotation is small
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# initialize empty arrays for newpoints1 and newpoints2 and mask
newpoints1 = np.empty(shape=[0, 2], dtype=np.float32)
newpoints2 = np.empty(shape=[0, 2], dtype=np.float32)
matches_Mask = [0] * len(matches)
count=0
for i in range(len(matches)):
pt1 = points1[i]
pt2 = points2[i]
pt1x, pt1y = zip(*[pt1])
pt2x, pt2y = zip(*[pt2])
diffy = np.float32( np.float32(pt2y) - np.float32(pt1y) )
if abs(diffy) < DIFFY_THRESH:
newpoints1 = np.append(newpoints1, [pt1], axis=0).astype(np.float32)
newpoints2 = np.append(newpoints2, [pt2], axis=0).astype(np.float32)
matches_Mask[i]=1
count += 1
# Find Affine Transformation
# note swap of order of newpoints here so that image2 is warped to match image1
m, inliers = cv2.estimateAffinePartial2D(newpoints2,newpoints1)
# Use affine transform to warp im2 to match im1
height, width, channels = image1.shape
image2Reg = cv2.warpAffine(image2, m, (width, height))
# Write aligned image to disk.
cv2.imwrite(outFile, image2Reg)
# Print angle
row1_col0 = m[1,0]
print('row1_col0:',row1_col0)
angle = math.degrees(math.asin(row1_col0))
print('angle', angle)
Result Image:
Result Rotation Angle:
-0.6123936361765413
After some trial and error I determined that I don't need to find a homography in order to align my images properly. Since my images only need to be scaled and rotated slightly, my best option is to find the outermost points of the drawing title block and align one image to the other with a transform.
My approach is to use the Harris corner finding function to find all of the corners on the drawing, then do a simple calculation to find the points that are the shortest distance to the corners of the drawing canvas (these are the outside corners of the drawing title block). I then take 3 of the points (top left, top right, and bottom left) and use a transform to scale/rotate one drawing to the other.
Below is the code that I used:
import cv2
import numpy as np
import math
img1 = cv2.imread('reference.jpg')
img2 = cv2.imread('to-be-aligned.jpg')
#Find the corner points of img1
h1,w1,c=img1.shape
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray1 = np.float32(gray1)
dst1 = cv2.cornerHarris(gray1,5,3,0.04)
ret1, dst1 = cv2.threshold(dst1,0.1*dst1.max(),255,0)
dst1 = np.uint8(dst1)
ret1, labels1, stats1, centroids1 = cv2.connectedComponentsWithStats(dst1)
criteria1 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners1 = cv2.cornerSubPix(gray1,np.float32(centroids1),(5,5),(-1,-1),criteria1)
#Find the corner points of img2
h2,w2,c=img2.shape
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
gray2 = np.float32(gray2)
dst2 = cv2.cornerHarris(gray2,5,3,0.04)
ret2, dst2 = cv2.threshold(dst2,0.1*dst2.max(),255,0)
dst2 = np.uint8(dst2)
ret2, labels2, stats2, centroids2 = cv2.connectedComponentsWithStats(dst2)
criteria2 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners2 = cv2.cornerSubPix(gray2,np.float32(centroids2),(5,5),(-1,-1),criteria2)
#Find the top left, top right, and bottom left outer corners of the drawing frame for img1
a1=[0,0]
b1=[w1,0]
c1=[0,h1]
a1_dist=[]
b1_dist=[]
c1_dist=[]
for i in corners1:
temp_a1=math.sqrt((i[0]-a1[0])**2+(i[1]-a1[1])**2)
temp_b1=math.sqrt((i[0]-b1[0])**2+(i[1]-b1[1])**2)
temp_c1=math.sqrt((i[0]-c1[0])**2+(i[1]-c1[1])**2)
a1_dist.append(temp_a1)
b1_dist.append(temp_b1)
c1_dist.append(temp_c1)
print("Image #1 (reference):")
print("Top Left:")
print(corners1[a1_dist.index(min(a1_dist))])
print("Top Right:")
print(corners1[b1_dist.index(min(b1_dist))])
print("Bottom Left:")
print(corners1[c1_dist.index(min(c1_dist))])
#Find the top left, top right, and bottom left outer corners of the drawing frame for img2
a2=[0,0]
b2=[w2,0]
c2=[0,h2]
a2_dist=[]
b2_dist=[]
c2_dist=[]
for i in corners2:
temp_a2=math.sqrt((i[0]-a2[0])**2+(i[1]-a2[1])**2)
temp_b2=math.sqrt((i[0]-b2[0])**2+(i[1]-b2[1])**2)
temp_c2=math.sqrt((i[0]-c2[0])**2+(i[1]-c2[1])**2)
a2_dist.append(temp_a2)
b2_dist.append(temp_b2)
c2_dist.append(temp_c2)
print("Image #2 (image to align):")
print("Top Left:")
print(corners2[a2_dist.index(min(a2_dist))])
print("Top Right:")
print(corners2[b2_dist.index(min(b2_dist))])
print("Bottom Left:")
print(corners2[c2_dist.index(min(c2_dist))])
#Create the points for img1
point1 = np.zeros((3,2), dtype=np.float32)
point1[0][0]=corners1[a1_dist.index(min(a1_dist))][0]
point1[0][1]=corners1[a1_dist.index(min(a1_dist))][1]
point1[1][0]=corners1[b1_dist.index(min(b1_dist))][0]
point1[1][1]=corners1[b1_dist.index(min(b1_dist))][1]
point1[2][0]=corners1[c1_dist.index(min(c1_dist))][0]
point1[2][1]=corners1[c1_dist.index(min(c1_dist))][1]
#Create the points for img2
point2 = np.zeros((3,2), dtype=np.float32)
point2[0][0]=corners2[a2_dist.index(min(a2_dist))][0]
point2[0][1]=corners2[a2_dist.index(min(a2_dist))][1]
point2[1][0]=corners2[b2_dist.index(min(b2_dist))][0]
point2[1][1]=corners2[b2_dist.index(min(b2_dist))][1]
point2[2][0]=corners2[c2_dist.index(min(c2_dist))][0]
point2[2][1]=corners2[c2_dist.index(min(c2_dist))][1]
#Make sure points look ok:
print(point1)
print(point2)
#Transform the image
m = cv2.getAffineTransform(point2,point1)
image2Reg = cv2.warpAffine(img2, m, (w1, h1), borderValue=(255,255,255))
#Highlight found points in red:
img1[dst1>0.1*dst1.max()]=[0,0,255]
img2[dst2>0.1*dst2.max()]=[0,0,255]
#Output the images:
cv2.imwrite("output-img1-harris.jpg", img1)
cv2.imwrite("output-img2-harris.jpg", img2)
cv2.imwrite("output-harris-transform.jpg",image2Reg)
I am using OpenCV and want to stick with it.
I have 5 images with some pairwise common areas, and I want to merge them into a single image. I have successfully joined two images together, as they were of the same resolution (a little tweak brought them to the same resolution without distorting the contents significantly). But this first stage of merging gives me a highly inflated image; the resolution has gone up significantly (roughly an addition of the two images).
To merge the two images I had brought their resolutions to the same value, and that didn't cause much distortion. But now there's this image with double the length. If I change its resolution to the level of the image next in line for stitching, it is going to heavily distort the content of the first stage and hence the result from here on.
How do I fix this issue, given that I need to go through 5-6 iterations of stitching where the resolution is going to keep increasing?
Also, is there any text that goes into the details of image processing with examples, like the above?
Stitcher.py
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# import the necessary packages
import numpy as np
import imutils
import cv2
class Stitcher:
def __init__(self):
# determine if we are using OpenCV v3.X
self.isv3 = imutils.is_cv3()
def stitch(self, images, ratio=0.75, reprojThresh=4.0,
showMatches=False):
# unpack the images, then detect keypoints and extract
# local invariant descriptors from them
(imageB, imageA) = images
#(b, g, r) = cv2.split(imageA)
#imageA = cv2.merge([r,g,b])
#(b, g, r) = cv2.split(imageB)
#imageB = cv2.merge([r,g,b])
(kpsA, featuresA) = self.detectAndDescribe(imageA)
(kpsB, featuresB) = self.detectAndDescribe(imageB)
# match features between the two images
M = self.matchKeypoints(kpsA, kpsB,
featuresA, featuresB, ratio, reprojThresh)
# if the match is None, then there aren't enough matched
# keypoints to create a panorama
if M is None:
return None
# otherwise, apply a perspective warp to stitch the images
# together
(matches, H, status) = M
result = cv2.warpPerspective(imageA, H,
(imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
# check to see if the keypoint matches should be visualized
if showMatches:
vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
status)
# return a tuple of the stitched image and the
# visualization
return (result, vis)
# return the stitched image
return result
def detectAndDescribe(self, image):
# convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check to see if we are using OpenCV 3.X
if self.isv3:
# detect and extract features from the image
descriptor = cv2.xfeatures2d.SIFT_create()
(kps, features) = descriptor.detectAndCompute(image, None)
# otherwise, we are using OpenCV 2.4.X
else:
# detect keypoints in the image
detector = cv2.FeatureDetector_create("SIFT")
kps = detector.detect(gray)
# extract features from the image
extractor = cv2.DescriptorExtractor_create("SIFT")
(kps, features) = extractor.compute(gray, kps)
# convert the keypoints from KeyPoint objects to NumPy
# arrays
kps = np.float32([kp.pt for kp in kps])
# return a tuple of keypoints and features
return (kps, features)
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
ratio, reprojThresh):
# compute the raw matches and initialize the list of actual
# matches
matcher = cv2.DescriptorMatcher_create("BruteForce")
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
matches = []
# loop over the raw matches
for m in rawMatches:
# ensure the distance is within a certain ratio of each
# other (i.e. Lowe's ratio test)
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
matches.append((m[0].trainIdx, m[0].queryIdx))
# computing a homography requires at least 4 matches
if len(matches) > 4:
# construct the two sets of points
ptsA = np.float32([kpsA[i] for (_, i) in matches])
ptsB = np.float32([kpsB[i] for (i, _) in matches])
# compute the homography between the two sets of points
(H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
reprojThresh)
# return the matches along with the homograpy matrix
# and status of each matched point
return (matches, H, status)
# otherwise, no homograpy could be computed
return None
def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
# initialize the output visualization image
(hA, wA) = imageA.shape[:2]
(hB, wB) = imageB.shape[:2]
vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
vis[0:hA, 0:wA] = imageA
vis[0:hB, wA:] = imageB
# loop over the matches
for ((trainIdx, queryIdx), s) in zip(matches, status):
# only process the match if the keypoint was successfully
# matched
if s == 1:
# draw the match
ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
# return the visualization
return vis
run.py
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 18 11:13:23 2017
#author: user
"""
# import the necessary packages
import os
os.chdir('/home/user/Desktop/stitcher')
from Stitcher import Stitcher
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
help="path to the first image")
ap.add_argument("-s", "--second", required=True,
help="path to the second image")
args = vars(ap.parse_args())
# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
#from PIL import Image
#imageA = Image.open(args['first']).convert('RGB')
#imageB = Image.open(args['second']).convert('RGB')
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
#imageA = imutils.resize(imageA, width=400)
#imageB = imutils.resize(imageB, width=400)
imageA = cv2.resize(imageA,(2464,832)) #hardcoded values
imageB = cv2.resize(imageB,(2464,832)) #hardcoded values
# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
cv2.imwrite('stage1.png',result)
# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
As you can see, I have resized the images so that they have the same height and width, with hardcoded values. I could have just taken the minimum of the two and used that as their width and height.
When I bring in the third image, I can neither inflate it to match the resolution of stage1 nor decrease stage1's resolution to match the third image.
P.S.: imutils didn't give me a way to choose both width and height.
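Not part of the original question, but one common way to keep the canvas from growing between iterations is to crop each stitched result back to its non-black bounding box before feeding it into the next stitch. A rough sketch, where crop_to_content is a hypothetical helper applied to the result returned by the Stitcher above:
import cv2
import numpy as np

def crop_to_content(stitched, thresh=1):
    # Bounding box of all pixels that are not part of the black border
    gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
    coords = cv2.findNonZero((gray > thresh).astype(np.uint8))
    x, y, w, h = cv2.boundingRect(coords)
    return stitched[y:y + h, x:x + w]

# e.g. after each stage:
# result = crop_to_content(result)
# cv2.imwrite('stage1.png', result)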