Drawing borders around matches in feature matching with OpenCV (Python)

I'm trying to create a project in which we provide a template image and a scene, then use feature matching in OpenCV Python to detect whether that object exists in the scene and draw a rectangle or square around the match found. This is the first step of the project; after this I'm going to integrate it.
import cv2 as cv
import numpy as np

# read the template and the scene, convert to grayscale and halve their size
template = cv.imread("dimitrescu1.jpg")
template = cv.cvtColor(template, cv.COLOR_BGR2GRAY)
w, h = template.shape[::-1]
template = cv.resize(template, (int(w/2), int(h/2)))

image = cv.imread("dimitrescu2.jpg")
image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
w, h = image.shape[::-1]
image = cv.resize(image, (int(w/2), int(h/2)))

# initiate ORB detector
orb = cv.ORB_create(nfeatures=1500)

# create keypoints and descriptors
kp1, des1 = orb.detectAndCompute(template, None)
kp2, des2 = orb.detectAndCompute(image, None)

# create a brute-force matcher and use the k-nearest-neighbours approach
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

# filter matches with the ratio test
good = []
for m, n in matches:
    if m.distance < 0.8 * n.distance:
        good.append([m])

# draw matches
img3 = cv.drawMatchesKnn(template, kp1, image, kp2, good, None, flags=2)
cv.imshow('hey', img3)
cv.waitKey(0)
print(len(good))
This is my code so far. I just can't figure out how to draw the borders around the matches.
Any help will be appreciated.
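One common way to get the border, once you have the good matches, is to estimate a homography from the template to the scene and project the template's corners into the scene. This is only a sketch based on the standard OpenCV homography approach, not tested on these exact images; MIN_MATCH_COUNT is an assumed threshold:

MIN_MATCH_COUNT = 10  # assumption: require at least this many good matches

if len(good) >= MIN_MATCH_COUNT:
    # good holds [m] lists (as drawMatchesKnn expects), so take the DMatch with [0]
    src_pts = np.float32([kp1[m[0].queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)

    # project the template corners into the scene and draw the border
    h, w = template.shape
    corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    projected = cv.perspectiveTransform(corners, M)
    image = cv.polylines(image, [np.int32(projected)], True, 255, 3, cv.LINE_AA)
    cv.imshow('border', image)
    cv.waitKey(0)
else:
    print("Not enough matches found:", len(good))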

Related

Find closest match of image to +10.000 others with similar features

I am trying to find the closest match of an image against a large list of other images (10,000+). The background colour is all white, the camera angle is the same, and the image content shapes are close to each other (see image below). I tried using OpenCV with ORB and BFMatcher with knnMatch to find the closest match, but I am not even close to finding the match I want.
To my understanding the images need to be grayscale, but in my case I think colour would be a very important descriptor?
I am new to both OpenCV and image matching, so can you tell me whether I need to use another approach?
import cv2
import os

orb = cv2.ORB_create(nfeatures=1000)  # find up to 1000 features to match
bf = cv2.BFMatcher()

# Image to match
findImg = 'captainA.png'
imgCur = cv2.imread(f'Images/{findImg}', 0)
kp1, des1 = orb.detectAndCompute(imgCur, None)

# Loop through all superhero images and find the closest match
images = ["img1.png","img2.png","img3.png","img4.png","img5.png","img6.png",
          "img7.png","img8.png","img9.png","img10.png","img11.png","img12.png"]
matchList = []
names = []
for img in images:
    imgCur = cv2.imread(f'Superheroes/{img}', 0)
    kp2, des2 = orb.detectAndCompute(imgCur, None)
    matches = bf.knnMatch(des1, des2, k=2)
    goodMatches = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:  # 0.75 is the ratio-test threshold for a good match
            goodMatches.append([m])
    matchList.append(len(goodMatches))
    names.append(img)

matchIdx = matchList.index(max(matchList))
# Name of matched image
print(names[matchIdx])
What I want to find:
Here is a small piece of code that should do the job.
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Model
import numpy as np
from PIL import Image

base_model = VGG16(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)

def extract(img):
    img = img.resize((224, 224))   # Resize the image
    img = img.convert('RGB')       # Convert the image colour space
    x = image.img_to_array(img)    # Reformat the image
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    feature = model.predict(x)[0]  # Extract features
    return feature / np.linalg.norm(feature)

# Iterate through images and extract features
images = ["img1.png","img2.png","img3.png","img4.png","img5.png"...+2000 more]
all_features = np.zeros(shape=(len(images), 4096))
for i in range(len(images)):
    feature = extract(img=Image.open(images[i]))
    all_features[i] = np.array(feature)

# Match image
query = extract(img=Image.open("image_to_match.png"))  # Extract its features
dists = np.linalg.norm(all_features - query, axis=1)   # Calculate the similarity (distance) between images
ids = np.argsort(dists)[:5]                             # Extract the 5 images with the lowest distance
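To see which files those five indices point to, a small follow-up (illustrative only, reusing the variables above) could be:

# Print the best-matching filenames together with their distances
for i in ids:
    print(images[i], dists[i])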

How to improve image alignment with OpenCV

I have a project where people can add data about utility bills, and there is also an OCR service inside, so people from my city can recognize data from bills just by uploading photos of them. The trouble is that I can't fully reach this goal.
So I have 4 templates of bills (like for heating, water, gas and so on) in high quality. Example is below:
My user can load a picture like that:
And after alignment I get this result:
And it's obvious that I can't get good recognition with such an image.
My code which I use for image alignment:
import os
import cv2
import numpy as np

from config import folder_path_aligned_images

MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15


class OpenCV:

    @classmethod
    def match_img(cls, im1, im2):
        # Convert images to grayscale
        im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

        # Detect ORB features and compute descriptors.
        orb = cv2.ORB_create(MAX_FEATURES)
        keypoints_1, descriptors_1 = orb.detectAndCompute(im1_gray, None)
        keypoints_2, descriptors_2 = orb.detectAndCompute(im2_gray, None)

        # Match features.
        matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
        matches = matcher.match(descriptors_1, descriptors_2, None)

        # Sort matches by score
        matches.sort(key=lambda x: x.distance, reverse=False)

        # Remove not-so-good matches
        num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
        matches = matches[:num_good_matches]

        # Draw top matches
        im_matches = cv2.drawMatches(im1, keypoints_1, im2, keypoints_2, matches, None)
        cv2.imwrite(os.path.join(folder_path_aligned_images, "matches.jpg"), im_matches)

        # Extract location of good matches
        points_1 = np.zeros((len(matches), 2), dtype=np.float32)
        points_2 = np.zeros((len(matches), 2), dtype=np.float32)
        for i, match in enumerate(matches):
            points_1[i, :] = keypoints_1[match.queryIdx].pt
            points_2[i, :] = keypoints_2[match.trainIdx].pt

        # Find homography
        h, mask = cv2.findHomography(points_1, points_2, cv2.RANSAC)

        # Use homography
        height, width, channels = im2.shape
        im1_reg = cv2.warpPerspective(im1, h, (width, height))

        return im1_reg, h

    @classmethod
    def align_img(cls, template_path, raw_img_path, result_img_path):
        # Read reference image
        ref_filename = template_path
        print("Reading reference image: ", ref_filename)
        im_reference = cv2.imread(ref_filename, cv2.IMREAD_COLOR)

        # Read image to be aligned
        im_filename = raw_img_path
        print("Reading image to align: ", im_filename)
        im = cv2.imread(raw_img_path, cv2.IMREAD_COLOR)

        print("Aligning images ...")
        # Registered image will be stored in im_reg.
        im_reg, h = OpenCV.match_img(im, im_reference)

        # Write aligned image to disk.
        print("Saving aligned image: ", result_img_path)
        cv2.imwrite(result_img_path, im_reg)

        return result_img_path
How can I improve this?
EDIT: image with matches:
Don't know if this helps almost a year on, but I used a similar code that you have, and what worked for me was to increase the number of MAX_FEATURES (I use 80000, but you might not even need that much) and to decrease the GOOD_MATCH_PERCENT to like 0.05. Try playing with the numbers!
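In other words, relative to the constants in the code above, the change that worked for me (treat these values only as a starting point to play with) was roughly:

MAX_FEATURES = 80000       # was 500; you might not even need that many
GOOD_MATCH_PERCENT = 0.05  # was 0.15

orb = cv2.ORB_create(MAX_FEATURES)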

Featured based image alignment issue

I am trying to learn OpenCV in order to improve a script I wrote for comparing engineering drawings. I am using the code (see below) found on this tutorial but I am having zero success with it. In the tutorial the author uses the example of a blank form for the reference image and a photo of the completed form as the image to align. My situation is very similar because I am attempting to use a blank drawing title block as my reference image and a scanned image of a drawing as my image to align.
My goal is to use OpenCV to clean up the scanned engineering drawings so that they are aligned properly but no matter what I try in the MAX_FEATURES and GOOD_MATCH_PERCENT parameters, I get an image that looks like a black and white star burst. Also, when I review the "matches.jpg" file generated by the script, it appears that there are no correct matches. I have tried multiple drawings and I get the same results.
Can anyone see a reason why this script would not work in the way I am trying to use it?
from __future__ import print_function
import cv2
import numpy as np

MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15


def alignImages(im1, im2):
    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not-so-good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
    cv2.imwrite("matches.jpg", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return im1Reg, h


if __name__ == '__main__':
    # Read reference image
    refFilename = "form.jpg"
    print("Reading reference image : ", refFilename)
    imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)

    # Read image to be aligned
    imFilename = "scanned-form.jpg"
    print("Reading image to align : ", imFilename)
    im = cv2.imread(imFilename, cv2.IMREAD_COLOR)

    print("Aligning images ...")
    # Registered image will be stored in imReg.
    # The estimated homography will be stored in h.
    imReg, h = alignImages(im, imReference)

    # Write aligned image to disk.
    outFilename = "aligned.jpg"
    print("Saving aligned image : ", outFilename)
    cv2.imwrite(outFilename, imReg)

    # Print estimated homography
    print("Estimated homography : \n", h)
Template Image:
Image to Align:
Expected output Image:
Here is one way in Python/OpenCV using a rigid affine transformation (scale, rotation, and translation only; no skew or perspective) to warp one image to match the other. It uses findTransformECC() (Enhanced Correlation Coefficient Maximization) to get the rotation matrix and then uses warpAffine() to do the rigid warping.
Template:
Image to be warped:
import cv2
import numpy as np
import math
import sys
# Get the image files from the command line arguments
# These are full paths to the images
# image2 will be warped to match image1
# argv[0] is name of script
image1 = sys.argv[1]
image2 = sys.argv[2]
outfile = sys.argv[3]
# Read the images to be aligned
# im2 is to be warped to match im1
im1 = cv2.imread(image1)
im2 = cv2.imread(image2)
# Convert images to grayscale for computing the rotation via ECC method
im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
# Find size of image1
sz = im1.shape
# Define the motion model - euclidean is rigid (SRT)
warp_mode = cv2.MOTION_EUCLIDEAN
# Define 2x3 matrix and initialize the matrix to identity matrix I (eye)
warp_matrix = np.eye(2, 3, dtype=np.float32)
# Specify the number of iterations.
number_of_iterations = 5000
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-3
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC (im1_gray, im2_gray, warp_matrix, warp_mode, criteria, None, 1)
# Warp im2 using affine
im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
# write output
cv2.imwrite(outfile, im2_aligned)
# Print rotation angle
row1_col0 = warp_matrix[0,1]
angle = math.degrees(math.asin(row1_col0))
print(angle)
Result:
Resulting Angle of Rotation (in deg):
-0.3102187026194794
Note: you can change the background colour in warpAffine to white if desired.
Also, make the termination epsilon smaller by an order of magnitude or two for more accuracy, at the cost of longer processing times.
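For example, an illustrative variation of the warp call above, using warpAffine's borderValue parameter to fill the background with white:

im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1], sz[0]),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP,
                             borderValue=(255, 255, 255))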
The other Rigid Affine approach that I mentioned in my comments earlier is to use ORB feature matching, filter the key points, then use estimateAffinePartial2D() to get the rigid affine matrix. Then use that to warp the image. For large angles this seems to me to be more reliable than the ECC method. But the ECC method seems more accurate for small rotations.
import cv2
import numpy as np
import math
import sys

MAX_FEATURES = 10000
GOOD_MATCH_PERCENT = 0.15
DIFFY_THRESH = 2

# Get the image files from the command line arguments
# These are full paths to the images
# image2 will be warped to match image1
# argv[0] is the name of the script
file1 = sys.argv[1]
file2 = sys.argv[2]
outFile = sys.argv[3]

# Read image1
image1 = cv2.imread(file1, cv2.IMREAD_COLOR)

# Read image2 to be warped to match image1
image2 = cv2.imread(file2, cv2.IMREAD_COLOR)

# Convert images to grayscale
image1Gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
image2Gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)

# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(image1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(image2Gray, None)

# Match features.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher.match(descriptors1, descriptors2, None)

# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)

# Remove not-so-good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
#print('numgood', numGoodMatches)

# Extract location of good matches and filter by diffy if rotation is small
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
    points1[i, :] = keypoints1[match.queryIdx].pt
    points2[i, :] = keypoints2[match.trainIdx].pt

# Initialize empty arrays for newpoints1, newpoints2 and the mask
newpoints1 = np.empty(shape=[0, 2], dtype=np.float32)
newpoints2 = np.empty(shape=[0, 2], dtype=np.float32)
matches_Mask = [0] * len(matches)

count = 0
for i in range(len(matches)):
    pt1 = points1[i]
    pt2 = points2[i]
    pt1x, pt1y = zip(*[pt1])
    pt2x, pt2y = zip(*[pt2])
    diffy = np.float32(np.float32(pt2y) - np.float32(pt1y))
    if abs(diffy) < DIFFY_THRESH:
        # keep the points as float32 so coordinates larger than 255 are not truncated
        newpoints1 = np.append(newpoints1, [pt1], axis=0).astype(np.float32)
        newpoints2 = np.append(newpoints2, [pt2], axis=0).astype(np.float32)
        matches_Mask[i] = 1
        count += 1

# Find the affine transformation
# note the swap of order of newpoints here so that image2 is warped to match image1
m, inliers = cv2.estimateAffinePartial2D(newpoints2, newpoints1)

# Use the affine transform to warp im2 to match im1
height, width, channels = image1.shape
image2Reg = cv2.warpAffine(image2, m, (width, height))

# Write aligned image to disk.
cv2.imwrite(outFile, image2Reg)

# Print angle
row1_col0 = m[1, 0]
print('row1_col0:', row1_col0)
angle = math.degrees(math.asin(row1_col0))
print('angle', angle)
Result Image:
Result Rotation Angle:
-0.6123936361765413
After some trial and error I determined that I don't need to find a homography in order to align my images properly. Since my images only need to be scaled and rotated slightly, my best option is to find the outermost points of the drawing title block and align one image to the other with a transform.
My approach is to use the Harris corner-finding function to find all of the corners on the drawing, then do a simple calculation to find the points that are the shortest distance from the corners of the drawing canvas (these are the outside corners of the drawing title block). I then take three of those points (top left, top right, and bottom left) and use a transform to scale/rotate one drawing to the other.
Below is the code that I used:
import cv2
import numpy as np
import math
img1 = cv2.imread('reference.jpg')
img2 = cv2.imread('to-be-aligned.jpg')
#Find the corner points of img1
h1,w1,c=img1.shape
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray1 = np.float32(gray1)
dst1 = cv2.cornerHarris(gray1,5,3,0.04)
ret1, dst1 = cv2.threshold(dst1,0.1*dst1.max(),255,0)
dst1 = np.uint8(dst1)
ret1, labels1, stats1, centroids1 = cv2.connectedComponentsWithStats(dst1)
criteria1 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners1 = cv2.cornerSubPix(gray1,np.float32(centroids1),(5,5),(-1,-1),criteria1)
#Find the corner points of img2
h2,w2,c=img2.shape
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
gray2 = np.float32(gray2)
dst2 = cv2.cornerHarris(gray2,5,3,0.04)
ret2, dst2 = cv2.threshold(dst2,0.1*dst2.max(),255,0)
dst2 = np.uint8(dst2)
ret2, labels2, stats2, centroids2 = cv2.connectedComponentsWithStats(dst2)
criteria2 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners2 = cv2.cornerSubPix(gray2,np.float32(centroids2),(5,5),(-1,-1),criteria2)
#Find the top left, top right, and bottom left outer corners of the drawing frame for img1
a1=[0,0]
b1=[w1,0]
c1=[0,h1]
a1_dist=[]
b1_dist=[]
c1_dist=[]
for i in corners1:
    temp_a1 = math.sqrt((i[0]-a1[0])**2 + (i[1]-a1[1])**2)
    temp_b1 = math.sqrt((i[0]-b1[0])**2 + (i[1]-b1[1])**2)
    temp_c1 = math.sqrt((i[0]-c1[0])**2 + (i[1]-c1[1])**2)
    a1_dist.append(temp_a1)
    b1_dist.append(temp_b1)
    c1_dist.append(temp_c1)
print("Image #1 (reference):")
print("Top Left:")
print(corners1[a1_dist.index(min(a1_dist))])
print("Top Right:")
print(corners1[b1_dist.index(min(b1_dist))])
print("Bottom Left:")
print(corners1[c1_dist.index(min(c1_dist))])
#Find the top left, top right, and bottom left outer corners of the drawing frame for img2
a2=[0,0]
b2=[w2,0]
c2=[0,h2]
a2_dist=[]
b2_dist=[]
c2_dist=[]
for i in corners2:
    temp_a2 = math.sqrt((i[0]-a2[0])**2 + (i[1]-a2[1])**2)
    temp_b2 = math.sqrt((i[0]-b2[0])**2 + (i[1]-b2[1])**2)
    temp_c2 = math.sqrt((i[0]-c2[0])**2 + (i[1]-c2[1])**2)
    a2_dist.append(temp_a2)
    b2_dist.append(temp_b2)
    c2_dist.append(temp_c2)
print("Image #2 (image to align):")
print("Top Left:")
print(corners2[a2_dist.index(min(a2_dist))])
print("Top Right:")
print(corners2[b2_dist.index(min(b2_dist))])
print("Bottom Left:")
print(corners2[c2_dist.index(min(c2_dist))])
#Create the points for img1
point1 = np.zeros((3,2), dtype=np.float32)
point1[0][0]=corners1[a1_dist.index(min(a1_dist))][0]
point1[0][1]=corners1[a1_dist.index(min(a1_dist))][1]
point1[1][0]=corners1[b1_dist.index(min(b1_dist))][0]
point1[1][1]=corners1[b1_dist.index(min(b1_dist))][1]
point1[2][0]=corners1[c1_dist.index(min(c1_dist))][0]
point1[2][1]=corners1[c1_dist.index(min(c1_dist))][1]
#Create the points for img2
point2 = np.zeros((3,2), dtype=np.float32)
point2[0][0]=corners2[a2_dist.index(min(a2_dist))][0]
point2[0][1]=corners2[a2_dist.index(min(a2_dist))][1]
point2[1][0]=corners2[b2_dist.index(min(b2_dist))][0]
point2[1][1]=corners2[b2_dist.index(min(b2_dist))][1]
point2[2][0]=corners2[c2_dist.index(min(c2_dist))][0]
point2[2][1]=corners2[c2_dist.index(min(c2_dist))][1]
#Make sure points look ok:
print(point1)
print(point2)
#Transform the image
m = cv2.getAffineTransform(point2,point1)
image2Reg = cv2.warpAffine(img2, m, (w1, h1), borderValue=(255,255,255))
#Highlight found points in red:
img1[dst1>0.1*dst1.max()]=[0,0,255]
img2[dst2>0.1*dst2.max()]=[0,0,255]
#Output the images:
cv2.imwrite("output-img1-harris.jpg", img1)
cv2.imwrite("output-img2-harris.jpg", img2)
cv2.imwrite("output-harris-transform.jpg",image2Reg)

Detecting similar objects in an Image

I have to detect rectangles of the same size, colour, and area in an image for a project. This is an example image.
I don't know how to go about it. I am using OpenCV and Python, which I am new to.
I tried SIFT and SURF feature descriptors to get similar features. I also tried template matching, but it is not feasible in this case because the train image could change. The main idea is to get those similar rectangles from the provided image.
I am using Python 3 and OpenCV 3.
I took this code from the OpenCV tutorial site.
import numpy as np
import cv2
from matplotlib import pyplot as plt

img1 = cv2.imread('template.jpg', 0)  # queryImage
img2 = cv2.imread('input.jpg', 0)     # trainImage

sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

# Apply ratio test
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])

# cv2.drawMatchesKnn expects a list of lists as matches.
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
Result image:
Here's a simple approach.
generate a list of the unique colours in the image
for each unique colour
make everything that colour in the image white and everything else black
run findContours() and compare shapes and sizes
end for
For increased fun, do each colour in a separate thread :-)
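A rough sketch of those steps (untested; the file name is a placeholder, and it assumes the image has a small palette of flat colours like the example):

import cv2
import numpy as np

img = cv2.imread('input.jpg')

# generate a list of the unique colours in the image
unique_colours = np.unique(img.reshape(-1, 3), axis=0)

for colour in unique_colours:
    lo = hi = tuple(int(c) for c in colour)
    # make everything of that colour white and everything else black
    mask = cv2.inRange(img, lo, hi)
    # run findContours() and compare shapes and sizes
    res = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = res[-2]  # works with both the OpenCV 3 and OpenCV 4 return signatures
    for c in contours:
        area = cv2.contourArea(c)
        x, y, w, h = cv2.boundingRect(c)
        # group contours with similar area and aspect ratio here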

TypeError: data type not understood opencv python

This is my Python 3 / OpenCV 3 code. I get the error below when I run it (I haven't finished the script yet). Can someone help?
line 19, in <module>
matches = bf.match(np.array(kpTrain, desTrain))
TypeError: data type not understood
This is my code:
import numpy as np
import cv2
camera = cv2.VideoCapture(0)
orb = cv2.ORB_create()
img = cv2.imread('/home/shar/home.jpg')
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kpTrain = orb.detect(imggray,None)
kpTrain, desTrain = orb.compute(imggray, kpTrain)
ret, imgCamColor = camera.read()
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(np.array(kpTrain, desTrain))
Again here, as in this question, you are trying to match the keypoints and descriptors from a single image. The matching of descriptors is done between two images:
1. Find Keypoints in 2 images
2. Calculate descriptors for the two images
3. Perform the matching.
In your case it should be something like this:
import numpy as np
import cv2
orb = cv2.ORB_create()
img = cv2.imread('/home/shar/home.jpg')
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# load second image in grayscale
imggray2=cv2.imread('/path/to/image.jpg',0)
#Detector and descriptors for 1st image
kpTrain = orb.detect(imggray,None)
kpTrain, desTrain = orb.compute(imggray, kpTrain)
#Detector and descriptors for 2nd image
kpTrain2 = orb.detect(imggray2,None)
kpTrain2, desTrain2 = orb.compute(imggray2, kpTrain2)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(desTrain,desTrain2)
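If you then want to check the result visually, an optional follow-up (reusing only the variables and functions already shown above) is to sort the matches by distance and draw the best ones:

matches = sorted(matches, key=lambda x: x.distance)
img3 = cv2.drawMatches(imggray, kpTrain, imggray2, kpTrain2, matches[:20], None, flags=2)
cv2.imwrite('matches.jpg', img3)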
