I am trying to find the closest match of an image against a large list of other images (10,000+). The background color is all white, the camera angle is the same, and the image content shapes are close to each other (see image below). I tried using OpenCV with ORB and BFMatcher with knnMatch to find the closest match, but I am not even close to finding the match I want.
To my understanding, the images need to be grayscale, but in my case I think color would be a very important descriptor?
I am new to both OpenCV and image matching, so can you help me figure out whether I need to use another approach?
import cv2
import os

orb = cv2.ORB_create(nfeatures=1000)  # Detect up to 1000 features per image
bf = cv2.BFMatcher()

# Image to match
findImg = 'captainA.png'
imgCur = cv2.imread(f'Images/{findImg}', 0)
kp1, des1 = orb.detectAndCompute(imgCur, None)

# Loop through all superhero images and find the closest match
images = ["img1.png","img2.png","img3.png","img4.png","img5.png","img6.png","img7.png","img8.png","img9.png","img10.png","img11.png","img12.png"]
matchList = []
names = []
for img in images:
    imgCur = cv2.imread(f'Superheroes/{img}', 0)
    kp2, des2 = orb.detectAndCompute(imgCur, None)
    matches = bf.knnMatch(des1, des2, k=2)
    goodMatches = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:  # Lowe's ratio test: 0.75 defines a good match
            goodMatches.append([m])
    matchList.append(len(goodMatches))
    names.append(img)

matchIdx = matchList.index(max(matchList))
# Name of the matched image
print(names[matchIdx])
What I want to find:
Here is a small piece of code that should do the job.
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Model
import numpy as np
from PIL import Image

base_model = VGG16(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)

def extract(img):
    img = img.resize((224, 224))   # Resize the image
    img = img.convert('RGB')       # Convert the image color space
    x = image.img_to_array(img)    # Reformat the image as an array
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    feature = model.predict(x)[0]  # Extract features
    return feature / np.linalg.norm(feature)

# Iterate through images and extract features
images = ["img1.png","img2.png","img3.png","img4.png","img5.png"...+2000 more]
all_features = np.zeros(shape=(len(images), 4096))

for i in range(len(images)):
    feature = extract(img=Image.open(images[i]))
    all_features[i] = np.array(feature)

# Match image
query = extract(img=Image.open("image_to_match.png"))  # Extract its features
dists = np.linalg.norm(all_features - query, axis=1)   # Calculate the similarity (distance) between images
ids = np.argsort(dists)[:5]                            # Extract the 5 images with the lowest distance
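Assuming the entries in the images list are the file paths, you could then print the closest matches and their distances along these lines:
# Print the file names and distances of the 5 closest images
for i in ids:
    print(images[i], dists[i])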
I am using Python and OpenCV for a brain segmentation project. I have segmented a brain MRI image using K-means segmentation. I want to get each segment resulting from the K-means segmentation as a separate image. Please help me with this.
# K-means segmentation
epsilon = 0.01
number_of_iterations = 50
number_of_clusters = 4
# NOTE: criteria, flags and the attempts count were not shown in the original
# snippet; these are the standard cv2.kmeans settings built from the values above.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
            number_of_iterations, epsilon)
flags = cv2.KMEANS_RANDOM_CENTERS
print(criteria, 'Criteria K_means parameters')
#plt.imshow(criteria)

# K-means segmentation (kmeans_input is the reshaped float32 MRI pixel data)
_, labels, centers = cv2.kmeans(kmeans_input, number_of_clusters, None,
                                criteria, 10, flags)
print(labels.shape, 'k-means segmentation')
#plt.imshow(labels)

# Adopting the labels
labels = labels.flatten('F')
for x in range(number_of_clusters):
    labels[labels == x] = centers[x]
print(labels.shape, 'adopting the labels value')
#plt.imshow(labels)
I would do it using sklearn KMeans segmentation as follows. I show how to create the segmented image and then select one color to present. I create a mask by thresholding that one color and then apply the mask to blacken out the other colors in the segmented image. You can write a loop over each color to get them all (a sketch of that loop is shown after the result images below). It is also possible to use the mask to make the non-color pixels transparent rather than black, but I do not show that here. Or you can just save the binary masks.
Input:
#!/bin/python3.7
from skimage import io
from sklearn import cluster
import sys
import cv2
# read input and convert to range 0-1
image = io.imread('barn.jpg')/255.0
h, w, c = image.shape
# reshape to 1D array
image_2d = image.reshape(h*w, c)
# set number of colors
numcolors = 6
# do kmeans processing
kmeans_cluster = cluster.KMeans(n_clusters=int(numcolors))
kmeans_cluster.fit(image_2d)
cluster_centers = kmeans_cluster.cluster_centers_
cluster_labels = kmeans_cluster.labels_
# need to scale result back to range 0-255
newimage = cluster_centers[cluster_labels].reshape(h, w, c)*255.0
newimage = newimage.astype('uint8')
io.imshow(newimage)
io.show()
# select cluster 3 (cluster indices run from 0 to numcolors-1) and create mask
lower = cluster_centers[3]*255
upper = cluster_centers[3]*255
lower = lower.astype('uint8')
upper = upper.astype('uint8')
mask = cv2.inRange(newimage, lower, upper)
# apply mask to get layer 3
layer3 = newimage.copy()
layer3[mask == 0] = [0,0,0]
io.imshow(layer3)
io.show()
# save kmeans clustered image and layer 3
io.imsave('barn_kmeans.gif', newimage)
io.imsave('barn_kmeans_layer3.gif', layer3)
Clustered Image:
Result for color 3:
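To get all the layers rather than just one, a minimal sketch of the loop (reusing newimage, cluster_centers and numcolors from the script above) would be:
# loop over every cluster, mask it out and save it as its own layer
for i in range(numcolors):
    lower = (cluster_centers[i]*255).astype('uint8')
    upper = (cluster_centers[i]*255).astype('uint8')
    mask = cv2.inRange(newimage, lower, upper)
    layer = newimage.copy()
    layer[mask == 0] = [0,0,0]
    io.imsave('barn_kmeans_layer%d.gif' % i, layer)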
ADDITION:
For a grayscale image, the following works for me.
#!/bin/python3.7
from skimage import io
from sklearn import cluster
import sys
import cv2
# read input and convert to range 0-1
image = io.imread('barn_gray.jpg',as_gray=True)/255.0
h, w = image.shape
# reshape to 1D array
image_2d = image.reshape(h*w,1)
# set number of colors
numcolors = 6
# do kmeans processing
kmeans_cluster = cluster.KMeans(n_clusters=int(numcolors))
kmeans_cluster.fit(image_2d)
cluster_centers = kmeans_cluster.cluster_centers_
cluster_labels = kmeans_cluster.labels_
# need to scale result back to range 0-255
newimage = cluster_centers[cluster_labels].reshape(h, w)*255.0
newimage = newimage.astype('uint8')
io.imshow(newimage)
io.show()
# select cluster 3 (cluster indices run from 0 to numcolors-1) and create mask
# note the cluster numbers and corresponding colors are not constant from run to run
lower = cluster_centers[3]*255
upper = cluster_centers[3]*255
lower = lower.astype('uint8')
upper = upper.astype('uint8')
print(lower)
print(upper)
mask = cv2.inRange(newimage, lower, upper)
# apply mask to get layer 3
layer3 = newimage.copy()
layer3[mask == 0] = [0]
io.imshow(layer3)
io.show()
# save kmeans clustered image and layer 3
io.imsave('barn_gray_kmeans.gif', newimage)
io.imsave('barn_gray_kmeans_layer3.gif', layer3)
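Since the cluster numbering (and therefore which index holds which gray level) changes from run to run, one way to make the indices stable is to sort the centers by intensity right after fitting, before building newimage. A rough sketch:
import numpy as np
# sort cluster centers from darkest to brightest so that index 0 is
# always the darkest segment (illustrative only)
order = np.argsort(cluster_centers[:, 0])
cluster_centers = cluster_centers[order]
remap = np.zeros(numcolors, dtype=int)
remap[order] = np.arange(numcolors)
cluster_labels = remap[cluster_labels]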
I'm building an image similarity program and, as I am a beginner in CV, I talked with an expert who gave me the following recommended steps to get really basic functionality:
Extract keypoints (DoG, Harris, etc.) and local invariant descriptors (SIFT, SURF, etc.) from all images.
Cluster them to form a codebook (bag of visual words dictionary; BOVW)
Quantize the features from each image into a BOVW histogram
Compare the BOVW histograms for each image (typically using chi-squared, cosine, or euclidean distance)
Point number one is easy, but I start getting confused at step 2. This is the code I've written so far:
import cv2
import numpy as np

dictionarySize = 20
BOW = cv2.BOWKMeansTrainer(dictionarySize)

for imgpath in ['testimg/testcropped1.jpg','testimg/testcropped2.jpg','testimg/testcropped3.jpg']:
    img = cv2.imread(imgpath)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dst = cv2.cornerHarris(gray, 2, 3, 0.04)
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)
    kp, des = sift.compute(img, kp)
    img = cv2.drawKeypoints(gray, kp, img)
    cv2.imwrite('%s_keypoints.jpg' % imgpath, img)
    BOW.add(des)
I extract some features using SIFT and then I try to build a BOVW for each image descriptor. The problem is I have no idea if this is correct or how to get the histograms.
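From reading the docs I think the next steps might involve cv2.BOWImgDescriptorExtractor, something roughly like the lines below, but I have no idea whether this is the right way to get the histograms:
# my guess at steps 2-3: cluster the pooled descriptors into a vocabulary,
# then quantize each image into a visual-word histogram
vocabulary = BOW.cluster()
extractor = cv2.BOWImgDescriptorExtractor(sift, cv2.BFMatcher(cv2.NORM_L2))
extractor.setVocabulary(vocabulary)
hist = extractor.compute(gray, sift.detect(gray, None))  # BOVW histogram for one image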
I have been working on a piece of code to create a disparity map.
I don't want to use OpenCV for more than loading / saving the images and converting them to grayscale.
So far, I've managed to implement the algorithm explained on this website. I'm using the version of the algorithm that uses the Sum of Absolute Differences (SAD). To test my implementation, I'm using the stereo images from this dataset.
Here's my code:
import cv2
import numpy as np

# Load the stereo images
img = cv2.imread('bow-view1.png')
img2 = cv2.imread('bow-view5.png')

# Convert stereo images to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# Get the size of the images
# l -> lines
# c -> columns
# v -> channels (RGB)
l, c, v = img.shape

# Initialize arrays
minSAD = np.ones((l, c)) * 1000
sad = np.ones((l, c))
winsad = np.ones((l, c))
disp = np.zeros((l, c))

max_shift = 30

# Set size of the SAD window
w_l = 2
w_c = 2

for shift in range(max_shift):
    print("New Shift: %d" % (shift))
    for u in range(0, l):
        for v in range(0, c):
            # Calculate SAD
            if (u + shift < l):
                sad[u, v] = np.abs((int(gray[u, v]) - int(gray2[u + shift, v])))
                sum_sad = 0
                for d in range(w_l):
                    for e in range(w_c):
                        if (u + d < l and v + e < c):
                            sum_sad += sad[u + d, v + e]
                winsad[u, v] = sum_sad
                # Save disparity
                if (sad[u, v] < minSAD[u, v]):
                    minSAD[u, v] = winsad[u, v]
                    disp[u, v] = shift

print("Process Complete")

# Write disparity map to image
cv2.imwrite('outputHT/disparity/sad.png', disp)
print("Disparity Map Generated")
This is the output generated by that code:
I should get an output similar (or very close to) this:
I've tried several window sizes (in the SAD step), but I keep getting results like this one or images that are all black.
Any answer that helps me figure out the problem or that at least points me in the right direction will be very appreciated!
One thing you are missing here is that all the values in the disp array will be between 0 and 30, which corresponds to nearly black pixels. To map these values into the 0-255 range you have to multiply the shift by 8.
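For example, just before writing the image you could rescale it, roughly like this:
# shift values run from 0 to max_shift (30), so scale them into 0-255
# before saving, otherwise the map looks almost entirely black
disp_vis = (disp * 8).astype('uint8')
cv2.imwrite('outputHT/disparity/sad.png', disp_vis)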
I am trying to use OpenCV with Python. I wrote a descriptor (SIFT, SURF, or ORB) matching code in the C++ version of OpenCV 2.4, and I want to convert this code to OpenCV with Python. I found some documents about how to use OpenCV functions in C++, but for many of the OpenCV functions in Python I could not find how to use them. My current problem is that I don't know how to use "drawMatches" from OpenCV C++ in Python. I found cv2.DRAW_MATCHES_FLAGS_DEFAULT but I have no idea how to use it. Here is my Python code for matching using ORB descriptors:
im1 = cv2.imread(r'C:\boldt.jpg')
im2 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im3 = cv2.imread(r'C:\boldt_resize50.jpg')
im4 = cv2.cvtColor(im3, cv2.COLOR_BGR2GRAY)
orbDetector2 = cv2.FeatureDetector_create("ORB")
orbDescriptorExtractor2 = cv2.DescriptorExtractor_create("ORB")
orbDetector4 = cv2.FeatureDetector_create("ORB")
orbDescriptorExtractor4 = cv2.DescriptorExtractor_create("ORB")
keypoints2 = orbDetector2.detect(im2)
(keypoints2, descriptors2) = orbDescriptorExtractor2.compute(im2,keypoints2)
keypoints4 = orbDetector4.detect(im4)
(keypoints4, descriptors4) = orbDescriptorExtractor4.compute(im4,keypoints4)
matcher = cv2.DescriptorMatcher_create('BruteForce-Hamming')
raw_matches = matcher.match(descriptors2, descriptors4)
img_matches = cv2.DRAW_MATCHES_FLAGS_DEFAULT(im2, keypoints2, im4, keypoints4, raw_matches)
cv2.namedWindow("Match")
cv2.imshow( "Match", img_matches);
Error message of the line "img_matches = cv2.DRAW_MATCHES_FLAGS_DEFAULT(im2, keypoints2, im4, keypoints4, raw_matches)"
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'long' object is not callable
I spent much time searching documentation and examples of using OpenCV functions with Python. However, I am very frustrated because there is very little information about using OpenCV functions in Python. It would be extremely helpful if anyone can tell me where I can find documentation on how to use every function of the OpenCV module in Python. I appreciate your time and help.
I've also written something myself that just uses the OpenCV Python interface and I didn't use scipy. drawMatches is part of OpenCV 3.0.0 and isn't part of OpenCV 2, which is what I'm currently using. Even though I'm late to the party, here's my own implementation that mimics drawMatches to the best of my ability.
I've provided my own images where one is of a camera man, and the other one is the same image but rotated by 55 degrees counter-clockwise.
The basic premise of what I wrote is that I allocate an output RGB image where the amount of rows is the maximum of the two images to accommodate for placing both of the images in the output image and the columns are simply the summation of both the columns together. I place each image in their corresponding spots, then run through a loop of all of the matched keypoints. I extract which keypoints matched between the two images, then extract their (x,y) co-ordinates. I then draw circles at each of the detected locations, then draw a line connecting these circles together.
Bear in mind that the detected keypoint in the second image is with respect to its own co-ordinate system. If you want to place this in the final output image, you need to offset the column co-ordinate by the amount of columns from the first image so that the column co-ordinate is with respect to the co-ordinate system of the output image.
Without further ado:
import numpy as np
import cv2

def drawMatches(img1, kp1, img2, kp2, matches):
    """
    My own implementation of cv2.drawMatches as OpenCV 2.4.9
    does not have this function available but it's supported in
    OpenCV 3.0.0

    This function takes in two images with their associated
    keypoints, as well as a list of DMatch data structures (matches)
    that contains which keypoints matched in which images.

    An image will be produced where a montage is shown with
    the first image followed by the second image beside it.

    Keypoints are delineated with circles, while lines are connected
    between matching keypoints.

    img1,img2 - Grayscale images
    kp1,kp2 - Detected list of keypoints through any of the OpenCV keypoint
              detection algorithms
    matches - A list of matches of corresponding keypoints through any
              OpenCV keypoint matching algorithm
    """

    # Create a new output image that concatenates the two images together
    # (a.k.a) a montage
    rows1 = img1.shape[0]
    cols1 = img1.shape[1]
    rows2 = img2.shape[0]
    cols2 = img2.shape[1]

    out = np.zeros((max([rows1,rows2]), cols1+cols2, 3), dtype='uint8')

    # Place the first image to the left
    out[:rows1,:cols1,:] = np.dstack([img1, img1, img1])

    # Place the next image to the right of it
    out[:rows2,cols1:cols1+cols2,:] = np.dstack([img2, img2, img2])

    # For each pair of points we have between both images
    # draw circles, then connect a line between them
    for mat in matches:

        # Get the matching keypoints for each of the images
        img1_idx = mat.queryIdx
        img2_idx = mat.trainIdx

        # x - columns
        # y - rows
        (x1,y1) = kp1[img1_idx].pt
        (x2,y2) = kp2[img2_idx].pt

        # Draw a small circle at both co-ordinates
        # radius 4
        # colour blue
        # thickness = 1
        cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1)
        cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)

        # Draw a line in between the two points
        # thickness = 1
        # colour blue
        cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)

    # Show the image
    cv2.imshow('Matched Features', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
To illustrate that this works, here are the two images that I used:
I used OpenCV's ORB detector to detect the keypoints, and used the normalized Hamming distance as the distance measure for similarity as this is a binary descriptor. As such:
import numpy as np
import cv2
img1 = cv2.imread('cameraman.png', 0)       # Original image - read as grayscale
img2 = cv2.imread('cameraman_rot55.png', 0) # Rotated image - read as grayscale
# Create ORB detector with 1000 keypoints with a scaling pyramid factor
# of 1.2
orb = cv2.ORB(1000, 1.2)
# Detect keypoints of original image
(kp1,des1) = orb.detectAndCompute(img1, None)
# Detect keypoints of rotated image
(kp2,des2) = orb.detectAndCompute(img2, None)
# Create matcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Do matching
matches = bf.match(des1,des2)
# Sort the matches based on distance. Least distance
# is better
matches = sorted(matches, key=lambda val: val.distance)
# Show only the top 10 matches
drawMatches(img1, kp1, img2, kp2, matches[:10])
This is the image I get:
You can visualize the feature matching in Python as follows. Note the use of the scipy library.
# matching features of two images
import cv2
import sys
import scipy as sp

if len(sys.argv) < 3:
    print 'usage: %s img1 img2' % sys.argv[0]
    sys.exit(1)

img1_path = sys.argv[1]
img2_path = sys.argv[2]

img1 = cv2.imread(img1_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
img2 = cv2.imread(img2_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)

detector = cv2.FeatureDetector_create("SURF")
descriptor = cv2.DescriptorExtractor_create("BRIEF")
matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")

# detect keypoints
kp1 = detector.detect(img1)
kp2 = detector.detect(img2)

print '#keypoints in image1: %d, image2: %d' % (len(kp1), len(kp2))

# descriptors
k1, d1 = descriptor.compute(img1, kp1)
k2, d2 = descriptor.compute(img2, kp2)

print '#keypoints in image1: %d, image2: %d' % (len(d1), len(d2))

# match the keypoints
matches = matcher.match(d1, d2)

# visualize the matches
print '#matches:', len(matches)
dist = [m.distance for m in matches]

print 'distance: min: %.3f' % min(dist)
print 'distance: mean: %.3f' % (sum(dist) / len(dist))
print 'distance: max: %.3f' % max(dist)

# threshold: half the mean
thres_dist = (sum(dist) / len(dist)) * 0.5

# keep only the reasonable matches
sel_matches = [m for m in matches if m.distance < thres_dist]

print '#selected matches:', len(sel_matches)

# #####################################
# visualization of the matches
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)
# place the grayscale images into channel 0, then copy that channel
# into the other two so the montage is a 3-channel image
view[:h1, :w1, 0] = img1
view[:h2, w1:, 0] = img2
view[:, :, 1] = view[:, :, 0]
view[:, :, 2] = view[:, :, 0]

for m in sel_matches:
    # draw the keypoints
    # print m.queryIdx, m.trainIdx, m.distance
    color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])
    cv2.line(view, (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1])), (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1])), color)

cv2.imshow("view", view)
cv2.waitKey()
As the error message says, DRAW_MATCHES_FLAGS_DEFAULT is of type 'long'. It is a constant defined by the cv2 module, not a function. Unfortunately, the function you want, 'drawMatches', only exists in OpenCV's C++ interface.
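For anyone on OpenCV 3.x or later, the Python binding does exist, and a call along these lines (reusing the names from the question) should work:
# OpenCV 3+ only: draw the first 10 matches directly
out = cv2.drawMatches(im2, keypoints2, im4, keypoints4, raw_matches[:10], None)
cv2.imshow("Match", out)
cv2.waitKey(0)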