ValueError: total size of new array must be unchanged - python

I am trying to execute the code from this URL. However, I started getting this error:
des = np.array(des,np.float32).reshape((1,128))
ValueError: total size of new array must be unchanged
I have not made any major changes, but here is what I have:
import scipy as sp
import numpy as np
import cv2
# Load the images
img =cv2.imread("image1.png")
# Convert them to grayscale
imgg =cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# SURF extraction
surf = cv2.FeatureDetector_create("SURF")
surfDescriptorExtractor = cv2.DescriptorExtractor_create("SURF")
kp = surf.detect(imgg)
kp, descriptors = surfDescriptorExtractor.compute(imgg,kp)
# Setting up samples and responses for kNN
samples = np.array(descriptors)
responses = np.arange(len(kp),dtype = np.float32)
# kNN training
knn = cv2.KNearest()
knn.train(samples,responses)
modelImages = ["image2.png"]
for modelImage in modelImages:
    # Now loading a template image and searching for similar keypoints
    template = cv2.imread(modelImage)
    templateg = cv2.cvtColor(template,cv2.COLOR_BGR2GRAY)
    keys = surf.detect(templateg)
    keys,desc = surfDescriptorExtractor.compute(templateg, keys)
    for h,des in enumerate(desc):
        des = np.array(des,np.float32).reshape((1,128))
        retval, results, neigh_resp, dists = knn.find_nearest(des,1)
        res,dist = int(results[0][0]),dists[0][0]
        if dist<0.1: # draw matched keypoints in red color
            color = (0,0,255)
        else: # draw unmatched in blue color
            #print dist
            color = (255,0,0)
        #Draw matched key points on original image
        x,y = kp[res].pt
        center = (int(x),int(y))
        cv2.circle(img,center,2,color,-1)
        #Draw matched key points on template image
        x,y = keys[h].pt
        center = (int(x),int(y))
        cv2.circle(template,center,2,color,-1)
    cv2.imshow('img',img)
    cv2.imshow('tm',template)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Any help on this is greatly appreciated.

I had the same issue; it turned out I had changed the data length. The product of the reshape arguments must equal the number of elements in the array being reshaped. (SURF descriptors are 64 floats by default, and 128 only in extended mode, which is why a hard-coded 128 can fail.)
In your case:
des = np.array(des,np.float32).reshape(1, len(des))
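As a minimal illustration of the rule, using a dummy 64-element descriptor (reshape(1, -1) lets NumPy infer the second dimension, so it can never mismatch):
import numpy as np
des = np.zeros(64, np.float32)
# des.reshape((1, 128))  # would raise: total size of new array must be unchanged
row = des.reshape(1, -1)  # shape (1, 64), inferred from the data length
print(row.shape)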


How to crop face detected via Mediapipe in Python

I have a problem with MediaPipe coordinates. What I want to do is crop the bounding box of the detected face.
https://google.github.io/mediapipe/solutions/face_detection.html
EXAMPLE OF PROCEDURE
And I use the code below:
import cv2
import mediapipe as mp
import matplotlib.pyplot as plt
mp_face_detection = mp.solutions.face_detection
# Setup the face detection function.
face_detection = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.5)
# Initialize the mediapipe drawing class.
mp_drawing = mp.solutions.drawing_utils
# Read an image from the specified path.
sample_img = cv2.imread('12345.jpg')
# Specify a size of the figure.
plt.figure(figsize = [10, 10])
# Display the sample image, also convert BGR to RGB for display.
plt.title("Sample Image");plt.axis('off');plt.imshow(sample_img[:,:,::-1]);plt.show()
face_detection_results = face_detection.process(sample_img[:,:,::-1])
# Check if the face(s) in the image are found.
if face_detection_results.detections:
    # Iterate over the found faces.
    for face_no, face in enumerate(face_detection_results.detections):
        # Display the number of the face we are iterating over.
        print(f'FACE NUMBER: {face_no+1}')
        print('---------------------------------')
        # Display the face confidence.
        print(f'FACE CONFIDENCE: {round(face.score[0], 2)}')
        # Get the face bounding box and face key points coordinates.
        face_data = face.location_data
        # Display the face bounding box coordinates.
        print(f'\nFACE BOUNDING BOX:\n{face_data.relative_bounding_box}')
        # Iterate twice, as we only want to display the first two key points of each detected face.
        for i in range(2):
            # Display the found normalized key points.
            print(f'{mp_face_detection.FaceKeyPoint(i).name}:')
            print(f'{face_data.relative_keypoints[mp_face_detection.FaceKeyPoint(i).value]}')
So the results are in this form:
FACE NUMBER: 1
FACE CONFIDENCE: 0.89
FACE BOUNDING BOX:
xmin: 0.2784463167190552
ymin: 0.3503175973892212
width: 0.1538110375404358
height: 0.23071599006652832
RIGHT_EYE:
x: 0.3447018265724182
y: 0.4222590923309326
LEFT_EYE:
x: 0.39114508032798767
y: 0.3888365626335144
And I want to CROP the image to the coordinates of the BOX, like:
face = Image.fromarray(image).crop(face_rect)
or any other crop procedure.
My problem is that I can't get the coordinates of the detected item from MediaPipe.
Any ideas?
Got the solution, guys:
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
h, w, c = sample_img.shape
print('width: ', w)
print('height: ', h)
# The normalized bounding box from the detection loop above
data = face_data.relative_bounding_box
xleft = int(data.xmin * w)
xtop = int(data.ymin * h)
xright = int(data.width * w + xleft)
xbottom = int(data.height * h + xtop)
detected_faces = [(xleft, xtop, xright, xbottom)]
for n, face_rect in enumerate(detected_faces):
    face = Image.fromarray(sample_img).crop(face_rect)
    face_np = np.asarray(face)
    plt.imshow(face_np)
Assume the objective is to crop a single face detected by MediaPipe. Note the [0], indicating that we are only interested in the first face:
results = mp_face.process(image_input)
detection=results.detections[0]
By default, MediaPipe returns detection data in normalized form, so we have to convert it to the original size by multiplying the x values by the width and the y values by the height of the input image.
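For illustration, a minimal sketch of that conversion done by hand (assuming the detection, image_cols, and image_rows variables defined in the complete code below):
box = detection.location_data.relative_bounding_box
xleft = int(box.xmin * image_cols)
ytop = int(box.ymin * image_rows)
xright = int((box.xmin + box.width) * image_cols)
ybot = int((box.ymin + box.height) * image_rows)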
We can also employ the _normalized_to_pixel_coordinates helper that ships with MediaPipe:
relative_bounding_box = location.relative_bounding_box
rect_start_point = _normalized_to_pixel_coordinates(
    relative_bounding_box.xmin, relative_bounding_box.ymin, image_cols,
    image_rows)
rect_end_point = _normalized_to_pixel_coordinates(
    relative_bounding_box.xmin + relative_bounding_box.width,
    relative_bounding_box.ymin + relative_bounding_box.height, image_cols,
    image_rows)
This essentially produces:
xleft,ytop=rect_start_point
xright,ybot=rect_end_point
In other words, ytop, ybot, xleft, and xright represent face_top, face_bottom, face_left, and face_right, respectively.
Since the image is simply a 3D NumPy array, we can crop it as below:
crop_img = image_input[ytop: ybot, xleft: xright]
The complete code is below:
import cv2
import mediapipe as mp
from mediapipe.python.solutions.drawing_utils import _normalized_to_pixel_coordinates
# load face detection model
mp_face = mp.solutions.face_detection.FaceDetection(
    model_selection=1, # model selection
    min_detection_confidence=0.5 # confidence threshold
)
dframe = cv2.imread('xx.png') # read in color; a grayscale read would break the shape unpacking below
image_rows, image_cols, _ = dframe.shape
image_input = cv2.cvtColor(dframe, cv2.COLOR_BGR2RGB)
results = mp_face.process(image_input)
detection=results.detections[0]
location = detection.location_data
relative_bounding_box = location.relative_bounding_box
rect_start_point = _normalized_to_pixel_coordinates(
    relative_bounding_box.xmin, relative_bounding_box.ymin, image_cols,
    image_rows)
rect_end_point = _normalized_to_pixel_coordinates(
    relative_bounding_box.xmin + relative_bounding_box.width,
    relative_bounding_box.ymin + relative_bounding_box.height, image_cols,
    image_rows)
## Let's draw a bounding box
color = (255, 0, 0)
thickness = 2
cv2.rectangle(image_input, rect_start_point, rect_end_point, color, thickness)
xleft,ytop=rect_start_point
xright,ybot=rect_end_point
crop_img = image_input[ytop: ybot, xleft: xright]
cv2.imwrite('crop_image0.jpg', cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR)) # convert back to BGR for imwrite

Unique Color Detection and Storing images dynamically

Given an image, find the unique colors in it and write an output image for each unique color.
In each output, all pixels that do not have that unique color should be marked white.
For example, if an image has 3 colors, the output folder should contain three images, each isolating one color. Using OpenCV & Python.
I've created the unique color list using my own methods. What I want is a count of each of those unique colors in the sample.png image and the corresponding output images, as described above.
I believe the code below (with comments) should help you with this!
Feel free to follow up if any of the code is unclear!
import numpy as np
import cv2 as cv
# Load image and convert it from BGR (opencv default) to RGB
fpath = "dog.png" # TODO: replace with your path
IMG = cv.cvtColor(cv.imread(fpath), cv.COLOR_BGR2RGB)
# Get dimensions and reshape into (H * W, C) vector - i.e. a long vector, where each element is a tuple corresponding to a color!
H, W, C = IMG.shape
IMG_FLATTENED = np.vstack([IMG[:, w, :] for w in range(W)])
# Get unique colors using np.unique function, and their counts
colors, counts = np.unique(IMG_FLATTENED, axis=0, return_counts=True)
# Jointly loop through colors and counts
for color, count in zip(colors, counts):
    print("COLOR: {}, COUNT: {}".format(color, count))
    # Create placeholder image and mark all pixels as white
    SINGLE_COLOR = (255 * np.ones(IMG.shape)).astype(np.uint8) # Make sure casted to uint8
    # Compute binary mask of pixel locations where color is, and set color in new image
    color_idx = np.all(IMG[..., :] == color, axis=-1)
    SINGLE_COLOR[color_idx, :] = color
    # Write file to output with color and counts specified
    # (convert back to BGR, since cv.imwrite expects BGR channel order)
    cv.imwrite("color={}_count={}.png".format(color, count),
               cv.cvtColor(SINGLE_COLOR, cv.COLOR_RGB2BGR))
Ack, he beat me to it. Well, here's what I've got.
Oh no, I don't think the line
blank[img == color] = img[img == color]
behaves how I think it does. I think it just coincidentally works for this case. I'll edit the code with a solution I'm more confident works for all cases.
Original Image
import cv2
import numpy as np
# load image
img = cv2.imread("circles.png");
# get uniques
unique_colors, counts = np.unique(img.reshape(-1, img.shape[-1]), axis=0, return_counts=True);
# split off each color
splits = [];
for a in range(len(unique_colors)):
    # get the color
    color = unique_colors[a];
    blank = np.zeros_like(img);
    mask = cv2.inRange(img, color, color); # edited line 1
    blank[mask == 255] = img[mask == 255]; # edited line 2
    # show
    cv2.imshow("Blank", blank);
    cv2.waitKey(0);
    # save each color with its count
    file_str = "";
    for b in range(3):
        file_str += str(color[b]) + "_";
    file_str += str(counts[a]) + ".png";
    cv2.imwrite(file_str, blank);

Line detection issue - OpenCV in Python

I have written the following script with which I aim to detect lines in Gazebo (a simulation environment):
#!/usr/bin/env python
# rospy for the subscriber
import rospy
# ROS Image message
from sensor_msgs.msg import Image
# ROS Image message -> OpenCV2 image converter
from cv_bridge import CvBridge, CvBridgeError
# OpenCV2 for saving an image
import cv2
import matplotlib.pyplot as plt
import numpy as np
def gradient(img):
    # grayscale the image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # gaussian blur of image with a 5x5 kernel
    gauss = cv2.GaussianBlur(gray,(5,5),0)
    # Return the canny of the image
    return cv2.Canny(gauss,20,30)
def region_of_interest(img):
    # Height of image (number of rows)
    height = img.shape[0]
    # Width of the image (number of columns)
    width = img.shape[1]
    # Create an array of polygons to use for the masking of the canny image
    polygons = np.array([
        [(200,height), (200,500), (600,500), (600,height)]
    ])
    # Create the mask image's background (black color)
    mask_bg = np.zeros_like(img)
    # Create the mask image (image with black background and white region of interest)
    mask = cv2.fillPoly(mask_bg, polygons, 255)
    # Isolate the area of interest using the bitwise AND of the mask and canny image
    masked_image = cv2.bitwise_and(img, mask)
    # Return the updated image
    return masked_image
def make_coordinates(img, line_parameters):
    # Extract the average slope and intercept of the line
    slope, intercept = line_parameters
    # Coordinate y(1) of the calculated line
    y1 = img.shape[0]
    # Coordinate y(2) of the calculated line
    y2 = int(y1*0.5)
    # Coordinate x(1) of the calculated line
    x1 = int((y1-intercept)/slope)
    # Coordinate x(2) of the calculated line
    x2 = int((y2-intercept)/slope)
    # Return the coordinates of the average line
    return np.array([x1,y1,x2,y2])
def average_slope_intercep(img,lines):
    # Create an empty list containing the coordinates of the detected line
    line_fit = []
    # Loop through all the detected lines
    for line in lines:
        # Store the coordinates of the detected lines into a 1D array of 4 elements
        x1,y1,x2,y2 = line.reshape(4)
        # Create a line y = mx+b based on the coordinates
        parameters = np.polyfit((x1,x2),(y1,y2),1)
        # Extract the slope m
        slope = parameters[0]
        # Extract the intercept b
        intercept = parameters[1]
        # Add elements on the list
        line_fit.append((slope,intercept))
        # Check slope of line
        # if slope < 0:
        #     continue
        # else:
        #     continue
    # Calculate the average of the line fit parameters list
    line_fit_average = np.average(line_fit,axis=0)
    # Extract the coordinates of the calculated line
    main_line = make_coordinates(img,line_fit_average)
    return np.array([main_line])
def display_lines(img,lines):
    # Create a mask image that will have the drawn lines
    line_image = np.zeros_like(img)
    # If no lines were detected
    if lines is not None:
        # Loop through all the lines
        for line in lines:
            # Store the coordinates of the first and last point of the lines into 1D arrays
            x1, y1, x2, y2 = line.reshape(4)
            # Draw the lines on the image with blue color and thickness of 10
            cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
    # Return the mask image with the drawn lines
    return line_image
def image_callback(msg):
    # print("Received an image!")
    # Instantiate CvBridge
    bridge = CvBridge()
    try:
        # Convert your ROS Image message to OpenCV2
        frame = bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        print(e)
    else:
        # Copy of the original frame
        frame_copy = np.copy(frame)
        # Canny of image
        canny_frame = gradient(frame_copy)
        # Apply mask in region of interest
        cropped_image = region_of_interest(canny_frame)
        # Apply Hough Transform on the region of interest
        lines = cv2.HoughLinesP(cropped_image,1,np.pi/180,30,np.array([]),minLineLength=10,maxLineGap=2)
        # Calculate the average slope of the detected lines
        averaged_lines = average_slope_intercep(frame_copy,lines)
        # Create a mask image with the drawn lines
        line_image = display_lines(frame_copy,averaged_lines)
        # Plot lines on the camera feed frame
        combo_image = cv2.addWeighted(frame_copy,0.8,line_image,1,1)
        # Show manipulated image feed
        cv2.imshow("Result feed", combo_image)
        # plt.imshow(canny_frame)
        cv2.waitKey(1)
        # plt.show()
def main():
    rospy.init_node('image_listener')
    # Define your image topic
    image_topic = "rover/camera1/image_raw"
    # Set up your subscriber and define its callback
    rospy.Subscriber(image_topic, Image, image_callback)
    # Spin until ctrl + c
    rospy.spin()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
The code is integrated in ROS, so please focus your attention on the image_callback function. My issue is that the line that I want to detect is quite noisy and I cannot figure out how to detect it correctly.
To be more specific, from the following frame,
Original Frame
I get this image after gaussian blur and the canny algorithm,
Canny Frame
How could I filter out the "noise" I see in the canny frame? I have played a lot with the Canny and Gaussian blur parameters, but all I have achieved is removing gradients instead of actually making the image less "noisy".
This method might help you to remove noise from the frame.
import cv2
import numpy as np
from skimage.morphology import skeletonize
def get_skeleton_image(threshold_image):
    skeleton = skeletonize(threshold_image / 255)
    skeleton = skeleton.astype(np.uint8)
    skeleton *= 255
    return skeleton

image = cv2.imread("road.png", 0)
image = cv2.resize(image, (300, 300))
bilateral = cv2.bilateralFilter(image, 15, 100, 100)
cv2.imshow("bilateral_image", bilateral)
canny_image = cv2.Canny(bilateral, 20, 30)
cv2.imshow("canny_image", canny_image)
kernel = np.ones((10, 10))
dilate_image = cv2.dilate(canny_image, kernel, iterations=1)
erode_image = cv2.erode(dilate_image, kernel, iterations=1)
cv2.imshow("erode_image", erode_image)
skeleton_image = get_skeleton_image(erode_image)
cv2.imshow("skeleton_image", skeleton_image)
cv2.waitKey(0)
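To tie this back into the original pipeline, a hedged sketch of feeding the cleaned-up skeleton into the same Hough transform call the question uses (parameters copied verbatim from the question, not tuned):
lines = cv2.HoughLinesP(skeleton_image, 1, np.pi / 180, 30,
                        np.array([]), minLineLength=10, maxLineGap=2)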

Feature-based image alignment issue

I am trying to learn OpenCV in order to improve a script I wrote for comparing engineering drawings. I am using the code (see below) found on this tutorial but I am having zero success with it. In the tutorial the author uses the example of a blank form for the reference image and a photo of the completed form as the image to align. My situation is very similar because I am attempting to use a blank drawing title block as my reference image and a scanned image of a drawing as my image to align.
My goal is to use OpenCV to clean up the scanned engineering drawings so that they are aligned properly but no matter what I try in the MAX_FEATURES and GOOD_MATCH_PERCENT parameters, I get an image that looks like a black and white star burst. Also, when I review the "matches.jpg" file generated by the script, it appears that there are no correct matches. I have tried multiple drawings and I get the same results.
Can anyone see a reason why this script would not work in the way I am trying to use it?
from __future__ import print_function
import cv2
import numpy as np
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2):
    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)
    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
    cv2.imwrite("matches.jpg", imMatches)
    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt
    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))
    return im1Reg, h

if __name__ == '__main__':
    # Read reference image
    refFilename = "form.jpg"
    print("Reading reference image : ", refFilename)
    imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)
    # Read image to be aligned
    imFilename = "scanned-form.jpg"
    print("Reading image to align : ", imFilename)
    im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
    print("Aligning images ...")
    # The registered image will be stored in imReg.
    # The estimated homography will be stored in h.
    imReg, h = alignImages(im, imReference)
    # Write aligned image to disk.
    outFilename = "aligned.jpg"
    print("Saving aligned image : ", outFilename)
    cv2.imwrite(outFilename, imReg)
    # Print estimated homography
    print("Estimated homography : \n", h)
Template Image:
Image to Align:
Expected output Image:
Here is one way in Python/OpenCV, using a rigid affine transformation (scale, rotation, and translation only; no skew or perspective) to warp one image to match the other. It uses findTransformECC() (Enhanced Correlation Coefficient Maximization) to get the rotation matrix, and then uses warpAffine() to do the rigid warping.
Template:
Image to be warped:
import cv2
import numpy as np
import math
import sys
# Get the image files from the command line arguments
# These are full paths to the images
# image2 will be warped to match image1
# argv[0] is name of script
image1 = sys.argv[1]
image2 = sys.argv[2]
outfile = sys.argv[3]
# Read the images to be aligned
# im2 is to be warped to match im1
im1 = cv2.imread(image1);
im2 = cv2.imread(image2);
# Convert images to grayscale for computing the rotation via ECC method
im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
# Find size of image1
sz = im1.shape
# Define the motion model - euclidean is rigid (SRT)
warp_mode = cv2.MOTION_EUCLIDEAN
# Define 2x3 matrix and initialize the matrix to identity matrix I (eye)
warp_matrix = np.eye(2, 3, dtype=np.float32)
# Specify the number of iterations.
number_of_iterations = 5000;
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-3;
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC (im1_gray, im2_gray, warp_matrix, warp_mode, criteria, None, 1)
# Warp im2 using affine
im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP);
# write output
cv2.imwrite(outfile, im2_aligned)
# Print rotation angle
row1_col0 = warp_matrix[0,1]
angle = math.degrees(math.asin(row1_col0))
print(angle)
Result:
Resulting Angle of Rotation (in deg):
-0.3102187026194794
Note: you can change the background color in warpAffine to white (borderValue) if desired.
Also, making the termination epsilon smaller by an order of magnitude or two gives more accuracy, at the cost of longer processing times.
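A hedged sketch of both tweaks applied to the script above (the 1e-5 epsilon is an illustrative value, not a tuned recommendation):
termination_eps = 1e-5
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
(cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, None, 1)
im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1], sz[0]),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP,
                             borderValue=(255, 255, 255))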
The other Rigid Affine approach that I mentioned in my comments earlier is to use ORB feature matching, filter the key points, then use estimateAffinePartial2D() to get the rigid affine matrix. Then use that to warp the image. For large angles this seems to me to be more reliable than the ECC method. But the ECC method seems more accurate for small rotations.
import cv2
import numpy as np
import math
import sys
MAX_FEATURES = 10000
GOOD_MATCH_PERCENT = 0.15
DIFFY_THRESH = 2
# Get the image files from the command line arguments
# These are full paths to the images
# image[2] will be warped to match image[1]
# argv[0] is name of script
file1 = sys.argv[1]
file2 = sys.argv[2]
outFile = sys.argv[3]
# Read image1
image1 = cv2.imread(file1, cv2.IMREAD_COLOR)
# Read image2 to be warped to match image1
image2 = cv2.imread(file2, cv2.IMREAD_COLOR)
# Convert images to grayscale
image1Gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
image2Gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
# Detect ORB features and compute descriptors.
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(image1Gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(image2Gray, None)
# Match features.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
#print('numgood',numGoodMatches)
# Extract location of good matches and filter by diffy if rotation is small
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
    points1[i, :] = keypoints1[match.queryIdx].pt
    points2[i, :] = keypoints2[match.trainIdx].pt
# initialize empty arrays for newpoints1 and newpoints2 and mask
newpoints1 = np.empty(shape=[0, 2], dtype=np.float32)
newpoints2 = np.empty(shape=[0, 2], dtype=np.float32)
matches_Mask = [0] * len(matches)
count=0
for i in range(len(matches)):
    pt1 = points1[i]
    pt2 = points2[i]
    pt1x, pt1y = zip(*[pt1])
    pt2x, pt2y = zip(*[pt2])
    diffy = np.float32( np.float32(pt2y) - np.float32(pt1y) )
    if abs(diffy) < DIFFY_THRESH:
        # keep float32 so coordinates larger than 255 are not truncated
        newpoints1 = np.append(newpoints1, [pt1], axis=0).astype(np.float32)
        newpoints2 = np.append(newpoints2, [pt2], axis=0).astype(np.float32)
        matches_Mask[i]=1
        count += 1
# Find Affine Transformation
# note swap of order of newpoints here so that image2 is warped to match image1
m, inliers = cv2.estimateAffinePartial2D(newpoints2,newpoints1)
# Use affine transform to warp im2 to match im1
height, width, channels = image1.shape
image2Reg = cv2.warpAffine(image2, m, (width, height))
# Write aligned image to disk.
cv2.imwrite(outFile, image2Reg)
# Print angle
row1_col0 = m[1,0]
print('row1_col0:',row1_col0)
angle = math.degrees(math.asin(row1_col0))
print('angle', angle)
Result Image:
Result Rotation Angle:
-0.6123936361765413
After some trial and error I determined that I don't need to find a homography in order to align my images properly. Since my images only need to be scaled and rotated slightly, my best option is to find the outer most points of the drawing title block and align one image to the other with a transform.
My approach is to use the Harris corner finding function to find all of the corners on the drawing, then do a simple calculation to find the points that are the shortest distance to the corners of the drawing canvas (these are the outside corners of the drawing title block). I then take 3 of the points (top left, top right, and bottom left) and use a transform to scale/rotate one drawing to the other.
Below is the code that I used:
import cv2
import numpy as np
import math
img1 = cv2.imread('reference.jpg')
img2 = cv2.imread('to-be-aligned.jpg')
#Find the corner points of img1
h1,w1,c=img1.shape
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray1 = np.float32(gray1)
dst1 = cv2.cornerHarris(gray1,5,3,0.04)
ret1, dst1 = cv2.threshold(dst1,0.1*dst1.max(),255,0)
dst1 = np.uint8(dst1)
ret1, labels1, stats1, centroids1 = cv2.connectedComponentsWithStats(dst1)
criteria1 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners1 = cv2.cornerSubPix(gray1,np.float32(centroids1),(5,5),(-1,-1),criteria1)
#Find the corner points of img2
h2,w2,c=img2.shape
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
gray2 = np.float32(gray2)
dst2 = cv2.cornerHarris(gray2,5,3,0.04)
ret2, dst2 = cv2.threshold(dst2,0.1*dst2.max(),255,0)
dst2 = np.uint8(dst2)
ret2, labels2, stats2, centroids2 = cv2.connectedComponentsWithStats(dst2)
criteria2 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners2 = cv2.cornerSubPix(gray2,np.float32(centroids2),(5,5),(-1,-1),criteria2)
#Find the top left, top right, and bottom left outer corners of the drawing frame for img1
a1=[0,0]
b1=[w1,0]
c1=[0,h1]
a1_dist=[]
b1_dist=[]
c1_dist=[]
for i in corners1:
    temp_a1=math.sqrt((i[0]-a1[0])**2+(i[1]-a1[1])**2)
    temp_b1=math.sqrt((i[0]-b1[0])**2+(i[1]-b1[1])**2)
    temp_c1=math.sqrt((i[0]-c1[0])**2+(i[1]-c1[1])**2)
    a1_dist.append(temp_a1)
    b1_dist.append(temp_b1)
    c1_dist.append(temp_c1)
print("Image #1 (reference):")
print("Top Left:")
print(corners1[a1_dist.index(min(a1_dist))])
print("Top Right:")
print(corners1[b1_dist.index(min(b1_dist))])
print("Bottom Left:")
print(corners1[c1_dist.index(min(c1_dist))])
#Find the top left, top right, and bottom left outer corners of the drawing frame for img2
a2=[0,0]
b2=[w2,0]
c2=[0,h2]
a2_dist=[]
b2_dist=[]
c2_dist=[]
for i in corners2:
    temp_a2=math.sqrt((i[0]-a2[0])**2+(i[1]-a2[1])**2)
    temp_b2=math.sqrt((i[0]-b2[0])**2+(i[1]-b2[1])**2)
    temp_c2=math.sqrt((i[0]-c2[0])**2+(i[1]-c2[1])**2)
    a2_dist.append(temp_a2)
    b2_dist.append(temp_b2)
    c2_dist.append(temp_c2)
print("Image #2 (image to align):")
print("Top Left:")
print(corners2[a2_dist.index(min(a2_dist))])
print("Top Right:")
print(corners2[b2_dist.index(min(b2_dist))])
print("Bottom Left:")
print(corners2[c2_dist.index(min(c2_dist))])
#Create the points for img1
point1 = np.zeros((3,2), dtype=np.float32)
point1[0][0]=corners1[a1_dist.index(min(a1_dist))][0]
point1[0][1]=corners1[a1_dist.index(min(a1_dist))][1]
point1[1][0]=corners1[b1_dist.index(min(b1_dist))][0]
point1[1][1]=corners1[b1_dist.index(min(b1_dist))][1]
point1[2][0]=corners1[c1_dist.index(min(c1_dist))][0]
point1[2][1]=corners1[c1_dist.index(min(c1_dist))][1]
#Create the points for img2
point2 = np.zeros((3,2), dtype=np.float32)
point2[0][0]=corners2[a2_dist.index(min(a2_dist))][0]
point2[0][1]=corners2[a2_dist.index(min(a2_dist))][1]
point2[1][0]=corners2[b2_dist.index(min(b2_dist))][0]
point2[1][1]=corners2[b2_dist.index(min(b2_dist))][1]
point2[2][0]=corners2[c2_dist.index(min(c2_dist))][0]
point2[2][1]=corners2[c2_dist.index(min(c2_dist))][1]
#Make sure points look ok:
print(point1)
print(point2)
#Transform the image
m = cv2.getAffineTransform(point2,point1)
image2Reg = cv2.warpAffine(img2, m, (w1, h1), borderValue=(255,255,255))
#Highlight found points in red:
img1[dst1>0.1*dst1.max()]=[0,0,255]
img2[dst2>0.1*dst2.max()]=[0,0,255]
#Output the images:
cv2.imwrite("output-img1-harris.jpg", img1)
cv2.imwrite("output-img2-harris.jpg", img2)
cv2.imwrite("output-harris-transform.jpg",image2Reg)

How to find max RGB values in a circle using OpenCV?

I have a picture with 9 water droplets that have different color intensities (i.e. they are all green, but different shades of green). The goal is to:
Identify 9 drops
Find relevant values (size, location, RGB values, etc.)
Plot data
I am using SimpleBlobDetector to identify the dots. This outputs the keypoints, which contains relevant information about each blob.
However, I do not know how to access the RGB (or HSV) values for the specific blob. How do you search only the pixels in the blob to determine min/max/avg color values?
Any advice is greatly appreciated!
Here is my full code. It just prints the x_position, y_position, and area of each blob. I've also attached the file I am using:
# Standard imports
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Read image
filename= "C:\Users\Kevin\Pictures\Far 3.jpg"
img = cv2.imread(filename, 0)
img_color = cv2.imread(filename, cv2.IMREAD_ANYCOLOR)
img_c = cv2.resize(img_color,(800,600))
img1 = cv2.resize(img,(800,600))
ret,im = cv2.threshold(img1,120,255,cv2.THRESH_BINARY)
#######################################################
#######################################################
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 50
params.maxThreshold = 150
# Filter by Area.
params.filterByArea = True
params.minArea = 150
params.maxArea = 400
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.2
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
#######################################################
#######################################################
# Detect blobs.
keypoints = detector.detect(im)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(img_c, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show keypoints
cv2.imshow("Keypoints", im_with_keypoints)
x=[]
y=[]
area=[]
for i in range(9):
    xx = keypoints[i].pt[0]
    yy = keypoints[i].pt[1]
    aarea = keypoints[i].size
    print("PT.%d -- x = %f, y = %f, area = %f\n" % (i, xx, yy, aarea))
#######################################################
#######################################################
cv2.waitKey(0)
For each pixel in the color image, the B, G, R values can be read as below. Note that NumPy indexes as [row, column], i.e. [y, x]:
B = img_c[int(yy), int(xx), 0]
G = img_c[int(yy), int(xx), 1]
R = img_c[int(yy), int(xx), 2]
You can take the average B, G, R values of all the pixels in a blob and then find the maximum among the blobs.
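For example, a hedged sketch using the img_c and keypoints variables from the script above: build a filled circular mask per keypoint (keypoint.size is roughly the blob diameter, so size/2 approximates the radius) and let cv2.mean average only the masked pixels:
means = []
for kp in keypoints:
    mask = np.zeros(img_c.shape[:2], dtype=np.uint8)
    center = (int(kp.pt[0]), int(kp.pt[1]))
    radius = max(int(kp.size / 2), 1)
    cv2.circle(mask, center, radius, 255, -1)  # filled circle over the blob
    b, g, r, _ = cv2.mean(img_c, mask=mask)    # average B, G, R inside the mask only
    means.append((b, g, r))
# e.g. the blob with the strongest average green channel:
print(max(means, key=lambda m: m[1]))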
