Detecting missing components using OpenCV and Python

I now have working code that detects each component using template matching; I simply used the code provided by the author at this link (https://www.sicara.fr/blog-technique/object-detection-template-matching).
Here is the code:
import cv2
import numpy as np

DEFAULT_TEMPLATE_MATCHING_THRESHOLD = 0.9

class Template:
    """
    A class defining a template
    """

    def __init__(self, image_path, label, color, matching_threshold=DEFAULT_TEMPLATE_MATCHING_THRESHOLD):
        """
        Args:
            image_path (str): path of the template image path
            label (str): the label corresponding to the template
            color (List[int]): the color associated with the label (to plot detections)
            matching_threshold (float): the minimum similarity score to consider an object is detected by template
                matching
        """
        self.image_path = image_path
        self.label = label
        self.color = color
        self.template = cv2.imread(image_path)
        self.template_height, self.template_width = self.template.shape[:2]
        self.matching_threshold = matching_threshold

image = cv2.imread("PCB_reference.jpg")

templates = [
    Template(image_path="T1.png", label="1", color=(0, 0, 255), matching_threshold=0.88),
    Template(image_path="T2.jpg", label="2", color=(0, 255, 0), matching_threshold=0.8),
    Template(image_path="T3.png", label="3", color=(255, 0, 0), matching_threshold=0.85),
    Template(image_path="T4.png", label="4", color=(0, 180, 200), matching_threshold=0.81),
    Template(image_path="T5.png", label="5", color=(110, 180, 200), matching_threshold=0.91),
    Template(image_path="T6.png", label="6", color=(150, 100, 150), matching_threshold=0.83),
    Template(image_path="T7.png", label="7", color=(0, 100, 150), matching_threshold=0.84),
    Template(image_path="T8.png", label="8", color=(110, 100, 200), matching_threshold=0.96),
]

detections = []
for template in templates:
    template_matching = cv2.matchTemplate(template.template, image, cv2.TM_CCOEFF_NORMED)
    match_locations = np.where(template_matching >= template.matching_threshold)
    for (x, y) in zip(match_locations[1], match_locations[0]):
        match = {
            "TOP_LEFT_X": x,
            "TOP_LEFT_Y": y,
            "BOTTOM_RIGHT_X": x + template.template_width,
            "BOTTOM_RIGHT_Y": y + template.template_height,
            "MATCH_VALUE": template_matching[y, x],
            "LABEL": template.label,
            "COLOR": template.color,
        }
        detections.append(match)
def compute_iou(boxA, boxB):
    xA = max(boxA["TOP_LEFT_X"], boxB["TOP_LEFT_X"])
    yA = max(boxA["TOP_LEFT_Y"], boxB["TOP_LEFT_Y"])
    xB = min(boxA["BOTTOM_RIGHT_X"], boxB["BOTTOM_RIGHT_X"])
    yB = min(boxA["BOTTOM_RIGHT_Y"], boxB["BOTTOM_RIGHT_Y"])
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    boxAArea = (boxA["BOTTOM_RIGHT_X"] - boxA["TOP_LEFT_X"] + 1) * (boxA["BOTTOM_RIGHT_Y"] - boxA["TOP_LEFT_Y"] + 1)
    boxBArea = (boxB["BOTTOM_RIGHT_X"] - boxB["TOP_LEFT_X"] + 1) * (boxB["BOTTOM_RIGHT_Y"] - boxB["TOP_LEFT_Y"] + 1)
    iou = interArea / float(boxAArea + boxBArea - interArea)
    return iou

def non_max_suppression(objects, non_max_suppression_threshold=0.5, score_key="MATCH_VALUE"):
    """
    Filter objects overlapping with IoU over threshold by keeping only the one with maximum score.
    Args:
        objects (List[dict]): a list of objects dictionaries, with:
            {score_key} (float): the object score
            {top_left_x} (float): the top-left x-axis coordinate of the object bounding box
            {top_left_y} (float): the top-left y-axis coordinate of the object bounding box
            {bottom_right_x} (float): the bottom-right x-axis coordinate of the object bounding box
            {bottom_right_y} (float): the bottom-right y-axis coordinate of the object bounding box
        non_max_suppression_threshold (float): the minimum IoU value used to filter overlapping boxes when
            conducting non-max suppression.
        score_key (str): score key in objects dicts
    Returns:
        List[dict]: the filtered list of dictionaries.
    """
    sorted_objects = sorted(objects, key=lambda obj: obj[score_key], reverse=True)
    filtered_objects = []
    for object_ in sorted_objects:
        overlap_found = False
        for filtered_object in filtered_objects:
            iou = compute_iou(object_, filtered_object)
            if iou > non_max_suppression_threshold:
                overlap_found = True
                break
        if not overlap_found:
            filtered_objects.append(object_)
    return filtered_objects

NMS_THRESHOLD = 0.2
detections = non_max_suppression(detections, non_max_suppression_threshold=NMS_THRESHOLD)

image_with_detections = image.copy()
for detection in detections:
    cv2.rectangle(
        image_with_detections,
        (detection["TOP_LEFT_X"], detection["TOP_LEFT_Y"]),
        (detection["BOTTOM_RIGHT_X"], detection["BOTTOM_RIGHT_Y"]),
        detection["COLOR"],
        2,
    )
cv2.imshow("res", image_with_detections)
cv2.waitKey(0)
cv2.destroyAllWindows()
This is now my output, where it detects the 8 components that I have templates for:
Current code output
I want to build on this to detect a missing component. For example, suppose there are four (4) A3 resistors in the reference image, but my input (a real-time camera feed) contains only three A3 resistors; the same goes for a missing resistor (3220). What is the best approach/method to make this work?
For example, this is my desired output:
Desired output
Should I save the position where each template is supposed to be placed and build a database containing the templates and their specific locations? The idea is to record that at a given location (e.g. x = 20, y = 50) a resistor should be present. I am thinking of a CSV file, but I don't think it is possible or efficient to store both the template image paths and the position of each template in it.
I have read that SSIM could be used for missing-component detection, and some approaches use deep learning such as YOLO, but I think that is too complex for me to begin with.
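One simple way to approach this (a sketch only; the file name expected_components.json and the helper is_present are made up here) is to keep a list of expected locations per label, run the template-matching pipeline above, and report every expected location that no detection overlaps. The overlap check reuses the compute_iou function and the detections list already defined above.
import json

# Hypothetical file listing where each labelled component should sit on the board, e.g.
# [{"LABEL": "1", "TOP_LEFT_X": 20, "TOP_LEFT_Y": 50, "BOTTOM_RIGHT_X": 60, "BOTTOM_RIGHT_Y": 90}, ...]
with open("expected_components.json") as f:
    expected_components = json.load(f)

def is_present(expected, detections, min_iou=0.3):
    # a component counts as present if any detection with the same label
    # overlaps its expected box by at least min_iou
    return any(
        detection["LABEL"] == expected["LABEL"]
        and compute_iou(expected, detection) >= min_iou
        for detection in detections
    )

missing = [e for e in expected_components if not is_present(e, detections)]
for component in missing:
    # mark every expected position that has no matching detection
    cv2.rectangle(
        image_with_detections,
        (component["TOP_LEFT_X"], component["TOP_LEFT_Y"]),
        (component["BOTTOM_RIGHT_X"], component["BOTTOM_RIGHT_Y"]),
        (0, 0, 255),
        2,
    )
    cv2.putText(
        image_with_detections,
        "missing " + component["LABEL"],
        (component["TOP_LEFT_X"], component["TOP_LEFT_Y"] - 5),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.5,
        (0, 0, 255),
        1,
    )
A plain JSON (or CSV) file of expected boxes avoids storing the template images themselves; only the label and the expected coordinates are needed at check time.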

Related

Wrong remapping of keypoints of the object after image rotation

I have an image with many cars; every car has polygon coordinates and keypoints. I use this code to crop an object by its polygon and get the new keypoints.
x,y,w,h = cv2.boundingRect(points_poly_int)
cropped_img = img[y:y+h,x:x+w]
head_coords_after_crop = np.asarray([head_coords_old[0] - x, head_coords_old[1] -y])
center_coords_after_crop = np.asarray([center_coords_old[0] - x, center_coords_old[1] -y])
Here is an example of the cropped image and keypoints:
What I need is to rotate the whole image by an arbitrary angle and remap the polygon and keypoint coordinates for every object.
Here is the method, which returns the rotated image and the transformation matrix:
def rotate_image(mat, angle):
    """
    Rotates an image (angle in degrees) and expands image to avoid cropping
    """
    height, width = mat.shape[:2]  # image shape has 3 dimensions
    image_center = (width / 2, height / 2)  # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape

    rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)

    # rotation calculates the cos and sin, taking absolutes of those.
    abs_cos = abs(rotation_mat[0, 0])
    abs_sin = abs(rotation_mat[0, 1])

    # find the new width and height bounds
    bound_w = int(height * abs_sin + width * abs_cos)
    bound_h = int(height * abs_cos + width * abs_sin)

    # subtract old image center (bringing image back to origo) and adding the new image center coordinates
    rotation_mat[0, 2] += bound_w / 2 - image_center[0]
    rotation_mat[1, 2] += bound_h / 2 - image_center[1]

    # rotate image with the new bounds and translated rotation matrix
    rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
    return rotated_mat, rotation_mat
Next, I multiply the old coordinates by the transformation matrix. Here is the code:
img_rotated, C = rotate_image(img, 180)

# Remap polygon coordinates
ones = np.ones((points_poly.shape[0], 1))
new_poly = np.hstack((points_poly, ones))
new_poly = (C @ new_poly.T).T
new_poly = new_poly.astype(np.int32)

# Crop by new polygons
x, y, w, h = cv2.boundingRect(new_poly)
cropped_img = img_rotated[y:y+h, x:x+w]

# Remap keypoint coordinates
head_coords_new = np.asarray([756.600, 1687.900, 1])
center_coords_new = np.asarray([762.300, 1708.400, 1])
head_coords_new = (C @ head_coords_new.T).T
center_coords_new = (C @ center_coords_new.T).T
head_coords_new = np.asarray([head_coords_old[0] - x, head_coords_old[1] - y])
center_coords_new = np.asarray([center_coords_old[0] - x, center_coords_old[1] - y])
head_coords_new = head_coords_new.astype(np.int32)
center_coords_new = center_coords_new.astype(np.int32)
But the result is different from the first picture. Here is the new picture:
Somehow the keypoints shift, and it happens with every angle. I don't know how to fix it.
Here the source image: https://drive.google.com/file/d/14K_MQHMwtWlw-QCQbaB5ecrREbWwyKhO/view?usp=sharing
And polygons with keypoints:
{'keypoints': [{'id': 'head', 'pos': '756.600;1687.900'},
{'id': 'roof_center', 'pos': '762.300;1708.400'}],
'polygon': '{(759.700;1717.300);(770.000;1714.200);(762.000;1687.400);(756.600;1687.900);(751.200;1690.700);(759.700;1717.300)}'}
If you wish to reproduce the issue.
Thanks in advance.
Here is the difference. The right picture is the first image rotated in a picture viewer; the left is the transformed picture.
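For reference, a minimal sketch of how the affine matrix returned by rotate_image can be applied to keypoints; this is not from the question above, and it assumes C, x and y come from the rotation and cropping code shown earlier:
import numpy as np

# keypoints in the original (un-rotated) image, one (x, y) pair per row
keypoints_old = np.array([[756.6, 1687.9],
                          [762.3, 1708.4]])

# append 1 to each point (homogeneous coordinates) and apply the 2x3 affine matrix C;
# the result is each keypoint's position in the rotated, expanded image
ones = np.ones((keypoints_old.shape[0], 1))
keypoints_rotated = (C @ np.hstack([keypoints_old, ones]).T).T  # shape (N, 2)

# if the rotated image is then cropped at (x, y), the crop offset has to be
# subtracted from the rotated coordinates, not from the original ones
keypoints_in_crop = keypoints_rotated - np.array([x, y])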

idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH) TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol

Help me please. I received an error in my code for a social distancing detection system using a webcam. I searched for the error, but I could not find any difference from my code. I write my code using Notepad++ and run it using the command prompt. Below is my error:
C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time>python Run.py
[INFO] loading YOLO from disk...
[INFO] setting preferable backend and target to CUDA...
[INFO] accessing video stream...
[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\dnn.cpp (1447) cv::dnn::dnn4_v20211004::Net::Impl::setUpNet DNN module was not built with CUDA backend; switching to CPU
Traceback (most recent call last):
File "C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time\Run.py", line 77, in <module>
results = detect_people(frame, net, ln,
File "C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time\mylib\detection.py", line 58, in detect_people
idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH)
TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol
[ WARN:1] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
my error
Below is my full code for the file detection.py:
# import the necessary packages
from .config import NMS_THRESH, MIN_CORP, People_Counter
import numpy as np
import cv2

def detect_people(frame, net, In, personIdx=0):
    # grab the dimensions of the frame and initialize the list of results
    (H, W) = frame.shape[:2]
    results = []

    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(In)

    # initialize our lists of detected bounding boxes, centroids and
    # confidences, respectively
    boxes = []
    centroids = []
    confidences = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter detections by (1) ensuring that the object
            # detected was a person and (2) that the minimum
            # confidence is met
            if classID == personIdx and confidence > MIN_CORP:
                # scale the bounding box coordinates back relative to
                # the size of the image, keeping in mind that YOLO
                # actually returns the center (x, y)-coordinates of
                # the bounding box followed by the boxes' width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the top
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates,
                # centroids and confidences
                boxes.append([x, y, int(width), int(height)])
                centroids.append((centerX, centerY))
                confidences.append(float(confidence))

    # apply non-maxima suppression to suppress weak, overlapping bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH)
    # print('Total people count:', len(idxs))
    # compute the total people counter
    # if People_Counter:
    #     human_count = "Human count: {}".format(len(idxs))
    #     cv2.putText(frame, human_count, (470, frame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.70, (0, 0, 0), 2)

    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # update our results list to consist of the person
            # prediction probability, bounding box coordinates,
            # and the centroids
            r = (confidences[i], (x, y, x + w, y + h), centroids[i])
            results.append(r)

    # return the list of the results
    return results
The answer to your problem (as usual) lies in the response from the interpreter:
TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol
scores is the second argument to cv2.dnn.NMSBoxes, which in your case is confidence. confidence is a single number; you can't iterate over it. You've made a typo: you probably wanted to pass confidences, which is a list.
Change your code to:
idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CORP, NMS_THRESH)

How to detect a grainy line?

I am trying to detect a grainy printed line on a paper with cv2. I need the angle of the line. I don't have much knowledge of image processing and I only need to detect the line. I tried to play with the parameters, but the angle is always detected wrongly. Could someone help me? This is my code:
import cv2
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pyplot import figure

img = cv2.imread('CamXY1_1.bmp')
crop_img = img[100:800, 300:900]

blur = cv2.GaussianBlur(crop_img, (1, 1), 0)
ret, thresh = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY)
gray = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 60, 150)

figure(figsize=(15, 15), dpi=150)
plt.imshow(edges, 'gray')

lines = cv2.HoughLines(edges, 1, np.pi/180, 200)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 3000 * (-b))
    y1 = int(y0 + 3000 * (a))
    x2 = int(x0 - 3000 * (-b))
    y2 = int(y0 - 3000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
imagetobedetected
Here's a possible solution to estimate the line (and its angle) without using the Hough line transform. The idea is to locate the starting and ending points of the line using the reduce function. This function can reduce an image to a single column or row. If we reduce the image we can also get the total SUM of all the pixels across the reduced image. Using this info we can estimate the extreme points of the line and calculate its angle. These are the steps:
Resize your image because it is way too big
Get a binary image via adaptive thresholding
Define two extreme regions of the image and crop them
Reduce the ROIs to a column using the SUM mode, which is the sum of all rows
Accumulate the total values above a threshold value
Estimate the starting and ending points of the line
Get the angle of the line
Here's the code:
# imports:
import cv2
import numpy as np
import math
# image path
path = "D://opencvImages//"
fileName = "mmCAb.jpg"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Scale your BIG image into a small one:
scalePercent = 0.3
# Calculate the new dimensions
width = int(inputImage.shape[1] * scalePercent)
height = int(inputImage.shape[0] * scalePercent)
newSize = (width, height)
# Resize the image:
inputImage = cv2.resize(inputImage, newSize, None, None, None, cv2.INTER_AREA)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Convert BGR to grayscale:
grayInput = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Adaptive Thresholding:
windowSize = 51
windowConstant = 11
binaryImage = cv2.adaptiveThreshold(grayInput, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, windowSize, windowConstant)
The first step is to get the binary image. Note that I previously downscaled your input because it is too big and we don't need all that info. This is the binary mask:
Now, we don't need most of the image. In fact, since the line runs across the whole image, we only need to "trim" the first and last columns and check where the white pixels begin. I'll crop a column a little bit wider, though, so we can ensure we have enough data and as little noise as possible. I'll define two Regions of Interest (ROIs) and crop them. Then, I'll reduce each ROI to a column using the SUM mode, which gives me the summation of all intensities across each row. After that, I can accumulate the locations where the sum exceeds a certain threshold and approximate the location of the line, like this:
# Define the regions that will be cropped
# from the original image:
lineWidth = 5
cropPoints = [(0, 0, lineWidth, height), (width - lineWidth, 0, lineWidth, height)]

# Store the line points here:
linePoints = []

# Loop through the crop points and
# crop the ROI:
for p in range(len(cropPoints)):
    # Get the ROI:
    (x, y, w, h) = cropPoints[p]
    # Crop the ROI:
    imageROI = binaryImage[y:y+h, x:x+w]
    # Reduce the ROI to a n rows x 1 column matrix:
    reducedImg = cv2.reduce(imageROI, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
    # Get the height (or length) of the array:
    reducedHeight = reducedImg.shape[0]

    # Define a threshold and accumulate
    # the coordinates of the points:
    threshValue = 100
    pointSum = 0
    pointCount = 0

    for i in range(reducedHeight):
        currentValue = reducedImg[i]
        if currentValue > threshValue:
            pointSum = pointSum + i
            pointCount = pointCount + 1

    # Get average coordinate of the line:
    y = int(pointSum / pointCount)

    # Store in list:
    linePoints.append((x, y))
The red rectangles show the regions I cropped from the input image:
Note that I've stored both points in the linePoints list. Let's check out our approximation by drawing a line that connects both points:
# Get the two points:
p0 = linePoints[0]
p1 = linePoints[1]
# Draw the line:
cv2.line(inputImageCopy, (p0[0], p0[1]), (p1[0], p1[1]), (255, 0, 0), 1)
cv2.imshow("Line", inputImageCopy)
cv2.waitKey(0)
Which yields:
Not bad, huh? Now that we have both points, we can estimate the angle of this line:
# Get angle:
adjacentSide = p1[0] - p0[0]
oppositeSide = p0[1] - p1[1]
# Compute the angle alpha:
alpha = math.degrees(math.atan(oppositeSide / adjacentSide))
print("Angle: "+str(alpha))
This prints:
Angle: 0.534210901840831
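As a side note (an addition, not part of the original answer), if the line can be close to vertical, math.atan2 avoids the division by zero that math.atan(oppositeSide / adjacentSide) would hit; a minimal variant using the same p0 and p1 points:
# atan2 handles adjacentSide == 0 (a perfectly vertical line) and picks the correct quadrant
alpha = math.degrees(math.atan2(p0[1] - p1[1], p1[0] - p0[0]))
print("Angle: " + str(alpha))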

Fast and Robust Image Stitching Algorithm for many images in Python?

I have a stationary camera which rapidly takes photos of a continuously moving product, always from the same fixed angle (translation perspective). I need to stitch all the images into a panoramic picture. I've tried using the Stitcher class. It worked, but it took a long time to compute.
I also tried another method using the SIFT detector, a FLANN-based matcher, finding the homography and then warping the images. This method works fine if I only use two images. For multiple images it still doesn't stitch them properly. Does anyone know the best and fastest image stitching algorithm for this case?
This is my code which uses the Stitcher class.
import time
import cv2
import os
import numpy as np
import sys

def main():
    # read input images
    imgs = []
    path = 'pics_rotated/'
    i = 0
    for (root, dirs, files) in os.walk(path):
        images = [f for f in files]
        print(images)
    for i in range(0, len(images)):
        curImg = cv2.imread(path + images[i])
        imgs.append(curImg)

    stitcher = cv2.Stitcher.create(mode=0)
    status, result = stitcher.stitch(imgs)
    if status != cv2.Stitcher_OK:
        print("Can't stitch images, error code = %d" % status)
        sys.exit(-1)
    cv2.imwrite("imagesout/output.jpg", result)
    cv2.waitKey(0)

if __name__ == '__main__':
    start = time.time()
    main()
    end = time.time()
    print("Time --->>>>>", end - start)
    cv2.destroyAllWindows()
Briefing
Although the OpenCV Stitcher class provides lots of methods and options to perform stitching, I find it hard to use because of its complexity.
Therefore, I will try to provide the minimum and fastest way to perform stitching.
In case you are wondering about more sophisticated approaches such as exposure compensation, I highly recommend looking at the detailed sample code.
As a side note, I will be grateful if someone can convert the following functions to use the Stitcher class.
Introduction
In order to combine multiple images into the same perspective, the following operations are needed:
Detect and match features.
Compute homography (perspective transform between frames).
Warp one image onto the other perspective.
Combine the base and warped images while keeping track of the shift in origin.
Given the combination pattern, stitch multiple images.
Feature detection and matching
What are features?
They are distinguishable parts, like corners of a square, that are preserved across images.
There are different algorithms proposed for obtaining these characteristic points, like Harris, ORB, SIFT, SURF, etc.
See cv::Feature2d for the full list.
I will use SIFT because it is accurate and sufficiently fast.
A feature consists of a KeyPoint, which is the location in the image, and a descriptor, which is a set of numbers (e.g. a 128-D vector) that represents the properties of the feature.
After finding distinct points in images, we need to match the corresponding point pairs.
See cv::DescriptorMatcher.
I will use the FLANN-based descriptor matcher.
First, we initialize the descriptor and matcher classes.
descriptor = cv.SIFT.create()
matcher = cv.DescriptorMatcher.create(cv.DescriptorMatcher.FLANNBASED)
Then, we find the features in each image.
(kps, desc) = descriptor.detectAndCompute(image, mask=None)
Now we find the corresponding point pairs.
if desc1 is not None and desc2 is not None and len(desc1) >= 2 and len(desc2) >= 2:
    rawMatch = matcher.knnMatch(desc2, desc1, k=2)
    matches = []
    # ensure the distance is within a certain ratio of each other (i.e. Lowe's ratio test)
    ratio = 0.75
    for m in rawMatch:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))
Homography computation
Homography is the perspective transformation from one view to another.
The parallel lines in one view may not be parallel in another, like a road to sunset.
We need to have at least 4 corresponding point pairs.
More pairs mean redundant data that has to be decomposed or eliminated.
The homography matrix transforms a point in the initial view to its warped position.
It is a 3x3 matrix that is computed by the Direct Linear Transform algorithm.
There are 8 DoF and the last element in the matrix is 1.
[pt2] = H * [pt1]
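As an illustration (not part of the original answer), applying H to a single point means using homogeneous coordinates and dividing by the third component; a minimal sketch, assuming a 3x3 numpy matrix H and a point (x1, y1):
import numpy as np

# pt1 in homogeneous coordinates
pt1 = np.array([x1, y1, 1.0])

# apply the homography, then normalize by the third component to get pixel coordinates
pt2_h = H @ pt1
x2, y2 = pt2_h[0] / pt2_h[2], pt2_h[1] / pt2_h[2]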
Now that we have corresponding point matches, we compute the homography.
The method we use to handle redundant data is RANSAC, which randomly selects 4 point pairs and uses the best fitting result.
See cv::findHomography for more options.
if len(matches) > 4:
    (H, status) = cv.findHomography(pts1, pts2, cv.RANSAC)
Warping to perspective
By computing homography, we know which point in the source image corresponds to which point in the destination image.
In order not to lose information from the source image, we need to pad the destination image by the amount by which transformed points fall into negative regions.
At the same time, we need to keep track of the shift amount of the origin for stitching multiple images.
Auxiliary functions
# find the ROI of a transformation result
def warpRect(rect, H):
    x, y, w, h = rect
    corners = [[x, y], [x, y + h - 1], [x + w - 1, y], [x + w - 1, y + h - 1]]
    extremum = cv.transform(corners, H)
    minx, miny = np.min(extremum[:, 0]), np.min(extremum[:, 1])
    maxx, maxy = np.max(extremum[:, 0]), np.max(extremum[:, 1])
    xo = int(np.floor(minx))
    yo = int(np.floor(miny))
    wo = int(np.ceil(maxx - minx))
    ho = int(np.ceil(maxy - miny))
    outrect = (xo, yo, wo, ho)
    return outrect

# homography matrix is translated to fit in the screen
def coverH(rect, H):
    # obtain bounding box of the result
    x, y, _, _ = warpRect(rect, H)
    # shift amount to the first quadrant
    xpos = int(-x if x < 0 else 0)
    ypos = int(-y if y < 0 else 0)
    # correct the homography matrix so that no point is thrown out
    T = np.array([[1, 0, xpos], [0, 1, ypos], [0, 0, 1]])
    H_corr = T.dot(H)
    return (H_corr, (xpos, ypos))

# pad image to cover ROI, return the shift amount of origin
def addBorder(img, rect):
    x, y, w, h = rect
    tl = (x, y)
    br = (x + w, y + h)
    top = int(-tl[1] if tl[1] < 0 else 0)
    bottom = int(br[1] - img.shape[0] if br[1] > img.shape[0] else 0)
    left = int(-tl[0] if tl[0] < 0 else 0)
    right = int(br[0] - img.shape[1] if br[0] > img.shape[1] else 0)
    img = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=[0, 0, 0])
    orig = (left, top)
    return img, orig

def size2rect(size):
    return (0, 0, size[1], size[0])
Warping function
def warpImage(img, H):
    # tweak the homography matrix to move the result to the first quadrant
    H_cover, pos = coverH(size2rect(img.shape), H)
    # find the bounding box of the output
    x, y, w, h = warpRect(size2rect(img.shape), H_cover)
    width, height = x + w, y + h
    # warp the image using the corrected homography matrix
    warped = cv.warpPerspective(img, H_cover, (width, height))
    # make the external boundary solid black, useful for masking
    warped = np.ascontiguousarray(warped, dtype=np.uint8)
    gray = cv.cvtColor(warped, cv.COLOR_RGB2GRAY)
    _, bw = cv.threshold(gray, 1, 255, cv.THRESH_BINARY)
    # https://stackoverflow.com/a/55806272/12447766
    major = cv.__version__.split('.')[0]
    if major == '3':
        _, cnts, _ = cv.findContours(bw, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
    else:
        cnts, _ = cv.findContours(bw, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
    warped = cv.drawContours(warped, cnts, 0, [0, 0, 0], lineType=cv.LINE_4)
    return (warped, pos)
Combining warped and destination images
This is the step where image enhancement such as exposure compensation becomes involved.
In order to keep things simple, we will use mean value blending.
The easiest solution would be to override the existing data in the destination image, but the averaging operation is not a burden for us.
# only the non-zero pixels are weighted to the average
def mean_blend(img1, img2):
    assert(img1.shape == img2.shape)
    locs1 = np.where(cv.cvtColor(img1, cv.COLOR_RGB2GRAY) != 0)
    blended1 = np.copy(img2)
    blended1[locs1[0], locs1[1]] = img1[locs1[0], locs1[1]]
    locs2 = np.where(cv.cvtColor(img2, cv.COLOR_RGB2GRAY) != 0)
    blended2 = np.copy(img1)
    blended2[locs2[0], locs2[1]] = img2[locs2[0], locs2[1]]
    blended = cv.addWeighted(blended1, 0.5, blended2, 0.5, 0)
    return blended
def warpPano(prevPano, img, H, orig):
    # correct homography matrix
    T = np.array([[1, 0, -orig[0]], [0, 1, -orig[1]], [0, 0, 1]])
    H_corr = H.dot(T)
    # warp the image and obtain shift amount of origin
    result, pos = warpImage(prevPano, H_corr)
    xpos, ypos = pos
    # zero pad the result
    rect = (xpos, ypos, img.shape[1], img.shape[0])
    result, _ = addBorder(result, rect)
    # mean value blending
    idx = np.s_[ypos : ypos + img.shape[0], xpos : xpos + img.shape[1]]
    result[idx] = mean_blend(result[idx], img)
    # crop extra paddings
    x, y, w, h = cv.boundingRect(cv.cvtColor(result, cv.COLOR_RGB2GRAY))
    result = result[y : y + h, x : x + w]
    # return the resulting image with shift amount
    return (result, (xpos - x, ypos - y))
Stitching multiple images given combination pattern
# base image is the last image in each iteration
def blend_multiple_images(images, homographies):
    N = len(images)
    assert(N >= 2)
    assert(len(homographies) == N - 1)
    pano = np.copy(images[0])
    pos = (0, 0)
    for i in range(N - 1):
        img = images[i + 1]
        # get homography matrix
        H = homographies[i]
        # warp pano onto image
        pano, pos = warpPano(pano, img, H, pos)
    return (pano, pos)
The method above successively warps the previously combined image, called pano, onto the next image.
A pattern, however, may have conjunction points for the best stitching view.
For example
1 2 3
4 5 6
The best pattern to combine these images is
1 -> 2 <- 3
|
V
4 -> 5 <- 6
Therefore, we need one last function to combine 1 & 2 with 2 & 3, or 1235 with 456 at node 5.
from operator import sub

# no warping here, useful for combining two different stitched images
# the image at given origin coordinates must be the same
def patchPano(img1, img2, orig1=(0, 0), orig2=(0, 0)):
    # bottom right points
    br1 = (img1.shape[1] - 1, img1.shape[0] - 1)
    br2 = (img2.shape[1] - 1, img2.shape[0] - 1)
    # distance from orig to br
    diag2 = tuple(map(sub, br2, orig2))
    # possible pano corner coordinates based on img1
    extremum = np.array([(0, 0), br1,
                         tuple(map(sum, zip(orig1, diag2))),
                         tuple(map(sub, orig1, orig2))])
    bb = cv.boundingRect(extremum)
    # patch img1 to img2
    pano, shift = addBorder(img1, bb)
    orig = tuple(map(sum, zip(orig1, shift)))
    idx = np.s_[orig[1] : orig[1] + img2.shape[0] - orig2[1],
                orig[0] : orig[0] + img2.shape[1] - orig2[0]]
    subImg = img2[orig2[1] : img2.shape[0], orig2[0] : img2.shape[1]]
    pano[idx] = mean_blend(pano[idx], subImg)
    return (pano, orig)
For a quick demo, you can run the Python code in GitHub.
If you want to use the above methods in C++, you can have a look at Stitch library.
Any PR or edit to this post is welcome.
As an alternative to the last step that @Burak gave, this is the way I used, since I knew the number of images in each row (chunk); multiStitching is nothing but a function that stitches images horizontally:
def stitchingImagesHV(img_list, size):
    """
    As our multi stitching algorithm works on the horizontal line, we will hack
    it to use also the vertical stitching by rotating each row "stitch_img" and
    apply the same technique, and after that, the final result is rotated back to the
    original direction.
    """
    # Generate row chunks of "size" length from image list
    chunks = [img_list[i:i + size] for i in range(0, len(img_list), size)]
    list_rotated_images = []
    for i in range(len(chunks)):
        stitch_img = multiStitching(chunks[i])
        stitch_img_rotated = cv2.rotate(stitch_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        list_rotated_images.append(stitch_img_rotated.astype('uint8'))
    stitch_img2 = multiStitching(list_rotated_images)
    return cv2.rotate(stitch_img2, cv2.ROTATE_90_CLOCKWISE)
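A hypothetical usage sketch (the file names, the flat_grid list and the row size of 3 are assumptions, and multiStitching is whatever horizontal stitcher the answer above builds on):
# flat_grid holds the images row by row, e.g. a 2x3 grid flattened to 6 images
flat_grid = [cv2.imread(p) for p in [
    "r0c0.jpg", "r0c1.jpg", "r0c2.jpg",
    "r1c0.jpg", "r1c1.jpg", "r1c2.jpg",
]]

# stitch each row of 3 images horizontally, then stitch the stitched rows vertically
panorama = stitchingImagesHV(flat_grid, size=3)
cv2.imwrite("panorama.jpg", panorama)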

Convert YoloV3 output to coordinates of bounding box, label and confidence

I run a YoloV3 model and get detections, a dictionary of 3 entries:
"detector/yolo-v3/Conv_22/BiasAdd/YoloRegion": numpy.ndarray with shape (1, 255, 52, 52),
"detector/yolo-v3/Conv_6/BiasAdd/YoloRegion": numpy.ndarray with shape (1, 255, 13, 13),
"detector/yolo-v3/Conv_14/BiasAdd/YoloRegion": numpy.ndarray with shape (1, 255, 26, 26).
I know that each entry in the dictionary corresponds to a different object size:
Conv_22 is for small objects
Conv_14 is for medium objects
Conv_6 is for big objects
How can I convert this dictionary output to coordinates of bounding box, label and confidence?
Presuming you use Python and OpenCV, please find the code below, with comments wherever required, to extract the output using the cv2.dnn module.
net.setInput(blob)
layerOutputs = net.forward(ln)

boxes = []
confidences = []
classIDs = []

for output in layerOutputs:
    # loop over each of the detections
    for detection in output:
        # extract the class ID and confidence (i.e., probability) of
        # the current object detection
        scores = detection[5:]
        classID = np.argmax(scores)
        confidence = scores[classID]
        # filter out weak predictions by ensuring the detected
        # probability is greater than the minimum probability
        if confidence > threshold:
            # scale the bounding box coordinates back relative to the
            # size of the image, keeping in mind that YOLO actually
            # returns the center (x, y)-coordinates of the bounding
            # box followed by the boxes' width and height
            box = detection[0:4] * np.array([W, H, W, H])
            (centerX, centerY, width, height) = box.astype("int")
            # use the center (x, y)-coordinates to derive the top and
            # and left corner of the bounding box
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            # update our list of bounding box coordinates, confidences,
            # and class IDs
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            classIDs.append(classID)

idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence, threshold)
# results are stored in idxs, boxes, confidences, classIDs
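To get the final (label, confidence, bounding box) triples the question asks for, you can iterate over the indices kept by NMS; a minimal sketch, assuming a labels list loaded from the model's class-names file (e.g. coco.names):
results = []
if len(idxs) > 0:
    for i in idxs.flatten():
        x, y, w, h = boxes[i]
        results.append({
            "label": labels[classIDs[i]],   # class name for classIDs[i], assumed loaded beforehand
            "confidence": confidences[i],
            "box": (x, y, x + w, y + h),    # top-left and bottom-right corners
        })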
