Average color of multiple images merged together - python

I have all these layers in Photoshop:
I also have these images in Python. In Photoshop, I can get the average color of the entire document from the RGB channel of the document. In Python, I was thinking I would blend the images together using cv2.addWeighted, and then use the histograms of the three channels to get the average color. However, my average color does not match with the one obtained from Photoshop.
Here is my code so far:
import cv2
import numpy as np
from PIL import Image

im1 = cv2.imread('a1.png')
im2 = cv2.imread('a2.png')
im3 = cv2.imread('a3.png')

alpha = 0.5
beta = (1.0 - alpha)
blended = cv2.addWeighted(im1, alpha, im2, beta, 0.0)
blended = cv2.addWeighted(blended, alpha, im3, beta, 0.0)
# cv2.imwrite('blended.png', blended)

averageColor = []
chans = cv2.split(blended)
colors = ("b", "g", "r")
for (chan, color) in zip(chans, colors):
    # create a histogram for the current channel and record
    # the first non-empty bin
    hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
    for i in range(len(hist)):
        if hist[i] != 0:
            averageColor.append(i)
            break
print("Average color: ", averageColor)

# aw, ah: canvas width and height (defined elsewhere in my code)
base_color = np.full((aw, ah, 3), averageColor, dtype=np.uint8)  # np.fill does not exist; np.full is the intended call
base_color_layer = Image.fromarray(base_color)
base_color_layer.save("Base Color.png")
im3 is the "dup" layer containing only one color (129, 127, 121). im2 is all the Mask layers summed together using np.add. im1 is the Background image.
Any thoughts?

# given
im1 = cv2.imread('a1.png')
im2 = cv2.imread('a2.png')
im3 = cv2.imread('a3.png')
# calculations
averageIm = np.mean([im1, im2, im3], axis=0)
averageColor = averageIm.mean(axis=(0,1))
This assumes the images are color without alpha, and that you want each picture to have equal weight.
Note that this is not equivalent to stacking the layers and giving each layer the same transparency, because that amounts to an exponentially decaying (geometric) weighting, i.e. alpha * im1 + (1 - alpha) * (alpha * im2 + (1 - alpha) * im3), which gives an image successively less weight the lower it sits in the stack. The weights go like [0.5, 0.25, 0.25], or [0.5, 0.25, 0.125, 0.0625, ...] for longer stacks.
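If you do want to reproduce those successive-blend weights explicitly, numpy's weighted average does it in one call. A minimal sketch (the weights below follow the [0.5, 0.25, 0.25] scheme above; adjust them to match whatever blend order you used):
import numpy as np

stack = np.stack([im1, im2, im3]).astype(np.float64)

# equal weighting: the true mean of the three images
equal = np.average(stack, axis=0)

# successive 50/50 blending, top of the stack first
blended = np.average(stack, axis=0, weights=[0.5, 0.25, 0.25])

# per-channel average colour of the blend (B, G, R order in OpenCV)
print(blended.mean(axis=(0, 1)))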

Disclaimer: I would highly recommend doing this with the CUDA API (either in C or via the Python bindings), but since the question is about pure Python, see below.
I'm not going to add all the cv2 reading code because I don't know that API, but let's assume you've got images 1...n in a list called images, that each image is a 3D array of width x height x rgba, and that all images are the same size.
Image_t = list[list[list[int]]]

def get_average_colour(images: list[Image_t]) -> Image_t:
    new_image: Image_t = []
    image_width = len(images[0][0])  # length of the first row in the first image
    image_height = len(images[0])  # number of rows
    colour_channel_size = len(images[0][0][0])
    for h in range(image_height):
        row = []
        for w in range(image_width):
            curr_pixel = [0] * colour_channel_size
            for img in images:
                for chan, val in enumerate(img[h][w]):
                    curr_pixel[chan] += val
            # average out the accumulated pixel values
            row.append([int(v / len(images)) for v in curr_pixel])
        new_image.append(row)
    return new_image
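For what it's worth, here's one way to feed it images loaded with cv2 (a sketch reusing the file names from the question; cv2 returns numpy arrays, so they need converting to the nested lists the function expects):
import cv2
import numpy as np

images = [cv2.imread(f).tolist() for f in ('a1.png', 'a2.png', 'a3.png')]
averaged = get_average_colour(images)

# back to an array so the result can be saved
cv2.imwrite('averaged.png', np.array(averaged, dtype=np.uint8))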


Getting a 3D map on Meshlab with my disparity map

I have a program that finds the disparity map from two images taken with two non-stereo-calibrated cameras. The disparity map looks good, but when I add a piece of code to build a 3D map viewable in Meshlab, I get scattered points (see result_clou.png).
In other topics I saw that I had to change the type and divide the disparity map by 16, so I tried a new map called disparity_SGBM2 as follows: disparity_SGBM2 = disparity_SGBM.astype(np.float32) / 16.0
I took a screenshot of the .ply with its error message (see result_disparity_SGBM2.png).
Does anyone have an idea how to unblock me, please?
I have also included my Python program below (because I can't attach a Python file) and the images used with it (clou-l.png and clou-r.png).
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# Read both images and convert to grayscale
img1 = cv.imread('clou-l.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('clou-r.png', cv.IMREAD_GRAYSCALE)
# ------------------------------------------------------------
# PREPROCESSING
# Compare unprocessed images
#fig, axes = plt.subplots(1, 2, figsize=(15, 10))
#axes[0].imshow(img1, cmap="gray")
#axes[1].imshow(img2, cmap="gray")
#axes[0].axhline(250)
#axes[1].axhline(250)
#axes[0].axhline(450)
#axes[1].axhline(450)
#plt.suptitle("Original images")
#plt.savefig("original_images.png")
#plt.show()
# 1. Detect keypoints and their descriptors
# Based on: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
# Initiate SIFT detector
sift = cv.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# Visualize keypoints
imgSift = cv.drawKeypoints(
img1, kp1, None, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#cv.imshow("SIFT Keypoints", imgSift)
#cv.imwrite("sift_keypoints.png", imgSift)
# Match keypoints in both images
# Based on: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# Keep good matches: calculate distinctive image features
# Lowe, D.G. Distinctive Image Features from Scale-Invariant Keypoints. International Journal of Computer Vision 60, 91–110 (2004). https://doi.org/10.1023/B:VISI.0000029664.99615.94
# https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf
matchesMask = [[0, 0] for i in range(len(matches))]
good = []
pts1 = []
pts2 = []
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7*n.distance:
        # Keep this keypoint pair
        matchesMask[i] = [1, 0]
        good.append(m)
        pts2.append(kp2[m.trainIdx].pt)
        pts1.append(kp1[m.queryIdx].pt)
# Draw the keypoint matches between both pictures
# Still based on: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
draw_params = dict(matchColor=(0, 255, 0),
singlePointColor=(255, 0, 0),
matchesMask=matchesMask[300:500],
flags=cv.DrawMatchesFlags_DEFAULT)
keypoint_matches = cv.drawMatchesKnn(
img1, kp1, img2, kp2, matches[300:500], None, **draw_params)
#cv.imshow("Keypoint matches", keypoint_matches)
#cv.imwrite("keypoint_matches.png", keypoint_matches)
# ------------------------------------------------------------
# STEREO RECTIFICATION
# Calculate the fundamental matrix for the cameras
# https://docs.opencv.org/master/da/de9/tutorial_py_epipolar_geometry.html
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
fundamental_matrix, inliers = cv.findFundamentalMat(pts1, pts2, cv.FM_RANSAC)
# We select only inlier points
pts1 = pts1[inliers.ravel() == 1]
pts2 = pts2[inliers.ravel() == 1]
# Visualize epilines
# Adapted from: https://docs.opencv.org/master/da/de9/tutorial_py_epipolar_geometry.html
def drawlines(img1src, img2src, lines, pts1src, pts2src):
    ''' img1 - image on which we draw the epilines for the points in img2
        lines - corresponding epilines '''
    r, c = img1src.shape
    img1color = cv.cvtColor(img1src, cv.COLOR_GRAY2BGR)
    img2color = cv.cvtColor(img2src, cv.COLOR_GRAY2BGR)
    # Edit: use the same random seed so that two images are comparable!
    np.random.seed(0)
    for r, pt1, pt2 in zip(lines, pts1src, pts2src):
        color = tuple(np.random.randint(0, 255, 3).tolist())
        x0, y0 = map(int, [0, -r[2]/r[1]])
        x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1]])
        img1color = cv.line(img1color, (x0, y0), (x1, y1), color, 1)
        img1color = cv.circle(img1color, tuple(pt1), 5, color, -1)
        img2color = cv.circle(img2color, tuple(pt2), 5, color, -1)
    return img1color, img2color
# Find epilines corresponding to points in right image (second image) and
# drawing its lines on left image
lines1 = cv.computeCorrespondEpilines(
pts2.reshape(-1, 1, 2), 2, fundamental_matrix)
lines1 = lines1.reshape(-1, 3)
img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv.computeCorrespondEpilines(
pts1.reshape(-1, 1, 2), 1, fundamental_matrix)
lines2 = lines2.reshape(-1, 3)
img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)
#plt.subplot(121), plt.imshow(img5)
#plt.subplot(122), plt.imshow(img3)
#plt.suptitle("Epilines in both images")
#plt.savefig("epilines.png")
#plt.show()
# Stereo rectification (uncalibrated variant)
# Adapted from: https://stackoverflow.com/a/62607343
h1, w1 = img1.shape
h2, w2 = img2.shape
_, H1, H2 = cv.stereoRectifyUncalibrated(
np.float32(pts1), np.float32(pts2), fundamental_matrix, imgSize=(w1, h1)
)
# Rectify (undistort) the images and save them
# Adapted from: https://stackoverflow.com/a/62607343
img1_rectified = cv.warpPerspective(img1, H1, (w1, h1))
img2_rectified = cv.warpPerspective(img2, H2, (w2, h2))
cv.imwrite("rectified_1.png", img1_rectified)
cv.imwrite("rectified_2.png", img2_rectified)
# Draw the rectified images
#fig, axes = plt.subplots(1, 2, figsize=(15, 10))
#axes[0].imshow(img1_rectified, cmap="gray")
#axes[1].imshow(img2_rectified, cmap="gray")
#axes[0].axhline(250)
#axes[1].axhline(250)
#axes[0].axhline(450)
#axes[1].axhline(450)
#plt.suptitle("Rectified images")
#plt.savefig("rectified_images.png")
#plt.show()
# ------------------------------------------------------------
# CALCULATE DISPARITY (DEPTH MAP)
# Adapted from: https://github.com/opencv/opencv/blob/master/samples/python/stereo_match.py
# and: https://docs.opencv.org/master/dd/d53/tutorial_py_depthmap.html
# StereoSGBM Parameter explanations:
# https://docs.opencv.org/4.5.0/d2/d85/classcv_1_1StereoSGBM.html
# Matched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range.
block_size = 11
min_disp = -128
max_disp = 128
# Maximum disparity minus minimum disparity. The value is always greater than zero.
# In the current implementation, this parameter must be divisible by 16.
num_disp = max_disp - min_disp
# Margin in percentage by which the best (minimum) computed cost function value should "win" the second best value to consider the found match correct.
# Normally, a value within the 5-15 range is good enough
uniquenessRatio = 5
# Maximum size of smooth disparity regions to consider their noise speckles and invalidate.
# Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range.
speckleWindowSize = 200
# Maximum disparity variation within each connected component.
# If you do speckle filtering, set the parameter to a positive value, it will be implicitly multiplied by 16.
# Normally, 1 or 2 is good enough.
speckleRange = 2
disp12MaxDiff = 0
stereo = cv.StereoSGBM_create(
minDisparity=min_disp,
numDisparities=num_disp,
blockSize=block_size,
uniquenessRatio=uniquenessRatio,
speckleWindowSize=speckleWindowSize,
speckleRange=speckleRange,
disp12MaxDiff=disp12MaxDiff,
P1=8 * 1 * block_size * block_size,
P2=32 * 1 * block_size * block_size,
)
disparity_SGBM = stereo.compute(img1_rectified, img2_rectified)
#disparity_SGBM2 = disparity_SGBM.astype(np.float32) / 16.0
#plt.imshow(disparity_SGBM, cmap='plasma')
#plt.colorbar()
#plt.show()
#Normalize the values to a range from 0..255 for a grayscale image
disparity_SGBM = cv.normalize(disparity_SGBM, disparity_SGBM, alpha=255,
beta=0, norm_type=cv.NORM_MINMAX)
disparity_SGBM = np.uint8(disparity_SGBM)
#cv.imshow("Disparity", disparity_SGBM)
#cv.imwrite("disparity_SGBM_norm.png", disparity_SGBM)
#cv.waitKey()
#cv.destroyAllWindows()
# ---------------------------------------------------------------
"""That's the new part of the program for reconstructing the 3D map from the disparity map.
For seeing the 3D result, you need to open the clou.ply folder with Meshlab"""
def create_output(vertices, colors, filename):
colors = colors.reshape(-1, 3)
vertices = np.hstack([vertices.reshape(-1,3), colors])
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
with open(filename, 'w') as f:
f.write(ply_header % dict(vert_num=len(vertices)))
np.savetxt(f, vertices, '%f %f %f %d %d %d')
print("\nGenerating the 3D map ...")
h, w = img1.shape[:2]
focal_length = 0.8*w
#Perspective transformation matrix
Q = np.float32([[1, 0, 0, -w/2.0],
[0,-1, 0, h/2.0],
[0, 0, 0, -focal_length],
[0, 0, 1, 0]])
output_file = 'clou' + '.ply'
points_3D = cv.reprojectImageTo3D(disparity_SGBM, Q, handleMissingValues=0)
colors = cv.cvtColor(img1, cv.COLOR_GRAY2RGB)  # img1 was read as grayscale, so GRAY2RGB (BGR2RGB would fail on a single-channel image)
mask_map = disparity_SGBM > disparity_SGBM.min()
output_points = points_3D[mask_map]
output_colors = colors[mask_map]
print("\nCreating the output file ...\n")
create_output(output_points, output_colors, output_file)
clou-l.png
clou-r.png
result_clou.png
result_disparity_SGBM2.png
I think the problem is that you're using very shiny objects, which are typically hard to match in stereo images and photogrammetry. You could try moving the illuminating lights, possibly to a more oblique angle, or fit polarizers over each lens and illuminate with polarized light. Another technique you can employ is to cover the subject in white powder to create a matte/diffuse surface, which can work better.
I've used DMAG (http://3dstereophoto.blogspot.com/2013/04/depth-map-automatic-generator-dmag.html) to produce depth maps (with varying degrees of success), but it can produce intermediate files that first show the features it can find, and then, in another step, which features match between images. I ran your script to produce the rectified images with an epipolar projection, then ran those through DMAG. It shows very few matches (see the attached images: Features L, Features R, Matches). With so few matches you're not going to produce much of a mesh.

How to detect a grainy line?

I am trying to detect a grainy printed line on a paper with cv2. I need the angle of the line. I don't have much knowledge of image processing, and I only need to detect the line. I tried to play with the parameters, but the angle is always detected wrong. Could someone help me? This is my code:
import cv2
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pyplot import figure
img = cv2.imread('CamXY1_1.bmp')
crop_img = img[100:800, 300:900]
blur = cv2.GaussianBlur(crop_img, (1,1), 0)
ret,thresh = cv2.threshold(blur,150,255,cv2.THRESH_BINARY)
gray = cv2.cvtColor(thresh,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 60, 150)
figure(figsize=(15, 15), dpi=150)
plt.imshow(edges, 'gray')
lines = cv2.HoughLines(edges,1,np.pi/180,200)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 3000*(-b))
    y1 = int(y0 + 3000*(a))
    x2 = int(x0 - 3000*(-b))
    y2 = int(y0 - 3000*(a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
Here's a possible solution to estimate the line (and its angle) without using the Hough line transform. The idea is to locate the start and end points of the line using the reduce function, which can reduce an image to a single column or row. If we reduce the image we can also get the total SUM of all the pixels across the reduced image. Using this info we can estimate the extreme points of the line and calculate its angle. These are the steps:
Resize your image because it is way too big
Get a binary image via adaptive thresholding
Define two extreme regions of the image and crop them
Reduce the ROIs to a column using the SUM mode, which is the sum of all rows
Accumulate the total values above a threshold value
Estimate the starting and ending points of the line
Get the angle of the line
Here's the code:
# imports:
import cv2
import numpy as np
import math
# image path
path = "D://opencvImages//"
fileName = "mmCAb.jpg"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Scale your BIG image into a small one:
scalePercent = 0.3
# Calculate the new dimensions
width = int(inputImage.shape[1] * scalePercent)
height = int(inputImage.shape[0] * scalePercent)
newSize = (width, height)
# Resize the image:
inputImage = cv2.resize(inputImage, newSize, None, None, None, cv2.INTER_AREA)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Convert BGR to grayscale:
grayInput = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Adaptive Thresholding:
windowSize = 51
windowConstant = 11
binaryImage = cv2.adaptiveThreshold(grayInput, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, windowSize, windowConstant)
The first step is to get the binary image. Note that I previously downscaled your input because it is too big and we don't need all that info. This is the binary mask:
Now, we don't need most of the image. In fact, since the line spans the whole image, we can just "trim" the first and last columns and check where the white pixels begin. I'll crop columns a little wider, though, so we can ensure we have enough data and as little noise as possible. I'll define two Regions of Interest (ROIs) and crop them. Then, I'll reduce each ROI to a column using the SUM mode, which gives the summation of all intensities across each row. After that, I can accumulate the locations where the sum exceeds a certain threshold and approximate the location of the line, like this:
# Define the regions that will be cropped
# from the original image:
lineWidth = 5
cropPoints = [(0, 0, lineWidth, height), (width-lineWidth, 0, lineWidth, height)]

# Store the line points here:
linePoints = []

# Loop through the crop points and
# crop the ROI:
for p in range(len(cropPoints)):
    # Get the ROI:
    (x, y, w, h) = cropPoints[p]
    # Crop the ROI:
    imageROI = binaryImage[y:y+h, x:x+w]
    # Reduce the ROI to an n rows x 1 column matrix:
    reducedImg = cv2.reduce(imageROI, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
    # Get the height (or length) of the array:
    reducedHeight = reducedImg.shape[0]
    # Define a threshold and accumulate
    # the coordinates of the points:
    threshValue = 100
    pointSum = 0
    pointCount = 0
    for i in range(reducedHeight):
        currentValue = reducedImg[i]
        if currentValue > threshValue:
            pointSum = pointSum + i
            pointCount = pointCount + 1
    # Get average coordinate of the line:
    y = int(pointSum / pointCount)  # the original snippet used accX / pixelCount, which are undefined
    # Store in list:
    linePoints.append((x, y))
The red rectangles show the regions I cropped from the input image:
Note that I've stored both points in the linePoints list. Let's check out our approximation by drawing a line that connects both points:
# Get the two points:
p0 = linePoints[0]
p1 = linePoints[1]
# Draw the line:
cv2.line(inputImageCopy, (p0[0], p0[1]), (p1[0], p1[1]), (255, 0, 0), 1)
cv2.imshow("Line", inputImageCopy)
cv2.waitKey(0)
Which yields:
Not bad, huh? Now that we have both points, we can estimate the angle of this line:
# Get angle:
adjacentSide = p1[0] - p0[0]
oppositeSide = p0[1] - p1[1]
# Compute the angle alpha:
alpha = math.degrees(math.atan(oppositeSide / adjacentSide))
print("Angle: "+str(alpha))
This prints:
Angle: 0.534210901840831
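One caveat: atan divides by the horizontal span, so a perfectly vertical line would raise a ZeroDivisionError. If that can happen in your setup, math.atan2 takes the two sides separately and handles it; a small variant of the snippet above:
# Get angle, robust to vertical lines:
adjacentSide = p1[0] - p0[0]
oppositeSide = p0[1] - p1[1]
# atan2(opposite, adjacent) returns 90 degrees instead of dividing by zero
alpha = math.degrees(math.atan2(oppositeSide, adjacentSide))
print("Angle: " + str(alpha))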

Using masks to apply different thresholds to different parts of an image

I have an image, in which I want to threshold part of the image within a circular region, and then the remainder of the image outside of this region.
Unfortunately my attempts seem to be thresholding the image as a whole, ignoring the masks. How can this be properly achieved? See code attempt below.
def circular_mask(h, w, centre=None, radius=None):
    if centre is None:  # use the middle of the image
        centre = [int(w / 2), int(h / 2)]
    if radius is None:  # use the smallest distance between the centre and image walls
        radius = min(centre[0], centre[1], w - centre[0], h - centre[1])
    Y, X = np.ogrid[:h, :w]
    dist_from_centre = np.sqrt((X - centre[0]) ** 2 + (Y - centre[1]) ** 2)
    mask = dist_from_centre <= radius
    return mask
img = cv2.imread('image.png', 0) #read image
h,w = img.shape[:2]
mask = circular_mask(h,w, centre=(135,140),radius=75) #create a boolean circle mask
mask_img = img.copy()
inside = np.ma.array(mask_img, mask=~mask)
t1 = inside < 50 #threshold part of image within the circle, ignore rest of image
plt.imshow(inside)
plt.imshow(t1, alpha=.25)
plt.show()
outside = np.ma.array(mask_img, mask=mask)
t2 = outside < 20 #threshold image outside circle region, ignoring image in circle
plt.imshow(outside)
plt.imshow(t2, alpha=.25)
plt.show()
fin = np.logical_or(t1, t2) #combine the results from both thresholds together
plt.imshow(fin)
plt.show()
Working solution:
img = cv2.imread('image.png', 0)
h,w = img.shape[:2]
mask = circular_mask(h,w, centre=(135,140),radius=75)
inside = img.copy()*mask
t1 = inside < 50#get_threshold(inside, 1)
plt.imshow(inside)
plt.show()
outside = img.copy()*~mask
t2 = outside < 70
plt.imshow(outside)
plt.show()
plt.imshow(t1)
plt.show()
plt.imshow(t2)
plt.show()
plt.imshow(np.logical_and(t1,t2))
plt.show()
I assume your image is single-layered (e.g. greyscale).
You can make two copies of the image. Multiply (or logically AND) your mask with one of them, and the inverse of that mask with the other. Now apply your desired threshold to each of them. In the end, merge both images using a logical OR operation.
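A minimal sketch of that idea with numpy (reusing img, circular_mask, and the thresholds from the question; the extra & mask terms are my addition, to stop the zeroed-out region from passing the < threshold test):
inside = img * mask    # zero everywhere outside the circle
outside = img * ~mask  # zero everywhere inside the circle

t1 = (inside < 50) & mask    # threshold only counts within the circle
t2 = (outside < 20) & ~mask  # threshold only counts outside it

combined = np.logical_or(t1, t2)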

OpenCV: Use findHomography() and warpPerspective to align a bigger image onto a smaller one

My goal is to
deskew a scanned image such that its text is perfectly placed on top of the text of the original image. (subtracting the images would remove the text)
prevent any loss of information on the deskewed image
I use SURF features to feed the findHomography function. Then I use the warpPerspective function to transform the scanned image. The resulting image almost perfectly fits onto the original image.
However, the scanned image has content on its corners which is lost after the transformation because the text in the scanned image is smaller and has to be scaled up.
Deskewing an image that has slightly smaller text
Information at the borders of the image is cropped
To avoid any loss of information, I convert the image to RGBA and set the borderValue parameter in warpPerspective such that any added background has transparent color. I remove the transparent pixels after the transformation again. This procedure works but seems highly inefficient.
Question: I'm looking for a working code example (C++ or Python) that shows how to do this more efficiently.
Image has been deskewed and content is preserved. However, the text of the two pictures isn't on top of each other anymore
Text position is off because the warped image has a different size than what warpPerspective expected
After transforming the image the problem is that the two images aren't aligned anymore because the dimensions of the transformed image are different than what the warpPerspective method expected.
Question: How can I realign the two images? It would be great if there was a way to incorporate this into the previous step already. Again, a working code example would be very helpful.
Here's the code that I have so far. It deskews the image while preserving its content, however, the text is not on top of the original text anymore.
import math
import cv2
import numpy as np
class Deskewer:
    def __init__(self, hessianTreshold=5000):
        self.__hessianThresh = hessianTreshold
        self.imgOrigGray, self.imgSkewed, self.imgSkewedGray = None, None, None

    def start(self, imgOrig, imgSkewed):
        self.imgOrigGray = cv2.cvtColor(imgOrig, cv2.COLOR_BGR2GRAY)
        self.imgSkewed = imgSkewed  # final transformation will be performed on color image
        self.imgSkewedGray = cv2.cvtColor(imgSkewed, cv2.COLOR_BGR2GRAY)  # prior calculation is faster on gray
        kp1, des1, kp2, des2 = self.__detectFeatures()
        goodMatches = self.__flannMatch(des1, des2)
        MIN_MATCH_COUNT = 10
        M = None
        if len(goodMatches) > MIN_MATCH_COUNT:
            M, _ = self.__findHomography(kp1, kp2, goodMatches)
        else:
            print("Not enough matches are found - %d/%d" % (len(goodMatches), MIN_MATCH_COUNT))
            return
        return self.__deskew(M)

    def __detectFeatures(self):
        surf = cv2.xfeatures2d.SURF_create(self.__hessianThresh)
        kp1, des1 = surf.detectAndCompute(self.imgOrigGray, None)
        kp2, des2 = surf.detectAndCompute(self.imgSkewedGray, None)
        return kp1, des1, kp2, des2

    def __flannMatch(self, des1, des2):
        global matches
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        return good

    def __findHomography(self, kp1, kp2, goodMatches):
        src_pts = np.float32([kp1[m.queryIdx].pt for m in goodMatches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in goodMatches]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        i = matchesMask.index(1)
        # TODO: This is a matching point before the warpPerspective call. How can I calculate this point AFTER the call?
        print("POINTS: object(", src_pts[i][0][1], ",", src_pts[i][0][0], ") - scene(", dst_pts[i][0][1], ",", dst_pts[i][0][0], ")")
        return M, mask

    def getComponents(self, M):
        # ((translationx, translationy), rotation, (scalex, scaley), shear)
        a = M[0, 0]
        b = M[0, 1]
        c = M[0, 2]
        d = M[1, 0]
        e = M[1, 1]
        f = M[1, 2]
        p = math.sqrt(a * a + b * b)
        r = (a * e - b * d) / p
        q = (a * d + b * e) / (a * e - b * d)
        translation = (c, f)
        scale = (p, r)  # p = x-axis, r = y-axis
        shear = q
        theta = math.atan2(b, a)
        degrees = math.atan2(b, a) * 180 / math.pi
        return (translation, theta, degrees, scale, shear)

    def __deskew(self, M):
        # this info might come in handy here for calculating the dsize of warpPerspective?
        translation, theta, degrees, scale, shear = self.getComponents(M)
        # Alpha channel allows me to set a unique marker on pixels that are created during warpPerspective
        imSkewedAlpha = cv2.cvtColor(self.imgSkewed, cv2.COLOR_BGR2BGRA)
        # These sizes have been chosen at random to make sure that all the content fits in the new canvas
        height = 5000
        width = 5000
        shift = -500
        M2 = np.array([[1, 0, shift],
                       [0, 1, shift],
                       [0, 0, 1]])
        M3 = np.dot(M, M2)
        # TODO: How can I calculate the dsize argument?
        # Newly created pixels are set to transparent
        im_out = cv2.warpPerspective(imSkewedAlpha, M3, (height, width),
                                     flags=cv2.WARP_INVERSE_MAP, borderMode=cv2.BORDER_CONSTANT,
                                     borderValue=(255, 0, 0, 0))
        # http://codereview.stackexchange.com/a/132933
        # Mask of non-transparent pixels (the alpha channel).
        mask = im_out[:, :, 3] == 255
        # Coordinates of non-transparent pixels.
        coords = np.argwhere(mask)
        # Bounding box of non-transparent pixels.
        x0, y0 = coords.min(axis=0)
        x1, y1 = coords.max(axis=0) + 1  # slices are exclusive at the top
        # Get the contents of the bounding box.
        cropped = im_out[x0:x1, y0:y1]
        # TODO: The warped image needs to align nicely on the original image
        return cropped
origImg = cv2.imread("Letter.png")
skewedImg = cv2.imread("A4.png")
deskewed = Deskewer().start(origImg, skewedImg)
cv2.imshow("Original", origImg)
cv2.imshow("Deskewed", deskewed)
cv2.waitKey(0)
Original and skewed image (with additional content) for testing

Using openCV to overlay transparent image onto another image

How can I overlay a transparent PNG onto another image without losing its transparency using OpenCV in Python?
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
# Help please
cv2.imwrite('combined.png', background)
Desired output:
Sources:
Background Image
Overlay
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
added_image = cv2.addWeighted(background,0.4,overlay,0.1,0)
cv2.imwrite('combined.png', added_image)
The correct answer to this was far too hard to come by, so I'm posting this answer even though the question is really old. What you are looking for is "over" compositing, and the algorithm for this can be found on Wikipedia: https://en.wikipedia.org/wiki/Alpha_compositing
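For reference, the "over" operator computes, per pixel (with alpha values normalized to 0-1):
alpha_out = alpha_fg + alpha_bg * (1 - alpha_fg)
C_out = (C_fg * alpha_fg + C_bg * alpha_bg * (1 - alpha_fg)) / alpha_out
The code below drops the division by alpha_out, which is harmless whenever the background is fully opaque (alpha_out is then 1).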
I am far from an expert with OpenCV, but after some experimentation this is the most efficient way I have found to accomplish the task:
import cv2
background = cv2.imread("background.png", cv2.IMREAD_UNCHANGED)
foreground = cv2.imread("overlay.png", cv2.IMREAD_UNCHANGED)
# normalize alpha channels from 0-255 to 0-1
alpha_background = background[:,:,3] / 255.0
alpha_foreground = foreground[:,:,3] / 255.0
# set adjusted colors
for color in range(0, 3):
    background[:,:,color] = alpha_foreground * foreground[:,:,color] + \
        alpha_background * background[:,:,color] * (1 - alpha_foreground)
# set adjusted alpha and denormalize back to 0-255
background[:,:,3] = (1 - (1 - alpha_foreground) * (1 - alpha_background)) * 255
# display the image
cv2.imshow("Composited image", background)
cv2.waitKey(0)
The following code will use the alpha channels of the overlay image to correctly blend it into the background image, use x and y to set the top-left corner of the overlay image.
import cv2
import numpy as np
def overlay_transparent(background, overlay, x, y):
    background_width = background.shape[1]
    background_height = background.shape[0]
    if x >= background_width or y >= background_height:
        return background
    h, w = overlay.shape[0], overlay.shape[1]
    if x + w > background_width:
        w = background_width - x
        overlay = overlay[:, :w]
    if y + h > background_height:
        h = background_height - y
        overlay = overlay[:h]
    if overlay.shape[2] < 4:
        overlay = np.concatenate(
            [
                overlay,
                np.ones((overlay.shape[0], overlay.shape[1], 1), dtype=overlay.dtype) * 255
            ],
            axis=2,
        )
    overlay_image = overlay[..., :3]
    mask = overlay[..., 3:] / 255.0
    background[y:y+h, x:x+w] = (1.0 - mask) * background[y:y+h, x:x+w] + mask * overlay_image
    return background
This code will mutate background so create a copy if you wish to preserve the original background image.
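For example, to keep the original intact (reusing the file names from the question):
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED)  # keep the alpha channel

result = overlay_transparent(background.copy(), overlay, 10, 10)
cv2.imwrite('combined.png', result)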
Been a while since this question appeared, but I believe this is the right simple answer, which could still help somebody.
background = cv2.imread('road.jpg')
overlay = cv2.imread('traffic sign.png')
rows,cols,channels = overlay.shape
overlay=cv2.addWeighted(background[250:250+rows, 0:0+cols],0.5,overlay,0.5,0)
background[250:250+rows, 0:0+cols ] = overlay
This will overlay the image over the background image, as shown here:
Ignore the ROI rectangles
Note that I used a background image of size 400x300 and an overlay image of size 32x32; the overlay is shown in the x[0-32] and y[250-282] part of the background image, according to the coordinates I set, so the blend is first calculated and then placed in the part of the image where I want it.
(The overlay is loaded from disk, not from the background image itself; unfortunately the overlay image has its own white background, so you can see that in the result too.)
If performance isn't a concern then you can iterate over each pixel of the overlay and apply it to the background. This isn't very efficient, but it does help to understand how to work with png's alpha layer.
slow version
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
height, width = overlay.shape[:2]
for y in range(height):
    for x in range(width):
        overlay_color = overlay[y, x, :3]  # first three elements are color (RGB)
        overlay_alpha = overlay[y, x, 3] / 255  # 4th element is the alpha channel, convert from 0-255 to 0.0-1.0
        # get the color from the background image
        background_color = background[y, x]
        # combine the background color and the overlay color weighted by alpha
        composite_color = background_color * (1 - overlay_alpha) + overlay_color * overlay_alpha
        # update the background image in place
        background[y, x] = composite_color
cv2.imwrite('combined.png', background)
result:
fast version
I stumbled across this question while trying to add a png overlay to a live video feed. The above solution is way too slow for that. We can make the algorithm significantly faster by using numpy's vector functions.
note: This was my first real foray into numpy so there may be better/faster methods than what I've come up with.
import cv2
import numpy as np
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
# separate the alpha channel from the color channels
alpha_channel = overlay[:, :, 3] / 255 # convert from 0-255 to 0.0-1.0
overlay_colors = overlay[:, :, :3]
# To take advantage of the speed of numpy and apply transformations to the entire image with a single operation
# the arrays need to be the same shape. However, the shapes currently looks like this:
# - overlay_colors shape:(width, height, 3) 3 color values for each pixel, (red, green, blue)
# - alpha_channel shape:(width, height, 1) 1 single alpha value for each pixel
# We will construct an alpha_mask that has the same shape as the overlay_colors by duplicating the alpha channel
# for each color, so there is a 1:1 alpha channel for each color channel
alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))
# The background image is larger than the overlay so we'll take a subsection of the background that matches the
# dimensions of the overlay.
# NOTE: For simplicity, the overlay is applied to the top-left corner of the background(0,0). An x and y offset
# could be used to place the overlay at any position on the background.
h, w = overlay.shape[:2]
background_subsection = background[0:h, 0:w]
# combine the background with the overlay image weighted by alpha
composite = background_subsection * (1 - alpha_mask) + overlay_colors * alpha_mask
# overwrite the section of the background image that has been updated
background[0:h, 0:w] = composite
cv2.imwrite('combined.png', background)
How much faster? On my machine the slow method takes ~3 seconds and the optimized method takes ~30 ms. So, about 100 times faster!
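If you want to check the numbers on your own machine, a rough harness looks like this (overlay_slow and overlay_fast are hypothetical wrappers around the two snippets above, each taking a background copy and the overlay; those names are mine, not from the code):
import time

def benchmark(fn, background, overlay, runs=10):
    # average wall-clock time over several runs; copy so every run
    # starts from the unmodified background
    start = time.perf_counter()
    for _ in range(runs):
        fn(background.copy(), overlay)
    return (time.perf_counter() - start) / runs

print('slow:', benchmark(overlay_slow, background, overlay))
print('fast:', benchmark(overlay_fast, background, overlay))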
Wrapped up in a function
This function handles foreground and background images of different sizes and also supports negative and positive offsets that move the overlay across the bounds of the background image in any direction.
import cv2
import numpy as np
def add_transparent_image(background, foreground, x_offset=None, y_offset=None):
    bg_h, bg_w, bg_channels = background.shape
    fg_h, fg_w, fg_channels = foreground.shape
    assert bg_channels == 3, f'background image should have exactly 3 channels (RGB). found:{bg_channels}'
    assert fg_channels == 4, f'foreground image should have exactly 4 channels (RGBA). found:{fg_channels}'
    # center by default
    if x_offset is None: x_offset = (bg_w - fg_w) // 2
    if y_offset is None: y_offset = (bg_h - fg_h) // 2
    w = min(fg_w, bg_w, fg_w + x_offset, bg_w - x_offset)
    h = min(fg_h, bg_h, fg_h + y_offset, bg_h - y_offset)
    if w < 1 or h < 1: return
    # clip foreground and background images to the overlapping regions
    bg_x = max(0, x_offset)
    bg_y = max(0, y_offset)
    fg_x = max(0, x_offset * -1)
    fg_y = max(0, y_offset * -1)
    foreground = foreground[fg_y:fg_y + h, fg_x:fg_x + w]
    background_subsection = background[bg_y:bg_y + h, bg_x:bg_x + w]
    # separate alpha and color channels from the foreground image
    foreground_colors = foreground[:, :, :3]
    alpha_channel = foreground[:, :, 3] / 255  # 0-255 => 0.0-1.0
    # construct an alpha_mask that matches the image shape
    alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))
    # combine the background with the overlay image weighted by alpha
    composite = background_subsection * (1 - alpha_mask) + foreground_colors * alpha_mask
    # overwrite the section of the background image that has been updated
    background[bg_y:bg_y + h, bg_x:bg_x + w] = composite
example usage:
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
x_offset = 0
y_offset = 0
print("arrow keys to move the dice. ESC to quit")
while True:
    img = background.copy()
    add_transparent_image(img, overlay, x_offset, y_offset)
    cv2.imshow("", img)
    key = cv2.waitKey()
    if key == 0: y_offset -= 10  # up
    if key == 1: y_offset += 10  # down
    if key == 2: x_offset -= 10  # left
    if key == 3: x_offset += 10  # right
    if key == 27: break  # escape
You need to open the transparent png image using the flag IMREAD_UNCHANGED
Mat overlay = cv::imread("dice.png", IMREAD_UNCHANGED);
Then split the channels, group the RGB channels, and use the alpha channel as a mask, like this:
/**
 * @brief Draws a transparent image over a frame Mat.
 *
 * @param frame the frame where the transparent image will be drawn
 * @param transp the Mat image with transparency, read from a PNG image, with the IMREAD_UNCHANGED flag
 * @param xPos x position of the frame image where the image will start.
 * @param yPos y position of the frame image where the image will start.
 */
void drawTransparency(Mat frame, Mat transp, int xPos, int yPos) {
    Mat mask;
    vector<Mat> layers;
    split(transp, layers); // separate channels
    Mat rgb[3] = { layers[0], layers[1], layers[2] };
    mask = layers[3]; // png's alpha channel used as mask
    merge(rgb, 3, transp); // put together the RGB channels, now transp isn't transparent
    transp.copyTo(frame.rowRange(yPos, yPos + transp.rows).colRange(xPos, xPos + transp.cols), mask);
}
It can be called like this:
drawTransparency(background, overlay, 10, 10);
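For completeness, a rough Python translation of the same split/mask/copy idea (my own sketch, untested against the C++ version; it assumes a BGRA overlay that fits inside the frame, and reproduces copyTo's hard-mask behaviour with numpy boolean indexing):
def draw_transparency(frame, transp, x_pos, y_pos):
    # split the BGRA overlay into colour and alpha
    bgr = transp[:, :, :3]
    alpha = transp[:, :, 3]
    h, w = transp.shape[:2]
    # roi is a view into frame, so writing to it modifies frame in place
    roi = frame[y_pos:y_pos + h, x_pos:x_pos + w]
    # copy only the pixels where the alpha mask is non-zero
    roi[alpha > 0] = bgr[alpha > 0]

draw_transparency(background, overlay, 10, 10)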
To overlay a PNG watermark image over a normal 3-channel JPEG image:
import cv2
import numpy as np

def logoOverlay(image, logo, alpha=1.0, x=0, y=0, scale=1.0):
    (h, w) = image.shape[:2]
    image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])

    overlay = cv2.resize(logo, None, fx=scale, fy=scale)
    (wH, wW) = overlay.shape[:2]
    output = image.copy()
    # blend the two images together using transparent overlays
    try:
        if x < 0: x = w + x
        if y < 0: y = h + y
        if x + wW > w: wW = w - x
        if y + wH > h: wH = h - y
        print(x, y, wW, wH)
        overlay = cv2.addWeighted(output[y:y+wH, x:x+wW], alpha, overlay[:wH, :wW], 1.0, 0)
        output[y:y+wH, x:x+wW] = overlay
    except Exception as e:
        print("Error: Logo position is overshooting image!")
        print(e)

    output = output[:, :, :3]
    return output
Usage:
background = cv2.imread('image.jpeg')
overlay = cv2.imread('logo.png', cv2.IMREAD_UNCHANGED)

print(overlay.shape)  # must be (x,y,4)
print(background.shape)  # must be (x,y,3)

# downscale logo by half and position on bottom right reference
out = logoOverlay(background, overlay, scale=0.5, y=-100, x=-100)

cv2.imshow("test", out)
cv2.waitKey(0)
import cv2
import numpy as np
background = cv2.imread('background.jpg')
overlay = cv2.imread('cloudy.png')
overlay = cv2.resize(overlay, (200,200))
# overlay = for_transparent_removal(overlay)
h, w = overlay.shape[:2]
shapes = np.zeros_like(background, np.uint8)
shapes[0:h, 0:w] = overlay
alpha = 0.8
mask = shapes.astype(bool)
# option first
background[mask] = cv2.addWeighted(shapes, alpha, shapes, 1 - alpha, 0)[mask]
cv2.imwrite('combined.png', background)
# option second
background[mask] = cv2.addWeighted(background, alpha, overlay, 1 - alpha, 0)[mask]
# NOTE: both options above produce an overlay, but the visual effect differs
cv2.imwrite('combined.1.png', background)
Use this function to place your overlay on any background image. If you want to resize the overlay, use overlay = cv2.resize(overlay, (200,200)) and then pass the resized overlay into the function.
import cv2
import numpy as np
def image_overlay_second_method(img1, img2, location, min_thresh=0, is_transparent=False):
    h, w = img1.shape[:2]
    h1, w1 = img2.shape[:2]
    x, y = location
    roi = img1[y:y + h1, x:x + w1]
    gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, min_thresh, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img_fg = cv2.bitwise_and(img2, img2, mask=mask)
    dst = cv2.add(img_bg, img_fg)
    if is_transparent:
        dst = cv2.addWeighted(img1[y:y + h1, x:x + w1], 0.1, dst, 0.9, None)
    img1[y:y + h1, x:x + w1] = dst
    return img1

if __name__ == '__main__':
    background = cv2.imread('background.jpg')
    overlay = cv2.imread('overlay.png')
    # the original snippet called image_overlay_third_method, which doesn't exist
    output = image_overlay_second_method(background, overlay, location=(800, 50), min_thresh=0, is_transparent=True)
    cv2.imwrite('output.png', output)
background.jpg
output.png
