How to warp a document image in Python correctly?

Using the approach from this link, I generated a deformed mesh:
inputs = cv2.imread("../datasets/images/0.jpg")
nh, nw = inputs.shape[0]//8, inputs.shape[1]//8
inputs = cv2.resize(inputs, dsize=(nw, nh), interpolation=cv2.INTER_AREA)  # dsize is (width, height)
mr = nh
mc = nw
xx = np.arange(mr-1, -1, -1)
yy = np.arange(0, mc, 1)
[Y, X] = np.meshgrid(xx, yy)
ms = np.transpose(np.asarray([X.flatten('F'), Y.flatten('F')]), (1,0))
perturbed_mesh = ms
nv = np.random.randint(20) - 1
for k in range(nv):
    # Choosing one vertex randomly
    vidx = np.random.randint(np.shape(ms)[0])
    vtex = ms[vidx, :]
    # Vector between all vertices and the selected one
    xv = perturbed_mesh - vtex
    # Random movement
    mv = (np.random.rand(1, 2) - 0.5) * 20
    hxv = np.zeros((np.shape(xv)[0], np.shape(xv)[1] + 1))
    hxv[:, :-1] = xv
    hmv = np.tile(np.append(mv, 0), (np.shape(xv)[0], 1))
    d = np.cross(hxv, hmv)
    d = np.absolute(d[:, 2])
    d = d / (np.linalg.norm(mv, ord=2))
    wt = d
    curve_type = np.random.rand(1)
    if curve_type > 0.3:
        alpha = np.random.rand(1) * 50 + 50
        wt = alpha / (wt + alpha)
    else:
        alpha = np.random.rand(1) + 1
        wt = 1 - (wt / 100)**alpha
    msmv = mv * np.expand_dims(wt, axis=1)
    perturbed_mesh = perturbed_mesh + msmv
So I got a mesh like this:
Then I tried to map the source image pixels onto the generated mesh.
img = cv2.copyMakeBorder(inputs, dh, dh, dw, dw, borderType=cv2.BORDER_CONSTANT, value=(0,0,0))
xs, ys = perturbed_mesh[:, 0], perturbed_mesh[:, 1]
xs = xs.reshape(nh, nw).astype(np.float32)
ys = ys.reshape(nh, nw).astype(np.float32)
dst = cv2.remap(img, xs, ys, cv2.INTER_CUBIC)
plt.imshow(dst)
Finally, I got the result:
But this image has the document in the corner, so I can't use it.
How do I map the document onto the center of the image?

Here is an example of what I did for a perspective warp in Python/OpenCV. It will show you how I achieved the expanded view of the output. Not only did I increase the output size, but I also shifted the output control points. I shifted by +500 px and doubled that to +1000 for the output size.
Input:
No Expand Case:
import numpy as np
import cv2
# read input
img = cv2.imread("building.jpg")
# resize
height,width = 1000,1500
img = cv2.resize(img, (width,height))
# specify conjugate coordinates (no output shift in this case)
pts1 = np.float32([[ 250, 0],[1220, 300],[1300, 770],[ 250, 860]])
pts2 = np.float32([[0,0],[width,0],[width,height],[0,height]])
# compute perspective matrix
matrix = cv2.getPerspectiveTransform(pts1,pts2)
print(matrix.shape)
print(matrix)
# convert image to BGRA with opaque alpha
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# do perspective transformation setting area outside input to transparent
imgOutput = cv2.warpPerspective(img, matrix, (width,height), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# resize output, since it is too large to post
imgOutput = cv2.resize(imgOutput, (width,height))
# save the warped output
cv2.imwrite("building_warped_unexpanded.png", imgOutput)
# show the result
cv2.imshow("result", imgOutput)
cv2.waitKey(0)
cv2.destroyAllWindows()
No Expand Warped Result:
Expanded Case:
import numpy as np
import cv2
# read input
img = cv2.imread("building.jpg")
# resize
height,width = 1000,1500
img = cv2.resize(img, (width,height))
# specify conjugate coordinates and shift output on left and top
pts1 = np.float32([[ 250, 0],[1220, 300],[1300, 770],[ 250, 860]])
pts2 = np.float32([[+500,+500],[width+500,+500],[width+500,height+500],[+500,height+500]])
# compute perspective matrix
matrix = cv2.getPerspectiveTransform(pts1,pts2)
print(matrix.shape)
print(matrix)
# convert image to BGRA with opaque alpha
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# do perspective transformation setting area outside input to transparent
# extend output size so extended by 500 all around
imgOutput = cv2.warpPerspective(img, matrix, (width+1000,height+1000), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# resize output, since it is too large to post
imgOutput = cv2.resize(imgOutput, (width,height))
# save the warped output
cv2.imwrite("building_warped.jpg", imgOutput)
# show the result
cv2.imshow("result", imgOutput)
cv2.waitKey(0)
cv2.destroyAllWindows()
Expanded Result:

Related

How to make the eye and nose bigger or smaller in OpenCV and Python

I used the following code to select the nose in OpenCV and Python. I searched a lot to find a way to change the size of the nose and save the result as another image, but I didn't find anything. Can anybody help me do this?
import cv2
import numpy as np
import dlib
from google.colab.patches import cv2_imshow  # cv2_imshow is the Colab display helper; use cv2.imshow outside Colab

img = cv2.imread('1.jpg')
img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
imgOriginal = img.copy()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

def createBox(img, points, scale=5):
    bbox = cv2.boundingRect(points)
    x, y, w, h = bbox
    imgCrop = img[y:y+h, x:x+w]
    imgCrop = cv2.resize(imgCrop, (0, 0), None, scale, scale)
    return imgCrop

imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector(imgGray)
for face in faces:
    x1, y1 = face.left(), face.top()
    x2, y2 = face.right(), face.bottom()
    imgOriginal = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
    landmarks = predictor(imgGray, face)
    myPoints = []
    for n in range(68):
        x = landmarks.part(n).x
        y = landmarks.part(n).y
        myPoints.append([x, y])
        #cv2.circle(imgOriginal,(x,y),5,(50,50,255),cv2.FILLED)
        #cv2.putText(imgOriginal,str(n),(x,y-10),cv2.FONT_HERSHEY_COMPLEX_SMALL,0.8,(0,0,255),1)
    myPoints = np.array(myPoints)
    #nose points to select
    #nose_points = myPoints[27:35]
    print(myPoints)
cv2_imshow(imgOriginal)
cv2.waitKey(0)
thanks in advance
Here is one way using a spherical (bubble) warp in a local region in Python/OpenCV.
- Define region center and radius and amount of spherical distortion
- Crop the image for that center and radius
- Compute the spherical distortion x and y displacement maps and a binary mask
- Apply the distortion maps using cv2.remap
- Antialias the mask
- Merge the distorted and cropped image using the mask
- Insert that merged image into the original image
- Save the results
Input:
import numpy as np
import cv2
import math
import skimage.exposure
img = cv2.imread("portrait_of_mussorgsky2.jpg")
# set location and radius
cx = 130
cy = 109
radius = 30
# set distortion gain
gain = 1.5
# crop image
crop = img[cy-radius:cy+radius, cx-radius:cx+radius]
# get dimensions
ht, wd = crop.shape[:2]
xcent = wd / 2
ycent = ht / 2
rad = min(xcent,ycent)
# set up the x and y maps as float32
map_x = np.zeros((ht, wd), np.float32)
map_y = np.zeros((ht, wd), np.float32)
mask = np.zeros((ht, wd), np.uint8)
# create map with the spherize distortion formula --- arcsin(r)
# xcomp = arcsin(r)*x/r; ycomp = arcsin(r)*y/r
for y in range(ht):
    Y = (y - ycent) / ycent
    for x in range(wd):
        X = (x - xcent) / xcent
        R = math.hypot(X, Y)
        if R == 0:
            map_x[y, x] = x
            map_y[y, x] = y
            mask[y, x] = 255
        elif R >= .90:    # avoid extreme blurring near R = 1
            map_x[y, x] = x
            map_y[y, x] = y
            mask[y, x] = 0
        elif gain >= 0:
            map_x[y, x] = xcent*X*math.pow((2/math.pi)*(math.asin(R)/R), gain) + xcent
            map_y[y, x] = ycent*Y*math.pow((2/math.pi)*(math.asin(R)/R), gain) + ycent
            mask[y, x] = 255
        elif gain < 0:
            gain2 = -gain
            map_x[y, x] = xcent*X*math.pow((math.sin(math.pi*R/2)/R), gain2) + xcent
            map_y[y, x] = ycent*Y*math.pow((math.sin(math.pi*R/2)/R), gain2) + ycent
            mask[y, x] = 255
# remap using map_x and map_y
bump = cv2.remap(crop, map_x, map_y, cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# antialias edge of mask
# (pad so blur does not extend to edges of image, then crop later)
blur = 7
mask = cv2.copyMakeBorder(mask, blur,blur,blur,blur, borderType=cv2.BORDER_CONSTANT, value=(0))
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=blur, sigmaY=blur, borderType = cv2.BORDER_DEFAULT)
h, w = mask.shape
mask = mask[blur:h-blur, blur:w-blur]
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
mask = skimage.exposure.rescale_intensity(mask, in_range=(127.5,255), out_range=(0,1))
# merge bump with crop using grayscale (not binary) mask
bumped = (bump * mask + crop * (1-mask)).clip(0,255).astype(np.uint8)
# insert bumped image into original
result = img.copy()
result[cy-radius:cy+radius, cx-radius:cx+radius] = bumped
# save results
cv2.imwrite("portrait_of_mussorgsky2_bump.jpg", result)
# display images
cv2.imshow('img', img)
cv2.imshow('crop', crop)
cv2.imshow('bump', bump)
cv2.imshow('mask', mask)
cv2.imshow('bumped', bumped)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Resulting Image:
I think you need "Bulge" effects such as implode and explode. There is no implementation of these filters in OpenCV, but you can find them in other tools such as Wand (a Python binding for ImageMagick), which has implode/explode.
Example (wand):
from wand.image import Image

with Image(filename="test.jpg") as img:
    img.implode(amount=-0.2)
    img.save(filename="destination.jpg")
    # img_array = numpy.asarray(img) --> you can convert wand.image.Image to a numpy array for further use
Passing negative values into the implode function is equivalent to doing an explode, so for a magnifying effect use negative values.
There is one problem though: img.implode operates on the center of the image, so after you've found the face features (eye, nose, ...) you need to move your picture somehow so that the eye or nose lies at the center of the image. After that you can simply apply implode; a rough sketch of that crop-implode-paste idea is shown below.
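A minimal sketch of that idea (the helper name implode_region, the feature centre (cx, cy) and the patch radius are illustrative and not from the original post; Image.from_array needs Wand 0.5.3 or newer, and (cx, cy) would come from the dlib landmarks):
import cv2
import numpy as np
from wand.image import Image

def implode_region(img, cx, cy, radius, amount=-0.3):
    # crop a square patch centered on the feature, so implode acts on its center
    x0, y0 = max(cx - radius, 0), max(cy - radius, 0)
    patch = img[y0:y0 + 2*radius, x0:x0 + 2*radius].copy()
    with Image.from_array(patch) as wpatch:
        wpatch.implode(amount=amount)        # negative amount magnifies (explode)
        warped = np.array(wpatch)[:, :, :3]  # back to a numpy array, drop any alpha
    out = img.copy()
    out[y0:y0 + warped.shape[0], x0:x0 + warped.shape[1]] = warped
    return out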

Image goes black after thresholding

I am trying to extract the blood network from this face image: Face image
For this task I am using the Perona-Malik (P&M) anisotropic diffusion found in this question: Anisotropic diffusion 2d images. Then I apply a top-hat transform followed by a black-hat transform, and afterwards a simple threshold that sets to 255 all pixels whose intensity is above a given value.
The problem is that, after I apply the threshold and try to open the image, whichever way I try, the image is displayed as fully black:
In short, my goal is to extract the blood vessels using P&M anisotropic diffusion with a flat 5x5 disk structuring element, then apply top-hat and black-hat respectively, then a simple threshold, and actually be able to view the image afterwards.
Here's the code I am trying:
import cv2
import numpy as np
import matplotlib.image as mpimg
import warnings

face_img = mpimg.imread('path')
def anisodiff(img, niter=1, kappa=50, gamma=0.1, step=(1., 1.), option=1):
    if img.ndim == 3:
        m = "Only grayscale images allowed, converting to 2D matrix"
        warnings.warn(m)
        img = img.mean(2)
    img = img.astype('float32')
    imgout = img.copy()
    deltaS = np.zeros_like(imgout)
    deltaE = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    gS = np.ones_like(imgout)
    gE = gS.copy()
    for ii in range(niter):
        deltaS[:-1, :] = np.diff(imgout, axis=0)
        deltaE[:, :-1] = np.diff(imgout, axis=1)
        if option == 1:
            gS = np.exp(-(deltaS/kappa)**2.)/step[0]
            gE = np.exp(-(deltaE/kappa)**2.)/step[1]
        elif option == 2:
            gS = 1./(1.+(deltaS/kappa)**2.)/step[0]
            gE = 1./(1.+(deltaE/kappa)**2.)/step[1]
        E = gE*deltaE
        S = gS*deltaS
        NS[:] = S
        EW[:] = E
        NS[1:, :] -= S[:-1, :]
        EW[:, 1:] -= E[:, :-1]
        imgout += gamma*(NS+EW)
    return imgout
new_img = anisodiff(face_img, niter=1, kappa=20, gamma=0.1, step=(1., 1.), option=1)
filterSize = (3, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, filterSize)
input_image = new_img
first_tophat_img = cv2.morphologyEx(input_image, cv2.MORPH_TOPHAT, kernel)
filterSize = (3, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, filterSize)
second_tophat_img = cv2.morphologyEx(input_image, cv2.MORPH_BLACKHAT, kernel)
ret, thresh1 = cv2.threshold(second_tophat_img, 200, 255, cv2.THRESH_BINARY)
Even when I set the threshold to 254, for instance, the image goes black.
I executed a simple MATLAB implementation, and got a nice result.
MATLAB code:
I = imread('02_giorgos_1_f_M_30_830.tif');
I = im2double(uint8(I));
J = imdiffusefilt(I);
K = imtophat(J, ones(3));
figure;imshow(imadjust(K, stretchlim(K)));
Result:
I don't know if you know MATLAB, but I used the default arguments of imdiffusefilt (equivalent to anisodiff in your code).
Default MATLAB arguments are equivalent to:
Input image is in range [0, 1] and not [0, 255].
niter=5 (note: you used only 1 iteration and it's not enough).
kappa=0.1
gamma=0.125
MATLAB default is 8 neighbors connectivity (not 4 neighbors as used in anisodiff).
8 neighbors connectivity:
To get the same result as in MATLAB, I implemented an 8-neighbor-connectivity anisotropic diffusion (based on the MATLAB source code).
Note: with 4-neighbor connectivity it works, but the result is not as nice as with 8 neighbors.
Displaying the output image:
In order to display the output image correctly, I used imadjust(K, stretchlim(K)).
The command stretches the range of the input image such that percentile 1 goes to 0, and percentile 99 goes to 1 (linear stretch).
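A roughly equivalent stretch in Python/numpy (a sketch of the same idea, where K stands for the top-hat result; not the exact imadjust semantics):
lo, hi = np.percentile(K, (1, 99))            # percentile 1 and 99 of the image
K_stretched = np.clip((K - lo) / (hi - lo), 0, 1)  # linear stretch: lo -> 0, hi -> 1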
One more thing:
Instead of using fixed threshold of 200, I used percentile 95 threshold:
t = np.percentile(first_tophat_img, 95)
ret, thresh1 = cv2.threshold(first_tophat_img, t, 255,
cv2.THRESH_BINARY)
Here is the code (uses cv2.imshow for testing):
import cv2
import numpy as np
import matplotlib.image as mpimg
import warnings
face_img = mpimg.imread('02_giorgos_1_f_M_30_830.tif')
def anisodiff8neighbors(img, niter=5, kappa=0.1, gamma=0.125):
    """ See https://www.mathworks.com/help/images/ref/imdiffusefilt.html
    Anisotropic diffusion filtering with 8 neighbors
    Range of img is assumed to be [0, 1] (not [0, 255]).
    """
    if img.ndim == 3:
        m = "Only grayscale images allowed, converting to 2D matrix"
        warnings.warn(m)
        img = img.mean(2)
    img = img.astype('float32')
    imgout = img.copy()
    for ii in range(niter):
        # MATLAB source code is commented
        #paddedImg = padarray(I, [1 1], 'replicate');
        padded_img = np.pad(imgout, (1, 1), 'edge')
        #diffImgNorth = paddedImg(1:end-1,2:end-1) - paddedImg(2:end,2:end-1);
        #diffImgEast = paddedImg(2:end-1,2:end) - paddedImg(2:end-1,1:end-1);
        #diffImgNorthWest = paddedImg(1:end-2,1:end-2) - I;
        #diffImgNorthEast = paddedImg(1:end-2,3:end) - I;
        #diffImgSouthWest = paddedImg(3:end,1:end-2) - I;
        #diffImgSouthEast = paddedImg(3:end,3:end) - I;
        diff_img_north = padded_img[0:-1, 1:-1] - padded_img[1:, 1:-1]
        diff_img_east = padded_img[1:-1, 1:] - padded_img[1:-1, 0:-1]
        diff_img_north_west = padded_img[0:-2, 0:-2] - imgout
        diff_img_north_east = padded_img[0:-2, 2:] - imgout
        diff_img_south_west = padded_img[2:, 0:-2] - imgout
        diff_img_south_east = padded_img[2:, 2:] - imgout
        #case 'exponential'
        #conductCoeffNorth = exp(-(abs(diffImgNorth)/gradientThreshold).^2);
        #conductCoeffEast = exp(-(abs(diffImgEast)/gradientThreshold).^2);
        #conductCoeffNorthWest = exp(-(abs(diffImgNorthWest)/gradientThreshold).^2);
        #conductCoeffNorthEast = exp(-(abs(diffImgNorthEast)/gradientThreshold).^2);
        #conductCoeffSouthWest = exp(-(abs(diffImgSouthWest)/gradientThreshold).^2);
        #conductCoeffSouthEast = exp(-(abs(diffImgSouthEast)/gradientThreshold).^2);
        conduct_coeff_north = np.exp(-(np.abs(diff_img_north)/kappa)**2.0)
        conduct_coeff_east = np.exp(-(np.abs(diff_img_east)/kappa)**2.0)
        conduct_coeff_north_west = np.exp(-(np.abs(diff_img_north_west)/kappa)**2.0)
        conduct_coeff_north_east = np.exp(-(np.abs(diff_img_north_east)/kappa)**2.0)
        conduct_coeff_south_west = np.exp(-(np.abs(diff_img_south_west)/kappa)**2.0)
        conduct_coeff_south_east = np.exp(-(np.abs(diff_img_south_east)/kappa)**2.0)
        #fluxNorth = conductCoeffNorth .* diffImgNorth;
        #fluxEast = conductCoeffEast .* diffImgEast;
        #fluxNorthWest = conductCoeffNorthWest .* diffImgNorthWest;
        #fluxNorthEast = conductCoeffNorthEast .* diffImgNorthEast;
        #fluxSouthWest = conductCoeffSouthWest .* diffImgSouthWest;
        #fluxSouthEast = conductCoeffSouthEast .* diffImgSouthEast;
        flux_north = conduct_coeff_north * diff_img_north
        flux_east = conduct_coeff_east * diff_img_east
        flux_north_west = conduct_coeff_north_west * diff_img_north_west
        flux_north_east = conduct_coeff_north_east * diff_img_north_east
        flux_south_west = conduct_coeff_south_west * diff_img_south_west
        flux_south_east = conduct_coeff_south_east * diff_img_south_east
        #% Discrete PDE solution
        #I = I + diffusionRate * (fluxNorth(1:end-1,:) - fluxNorth(2:end,:) + ...
        #    fluxEast(:,2:end) - fluxEast(:,1:end-1) + (1/(dd^2)).* fluxNorthWest + ...
        #    (1/(dd^2)).* fluxNorthEast + (1/(dd^2)).* fluxSouthWest + (1/(dd^2)).* fluxSouthEast);
        imgout = imgout + gamma * (flux_north[0:-1, :] - flux_north[1:, :] +
                                   flux_east[:, 1:] - flux_east[:, 0:-1] + 0.5*flux_north_west +
                                   0.5*flux_north_east + 0.5*flux_south_west + 0.5*flux_south_east)
    return imgout
#new_img = anisodiff(face_img, niter=1, kappa=20, gamma=0.1, step=(1., 1.), option=1)
face_img = face_img.astype(float) / 255
#new_img = anisodiff(face_img, niter=5, kappa=0.1, gamma=0.125, step=(1., 1.), option=1)
new_img = anisodiff8neighbors(face_img, niter=5, kappa=0.1, gamma=0.125)
filterSize = (3, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, filterSize)
input_image = new_img
first_tophat_img = cv2.morphologyEx(input_image, cv2.MORPH_TOPHAT, kernel)
# Use percentile 95 (of image) as threshold instead of fixed threshold 200
t = np.percentile(first_tophat_img, 95)
ret, thresh1 = cv2.threshold(first_tophat_img, t, 255, cv2.THRESH_BINARY)
cv2.imshow('thresh1', thresh1)
filterSize = (3, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, filterSize)
second_tophat_img = cv2.morphologyEx(input_image, cv2.MORPH_BLACKHAT, kernel)
#ret, thresh1 = cv2.threshold(second_tophat_img, 200, 255, cv2.THRESH_BINARY)
# Use percentile 95 (of image) as threshold instead of fixed threshold 200
t = np.percentile(second_tophat_img, 95)
ret, thresh2 = cv2.threshold(second_tophat_img, t, 255, cv2.THRESH_BINARY)
cv2.imshow('thresh2', thresh2)
lo, hi = np.percentile(first_tophat_img, (1, 99))
first_tophat_img_stretched = (first_tophat_img.astype(float) - lo) / (hi - lo)  # Apply linear "stretch" - lo goes to 0, and hi goes to 1
cv2.imshow('first_tophat_img_stretched', first_tophat_img_stretched)
cv2.waitKey()
cv2.destroyAllWindows()
Result:
thresh1:
thresh2:
first_tophat_img_stretched:

What is the correct way to undistort points captured using fisheye camera in OpenCV in Python?

INFO:
I've calibrated my camera and have found the camera's intrinsics matrix (K) and its distortion coefficients (d) to be the following:
import numpy as np
K = np.asarray([[556.3834638575809,0,955.3259939726225],[0,556.2366649196925,547.3011305411478],[0,0,1]])
d = np.asarray([[-0.05165940570900624],[0.0031093602070252167],[-0.0034036648250202746],[0.0003390345044343793]])
From here, I can undistort my image using the following three lines:
final_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(K, d, (1920, 1080), np.eye(3), balance=1.0)
map_1, map_2 = cv2.fisheye.initUndistortRectifyMap(K, d, np.eye(3), final_K, (1920, 1080), cv2.CV_32FC1)
undistorted_image = cv2.remap(image, map_1, map_2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
The resulting undistorted image appears to be correct (left image is distorted, right is undistorted), but when I try to undistort image points using cv2.remap(), the points aren't mapped to the same location as their corresponding pixel in the image. I detected the calibration board points in the left image using
ret, corners = cv2.findChessboardCorners(gray, (6,8),cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)
corners2 = cv2.cornerSubPix(gray, corners, (3,3), (-1,-1), (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
then remapped those points in the following way:
remapped_points = []
for corner in corners2:
    remapped_points.append(
        (map_1[int(corner[0][1]), int(corner[0][0])], map_2[int(corner[0][1]), int(corner[0][0])])
    )
In these horizontally concatenated images, the left image shows the points detected in the distorted image, while the right image shows the remapped location of the points in the right image.
Also, I haven't been able to get correct results using cv2.fisheye.undistortPoints(). I have the following function to undistort points:
def undistort_list_of_points(point_list, in_K, in_d):
    K = np.asarray(in_K)
    d = np.asarray(in_d)
    # Input can be list of bbox coords, poly coords, etc.
    # TODO -- Check if point behind camera?
    points_2d = np.asarray(point_list)
    points_2d = points_2d[:, 0:2].astype('float32')
    points2d_undist = np.empty_like(points_2d)
    points_2d = np.expand_dims(points_2d, axis=1)
    result = np.squeeze(cv2.fisheye.undistortPoints(points_2d, K, d))
    fx = K[0, 0]
    fy = K[1, 1]
    cx = K[0, 2]
    cy = K[1, 2]
    for i, (px, py) in enumerate(result):
        points2d_undist[i, 0] = px * fx + cx
        points2d_undist[i, 1] = py * fy + cy
    return points2d_undist
This image shows the results when undistorting using the above function.
(this is all running in OpenCV 4.2.0 on Ubuntu 18.04 in Python 3.6.8)
QUESTIONS
Why isn't this remapping of image coordinates working properly? Am I using map_1 and map_2 incorrectly?
Why are the results from using cv2.fisheye.undistortPoints() different from using map_1 and map_2?
Answer to Q1:
You are not using map_1 and map_2 correctly.
The maps generated by the cv2.fisheye.initUndistortRectifyMap function are the mapping from the pixel locations of the destination image to the pixel locations of the source image, i.e. dst(x,y) = src(mapx(x,y), mapy(x,y)). See remap in OpenCV.
In the code, map_1 is for the x-direction pixel mapping and map_2 is for the y-direction pixel mapping. For example,
(X_undistorted, Y_undistorted) is a pixel location in the undistorted image. map_1[Y_undistorted, X_undistorted] gives you the x coordinate in the distorted image that this pixel is sampled from, and map_2 gives you the corresponding y coordinate.
So, map_1 and map_2 are useful for constructing an undistorted image from a distorted image, and not really suitable for the reverse process.
remapped_points = []
for corner in corners2:
    remapped_points.append(
        (map_1[int(corner[0][1]), int(corner[0][0])], map_2[int(corner[0][1]), int(corner[0][0])]))
This code to find the undistorted pixel locations of the corners is not correct. You will need to use the undistortPoints function.
Answer to Q2:
The mapping and undistortion are different.
You can think of mapping as constructing the undistorted image based on the pixel locations in the undistorted image using the pixel maps, while undistortion finds the undistorted pixel locations from the original pixel locations using the lens distortion model.
In order to find the correct pixel locations of the corners in the undistorted image, you need to convert the normalized coordinates of the undistorted points back to pixel coordinates using the newly estimated K; in your case it's the final_K, because the undistorted image can be seen as taken by a camera with final_K and no distortion (there is a small zooming effect).
Here is the modified undistort function:
def undistort_list_of_points(point_list, in_K, in_d, in_K_new):
    K = np.asarray(in_K)
    d = np.asarray(in_d)
    # Input can be list of bbox coords, poly coords, etc.
    # TODO -- Check if point behind camera?
    points_2d = np.asarray(point_list)
    points_2d = points_2d[:, 0:2].astype('float32')
    points2d_undist = np.empty_like(points_2d)
    points_2d = np.expand_dims(points_2d, axis=1)
    result = np.squeeze(cv2.fisheye.undistortPoints(points_2d, K, d))
    K_new = np.asarray(in_K_new)
    fx = K_new[0, 0]
    fy = K_new[1, 1]
    cx = K_new[0, 2]
    cy = K_new[1, 2]
    for i, (px, py) in enumerate(result):
        points2d_undist[i, 0] = px * fx + cx
        points2d_undist[i, 1] = py * fy + cy
    return points2d_undist
Here is my code for doing the same thing.
import cv2
import numpy as np
import matplotlib.pyplot as plt
K = np.asarray([[556.3834638575809,0,955.3259939726225],[0,556.2366649196925,547.3011305411478],[0,0,1]])
D = np.asarray([[-0.05165940570900624],[0.0031093602070252167],[-0.0034036648250202746],[0.0003390345044343793]])
print("K:\n", K)
print("D:\n", D.ravel())
# read image and get the original image on the left
image_path = "sample.jpg"
image = cv2.imread(image_path)
image = image[:, :image.shape[1]//2, :]
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fig = plt.figure()
plt.imshow(image_gray, "gray")
H_in, W_in = image_gray.shape
print("Grayscale Image Dimension:\n", (W_in, H_in))
scale_factor = 1.0
balance = 1.0
img_dim_out =(int(W_in*scale_factor), int(H_in*scale_factor))
K_out = K.copy()
if scale_factor != 1.0:
    K_out = K*scale_factor
    K_out[2, 2] = 1.0
K_new = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(K_out, D, img_dim_out, np.eye(3), balance=balance)
print("Newly estimated K:\n", K_new)
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K_new, img_dim_out, cv2.CV_32FC1)
print("Rectify Map1 Dimension:\n", map1.shape)
print("Rectify Map2 Dimension:\n", map2.shape)
undistorted_image_gray = cv2.remap(image_gray, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
fig = plt.figure()
plt.imshow(undistorted_image_gray, "gray")
ret, corners = cv2.findChessboardCorners(image_gray, (6,8),cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)
corners_subpix = cv2.cornerSubPix(image_gray, corners, (3,3), (-1,-1), (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
undistorted_corners = cv2.fisheye.undistortPoints(corners_subpix, K, D)
undistorted_corners = undistorted_corners.reshape(-1,2)
fx = K_new[0,0]
fy = K_new[1,1]
cx = K_new[0,2]
cy = K_new[1,2]
undistorted_corners_pixel = np.zeros_like(undistorted_corners)
for i, (x, y) in enumerate(undistorted_corners):
    px = x*fx + cx
    py = y*fy + cy
    undistorted_corners_pixel[i, 0] = px
    undistorted_corners_pixel[i, 1] = py
undistorted_image_show = cv2.cvtColor(undistorted_image_gray, cv2.COLOR_GRAY2BGR)
for corner in undistorted_corners_pixel:
    image_corners = cv2.circle(np.zeros_like(undistorted_image_show), (int(corner[0]), int(corner[1])), 15, [0, 255, 0], -1)
    undistorted_image_show = cv2.add(undistorted_image_show, image_corners)
fig = plt.figure()
plt.imshow(undistorted_image_show, "gray")

How to quantify difference between frames using optical flow estimation?

Here is code to get the optical flow output from a stabilized video (no camera movement) and save it as a set of frames:
import cv2 as cv
import numpy as np
# The video feed is read in as a VideoCapture object
cap = cv.VideoCapture("2_stable_video.avi")
# ret = a boolean return value from getting the frame, first_frame = the first frame in the entire video sequence
ret, first_frame = cap.read()
# Converts frame to grayscale because we only need the luminance channel for detecting edges - less computationally expensive
prev_gray = cv.cvtColor(first_frame, cv.COLOR_BGR2GRAY)
# Creates an image filled with zero intensities with the same dimensions as the frame
mask = np.zeros_like(first_frame)
# Sets image saturation to maximum
mask[..., 1] = 255
count = 0
while(cap.isOpened()):
    # ret = a boolean return value from getting the frame, frame = the current frame being projected in the video
    ret, frame = cap.read()
    # Opens a new window and displays the input frame
    cv.imshow("input", frame)
    # Converts each frame to grayscale - we previously only converted the first frame to grayscale
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Calculates dense optical flow by Farneback method
    flow = cv.calcOpticalFlowFarneback(prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    # Computes the magnitude and angle of the 2D vectors
    magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])
    # Sets image hue according to the optical flow direction
    mask[..., 0] = angle * 180 / np.pi / 2
    # Sets image value according to the optical flow magnitude (normalized)
    mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)
    # Converts HSV to RGB (BGR) color representation
    rgb = cv.cvtColor(mask, cv.COLOR_HSV2BGR)
    # Opens a new window and displays the output frame
    cv.imshow("dense optical flow", rgb[40:150, 120:220])
    cv.imwrite("frames_modified_2/%d.png" % count, rgb[40:150, 120:220])
    count += 1
    # Updates previous frame
    prev_gray = gray
    # Frames are read at intervals of 1 millisecond. The program breaks out of the while loop when the user presses the 'q' key
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv.destroyAllWindows()
Can someone please suggest how to quantify the difference between the frames, i.e. to estimate speed/velocity?
Here's an example to obtain pixel magnitude translation from .bsq frames. You can modify the code to input a video file instead. You are probably most interested in the get_translation() function. Example:
Graph displaying pixel translation from frame-to-frame
Code
import numpy as np
import argparse
import os
import cv2
from matplotlib import pyplot as plt
from matplotlib import cm
import time
import random
# Usage: python translate_analyzer.py -p <filename.bsq>
# Automatic brightness and contrast optimization with optional histogram clipping
def automatic_brightness_and_contrast(image, clip_hist_percent=25):
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image
    # Calculate grayscale histogram
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    hist_size = len(hist)
    # Calculate cumulative distribution from the histogram
    accumulator = []
    accumulator.append(float(hist[0]))
    for index in range(1, hist_size):
        accumulator.append(accumulator[index - 1] + float(hist[index]))
    # Locate points to clip
    maximum = accumulator[-1]
    clip_hist_percent *= (maximum/100.0)
    clip_hist_percent /= 2.0
    # Locate left cut
    minimum_gray = 0
    while accumulator[minimum_gray] < clip_hist_percent:
        minimum_gray += 1
    # Locate right cut
    maximum_gray = hist_size - 1
    while accumulator[maximum_gray] >= (maximum - clip_hist_percent):
        maximum_gray -= 1
    # Calculate alpha and beta values
    alpha = 255 / (maximum_gray - minimum_gray)
    beta = -minimum_gray * alpha
    auto_result = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)
    return (auto_result, alpha, beta)
# Draw flow
def draw_flow(img, flow, step=30):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 1, (36, 255, 12))
    for (x1, y1), (_x2, _y2) in lines:
        cv2.circle(vis, (x1, y1), 2, (36, 255, 12), -1)
    return vis

# Return translation value
def get_translation(img, flow, step=30):
    return (np.median(flow[:, :, 0].T), flow[:, :, 0].T)
# Get file path
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", help="Path to the directory")
args = vars(ap.parse_args())
if not args['path']:
    print('Usage: python translate_analyzer.py -p <directory>')
    exit(1)
# Extract file name
bsq_fname = os.path.split(args['path'])[-1]
if '.bsq' not in bsq_fname:
    print('ERROR: Invalid bsq file. Select correct file.')
    exit(1)
width = 640
height = 512
frame_count = int(os.path.getsize(bsq_fname)/(2*height*width))
x,y,w,h = 0,0,100,512
# Simulates calibrated frames to display on video frame
data_file = np.fromfile(bsq_fname, dtype=np.uint16, count=-1)
data_file = data_file.reshape((width, height, frame_count), order='F')
data_file = np.rot90(data_file)
print(bsq_fname)
fname = bsq_fname.split()[0]
prev = data_file[:,:,0].copy()
prev //= 64
prev = automatic_brightness_and_contrast(prev)[0]
prev = prev[y:y+h, x:x+w]
translation_data = []
frame_direction = []
start = time.time()
for index in range(1, frame_count):
    data = data_file[:, :, index].copy()
    data //= 64
    data = automatic_brightness_and_contrast(data)[0]
    data = data[y:y+h, x:x+w]
    flow = cv2.calcOpticalFlowFarneback(prev=prev, next=data, flow=None, pyr_scale=0.5, levels=2, winsize=80, iterations=2, poly_n=7, poly_sigma=4.5, flags=0)
    translation, pixel_direction = get_translation(data, flow)
    prev = data
    cv2.imshow('flow', draw_flow(data, flow))
    cv2.waitKey(1)
    translation_data.append(translation)
    frame_direction = pixel_direction
    index = (index+1) % frame_count
end = time.time()
print('Time:', end - start)
plt.figure()
plt.title(bsq_fname)
plt.xlabel("Frames")
plt.ylabel("Magnitude")
plt.plot(translation_data)
plt.figure()
plt.title("Pixel Direction")
plt.xlabel("Width")
plt.ylabel("Height")
plt.imshow(frame_direction.T)
plt.colorbar(orientation='vertical')
plt.show()
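If you also need a physical speed rather than pixels per frame, the per-frame translation has to be scaled by the frame rate and by a measured pixel-to-distance factor. A minimal sketch; both values below are placeholders you would have to supply for your own footage:
fps = 30.0               # assumed capture frame rate of the video
meters_per_pixel = 0.01  # assumed scene scale; must be measured/calibrated
speeds = [abs(t) * fps * meters_per_pixel for t in translation_data]  # approximate speed in m/s between consecutive frames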

Using openCV to overlay transparent image onto another image

How can I overlay a transparent PNG onto another image without losing its transparency using OpenCV in Python?
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
# Help please
cv2.imwrite('combined.png', background)
Desired output:
Sources:
Background Image
Overlay
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
added_image = cv2.addWeighted(background,0.4,overlay,0.1,0)
cv2.imwrite('combined.png', added_image)
The correct answer to this was far too hard to come by, so I'm posting this answer even though the question is really old. What you are looking for is "over" compositing, and the algorithm for this can be found on Wikipedia: https://en.wikipedia.org/wiki/Alpha_compositing
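In short, for each pixel (with alpha values scaled to [0, 1]) the "over" operator is out_alpha = alpha_fg + alpha_bg*(1 - alpha_fg) and out_rgb = (fg_rgb*alpha_fg + bg_rgb*alpha_bg*(1 - alpha_fg)) / out_alpha, where the division can be dropped when the background is fully opaque (alpha_bg = 1).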
I am far from an expert with OpenCV, but after some experimentation this is the most efficient way I have found to accomplish the task:
import cv2
background = cv2.imread("background.png", cv2.IMREAD_UNCHANGED)
foreground = cv2.imread("overlay.png", cv2.IMREAD_UNCHANGED)
# normalize alpha channels from 0-255 to 0-1
alpha_background = background[:,:,3] / 255.0
alpha_foreground = foreground[:,:,3] / 255.0
# set adjusted colors
for color in range(0, 3):
    background[:,:,color] = alpha_foreground * foreground[:,:,color] + \
        alpha_background * background[:,:,color] * (1 - alpha_foreground)
# set adjusted alpha and denormalize back to 0-255
background[:,:,3] = (1 - (1 - alpha_foreground) * (1 - alpha_background)) * 255
# display the image
cv2.imshow("Composited image", background)
cv2.waitKey(0)
The following code will use the alpha channels of the overlay image to correctly blend it into the background image, use x and y to set the top-left corner of the overlay image.
import cv2
import numpy as np
def overlay_transparent(background, overlay, x, y):
    background_width = background.shape[1]
    background_height = background.shape[0]
    if x >= background_width or y >= background_height:
        return background
    h, w = overlay.shape[0], overlay.shape[1]
    if x + w > background_width:
        w = background_width - x
        overlay = overlay[:, :w]
    if y + h > background_height:
        h = background_height - y
        overlay = overlay[:h]
    if overlay.shape[2] < 4:
        overlay = np.concatenate(
            [
                overlay,
                np.ones((overlay.shape[0], overlay.shape[1], 1), dtype=overlay.dtype) * 255
            ],
            axis=2,
        )
    overlay_image = overlay[..., :3]
    mask = overlay[..., 3:] / 255.0
    background[y:y+h, x:x+w] = (1.0 - mask) * background[y:y+h, x:x+w] + mask * overlay_image
    return background
This code will mutate background, so create a copy if you wish to preserve the original background image. A short usage example follows.
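For example (file names follow the question; the 100, 50 offset is arbitrary):
import cv2

background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED)  # keep the alpha channel
result = overlay_transparent(background.copy(), overlay, 100, 50)
cv2.imwrite('combined.png', result)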
Been a while since this question appeared, but I believe this is the right simple answer, which could still help somebody.
background = cv2.imread('road.jpg')
overlay = cv2.imread('traffic sign.png')
rows,cols,channels = overlay.shape
overlay=cv2.addWeighted(background[250:250+rows, 0:0+cols],0.5,overlay,0.5,0)
background[250:250+rows, 0:0+cols ] = overlay
This will overlay the image over the background image such as shown here:
Ignore the ROI rectangles
Note that I used a background image of size 400x300 and an overlay image of size 32x32. The overlay appears in the x[0-32] and y[250-282] region of the background image according to the coordinates I set for it: the blend is calculated first and then placed into the part of the image where I want it.
(The overlay is loaded from disk, not taken from the background image itself; unfortunately the overlay image has its own white background, so you can see that too in the result.)
If performance isn't a concern then you can iterate over each pixel of the overlay and apply it to the background. This isn't very efficient, but it does help to understand how to work with png's alpha layer.
slow version
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
height, width = overlay.shape[:2]
for y in range(height):
    for x in range(width):
        overlay_color = overlay[y, x, :3]       # first three elements are color (BGR)
        overlay_alpha = overlay[y, x, 3] / 255  # 4th element is the alpha channel, convert from 0-255 to 0.0-1.0
        # get the color from the background image
        background_color = background[y, x]
        # combine the background color and the overlay color weighted by alpha
        composite_color = background_color * (1 - overlay_alpha) + overlay_color * overlay_alpha
        # update the background image in place
        background[y, x] = composite_color
cv2.imwrite('combined.png', background)
result:
fast version
I stumbled across this question while trying to add a png overlay to a live video feed. The above solution is way too slow for that. We can make the algorithm significantly faster by using numpy's vector functions.
note: This was my first real foray into numpy so there may be better/faster methods than what I've come up with.
import cv2
import numpy as np
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
# separate the alpha channel from the color channels
alpha_channel = overlay[:, :, 3] / 255 # convert from 0-255 to 0.0-1.0
overlay_colors = overlay[:, :, :3]
# To take advantage of the speed of numpy and apply transformations to the entire image with a single operation
# the arrays need to be the same shape. However, the shapes currently looks like this:
# - overlay_colors shape:(height, width, 3) 3 color values for each pixel, (blue, green, red)
# - alpha_channel  shape:(height, width)    1 single alpha value for each pixel
# We will construct an alpha_mask that has the same shape as the overlay_colors by duplicate the alpha channel
# for each color so there is a 1:1 alpha channel for each color channel
alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))
# The background image is larger than the overlay so we'll take a subsection of the background that matches the
# dimensions of the overlay.
# NOTE: For simplicity, the overlay is applied to the top-left corner of the background(0,0). An x and y offset
# could be used to place the overlay at any position on the background.
h, w = overlay.shape[:2]
background_subsection = background[0:h, 0:w]
# combine the background with the overlay image weighted by alpha
composite = background_subsection * (1 - alpha_mask) + overlay_colors * alpha_mask
# overwrite the section of the background image that has been updated
background[0:h, 0:w] = composite
cv2.imwrite('combined.png', background)
How much faster? On my machine the slow method takes ~3 seconds and the optimized method takes ~30 ms. So about 100 times faster!
Wrapped up in a function
This function handles foreground and background images of different sizes and also supports negative and positive offsets that move the overlay across the bounds of the background image in any direction.
import cv2
import numpy as np
def add_transparent_image(background, foreground, x_offset=None, y_offset=None):
    bg_h, bg_w, bg_channels = background.shape
    fg_h, fg_w, fg_channels = foreground.shape
    assert bg_channels == 3, f'background image should have exactly 3 channels (RGB). found:{bg_channels}'
    assert fg_channels == 4, f'foreground image should have exactly 4 channels (RGBA). found:{fg_channels}'
    # center by default
    if x_offset is None: x_offset = (bg_w - fg_w) // 2
    if y_offset is None: y_offset = (bg_h - fg_h) // 2
    w = min(fg_w, bg_w, fg_w + x_offset, bg_w - x_offset)
    h = min(fg_h, bg_h, fg_h + y_offset, bg_h - y_offset)
    if w < 1 or h < 1: return
    # clip foreground and background images to the overlapping regions
    bg_x = max(0, x_offset)
    bg_y = max(0, y_offset)
    fg_x = max(0, x_offset * -1)
    fg_y = max(0, y_offset * -1)
    foreground = foreground[fg_y:fg_y + h, fg_x:fg_x + w]
    background_subsection = background[bg_y:bg_y + h, bg_x:bg_x + w]
    # separate alpha and color channels from the foreground image
    foreground_colors = foreground[:, :, :3]
    alpha_channel = foreground[:, :, 3] / 255  # 0-255 => 0.0-1.0
    # construct an alpha_mask that matches the image shape
    alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))
    # combine the background with the overlay image weighted by alpha
    composite = background_subsection * (1 - alpha_mask) + foreground_colors * alpha_mask
    # overwrite the section of the background image that has been updated
    background[bg_y:bg_y + h, bg_x:bg_x + w] = composite
example usage:
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
x_offset = 0
y_offset = 0
print("arrow keys to move the dice. ESC to quit")
while True:
    img = background.copy()
    add_transparent_image(img, overlay, x_offset, y_offset)
    cv2.imshow("", img)
    key = cv2.waitKey()
    if key == 0: y_offset -= 10   # up
    if key == 1: y_offset += 10   # down
    if key == 2: x_offset -= 10   # left
    if key == 3: x_offset += 10   # right
    if key == 27: break           # escape
You need to open the transparent PNG image using the flag IMREAD_UNCHANGED:
Mat overlay = cv::imread("dice.png", IMREAD_UNCHANGED);
Then split the channels, group the RGB channels, and use the alpha channel as a mask, like this:
/**
 * @brief Draws a transparent image over a frame Mat.
 *
 * @param frame the frame where the transparent image will be drawn
 * @param transp the Mat image with transparency, read from a PNG image, with the IMREAD_UNCHANGED flag
 * @param xPos x position of the frame image where the image will start.
 * @param yPos y position of the frame image where the image will start.
 */
void drawTransparency(Mat frame, Mat transp, int xPos, int yPos) {
    Mat mask;
    vector<Mat> layers;
    split(transp, layers); // separate channels
    Mat rgb[3] = { layers[0], layers[1], layers[2] };
    mask = layers[3]; // png's alpha channel used as mask
    merge(rgb, 3, transp); // put together the RGB channels, now transp isn't transparent
    transp.copyTo(frame.rowRange(yPos, yPos + transp.rows).colRange(xPos, xPos + transp.cols), mask);
}
Can be called like that:
drawTransparency(background, overlay, 10, 10);
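For readers staying in Python, a rough equivalent of this binary-mask copy (hard edges, no soft blending; it assumes the overlay fits inside the background at the chosen offset):
import cv2

background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED)  # 4-channel BGRA
bgr = overlay[:, :, :3]
alpha = overlay[:, :, 3]
x, y = 10, 10
roi = background[y:y + bgr.shape[0], x:x + bgr.shape[1]]  # view into the background
roi[alpha > 0] = bgr[alpha > 0]  # copy overlay pixels wherever alpha is non-zero
cv2.imwrite('combined.png', background)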
To overlay a PNG watermark over a normal 3-channel JPEG image:
import cv2
import numpy as np

def logoOverlay(image, logo, alpha=1.0, x=0, y=0, scale=1.0):
    (h, w) = image.shape[:2]
    image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])

    overlay = cv2.resize(logo, None, fx=scale, fy=scale)
    (wH, wW) = overlay.shape[:2]
    output = image.copy()
    # blend the two images together using transparent overlays
    try:
        if x < 0: x = w + x
        if y < 0: y = h + y
        if x + wW > w: wW = w - x
        if y + wH > h: wH = h - y
        print(x, y, wW, wH)
        overlay = cv2.addWeighted(output[y:y+wH, x:x+wW], alpha, overlay[:wH, :wW], 1.0, 0)
        output[y:y+wH, x:x+wW] = overlay
    except Exception as e:
        print("Error: Logo position is overshooting image!")
        print(e)

    output = output[:, :, :3]
    return output
Usage:
background = cv2.imread('image.jpeg')
overlay = cv2.imread('logo.png', cv2.IMREAD_UNCHANGED)

print(overlay.shape)     # must be (x,y,4)
print(background.shape)  # must be (x,y,3)
# downscale logo by half and position on bottom right reference
out = logoOverlay(background, overlay, scale=0.5, y=-100, x=-100)

cv2.imshow("test", out)
cv2.waitKey(0)
import cv2
import numpy as np
background = cv2.imread('background.jpg')
overlay = cv2.imread('cloudy.png')
overlay = cv2.resize(overlay, (200,200))
# overlay = for_transparent_removal(overlay)
h, w = overlay.shape[:2]
shapes = np.zeros_like(background, np.uint8)
shapes[0:h, 0:w] = overlay
alpha = 0.8
mask = shapes.astype(bool)
# option first
background[mask] = cv2.addWeighted(shapes, alpha, shapes, 1 - alpha, 0)[mask]
cv2.imwrite('combined.png', background)
# option second
background[mask] = cv2.addWeighted(background, alpha, overlay, 1 - alpha, 0)[mask]
# NOTE : above both option will give you image overlays but effect would be changed
cv2.imwrite('combined.1.png', background)
Use this function to place your overlay on any background image. If you want to resize the overlay, use overlay = cv2.resize(overlay, (200,200)) and then pass the resized overlay into the function.
import cv2
import numpy as np
def image_overlay_second_method(img1, img2, location, min_thresh=0, is_transparent=False):
    h, w = img1.shape[:2]
    h1, w1 = img2.shape[:2]
    x, y = location
    roi = img1[y:y + h1, x:x + w1]
    gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, min_thresh, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img_fg = cv2.bitwise_and(img2, img2, mask=mask)
    dst = cv2.add(img_bg, img_fg)
    if is_transparent:
        dst = cv2.addWeighted(img1[y:y + h1, x:x + w1], 0.1, dst, 0.9, None)
    img1[y:y + h1, x:x + w1] = dst
    return img1

if __name__ == '__main__':
    background = cv2.imread('background.jpg')
    overlay = cv2.imread('overlay.png')
    output = image_overlay_second_method(background, overlay, location=(800, 50), min_thresh=0, is_transparent=True)
    cv2.imwrite('output.png', output)
background.jpg
output.png
