How can I overlay a transparent PNG onto another image without losing its transparency using OpenCV in Python?
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
# Help please
cv2.imwrite('combined.png', background)
Desired output:
Sources:
Background Image
Overlay
What I have tried so far (this blends the whole images rather than respecting the overlay's alpha channel):
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
added_image = cv2.addWeighted(background,0.4,overlay,0.1,0)
cv2.imwrite('combined.png', added_image)
The correct answer to this was far too hard to come by, so I'm posting this answer even though the question is really old. What you are looking for is "over" compositing, and the algorithm for this can be found on Wikipedia: https://en.wikipedia.org/wiki/Alpha_compositing
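For reference, the "over" operator for a foreground pixel over a background pixel, with alpha values normalized to 0.0-1.0, is:

out_a = fg_a + bg_a * (1 - fg_a)
out_rgb = (fg_rgb * fg_a + bg_rgb * bg_a * (1 - fg_a)) / out_a

The code below omits the division by out_a, which is exact whenever the background is fully opaque (bg_a = 1 gives out_a = 1) and a close approximation otherwise.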
I am far from an expert with OpenCV, but after some experimentation this is the most efficient way I have found to accomplish the task:
import cv2
background = cv2.imread("background.png", cv2.IMREAD_UNCHANGED)
foreground = cv2.imread("overlay.png", cv2.IMREAD_UNCHANGED)
# normalize alpha channels from 0-255 to 0-1
alpha_background = background[:,:,3] / 255.0
alpha_foreground = foreground[:,:,3] / 255.0
# set adjusted colors
for color in range(0, 3):
    background[:,:,color] = alpha_foreground * foreground[:,:,color] + \
        alpha_background * background[:,:,color] * (1 - alpha_foreground)
# set adjusted alpha and denormalize back to 0-255
background[:,:,3] = (1 - (1 - alpha_foreground) * (1 - alpha_background)) * 255
# display the image
cv2.imshow("Composited image", background)
cv2.waitKey(0)
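Note that this snippet assumes both images have the same dimensions and both carry an alpha channel. As a rough sketch (file name assumed), a 3-channel JPEG background can be given an opaque alpha channel first:

import cv2

# hypothetical prep step: give a 3-channel background an opaque alpha channel
background = cv2.imread("background.jpg")
background = cv2.cvtColor(background, cv2.COLOR_BGR2BGRA)  # BGR -> BGRA, alpha = 255 everywhere

# and, if needed, resize the foreground to match the background
# foreground = cv2.resize(foreground, (background.shape[1], background.shape[0]))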
The following code uses the alpha channel of the overlay image to blend it correctly into the background image; use x and y to set the top-left corner of the overlay.
import cv2
import numpy as np
def overlay_transparent(background, overlay, x, y):
    background_width = background.shape[1]
    background_height = background.shape[0]

    if x >= background_width or y >= background_height:
        return background

    h, w = overlay.shape[0], overlay.shape[1]

    if x + w > background_width:
        w = background_width - x
        overlay = overlay[:, :w]

    if y + h > background_height:
        h = background_height - y
        overlay = overlay[:h]

    if overlay.shape[2] < 4:
        overlay = np.concatenate(
            [
                overlay,
                np.ones((overlay.shape[0], overlay.shape[1], 1), dtype=overlay.dtype) * 255
            ],
            axis=2,
        )

    overlay_image = overlay[..., :3]
    mask = overlay[..., 3:] / 255.0

    background[y:y+h, x:x+w] = (1.0 - mask) * background[y:y+h, x:x+w] + mask * overlay_image

    return background
This code will mutate background, so create a copy if you wish to preserve the original background image.
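A minimal usage sketch (file names and offsets are just examples):

import cv2

background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED)  # keep the alpha channel

# copy first so the original background stays untouched
result = overlay_transparent(background.copy(), overlay, 100, 50)
cv2.imwrite('combined.png', result)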
It's been a while since this question appeared, but I believe this is the right simple answer, and it could still help somebody.
background = cv2.imread('road.jpg')
overlay = cv2.imread('traffic sign.png')
rows,cols,channels = overlay.shape
overlay=cv2.addWeighted(background[250:250+rows, 0:0+cols],0.5,overlay,0.5,0)
background[250:250+rows, 0:0+cols ] = overlay
This will overlay the image over the background image such as shown here:
Ignore the ROI rectangles
Note that I used a background image of size 400x300 and an overlay image of size 32x32. The overlay appears in the x[0-32] and y[250-282] region of the background, according to the coordinates I set for it: the blend is calculated first, and the result is then written into the part of the image where I want it.
(The overlay is loaded from disk, not taken from the background image itself. Unfortunately the overlay image has its own white background, so you can see that in the result too.)
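If your overlay has an opaque white background like mine, one possible workaround is to build a mask of the non-white pixels and blend only those, instead of the two lines above. This is just a sketch, with an assumed whiteness cutoff of 240:

import cv2

gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)
# pixels darker than the cutoff are treated as the actual sign
_, sign_mask = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)

roi = background[250:250+rows, 0:0+cols]   # a view into the background
blended = cv2.addWeighted(roi, 0.5, overlay, 0.5, 0)
roi[sign_mask > 0] = blended[sign_mask > 0]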
If performance isn't a concern, you can iterate over each pixel of the overlay and apply it to the background. This isn't very efficient, but it helps in understanding how to work with a PNG's alpha channel.
slow version
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
height, width = overlay.shape[:2]
for y in range(height):
    for x in range(width):
        overlay_color = overlay[y, x, :3]  # first three elements are the color (BGR, since OpenCV loads in BGR order)
        overlay_alpha = overlay[y, x, 3] / 255  # 4th element is the alpha channel, convert from 0-255 to 0.0-1.0

        # get the color from the background image
        background_color = background[y, x]

        # combine the background color and the overlay color weighted by alpha
        composite_color = background_color * (1 - overlay_alpha) + overlay_color * overlay_alpha

        # update the background image in place
        background[y, x] = composite_color
cv2.imwrite('combined.png', background)
result:
fast version
I stumbled across this question while trying to add a png overlay to a live video feed. The above solution is way too slow for that. We can make the algorithm significantly faster by using numpy's vector functions.
note: This was my first real foray into numpy so there may be better/faster methods than what I've come up with.
import cv2
import numpy as np
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
# separate the alpha channel from the color channels
alpha_channel = overlay[:, :, 3] / 255 # convert from 0-255 to 0.0-1.0
overlay_colors = overlay[:, :, :3]
# To take advantage of the speed of numpy and apply transformations to the entire image with a single operation
# the arrays need to be the same shape. However, the shapes currently look like this:
#   - overlay_colors shape: (height, width, 3) - 3 color values for each pixel (blue, green, red)
#   - alpha_channel shape: (height, width) - a single alpha value for each pixel
# We will construct an alpha_mask with the same shape as overlay_colors by duplicating the alpha
# channel for each color, so there is a 1:1 alpha value for each color channel
alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))
# The background image is larger than the overlay so we'll take a subsection of the background that matches the
# dimensions of the overlay.
# NOTE: For simplicity, the overlay is applied to the top-left corner of the background(0,0). An x and y offset
# could be used to place the overlay at any position on the background.
h, w = overlay.shape[:2]
background_subsection = background[0:h, 0:w]
# combine the background with the overlay image weighted by alpha
composite = background_subsection * (1 - alpha_mask) + overlay_colors * alpha_mask
# overwrite the section of the background image that has been updated
background[0:h, 0:w] = composite
cv2.imwrite('combined.png', background)
How much faster? On my machine the slow method takes ~3 seconds and the optimized method takes ~30 ms. So about 100 times faster!
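If you want to reproduce the comparison on your own machine, a minimal timing sketch:

import time

start = time.perf_counter()
# ... run the slow or the fast version here ...
elapsed_ms = (time.perf_counter() - start) * 1000
print(f"elapsed: {elapsed_ms:.1f} ms")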
Wrapped up in a function
This function handles foreground and background images of different sizes and also supports negative and positive offsets that move the overlay across the bounds of the background image in any direction.
import cv2
import numpy as np
def add_transparent_image(background, foreground, x_offset=None, y_offset=None):
    bg_h, bg_w, bg_channels = background.shape
    fg_h, fg_w, fg_channels = foreground.shape

    assert bg_channels == 3, f'background image should have exactly 3 channels (BGR). found:{bg_channels}'
    assert fg_channels == 4, f'foreground image should have exactly 4 channels (BGRA). found:{fg_channels}'

    # center by default
    if x_offset is None: x_offset = (bg_w - fg_w) // 2
    if y_offset is None: y_offset = (bg_h - fg_h) // 2

    w = min(fg_w, bg_w, fg_w + x_offset, bg_w - x_offset)
    h = min(fg_h, bg_h, fg_h + y_offset, bg_h - y_offset)

    if w < 1 or h < 1: return

    # clip foreground and background images to the overlapping regions
    bg_x = max(0, x_offset)
    bg_y = max(0, y_offset)
    fg_x = max(0, x_offset * -1)
    fg_y = max(0, y_offset * -1)
    foreground = foreground[fg_y:fg_y + h, fg_x:fg_x + w]
    background_subsection = background[bg_y:bg_y + h, bg_x:bg_x + w]

    # separate alpha and color channels from the foreground image
    foreground_colors = foreground[:, :, :3]
    alpha_channel = foreground[:, :, 3] / 255  # 0-255 => 0.0-1.0

    # construct an alpha_mask that matches the image shape
    alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))

    # combine the background with the overlay image weighted by alpha
    composite = background_subsection * (1 - alpha_mask) + foreground_colors * alpha_mask

    # overwrite the section of the background image that has been updated
    background[bg_y:bg_y + h, bg_x:bg_x + w] = composite
example usage:
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
x_offset = 0
y_offset = 0
print("arrow keys to move the dice. ESC to quit")
while True:
    img = background.copy()
    add_transparent_image(img, overlay, x_offset, y_offset)

    cv2.imshow("", img)
    key = cv2.waitKey()
    if key == 0: y_offset -= 10  # up
    if key == 1: y_offset += 10  # down
    if key == 2: x_offset -= 10  # left
    if key == 3: x_offset += 10  # right
    if key == 27: break  # escape
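A caveat: the 0/1/2/3 arrow-key codes above appear to be platform specific (they look like macOS codes). If the arrows do nothing on your machine, you can print what your platform actually reports and adjust the bindings:

key = cv2.waitKeyEx()  # waitKeyEx returns extended, platform-specific key codes
print(key)             # inspect the reported code, then adjust the bindings above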
You need to open the transparent png image using the flag IMREAD_UNCHANGED
Mat overlay = cv::imread("dice.png", IMREAD_UNCHANGED);
Then split the channels, merge the RGB channels back together, and use the alpha channel as a mask, like this:
/**
 * @brief Draws a transparent image over a frame Mat.
 *
 * @param frame the frame where the transparent image will be drawn
 * @param transp the Mat image with transparency, read from a PNG image, with the IMREAD_UNCHANGED flag
 * @param xPos x position of the frame image where the image will start.
 * @param yPos y position of the frame image where the image will start.
 */
void drawTransparency(Mat frame, Mat transp, int xPos, int yPos) {
    Mat mask;
    vector<Mat> layers;

    split(transp, layers); // separate channels
    Mat rgb[3] = { layers[0], layers[1], layers[2] };
    mask = layers[3]; // the PNG's alpha channel, used as a mask
    merge(rgb, 3, transp); // put the RGB channels back together, now transp isn't transparent
    transp.copyTo(frame.rowRange(yPos, yPos + transp.rows).colRange(xPos, xPos + transp.cols), mask);
}
It can be called like this:
drawTransparency(background, overlay, 10, 10);
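For comparison, here is a rough Python sketch of the same idea (background, x, and y are assumed to exist; note that copying through a mask treats any non-zero alpha as fully opaque, so semi-transparent edges are not blended):

import cv2

overlay = cv2.imread("dice.png", cv2.IMREAD_UNCHANGED)
b, g, r, a = cv2.split(overlay)
rgb = cv2.merge((b, g, r))

h, w = overlay.shape[:2]
roi = background[y:y+h, x:x+w]  # view into the background
roi[a > 0] = rgb[a > 0]         # copy wherever the alpha mask is non-zero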
To overlay a PNG watermark over a normal 3-channel JPEG image:
import cv2
import numpy as np
def logoOverlay(image, logo, alpha=1.0, x=0, y=0, scale=1.0):
    (h, w) = image.shape[:2]
    image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])

    overlay = cv2.resize(logo, None, fx=scale, fy=scale)
    (wH, wW) = overlay.shape[:2]
    output = image.copy()
    # blend the two images together using transparent overlays
    try:
        if x < 0: x = w + x
        if y < 0: y = h + y
        if x + wW > w: wW = w - x
        if y + wH > h: wH = h - y
        print(x, y, wW, wH)
        overlay = cv2.addWeighted(output[y:y+wH, x:x+wW], alpha, overlay[:wH, :wW], 1.0, 0)
        output[y:y+wH, x:x+wW] = overlay
    except Exception as e:
        print("Error: Logo position is overshooting image!")
        print(e)

    output = output[:, :, :3]
    return output
Usage:
background = cv2.imread('image.jpeg')
overlay = cv2.imread('logo.png', cv2.IMREAD_UNCHANGED)
print(overlay.shape) # must be (x,y,4)
print(background.shape) # must be (x,y,3)
# downscale logo by half and position on bottom right reference
out = logoOverlay(background,overlay,scale=0.5,y=-100,x=-100)
cv2.imshow("test",out)
cv2.waitKey(0)
import cv2
import numpy as np
background = cv2.imread('background.jpg')
overlay = cv2.imread('cloudy.png')
overlay = cv2.resize(overlay, (200,200))
# overlay = for_transparent_removal(overlay)
h, w = overlay.shape[:2]
shapes = np.zeros_like(background, np.uint8)
shapes[0:h, 0:w] = overlay
alpha = 0.8
mask = shapes.astype(bool)
# option one: addWeighted of shapes with itself just returns shapes, so this
# pastes the overlay at full opacity wherever the mask is set
background[mask] = cv2.addWeighted(shapes, alpha, shapes, 1 - alpha, 0)[mask]
cv2.imwrite('combined.png', background)
# option two: blend the overlay into the background underneath it
# (both addWeighted operands must have the same size, hence shapes rather than overlay)
background[mask] = cv2.addWeighted(background, alpha, shapes, 1 - alpha, 0)[mask]
# NOTE: both options overlay the image, but the visual effect differs
cv2.imwrite('combined.1.png', background)
Use this function to place your overlay on any background image.
If you want to resize the overlay, use overlay = cv2.resize(overlay, (200, 200)) and then pass the resized overlay into the function.
import cv2
import numpy as np
def image_overlay_second_method(img1, img2, location, min_thresh=0, is_transparent=False):
    h, w = img1.shape[:2]
    h1, w1 = img2.shape[:2]
    x, y = location

    # region of interest on the background where the overlay will go
    roi = img1[y:y + h1, x:x + w1]

    # binary mask from the overlay: anything brighter than min_thresh counts as foreground
    gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, min_thresh, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # background pixels outside the mask plus overlay pixels inside the mask
    img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img_fg = cv2.bitwise_and(img2, img2, mask=mask)
    dst = cv2.add(img_bg, img_fg)

    if is_transparent:
        dst = cv2.addWeighted(img1[y:y + h1, x:x + w1], 0.1, dst, 0.9, None)

    img1[y:y + h1, x:x + w1] = dst
    return img1
if __name__ == '__main__':
    background = cv2.imread('background.jpg')
    overlay = cv2.imread('overlay.png')
    output = image_overlay_second_method(background, overlay, location=(800, 50), min_thresh=0, is_transparent=True)
    cv2.imwrite('output.png', output)
background.jpg
output.png
Related
I am trying to add a background image to a PDF using PyMuPDF, but it creates a layer between the PDF and the image, as you can see in the output.
How can I remove this layer between the PDF and the background image? Please help me with this.
This is how I am adding the background image to the PDF:
import fitz

pdf_name = '3_giberish template.pdf'
doc = fitz.open(pdf_name)

# open the first page
page = doc.loadPage(0)

background_img_filename = 'background.png'

# insert the background image to cover the full page
full_img_rect = fitz.Rect(0, 0, 650, 792)

# overlay=False adds the image behind the page content
page.insertImage(full_img_rect, filename=background_img_filename, overlay=False)

# save doc
doc.save(output_file_path, garbage=4, deflate=True, clean=True)
print("completed")
Hi, this answer might not be optimal, but I think it will help you.
First you need to convert the PDF page to an RGBA image. Then every white pixel is made transparent, so it disappears when overlaid onto the background.
After that, if the background is smaller than the text image, scale it up so the text fits in.
Finally, put the text image on top of the background and export it to PDF.
Below are the images I tested with.
text.jpg
background.jpg
res.png
"""
pip install opencv-python
pip install pymupdf
pip install Pillow
"""
import fitz
import cv2
import numpy as np
from PIL import Image
def pix2np(pix):
    im = np.frombuffer(pix.samples, dtype=np.uint8).reshape(pix.h, pix.w, pix.n)
    im = np.ascontiguousarray(im[..., [2, 1, 0]])  # rgb to bgr
    return im

def resize(img, scale_percent):
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
doc = fitz.open('text.pdf')
# fitz to opencv image
# https://study.marearts.com/2020/04/pdf-to-opencv-as-page-by-page-using.html
for page_num, page in enumerate(doc.pages()):
    mat = fitz.Matrix(1, 1)
    pix = page.get_pixmap(matrix=mat)
    im = pix2np(pix)

    # white border removed and keep the text
    # https://stackoverflow.com/a/49907762/7828101
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    gray = 255 * (gray < 128).astype(np.uint8)  # invert the text to white
    coords = cv2.findNonZero(gray)  # Find all non-zero points (text)
    x, y, w, h = cv2.boundingRect(coords)  # Find minimum spanning bounding box
    rect = im[y:y+h, x:x+w]  # Crop the image - note we do this on the original image
    # cv2.imwrite('res.jpg', rect)

    # convert white background to transparent background
    new_img = cv2.cvtColor(rect, cv2.COLOR_BGR2BGRA)
    for i in range(new_img.shape[0]):
        for j in range(new_img.shape[1]):
            pixel = new_img[i, j]
            for k, value in enumerate(pixel):
                if value < 250:
                    break
            # k == 3 means B, G and R were all >= 250, i.e. a (near-)white pixel
            if k == 3:
                new_img[i, j, 3] = 0

    # paste result image to background
    background = cv2.imread('background.jpg')
    background = cv2.cvtColor(background, cv2.COLOR_BGR2BGRA)
    # resize() expects a percentage, so convert the size ratio to percent
    if background.shape[0] < new_img.shape[0]:
        scale_percent = new_img.shape[0] / background.shape[0] * 100
        background = resize(background, scale_percent)
    if background.shape[1] < new_img.shape[1]:
        scale_percent = new_img.shape[1] / background.shape[1] * 100
        background = resize(background, scale_percent)

    y_position = int((background.shape[0] - new_img.shape[0]) / 2)
    x_position = int((background.shape[1] - new_img.shape[1]) / 2)

    # Merge the two images
    # https://stackoverflow.com/a/14102014/7828101
    alpha_s = new_img[:, :, 3] / 255.0
    alpha_l = 1.0 - alpha_s

    for i in range(0, 3):
        new_img_inside_background = background[y_position:y_position + new_img.shape[0],
                                               x_position:x_position + new_img.shape[1], :]
        background[y_position:y_position + new_img.shape[0],
                   x_position:x_position + new_img.shape[1], i] = (alpha_s * new_img[:, :, i] +
                                                                   alpha_l * new_img_inside_background[:, :, i])

    cv2.imwrite('res.png', background)
    background = cv2.cvtColor(background, cv2.COLOR_BGRA2RGB)
    im_pil = Image.fromarray(background)
    im_pil.save('{}_res.pdf'.format(page_num))
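As a side note, the per-pixel white-to-transparent loop above can be replaced by a vectorized sketch of the same rule (all of B, G, R at or above the 250 cutoff):

import numpy as np

# boolean mask of (near-)white pixels, then zero their alpha in one shot
white = np.all(new_img[:, :, :3] >= 250, axis=2)
new_img[white, 3] = 0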
I used the following code to select the nose in OpenCV and Python. I searched a lot to find a way to change the size of the nose and save the result as another image, but I didn't find anything. Is there anybody who can help me do this?
import cv2
import numpy as np
import dlib
img = cv2.imread('1.jpg')
img = cv2.resize(img,(0,0),None,0.5,0.5)
imgOriginal = img.copy()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
def createBox(img, points, scale=5):
    bbox = cv2.boundingRect(points)
    x, y, w, h = bbox
    imgCrop = img[y:y+h, x:x+w]
    imgCrop = cv2.resize(imgCrop, (0, 0), None, scale, scale)
    return imgCrop
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = detector(imgGray)
for face in faces:
    x1, y1 = face.left(), face.top()
    x2, y2 = face.right(), face.bottom()
    imgOriginal = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)

    landmarks = predictor(imgGray, face)
    myPoints = []
    for n in range(68):
        x = landmarks.part(n).x
        y = landmarks.part(n).y
        myPoints.append([x, y])
        # cv2.circle(imgOriginal, (x, y), 5, (50, 50, 255), cv2.FILLED)
        # cv2.putText(imgOriginal, str(n), (x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 255), 1)

    myPoints = np.array(myPoints)
    # nose points to select
    # nose_points = myPoints[27:35]
    print(myPoints)
cv2.imshow("image", imgOriginal)
cv2.waitKey(0)
thanks in advance
Here is one way using a spherical (bubble) warp in a local region in Python/OpenCV.
- Define region center and radius and amount of spherical distortion
- Crop the image for that center and radius
- Compute the spherical distortion x and y displacement maps and a binary mask
- Apply the distortion maps using cv2.remap
- Antialias the mask
- Merge the distorted and cropped image using the mask
- Insert that merged image into the original image
- Save the results
Input:
import numpy as np
import cv2
import math
import skimage.exposure
img = cv2.imread("portrait_of_mussorgsky2.jpg")
# set location and radius
cx = 130
cy = 109
radius = 30
# set distortion gain
gain = 1.5
# crop image
crop = img[cy-radius:cy+radius, cx-radius:cx+radius]
# get dimensions
ht, wd = crop.shape[:2]
xcent = wd / 2
ycent = ht / 2
rad = min(xcent,ycent)
# set up the x and y maps as float32
map_x = np.zeros((ht, wd), np.float32)
map_y = np.zeros((ht, wd), np.float32)
mask = np.zeros((ht, wd), np.uint8)
# create map with the spherize distortion formula --- arcsin(r)
# xcomp = arcsin(r)*x/r; ycomp = arcsin(r)*y/r
for y in range(ht):
    Y = (y - ycent) / ycent
    for x in range(wd):
        X = (x - xcent) / xcent
        R = math.hypot(X, Y)
        if R == 0:
            map_x[y, x] = x
            map_y[y, x] = y
            mask[y, x] = 255
        elif R >= .90:  # avoid extreme blurring near R = 1
            map_x[y, x] = x
            map_y[y, x] = y
            mask[y, x] = 0
        elif gain >= 0:
            map_x[y, x] = xcent * X * math.pow((2 / math.pi) * (math.asin(R) / R), gain) + xcent
            map_y[y, x] = ycent * Y * math.pow((2 / math.pi) * (math.asin(R) / R), gain) + ycent
            mask[y, x] = 255
        elif gain < 0:
            gain2 = -gain
            map_x[y, x] = xcent * X * math.pow((math.sin(math.pi * R / 2) / R), gain2) + xcent
            map_y[y, x] = ycent * Y * math.pow((math.sin(math.pi * R / 2) / R), gain2) + ycent
            mask[y, x] = 255
# remap using map_x and map_y
bump = cv2.remap(crop, map_x, map_y, cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# antialias edge of mask
# (pad so blur does not extend to edges of image, then crop later)
blur = 7
mask = cv2.copyMakeBorder(mask, blur,blur,blur,blur, borderType=cv2.BORDER_CONSTANT, value=(0))
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=blur, sigmaY=blur, borderType = cv2.BORDER_DEFAULT)
h, w = mask.shape
mask = mask[blur:h-blur, blur:w-blur]
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
mask = skimage.exposure.rescale_intensity(mask, in_range=(127.5,255), out_range=(0,1))
# merge bump with crop using grayscale (not binary) mask
bumped = (bump * mask + crop * (1-mask)).clip(0,255).astype(np.uint8)
# insert bumped image into original
result = img.copy()
result[cy-radius:cy+radius, cx-radius:cx+radius] = bumped
# save results
cv2.imwrite("portrait_of_mussorgsky2_bump.jpg", result)
# display images
cv2.imshow('img', img)
cv2.imshow('crop', crop)
cv2.imshow('bump', bump)
cv2.imshow('mask', mask)
cv2.imshow('bumped', bumped)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Resulting Image:
I think you need a "bulge" effect such as implode and explode. There is no implementation of these filters in OpenCV, but you can find other tools such as Wand (a Python binding for ImageMagick) that have implode/explode.
Example (wand):
from wand.image import Image
with Image(filename="test.jpg") as img:
    img.implode(amount=-0.2)
    img.save(filename="destination.jpg")
    # img_array = numpy.asarray(img) --> you can convert wand.image.Image to a numpy array for further use
Passing negative values into the implode function is the same as doing an explode, so for a magnifying effect use negative values.
There is one problem though: img.implode operates on the center of the image, so after you've found the face features (eye, nose, ...) you need to shift the picture somehow so that the eye or nose lies at the center of the image. After that you can simply use the implode function.
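A minimal sketch of that recentering step, assuming (nx, ny) is the nose center computed from the landmarks: pad the image so that point becomes the geometric center, then run implode.

import cv2

h, w = img.shape[:2]
pad_x = max(nx, w - nx)  # half-width of the recentered image
pad_y = max(ny, h - ny)  # half-height of the recentered image

# pad so (nx, ny) lands exactly at the center of the new image
centered = cv2.copyMakeBorder(img,
                              pad_y - ny, pad_y - (h - ny),
                              pad_x - nx, pad_x - (w - nx),
                              cv2.BORDER_REPLICATE)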
I am working on an image enhancement use case where one of the tasks is to rescale an image to a 3:4 ratio. But rather than blindly resizing by computing the height and width from the original image, I want the image cropped: in other words, I want to discard boundary pixels so that it matches the ratio without cutting the primary object.
I have the segmentation mask, from which I am getting the bounding box. I am also removing the background, making it transparent, for some other purposes. I am sharing both the binary mask and the original image.
I am using the below code to generate the box.
import cv2
import numpy as np
THRESHOLD = 0.9
mask = cv2.imread("mask.png")
mask = mask/255
mask[mask > THRESHOLD] = 1
mask[mask <= THRESHOLD] = 0
out_layer = mask[:,:,2]
x_starts = [np.where(out_layer[i]==1)[0][0] if len(np.where(out_layer[i]==1)[0])!=0 else out_layer.shape[0]+1 for i in range(out_layer.shape[0])]
x_ends = [np.where(out_layer[i]==1)[0][-1] if len(np.where(out_layer[i]==1)[0])!=0 else 0 for i in range(out_layer.shape[0])]
y_starts = [np.where(out_layer.T[i]==1)[0][0] if len(np.where(out_layer.T[i]==1)[0])!=0 else out_layer.T.shape[0]+1 for i in range(out_layer.T.shape[0])]
y_ends = [np.where(out_layer.T[i]==1)[0][-1] if len(np.where(out_layer.T[i]==1)[0])!=0 else 0 for i in range(out_layer.T.shape[0])]
startx = min(x_starts)
endx = max(x_ends)
starty = min(y_starts)
endy = max(y_ends)
start = (startx,starty)
end = (endx,endy)
If I understood your problem correctly, you just want the mask of the person in an image with a 3:4 size ratio, without cropping the mask. The approach you are talking about is possible but a bit unnecessary. Below is the approach you can use, with an explanation; I have also used a different method to find the box. Use whichever approach you like.
import cv2
import numpy as np
MaskImg = cv2.imread("WomanMask.png", cv2.IMREAD_GRAYSCALE)
cv2.imwrite("RuntimeImages/Input MaskImg.png", MaskImg)
ret, MaskImg = cv2.threshold(MaskImg, 20, 255, cv2.THRESH_BINARY)
cv2.imwrite("RuntimeImages/MaskImg after threshold.png", MaskImg)
# Finding biggest contour in the image
# (Assuming that the woman mask will cover the biggest area of the mask image)
# Getting all external contours
Contours = cv2.findContours(MaskImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
# exit if no white pixel in the image (no contour found)
if len(Contours) == 0:
    print("There was no white pixel in the image.")
    exit()
# Sorting contours in decreasing order according to their area
Contours = sorted(Contours, key=lambda x:cv2.contourArea(x), reverse=True)
# Getting the biggest contour
BiggestContour = Contours[0] # This is the contour of the girl mask
# Finding the bounding rectangle
BB = cv2.boundingRect(BiggestContour)
print(f"Bounding rectangle : {BB}")
# Getting the position, width, and height of the woman mask
x, y = BB[0], BB[1]
Width, Height = BB[2], BB[3]
# Setting the (height / width) ratio required
Ratio = (3 / 4)  # 3 : 4 :: Height : Width
# Getting the new dimensions of the image to fit the mask
if (Height > Width):
    NewHeight = Height
    NewWidth = int(NewHeight / Ratio)
else:
    NewWidth = Width
    NewHeight = int(NewWidth * Ratio)
# Getting the position of the woman mask in this new image
# It will be placed at the center
X = int((NewWidth - Width) / 2)
Y = int((NewHeight - Height) / 2)
# Creating the new image with the woman mask at the center
NewImage = np.zeros((NewHeight, NewWidth), dtype=np.uint8)
NewImage[Y : Y+Height, X : X+Width] = MaskImg[y : y+Height, x : x+Width]
cv2.imwrite("RuntimeImages/Final Image.png", NewImage)
Below is the final output mask image
By using this link, I made the deformed mesh:
inputs = cv2.imread("../datasets/images/0.jpg")
nh, nw = inputs.shape[0]//8, inputs.shape[1]//8
inputs = cv2.resize(inputs, dsize=(nw, nh), interpolation=cv2.INTER_AREA)  # dsize is (width, height)
mr = nh
mc = nw
xx = np.arange(mr-1, -1, -1)
yy = np.arange(0, mc, 1)
[Y, X] = np.meshgrid(xx, yy)
ms = np.transpose(np.asarray([X.flatten('F'), Y.flatten('F')]), (1,0))
perturbed_mesh = ms
nv = np.random.randint(20) - 1
for k in range(nv):
    # Choosing one vertex randomly
    vidx = np.random.randint(np.shape(ms)[0])
    vtex = ms[vidx, :]
    # Vector between all vertices and the selected one
    xv = perturbed_mesh - vtex
    # Random movement
    mv = (np.random.rand(1, 2) - 0.5) * 20
    hxv = np.zeros((np.shape(xv)[0], np.shape(xv)[1] + 1))
    hxv[:, :-1] = xv
    hmv = np.tile(np.append(mv, 0), (np.shape(xv)[0], 1))
    d = np.cross(hxv, hmv)
    d = np.absolute(d[:, 2])
    d = d / (np.linalg.norm(mv, ord=2))
    wt = d
    curve_type = np.random.rand(1)
    if curve_type > 0.3:
        alpha = np.random.rand(1) * 50 + 50
        wt = alpha / (wt + alpha)
    else:
        alpha = np.random.rand(1) + 1
        wt = 1 - (wt / 100) ** alpha
    msmv = mv * np.expand_dims(wt, axis=1)
    perturbed_mesh = perturbed_mesh + msmv
So I got the mesh like:
Then I tried to map the source image pixels onto the generated mesh.
img = cv2.copyMakeBorder(inputs, dh, dh, dw, dw, borderType=cv2.BORDER_CONSTANT, value=(0,0,0))
xs, ys = perturbed_mesh[:, 0], perturbed_mesh[:, 1]
xs = xs.reshape(nh, nw).astype(np.float32)
ys = ys.reshape(nh, nw).astype(np.float32)
dst = cv2.remap(img, xs, ys, cv2.INTER_CUBIC)
plt.imshow(dst)
Finally, I got the result:
But this image has the document in the corner, so I can't use it.
How can I map the document onto the center of the image?
Here is an example of what I did for a perspective warp in Python/OpenCV. It will show you how I achieved the expanded view of the output. Not only did I increase the output size, but I also shifted the output control points. I shifted by +500 px and doubled that to +1000 for the output size.
Input:
No Expand Case:
import numpy as np
import cv2
# read input
img = cv2.imread("building.jpg")
# resize
height,width = 1000,1500
img = cv2.resize(img, (width,height))
# specify conjugate coordinates and shift output on left and top
pts1 = np.float32([[ 250, 0],[1220, 300],[1300, 770],[ 250, 860]])
pts2 = np.float32([[0,0],[width,0],[width,height],[0,height]])
# compute perspective matrix
matrix = cv2.getPerspectiveTransform(pts1,pts2)
print(matrix.shape)
print(matrix)
# convert image to BGRA with opaque alpha
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# do the perspective transformation, setting the area outside the input to transparent
imgOutput = cv2.warpPerspective(img, matrix, (width,height), cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# resize output, since it is too large to post
imgOutput = cv2.resize(imgOutput, (width,height))
# save the warped output
cv2.imwrite("building_warped_unexpanded.png", imgOutput)
# show the result
cv2.imshow("result", imgOutput)
cv2.waitKey(0)
cv2.destroyAllWindows()
No Expand Warped Result:
Expanded Case:
import numpy as np
import cv2
# read input
img = cv2.imread("building.jpg")
# resize
height,width = 1000,1500
img = cv2.resize(img, (width,height))
# specify conjugate coordinates and shift output on left and top
pts1 = np.float32([[ 250, 0],[1220, 300],[1300, 770],[ 250, 860]])
pts2 = np.float32([[+500,+500],[width+500,+500],[width+500,height+500],[+500,height+500]])
# compute perspective matrix
matrix = cv2.getPerspectiveTransform(pts1,pts2)
print(matrix.shape)
print(matrix)
# convert image to BGRA with opaque alpha
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# do perspective transformation setting area outside input to transparent
# extend output size so extended by 500 all around
imgOutput = cv2.warpPerspective(img, matrix, (width+1000,height+1000), cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# resize output, since it is too large to post
imgOutput = cv2.resize(imgOutput, (width,height))
# save the warped output
cv2.imwrite("building_warped.jpg", imgOutput)
# show the result
cv2.imshow("result", imgOutput)
cv2.waitKey(0)
cv2.destroyAllWindows()
Expanded Result:
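Instead of a fixed +500 shift, the required canvas and shift can also be derived automatically by transforming the input corners; a sketch building on the variables above:

import numpy as np
import cv2

corners = np.float32([[0, 0], [width, 0], [width, height], [0, height]]).reshape(-1, 1, 2)
warped = cv2.perspectiveTransform(corners, matrix).reshape(-1, 2)
x_min, y_min = warped.min(axis=0)
x_max, y_max = warped.max(axis=0)

# translate so the warped result starts at (0, 0), then size the canvas to fit it
shift = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]])
out_size = (int(np.ceil(x_max - x_min)), int(np.ceil(y_max - y_min)))
imgOutput = cv2.warpPerspective(img, shift @ matrix, out_size)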
I work with logos and other simple graphics in which there are no gradients or complex patterns. My task is to extract segments with letters and other elements from the logo.
To do this, I define the background color, and then I go through the picture in order to segment the images. Here is my code for more understanding:
import sys

import cv2 as cv
import numpy as np

MAXIMUM_COLOR_TRANSITION_DELTA = 100  # 0 - 765

def expand_segment_recursive(image, unexplored_foreground, segment, point, color):
    height, width, _ = image.shape
    # Unpack coordinates from point
    py, px = point

    # Create list of pixels to check
    neighbourhood_pixels = [(py, px + 1), (py, px - 1), (py + 1, px), (py - 1, px)]

    allowed_zone = unexplored_foreground & np.invert(segment)

    for y, x in neighbourhood_pixels:
        # Add pixel to segment if its coordinates are within the image shape and its color differs
        # from the segment color by no more than MAXIMUM_COLOR_TRANSITION_DELTA
        if y in range(height) and x in range(width) and allowed_zone[y, x]:
            color_delta = np.sum(np.abs(image[y, x].astype(int) - color.astype(int)))
            print(color_delta)
            if color_delta <= MAXIMUM_COLOR_TRANSITION_DELTA:
                segment[y, x] = True
                segment = expand_segment_recursive(image, unexplored_foreground, segment, (y, x), color)
            allowed_zone = unexplored_foreground & np.invert(segment)

    return segment
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Pass image as the argument to use the tool")
        exit(-1)

    IMAGE_FILENAME = sys.argv[1]
    print(IMAGE_FILENAME)

    image = cv.imread(IMAGE_FILENAME)
    height, width, _ = image.shape

    # To filter the background I use the median value of the image, as the background
    # in most cases takes > 50% of the image area.
    background_color = np.median(image, axis=(0, 1))
    print("Background color: ", background_color)

    # Create foreground mask to find segments in it (TODO: Optimize this part)
    foreground = np.zeros(shape=(height, width, 1), dtype=bool)
    for y in range(height):
        for x in range(width):
            if not np.array_equal(image[y, x], background_color):
                foreground[y, x] = True

    unexplored_foreground = foreground

    for y in range(height):
        for x in range(width):
            if unexplored_foreground[y, x]:
                segment = np.zeros(foreground.shape, foreground.dtype)
                segment[y, x] = True
                segment = expand_segment_recursive(image, unexplored_foreground, segment, (y, x), image[y, x])

                cv.imshow("segment", segment.astype(np.uint8) * 255)
                while cv.waitKey(0) != 27:
                    continue
Here is the desired result:
At the end of the run I expect 13 separate extracted segments (for this particular image). Instead I got RecursionError: maximum recursion depth exceeded, which is not surprising, as expand_segment_recursive() can be called for every pixel of the image; even with a small image resolution of 600x500 that reaches up to 300K calls.
My question is how can I get rid of recursion in this case and possibly optimize the algorithm with Numpy or OpenCV algorithms?
You can actually use a thresholded image (binary) and connectedComponents to do this job in a couple of steps. Also, you may use findContours or other methods.
Here is the code:
import numpy as np
import cv2
# load image as greyscale
img = cv2.imread("hp.png", 0)
# puts 0 to the white (background) and 255 in other places (greyscale value < 250)
_, thresholded = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY_INV)
# gets the labels and the amount of labels, label 0 is the background
amount, labels = cv2.connectedComponents(thresholded)
# lets draw it for visualization purposes
preview = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
print (amount) #should be 3 -> two components + background
# draw label 1 blue and label 2 green
preview[labels == 1] = (255, 0, 0)
preview[labels == 2] = (0, 255, 0)
cv2.imshow("frame", preview)
cv2.waitKey(0)
At the end, the thresholded image will look like this:
and the preview image (the one with the colored segments) will look like this:
With the mask you can always use numpy functions to get things like the coordinates of the segments you want, or to color them (like I did with preview).
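For example, a short sketch pulling out the pixel coordinates and the bounding box of one label:

import numpy as np
import cv2

ys, xs = np.where(labels == 1)  # row/column coordinates of segment 1
pts = np.column_stack((xs, ys)).astype(np.int32)
x, y, w, h = cv2.boundingRect(pts)  # tight bounding box around the segment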
UPDATE
To get differently colored segments, you may try to create a "border" between the segments. Since they are plain colors and not gradients, you can try an edge detector like Canny and then set the detected edges to black in the image:
import numpy as np
import cv2
img = cv2.imread("total.png", 0)
# background to black
img[img>=200] = 0
# get edges
canny = cv2.Canny(img, 60, 180)
# make them thicker
kernel = np.ones((3,3),np.uint8)
canny = cv2.morphologyEx(canny, cv2.MORPH_DILATE, kernel)
# apply edges as border in the image
img[canny==255] = 0
# same as before
amount, labels = cv2.connectedComponents(img)
preview = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
print (amount) #should be 14 -> 13 components + background
# color them randomly
for i in range(1, amount):
    preview[labels == i] = np.random.randint(0, 255, size=3, dtype=np.uint8)
cv2.imshow("frame", preview )
cv2.waitKey(0)
The result is: