I want to stack two images, where one image has a mask drawn on it, on top of another image. I don't want the two images to blend together; I want the final result stacked like layers.
Here are my original images:
Masked image:
Code:
import cv2
import numpy as np
image = cv2.imread('test72.jpg')
image2 = cv2.imread('test63.jpg')
blank = np.full((image.shape[0], image.shape[1], 3), (255, 255, 255), np.uint8)  # white canvas
circle = cv2.circle(blank, (300, 300), 10, (0, 0, 0), thickness=100)             # black filled blob used as the mask
blur = cv2.blur(circle, (50, 50))     # cv2.blur takes only src and ksize; this feathers the mask edges
subtract = cv2.subtract(image, blur)
blended = cv2.addWeighted(image2, 1, subtract, 1, 0)
cv2.imwrite('mask.jpg', subtract)
cv2.imwrite('blend.jpg', blended)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is what the result looks like when cv2.addWeighted is used: the bananas blend into the shoes. Is there another function in OpenCV I could use to make this stack rather than blend?
Something like this might work in your case:
import matplotlib.pyplot as plt

# blur, image and image2 come from the question's code above
im1_alpha = blur / 255
im2_alpha = (255 - blur) / 255
out_img = ((image2 * im1_alpha) + (image * im2_alpha)).astype(np.uint8)
plt.imshow(out_img)   # note: OpenCV loads BGR, so colours look swapped in matplotlib
plt.show()
Here is another answer!
I followed this tutorial: Alpha blending tutorial
import cv2
import numpy as np
image = cv2.imread('test72.jpg')
image2 = cv2.imread('test63.jpg')
blank = np.full((image.shape[0], image.shape[1], 3), (255,255,255), np.uint8)
circle = cv2.circle(blank, (300, 350), 10, (0, 0, 0), thickness=100)
blur = cv2.blur(circle, (50, 50))              # cv2.blur takes only src and ksize
image = image.astype(float)
image2 = image2.astype(float)
alpha = blur.astype(float) / 255               # 1.0 outside the circle, 0.0 inside (feathered at the edge)
multiply = cv2.multiply(alpha, image2)         # background contribution
multiply2 = cv2.multiply(1.0 - alpha, image)   # foreground contribution inside the circle
add = cv2.add(multiply, multiply2).astype(np.uint8)
cv2.imshow('alpha', add)
cv2.waitKey(0)
cv2.destroyAllWindows()
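If you want a hard cut-out instead of a feathered blend, a minimal sketch (reusing the same circle mask as in the code above, but skipping the blur so the edge is a clean cut) is to copy pixels with a boolean mask rather than weighting them:
import cv2
import numpy as np

image = cv2.imread('test72.jpg')   # source shown inside the circle
image2 = cv2.imread('test63.jpg')  # source shown everywhere else

# white canvas with a black filled circle, as in the code above (no blur this time)
blank = np.full((image.shape[0], image.shape[1], 3), (255, 255, 255), np.uint8)
circle = cv2.circle(blank, (300, 350), 10, (0, 0, 0), thickness=100)

inside = (circle == 0)           # boolean mask, True where the circle was drawn

stacked = image2.copy()          # start from the background...
stacked[inside] = image[inside]  # ...and overwrite only the circle region, no blending
cv2.imwrite('stacked.jpg', stacked)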
I have a few journal-page images with two columns, and I want to mask one column white without changing the dimensions; that is, the output image should have the same dimensions as the input image even though only one column remains.
I was able to mask the image, but the masked part comes out black and I want it white.
import cv2
import numpy as np
# Load the original image
image = cv2.imread(filename = r"D:\output_final_word5\image1.jpg")
# Create the basic black image
mask = np.zeros(shape = image.shape, dtype = "uint8")
# Draw a white, filled rectangle on the mask image
cv2.rectangle(img = mask, pt1 = (0, 0), pt2 = (795, 3000), color = (255, 255, 255), thickness = -1)
# Apply the mask and display the result
maskedImg = cv2.bitwise_and(src1 = image, src2 = mask)
#cv2.namedWindow(winname = "masked image", flags = cv2.WINDOW_NORMAL)
cv2.imshow("masked image",maskedImg)
cv2.waitKey(delay = 0)
cv2.imwrite("D:\Test_Mask.jpg",maskedImg)
My final objective is to read a folder containing several journal pages and save each page twice: once with the first column masked and once with the second column masked, without changing the dimensions of the input image, and with the masked part white.
The input image is attached below...
And the output should look like this...
You don't need a mask to draw a rectangle; you can draw it directly on the image.
You can also use image.copy() to create a second image for the other column.
BTW: if 795 is the middle of the width, you can use image.shape to get (height, width) and use width//2 instead of 795, so it works with images of different widths. But if 795 is not exactly the middle, use half_width = 795.
import cv2
image_1 = cv2.imread('image.jpg')
image_2 = image_1.copy()
height, width, depth = image_1.shape # it gives `height,width`, not `width,height`
half_width = width//2
#half_width = 795
cv2.rectangle(img=image_1, pt1=(0, 0), pt2=(half_width, height), color=(255, 255, 255), thickness=-1)
cv2.rectangle(img=image_2, pt1=(half_width, 0), pt2=(width, height), color=(255, 255, 255), thickness=-1)
cv2.imwrite("image_1.jpg", image_1)
cv2.imwrite("image_2.jpg", image_2)
cv2.imshow("image 1", image_1)
cv2.imshow("image 2", image_2)
cv2.waitKey(0)
cv2.destroyAllWindows()
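For the final objective in the question (processing a whole folder of pages), a minimal sketch, assuming hypothetical input_dir and output_dir paths and that every page is split down the middle:
import os
import glob
import cv2

input_dir = r"D:\journal_pages"       # hypothetical input folder
output_dir = r"D:\journal_pages_out"  # hypothetical output folder
os.makedirs(output_dir, exist_ok=True)

for path in glob.glob(os.path.join(input_dir, "*.jpg")):
    image_1 = cv2.imread(path)
    image_2 = image_1.copy()
    height, width, depth = image_1.shape
    half_width = width // 2   # or half_width = 795 if the split is not centred

    # whiten the left column in one copy and the right column in the other
    cv2.rectangle(image_1, (0, 0), (half_width, height), (255, 255, 255), -1)
    cv2.rectangle(image_2, (half_width, 0), (width, height), (255, 255, 255), -1)

    name = os.path.splitext(os.path.basename(path))[0]
    cv2.imwrite(os.path.join(output_dir, name + "_left_masked.jpg"), image_1)
    cv2.imwrite(os.path.join(output_dir, name + "_right_masked.jpg"), image_2)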
I want to use Python and OpenCV (no neural networks) for edge detection to measure some very small things, such as sperm cells under a microscope. Unfortunately, I found that the sperm tails are very difficult to detect because they are really similar to the background.
I used cv2.pyrMeanShiftFiltering() for noise reduction and cv2.findContours() to find contours. The result looks like this:
result:
This is the original picture:
Here is my code:
import cv2 as cv
import numpy as np
import os
path = "/home/rafael/Desktop/2.jpg"
def detection(img):
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    #ret, dst = cv.threshold(gray, 200, 255, cv.THRESH_OTSU)
    ret, dst = cv.threshold(gray, 188, 255, cv.THRESH_BINARY_INV)
    return dst
image = cv.imread(path)
img = cv.pyrMeanShiftFiltering(src = image, sp = 5, sr = 40)
dst = detection(img)
src, contours, hierarchy = cv.findContours(dst, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
cv.drawContours(image, contours, -1, (0, 0, 255), 2)
cv.namedWindow('img', cv.WINDOW_NORMAL)
cv.imshow('img', image)
cv.waitKey(0)
I tried Luke's method, and the code is here:
import cv2 as cv
import numpy as np
import os
path = "/home/rafael/Desktop/2.jpg"
def enhance(img):
    img = cv.resize(img, (0, 0), fx = 0.3, fy = 0.3)
    blur = cv.GaussianBlur(img, (23, 23), 0)
    img = cv.add(img[:, :, 1], (img[:, :, 1] - blur[:, :, 1]))
    return img

def detection(img):
    ret, dst = cv.threshold(img, 190, 255, cv.THRESH_BINARY_INV)
    return dst
image = cv.imread(path)
img = enhance(image)
dst = detection(img)
src, contours, hierarchy = cv.findContours(dst, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
cv.drawContours(img, contours, -1, (0, 0, 255), 2)
cv.namedWindow('img', cv.WINDOW_NORMAL)
cv.imshow('img', img)
cv.waitKey(0)
This is the result:
The latest picture
Although I used a very high threshold (190), which introduced plenty of noise, the code still couldn't find the tails. How can I solve this problem?
Thanks a lot to anyone who can show me how to improve this simple edge detection program.
Are the sperm tails always green-blue on a gray background? In that case, you can use simple segmentation.
First convert the image to HSV; if the H value is in the blue/green range, mark the pixel as foreground.
import cv2
import numpy as np
img = cv2.imread('img.jpg')
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
lower = np.array([50, 10, 10])
upper = np.array([120, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(img,img, mask= mask)
cv2.imwrite('test.jpg', res)
kernel = np.ones((5,5), np.uint8) # 5x5 square kernel used for the morphology below
d_im = cv2.dilate(mask, kernel, iterations=1)
e_im = cv2.erode(d_im, kernel, iterations=1)
cv2.imwrite('d.jpg', d_im)
cv2.imwrite('e.jpg', e_im)
Images in order are: image with mask applied, image mask with dilation, and image mask with erosion.
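To connect this back to contour detection, you can run findContours on the cleaned-up mask, much as in the question's code. A small sketch continuing from the snippet above; the [-2:] slice is only there to cope with the different return signatures of OpenCV 3.x and 4.x:
# continuing from the snippet above: e_im is the cleaned-up mask, img the original image
contours, hierarchy = cv2.findContours(e_im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
cv2.drawContours(img, contours, -1, (0, 0, 255), 2)
cv2.imwrite('contours.jpg', img)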
You might want to enhance your image before performing any operation. Going through this Wikipedia page, I came across the following formula for unsharp masking:
sharpened = original + (original − blurred) × amount
Assuming amount = 1, I wrote the following snippet:
#--- resized the image (too big !!!) ---
im = cv2.resize(im, (0, 0), fx = 0.3, fy = 0.3)
#--- smoothen the image ---
blur = cv2.GaussianBlur(im, (23, 23), 0)
#--- applied the formula assuming amount = 1---
cv2.imshow('sharpened', cv2.add(im[:,:,1], (im[:,:,1] - blur[:,:,1])))
This is what I got:
The tails are a lot more visible now. Use this to further enhance your detection mechanism.
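If you want to control amount instead of fixing it at 1, the same formula can be written with cv2.addWeighted, since original + (original - blurred) x amount = (1 + amount) x original - amount x blurred. A small sketch; the amount value is just an example to tune:
import cv2

im = cv2.imread('2.jpg')
im = cv2.resize(im, (0, 0), fx=0.3, fy=0.3)
blur = cv2.GaussianBlur(im, (23, 23), 0)

amount = 1.5  # example strength, tune for your images
# (1 + amount) * im - amount * blur, computed with saturation instead of uint8 wrap-around
sharpened = cv2.addWeighted(im, 1 + amount, blur, -amount, 0)
cv2.imwrite('sharpened.jpg', sharpened)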
There are a lot of creative ways to go about edge detection for high-frequency edges (like sperm tails, for example).
I recommend cv2.Canny() for general edge detection - you will have to play around with the inputs for your specific application.
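For example, a minimal Canny sketch; the threshold values are only starting points to tune:
import cv2

im = cv2.imread('2.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)   # light smoothing so noise doesn't become edges
edges = cv2.Canny(gray, 50, 150)           # lower/upper hysteresis thresholds
cv2.imwrite('canny_edges.jpg', edges)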
Alternatively, you can do a difference of Gaussians, where you take cv2.GaussianBlur() with two different sigmas and take the difference (https://en.wikipedia.org/wiki/Difference_of_Gaussians), i.e.,
blur1 = cv2.GaussianBlur(im, (5, 5), sigmaX_1, sigmaY=sigmaY_1).astype(np.float32)
blur2 = cv2.GaussianBlur(im, (5, 5), sigmaX_2, sigmaY=sigmaY_2).astype(np.float32)
DoG_edge = blur1 - blur2  # cast to float first so the subtraction doesn't wrap around in uint8
One final possibility is that you could also try some histogram manipulation to enhance the tails of the sperm.
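For the histogram idea, a small sketch using CLAHE (contrast-limited adaptive histogram equalization); the clipLimit and tileGridSize values are only starting points:
import cv2

im = cv2.imread('2.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
enhanced = clahe.apply(gray)   # boosts local contrast, which can make faint tails stand out
cv2.imwrite('clahe.jpg', enhanced)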
Using the following image...
... I am applying this code to create a circle mask:
import cv2
import numpy as np
img = cv2.imread("car.png")
height, width, depth = img.shape
circle_img = np.zeros((height, width), np.uint8)
mask = cv2.circle(circle_img, (int(width / 2), int(height / 2)), 90, 1, thickness=-1)
masked_img = cv2.bitwise_and(img, img, mask=circle_img)
cv2.imshow("masked", masked_img)
cv2.waitKey(0)
This is the output:
How can I find the BGR values inside the circle using OpenCV?
You can do it using numpy arrays.
circle_locations = mask == 1
bgr = img[circle_locations]
EDIT: I'm not sure whether your mask has values in {0, 1}, though I assume it does. If its background value is 0 and all positive values are foreground, just change the == 1 to a > 0.
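As a usage example, you can reduce those pixels to a single average colour, or let OpenCV compute it directly from the mask (this continues from the question's img and circle_img and the bgr array above):
mean_bgr = bgr.mean(axis=0)                        # average B, G, R of the pixels inside the circle
mean_bgr_cv = cv2.mean(img, mask=circle_img)[:3]   # same result computed by OpenCV from the mask
print(mean_bgr, mean_bgr_cv)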
I am trying to make a transparent image and draw on it, and afterwards I will addWeighted it over the base image.
How can I initialize a fully transparent image with a given width and height in OpenCV Python?
EDIT: I want an effect like in Photoshop: a stack of layers, where every layer starts out fully transparent and drawing is performed on a transparent layer. At the end I will merge all layers to get the final image.
To create a transparent image you need a 4-channel matrix: three channels hold the BGR colors and the fourth is the alpha channel. For a fully transparent image you can ignore the color values and simply set the alpha channel to 0. In Python, OpenCV represents images as numpy arrays, so a transparent image can be created as:
import numpy as np
import cv2
img_height, img_width = 300, 300
n_channels = 4
transparent_img = np.zeros((img_height, img_width, n_channels), dtype=np.uint8)
# Save the image for visualization
cv2.imwrite("./transparent_img.png", transparent_img)
If you want to draw on several "layers" and then stack the drawings together, then how about this:
import cv2
import numpy as np
#create 3 separate BGRA images as our "layers"
layer1 = np.zeros((500, 500, 4), dtype=np.uint8)
layer2 = np.zeros((500, 500, 4), dtype=np.uint8)
layer3 = np.zeros((500, 500, 4), dtype=np.uint8)
#draw a red circle on the first "layer",
#a green rectangle on the second "layer",
#a blue line on the third "layer"
red_color = (0, 0, 255, 255)
green_color = (0, 255, 0, 255)
blue_color = (255, 0, 0, 255)
cv2.circle(layer1, (255, 255), 100, red_color, 5)
cv2.rectangle(layer2, (175, 175), (335, 335), green_color, 5)
cv2.line(layer3, (170, 170), (340, 340), blue_color, 5)
res = layer1.copy() #copy the first layer into the resulting image (layer1[:] would only give a view)
#copy only the pixels we were drawing on from the 2nd and 3rd layers
#(if you don't do this, the black background will also be copied)
cnd = layer2[:, :, 3] > 0
res[cnd] = layer2[cnd]
cnd = layer3[:, :, 3] > 0
res[cnd] = layer3[cnd]
cv2.imwrite("out.png", res)
To convert an image's white parts to transparent:
import cv2
import numpy as np
img = cv2.imread("image.png", cv2.IMREAD_UNCHANGED)
img[np.where(np.all(img[..., :3] == 255, -1))] = 0
cv2.imwrite("transparent.png", img)
How do I generate circular image thumbnails using PIL?
The space outside the circle should be transparent.
Snippets would be highly appreciated, thank you in advance.
The easiest way to do it is by using masks. Create a black-and-white mask with any shape you want, and use putalpha to apply that shape as an alpha layer:
from PIL import Image, ImageOps
mask = Image.open('mask.png').convert('L')
im = Image.open('image.png')
output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
output.putalpha(mask)
output.save('output.png')
Here is the mask I used:
If you want the thumbnail size to be variable you can use ImageDraw and draw the mask:
from PIL import Image, ImageOps, ImageDraw
size = (128, 128)
mask = Image.new('L', size, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + size, fill=255)
im = Image.open('image.jpg')
output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
output.putalpha(mask)
output.save('output.png')
If you want the output in GIF then you need to use the paste function instead of putalpha:
from PIL import Image, ImageOps, ImageDraw
size = (128, 128)
mask = Image.new('L', size, 255)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + size, fill=0)
im = Image.open('image.jpg')
output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
output.paste(0, mask=mask)
output = output.convert('P', palette=Image.ADAPTIVE)
output.save('output.gif', transparency=0)
Note that I made the following changes:
The mask is now inverted: white was replaced with black and vice versa.
I'm converting to 'P' mode with an 'adaptive' palette; otherwise, PIL will only use web-safe colors and the result will look bad.
I'm adding transparency info to the image.
Please note: there is a big issue with this approach. If the GIF image contains black parts, all of them will become transparent as well. You can work around this by choosing another color for the transparency.
I would strongly advise you to use the PNG format for this. But if you can't, this is the best you can do.
I would like to add to the already accepted answer a solution that antialiases the resulting circle. The trick is to produce a bigger mask and then scale it down using the ANTIALIAS filter.
Here is the code:
from PIL import Image, ImageOps, ImageDraw
im = Image.open('image.jpg')
bigsize = (im.size[0] * 3, im.size[1] * 3)
mask = Image.new('L', bigsize, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + bigsize, fill=255)
mask = mask.resize(im.size, Image.ANTIALIAS)
im.putalpha(mask)
This produces a far better result, in my opinion.
Slight modification of @DRC's solution to also support images which already have transparency. He sets the alpha channel to 0 (invisible) outside the circle and to 255 (opaque) inside, so I use darker, which takes the minimum of the mask and the original alpha channel (which can be anywhere between 0 and 255) :-)
from PIL import Image, ImageChops, ImageDraw
def crop_to_circle(im):
bigsize = (im.size[0] * 3, im.size[1] * 3)
mask = Image.new('L', bigsize, 0)
ImageDraw.Draw(mask).ellipse((0, 0) + bigsize, fill=255)
mask = mask.resize(im.size, Image.ANTIALIAS)
mask = ImageChops.darker(mask, im.split()[-1])
im.putalpha(mask)
im = Image.open('0.png').convert('RGBA')
crop_to_circle(im)
im.save('cropped.png')
Thank you very much. I was looking for hours, and your idea does the trick.
Together with this other script, PIL round edges and add border, it works perfectly for me.
from PIL import Image
from PIL import ImageDraw, ImageChops
def add_corners(im, rad=100):
circle = Image.new('L', (rad * 2, rad * 2), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)
alpha = Image.new('L', im.size, "white")
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
alpha = ImageChops.darker(alpha, im.split()[-1])
im.putalpha(alpha)
return im
im = Image.open('AceOfSpades.png').convert('RGBA')
im = add_corners(im, 24)
im.show()
im.save("perfect.png")
Name this image AceOfSpades.png for testing