How to change bit depth? [duplicate] - python

I'm using PIL to convert a transparent PNG image uploaded with Django to a JPG file. The output looks broken.
Source file
Code
Image.open(object.logo.path).save('/tmp/output.jpg', 'JPEG')
or
Image.open(object.logo.path).convert('RGB').save('/tmp/output.png')
Result
Both ways, the resulting image looks like this:
Is there a way to fix this? I'd like to have white background where the transparent background used to be.
Solution
Thanks to the great answers, I've come up with the following function collection:
from PIL import Image
import numpy as np


def alpha_to_color(image, color=(255, 255, 255)):
    """Set all fully transparent pixels of an RGBA image to the specified color.

    This is a very simple solution that might leave over some ugly edges, due
    to semi-transparent areas. You should use alpha_composite_with_color instead.

    Source: http://stackoverflow.com/a/9166671/284318

    Keyword Arguments:
    image -- PIL RGBA Image object
    color -- Tuple r, g, b (default 255, 255, 255)

    """
    x = np.array(image)
    r, g, b, a = np.rollaxis(x, axis=-1)
    r[a == 0] = color[0]
    g[a == 0] = color[1]
    b[a == 0] = color[2]
    x = np.dstack([r, g, b, a])
    return Image.fromarray(x, 'RGBA')
def alpha_composite(front, back):
    """Alpha composite two RGBA images.

    Source: http://stackoverflow.com/a/9166671/284318

    Keyword Arguments:
    front -- PIL RGBA Image object
    back -- PIL RGBA Image object

    """
    front = np.asarray(front)
    back = np.asarray(back)
    result = np.empty(front.shape, dtype='float')
    alpha = np.index_exp[:, :, 3:]
    rgb = np.index_exp[:, :, :3]
    falpha = front[alpha] / 255.0
    balpha = back[alpha] / 255.0
    result[alpha] = falpha + balpha * (1 - falpha)
    old_setting = np.seterr(invalid='ignore')
    result[rgb] = (front[rgb] * falpha + back[rgb] * balpha * (1 - falpha)) / result[alpha]
    np.seterr(**old_setting)
    result[alpha] *= 255
    np.clip(result, 0, 255, out=result)  # clip in place; a bare np.clip call would discard its result
    # astype('uint8') maps the remaining np.nan values to 0
    result = result.astype('uint8')
    result = Image.fromarray(result, 'RGBA')
    return result
def alpha_composite_with_color(image, color=(255, 255, 255)):
    """Alpha composite an RGBA image with a single color image of the
    specified color and the same size as the original image.

    Keyword Arguments:
    image -- PIL RGBA Image object
    color -- Tuple r, g, b (default 255, 255, 255)

    """
    back = Image.new('RGBA', size=image.size, color=color + (255,))
    return alpha_composite(image, back)
def pure_pil_alpha_to_color_v1(image, color=(255, 255, 255)):
    """Alpha composite an RGBA Image with a specified color.

    NOTE: This version is much slower than the
    alpha_composite_with_color solution. Use it only if
    numpy is not available.

    Source: http://stackoverflow.com/a/9168169/284318

    Keyword Arguments:
    image -- PIL RGBA Image object
    color -- Tuple r, g, b (default 255, 255, 255)

    """
    def blend_value(back, front, a):
        return (front * a + back * (255 - a)) // 255  # integer division keeps the result a valid pixel value

    def blend_rgba(back, front):
        result = [blend_value(back[i], front[i], front[3]) for i in (0, 1, 2)]
        return tuple(result + [255])

    im = image.copy()  # don't edit the reference directly
    p = im.load()  # load pixel array
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            p[x, y] = blend_rgba(color + (255,), p[x, y])

    return im
def pure_pil_alpha_to_color_v2(image, color=(255, 255, 255)):
    """Alpha composite an RGBA Image with a specified color.

    Simpler, faster version than the solutions above.

    Source: http://stackoverflow.com/a/9459208/284318

    Keyword Arguments:
    image -- PIL RGBA Image object
    color -- Tuple r, g, b (default 255, 255, 255)

    """
    image.load()  # needed for split()
    background = Image.new('RGB', image.size, color)
    background.paste(image, mask=image.split()[3])  # 3 is the alpha channel
    return background
Performance
The simple non-compositing alpha_to_color function is the fastest solution, but it leaves behind ugly borders because it does not handle semi-transparent areas.
Both the pure PIL and the numpy compositing solutions give great results, but alpha_composite_with_color is much faster (8.93 msec) than pure_pil_alpha_to_color_v1 (79.6 msec). If numpy is available on your system, that's the way to go. (Update: the new pure PIL version, pure_pil_alpha_to_color_v2, is the fastest of all of the solutions mentioned.)
$ python -m timeit "import Image; from apps.front import utils; i = Image.open(u'logo.png'); i2 = utils.alpha_to_color(i)"
10 loops, best of 3: 4.67 msec per loop
$ python -m timeit "import Image; from apps.front import utils; i = Image.open(u'logo.png'); i2 = utils.alpha_composite_with_color(i)"
10 loops, best of 3: 8.93 msec per loop
$ python -m timeit "import Image; from apps.front import utils; i = Image.open(u'logo.png'); i2 = utils.pure_pil_alpha_to_color(i)"
10 loops, best of 3: 79.6 msec per loop
$ python -m timeit "import Image; from apps.front import utils; i = Image.open(u'logo.png'); i2 = utils.pure_pil_alpha_to_color_v2(i)"
10 loops, best of 3: 1.1 msec per loop

Here's a version that's much simpler - not sure how performant it is. Heavily based on some django snippet I found while building RGBA -> JPG + BG support for sorl thumbnails.
from PIL import Image
png = Image.open(object.logo.path)
png.load() # required for png.split()
background = Image.new("RGB", png.size, (255, 255, 255))
background.paste(png, mask=png.split()[3]) # 3 is the alpha channel
background.save('foo.jpg', 'JPEG', quality=80)
Result @ 80% quality
Result @ 50% quality

By using Image.alpha_composite, the solution by Yuji 'Tomita' Tomita becomes simpler. This code also avoids a "tuple index out of range" error when the PNG has no alpha channel.
from PIL import Image
png = Image.open(img_path).convert('RGBA')
background = Image.new('RGBA', png.size, (255, 255, 255))
alpha_composite = Image.alpha_composite(background, png)
alpha_composite.convert('RGB').save('foo.jpg', 'JPEG', quality=80)  # JPEG has no alpha, so convert to RGB before saving

The transparent parts mostly have RGBA value (0,0,0,0). Since the JPG has no transparency, the jpeg value is set to (0,0,0), which is black.
Around the circular icon, there are pixels with nonzero RGB values where A = 0. So they look transparent in the PNG, but funny-colored in the JPG.
You can set all pixels where A == 0 to have R = G = B = 255 using numpy like this:
from PIL import Image
import numpy as np

FNAME = 'logo.png'
img = Image.open(FNAME).convert('RGBA')
x = np.array(img)
r, g, b, a = np.rollaxis(x, axis=-1)
r[a == 0] = 255
g[a == 0] = 255
b[a == 0] = 255
x = np.dstack([r, g, b, a])
img = Image.fromarray(x, 'RGBA')
img.convert('RGB').save('/tmp/out.jpg')  # drop the alpha channel before saving as JPEG
Note that the logo also has some semi-transparent pixels used to smooth the edges around the words and icon. Saving to jpeg ignores the semi-transparency, making the resultant jpeg look quite jagged.
A better quality result could be made using imagemagick's convert command:
convert logo.png -background white -flatten /tmp/out.jpg
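If you'd rather stay in Python, the same ImageMagick call can be wrapped in a subprocess invocation. This is just a sketch; it assumes the convert binary is on your PATH and that logo.png is in the working directory:

import subprocess

# flatten the transparent PNG onto a white background via ImageMagick
subprocess.run(
    ['convert', 'logo.png', '-background', 'white', '-flatten', '/tmp/out.jpg'],
    check=True,  # raise CalledProcessError if convert fails
)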
To make a nicer quality blend using numpy, you could use alpha compositing:
from PIL import Image
import numpy as np

def alpha_composite(src, dst):
    '''
    Return the alpha composite of src and dst.

    Parameters:
    src -- PIL RGBA Image object
    dst -- PIL RGBA Image object

    The algorithm comes from http://en.wikipedia.org/wiki/Alpha_compositing
    '''
    # http://stackoverflow.com/a/3375291/190597
    # http://stackoverflow.com/a/9166671/190597
    src = np.asarray(src)
    dst = np.asarray(dst)
    out = np.empty(src.shape, dtype='float')
    alpha = np.index_exp[:, :, 3:]
    rgb = np.index_exp[:, :, :3]
    src_a = src[alpha] / 255.0
    dst_a = dst[alpha] / 255.0
    out[alpha] = src_a + dst_a * (1 - src_a)
    old_setting = np.seterr(invalid='ignore')
    out[rgb] = (src[rgb] * src_a + dst[rgb] * dst_a * (1 - src_a)) / out[alpha]
    np.seterr(**old_setting)
    out[alpha] *= 255
    np.clip(out, 0, 255, out=out)  # clip in place; a bare np.clip call would discard its result
    # astype('uint8') maps the remaining np.nan values to 0
    out = out.astype('uint8')
    out = Image.fromarray(out, 'RGBA')
    return out

FNAME = 'logo.png'
img = Image.open(FNAME).convert('RGBA')
white = Image.new('RGBA', size=img.size, color=(255, 255, 255, 255))
img = alpha_composite(img, white)
img.convert('RGB').save('/tmp/out.jpg')  # drop the alpha channel before saving as JPEG

Here's a solution in pure PIL.
from PIL import Image

def blend_value(under, over, a):
    return (over * a + under * (255 - a)) // 255  # integer division keeps the result a valid pixel value

def blend_rgba(under, over):
    return tuple([blend_value(under[i], over[i], over[3]) for i in (0, 1, 2)] + [255])

white = (255, 255, 255, 255)

im = Image.open(object.logo.path)
p = im.load()
for y in range(im.size[1]):
    for x in range(im.size[0]):
        p[x, y] = blend_rgba(white, p[x, y])
im.save('/tmp/output.png')

It's not broken. It's doing exactly what you told it to; those pixels are black with full transparency. You will need to iterate across all pixels and convert ones with full transparency to white.
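As a rough sketch of that idea (assuming the uploaded logo opens as RGBA, as in the question), you could do something like this before saving to JPEG:

from PIL import Image

im = Image.open(object.logo.path).convert('RGBA')
pixels = im.load()
for y in range(im.size[1]):
    for x in range(im.size[0]):
        r, g, b, a = pixels[x, y]
        if a == 0:  # fully transparent pixel
            pixels[x, y] = (255, 255, 255, 255)
im.convert('RGB').save('/tmp/output.jpg', 'JPEG')

Note that this only handles fully transparent pixels; the compositing approaches above also handle semi-transparent edges.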

import numpy as np
from PIL import Image

def convert_image(image_file):
    image = Image.open(image_file)  # this may be an RGBA PNG (an array with 4 channels)
    original_width, original_height = image.size
    np_image = np.array(image)
    # create a 3-channel array and copy only the first 3 channels (RGB), dropping alpha
    new_image = np.zeros((np_image.shape[0], np_image.shape[1], 3), dtype=np.uint8)
    for each_channel in range(3):
        new_image[:, :, each_channel] = np_image[:, :, each_channel]
    # release the intermediate array
    np_image = []
    return new_image

from PIL import Image
import numpy as np


def fig2img(fig):
    """
    @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
    @param fig a matplotlib figure
    @return a Python Imaging Library (PIL) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data(fig)
    h, w, _ = buf.shape
    return Image.frombytes("RGBA", (w, h), buf.tobytes())


def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw()

    # Get the ARGB buffer from the figure; rows come first, so reshape to (height, width, 4)
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    buf = buf.reshape(h, w, 4)

    # canvas.tostring_argb gives the pixmap in ARGB order; roll the alpha channel to the end to get RGBA
    buf = np.roll(buf, 3, axis=2)
    return buf


def rgba2rgb(img, c=(0, 0, 0), path='foo.jpg', is_already_saved=False, if_load=True):
    if not is_already_saved:
        background = Image.new("RGB", img.size, c)
        background.paste(img, mask=img.split()[3])  # 3 is the alpha channel
        background.save(path, 'JPEG', quality=100)
        is_already_saved = True
    if if_load:
        if is_already_saved:
            im = Image.open(path)
            return np.array(im)
        else:
            raise ValueError('No image to load.')

Related

How to generate an alpha image with a color range with PIL?

I have a grayscale image and I want to create an alpha layer based on a range of pixel values. I want to know how I can create a fall-off function to generate such an image.
The original image is the following:
I can use the color range in Photoshop to select the shadows with a fuzziness of 20%:
And the resultant alpha channel is the following:
With fuzziness of 100%:
How can I generate such alpha channels in python with PIL?
I thought that maybe a subtraction would work, but it does not generate the result I want.
The code to generate the image with Numpy and PIL:
from PIL import Image
import numpy as np
img = np.arange(0,256, 0.1).astype(np.uint8)
img = np.reshape(img, (img.shape[0], 1))
img = np.repeat((img), 500, axis=1)
img = Image.fromarray(img.T)
I tried to create a fall-off function from the distance of the pixel values but it does not have the same gradient. Maybe there is a different way?
def gauss_falloff(distance, c=0.2, alpha=255):
    new_value = alpha * np.exp(-1 * ((distance) ** 2) / (c**2))
    new_value = new_value.clip(0, 255)
    return new_value.astype(np.uint8)

test = img.T / 255
test = np.abs(test - pixel)
test = gauss_falloff(test, c=0.2, alpha=255)
test = Image.fromarray(test)
With my code:
Here's how you could do that:
from PIL import Image, ImageDraw
# Create a new image with a transparent background
width, height = 300, 300
image = Image.new('RGBA', (width, height), (255, 255, 255, 0))
# Create a drawing context for the image
draw = ImageDraw.Draw(image)
# Set the starting and ending colors for the gradient
start_color = (255, 0, 0)
end_color = (0, 0, 255)
# Draw a gradient line with the specified color range
for x in range(width):
    color = tuple(int(start_color[i] + (end_color[i] - start_color[i]) * x / width)
                  for i in range(3))
    draw.line((x, 0, x, height), fill=color)
# Save the image
image.save('gradient.png')
This code creates a new image with a transparent background and a drawing context for that image. Then it draws a gradient line on the image with the specified color range. Finally, it saves the image as a PNG file.
Note: The Python Imaging Library (PIL) has been superseded by Pillow, a fork of PIL. If you are using Pillow, the code above works unchanged, since Pillow is still imported under the PIL package name (from PIL import Image, ImageDraw).

What is the Fastest way to change pixel values of an RGB image using Numpy / Opencv / scikit-image

Given a binary image, what is the fastest and most Pythonic way to convert the image to RGB and then modify its pixels?
I have these two ways, but they don't feel good to me:
def get_mask(rgb_image_path):
    mask = np.array(Image.open(rgb_image_path).convert('L'), dtype=np.float32)  # mask should be grayscale, so each value is either 0 or 255
    mask[mask == 255.0] = 1.0  # wherever there is 255, convert it to 1 (1 == 255 == white)
    return mask

def approach1(mask):
    mask = np.logical_not(mask)
    mask = mask.astype(np.uint8)
    mask = mask * 255
    red = mask.copy()
    blue = mask.copy()
    green = mask.copy()
    red[red == 0] = 26
    blue[blue == 0] = 237
    green[green == 0] = 160
    mask = np.stack([red, blue, green], axis=-1)
    plt.imshow(mask)

def approach2(mask):
    mask = np.logical_not(mask)
    mask = mask.astype(np.uint8)
    mask = np.stack([mask, mask, mask], axis=-1)
    mask = mask * 255
    width, height, channels = mask.shape
    for i in range(width):
        for j in range(height):
            if mask[i][j][0] == 0:
                mask[i][j] = (26, 237, 160)
    plt.imshow(mask)
Below is the Image
I suppose the simplest way is this:
def mask_coloring(mask):
    expected_color = (26, 237, 160)
    color_mask = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
    color_mask[mask == 255.0, :] = expected_color
    plt.imshow(color_mask)
By the way, a similar approach can be found here.
The easiest way to do this is to take advantage of the fact that you have, and want, only 2 colours, and to use a fast, space-efficient palette image. That way you don't need to iterate over all the pixels or store 3 bytes per pixel:
from PIL import Image
# Open your image
im = Image.open('oqX2g.gif')
# Create a new palette where the 1st entry is black (0,0,0) and the 2nd is your colour (26,237,160) then pad out to 256 RGB entries
palette = [0,0,0] + [26,237,160] + [0,0,0] * 254
# Put palette into image and save
im.putpalette(palette)
im.save('result.png')
Read more about palette images here.
What if you want the black background to become magenta, and the foreground green? Oh, and you want it as an RGB Numpy array?
Just change the palette and convert:
from PIL import Image
import numpy as np
# Open your image, push in new palette
im = Image.open('oqX2g.gif')
palette = [255,0,255] + [0,255,0] + [0,0,0] * 254
im.putpalette(palette)
# Make RGB Numpy array
na = np.array(im.convert('RGB'))

Change white background to a specific color

Hello, I am trying to give a black-and-white photo a colored background. First I convert the pixels to either white or black, then I would like to replace the white with a specific color (148, 105, 39) and save it.
Here is the image I am working on:
Here is my function so far; it doesn't change the color (and also doesn't give an error):
def binary_image(img):
    gray = img.convert('L')
    bw = gray.point(lambda x: 0 if x < 128 else 255, '1')
    img = bw.convert('RGBA')
    pixdata = img.load()
    for y in range(img.size[1]):
        for x in range(img.size[0]):
            if pixdata[x, y] == (255, 255, 255, 255):  # white color
                pixdata[x, y] = (148, 105, 39, 255)
    return full_4

img = Image.open('logo.jpg')
bg_fps = binary_image(img)
bg_fps.show()
You really, really want to avoid for loops with PIL images; use Numpy instead and it will be easier to write and faster to run.
I would do it like this:
import numpy as np
from PIL import Image

# Load image and ensure greyscale
im = Image.open('U6IhW.jpg').convert('L')

# Make Numpy version for fast, efficient access
npim = np.array(im)

# Make solid black RGB output image, same width and height but 3 channels (RGB)
res = np.zeros((npim.shape[0], npim.shape[1], 3), dtype=np.uint8)

# Anywhere the grey image is >127, make result image new colour
res[npim > 127] = [148, 105, 39]

# Convert back to PIL Image and save to disk
Image.fromarray(res).save('result.png')
You could equally use PIL's ImageOps.colorize() like this:
from PIL import Image, ImageOps
im = Image.open('U6IhW.jpg').convert('L')
# Threshold to pure black and white
bw = im.point(lambda x: 0 if x<128 else 255)
# Colorize
result = ImageOps.colorize(bw, (0,0,0), (148,105,39))

Using openCV to overlay transparent image onto another image

How can I overlay a transparent PNG onto another image without losing its transparency, using OpenCV in Python?
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
# Help please
cv2.imwrite('combined.png', background)
Desired output:
Sources:
Background Image
Overlay
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png')
added_image = cv2.addWeighted(background,0.4,overlay,0.1,0)
cv2.imwrite('combined.png', added_image)
The correct answer to this was far too hard to come by, so I'm posting this answer even though the question is really old. What you are looking for is "over" compositing, and the algorithm for this can be found on Wikipedia: https://en.wikipedia.org/wiki/Alpha_compositing
I am far from an expert with OpenCV, but after some experimentation this is the most efficient way I have found to accomplish the task:
import cv2
background = cv2.imread("background.png", cv2.IMREAD_UNCHANGED)
foreground = cv2.imread("overlay.png", cv2.IMREAD_UNCHANGED)
# normalize alpha channels from 0-255 to 0-1
alpha_background = background[:,:,3] / 255.0
alpha_foreground = foreground[:,:,3] / 255.0
# set adjusted colors
for color in range(0, 3):
    background[:, :, color] = alpha_foreground * foreground[:, :, color] + \
        alpha_background * background[:, :, color] * (1 - alpha_foreground)
# set adjusted alpha and denormalize back to 0-255
background[:,:,3] = (1 - (1 - alpha_foreground) * (1 - alpha_background)) * 255
# display the image
cv2.imshow("Composited image", background)
cv2.waitKey(0)
The following code will use the alpha channels of the overlay image to correctly blend it into the background image, use x and y to set the top-left corner of the overlay image.
import cv2
import numpy as np
def overlay_transparent(background, overlay, x, y):
    background_width = background.shape[1]
    background_height = background.shape[0]

    if x >= background_width or y >= background_height:
        return background

    h, w = overlay.shape[0], overlay.shape[1]

    if x + w > background_width:
        w = background_width - x
        overlay = overlay[:, :w]

    if y + h > background_height:
        h = background_height - y
        overlay = overlay[:h]

    if overlay.shape[2] < 4:
        overlay = np.concatenate(
            [
                overlay,
                np.ones((overlay.shape[0], overlay.shape[1], 1), dtype=overlay.dtype) * 255
            ],
            axis=2,
        )

    overlay_image = overlay[..., :3]
    mask = overlay[..., 3:] / 255.0

    background[y:y+h, x:x+w] = (1.0 - mask) * background[y:y+h, x:x+w] + mask * overlay_image

    return background
This code will mutate background so create a copy if you wish to preserve the original background image.
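For example, a short usage sketch (the file names are the ones from the question; the offsets are arbitrary):

background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED)  # keep the alpha channel

# pass a copy so the original background array is left untouched
result = overlay_transparent(background.copy(), overlay, 100, 50)
cv2.imwrite('combined.png', result)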
Been a while since this question appeared, but I believe this is the right simple answer, which could still help somebody.
background = cv2.imread('road.jpg')
overlay = cv2.imread('traffic sign.png')
rows,cols,channels = overlay.shape
overlay=cv2.addWeighted(background[250:250+rows, 0:0+cols],0.5,overlay,0.5,0)
background[250:250+rows, 0:0+cols ] = overlay
This will overlay the image over the background image such as shown here:
Ignore the ROI rectangles
Note that I used a background image of size 400x300 and an overlay image of size 32x32. The overlay is shown in the x [0-32] and y [250-282] region of the background image, according to the coordinates I set for it: I first calculate the blend and then put the calculated blend into the part of the image where I want to have it.
(The overlay is loaded from disk, not taken from the background image itself. Unfortunately the overlay image has its own white background, so you can see that in the result too.)
If performance isn't a concern, then you can iterate over each pixel of the overlay and apply it to the background. This isn't very efficient, but it does help to understand how to work with a PNG's alpha layer.
slow version
import cv2
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
height, width = overlay.shape[:2]
for y in range(height):
    for x in range(width):
        overlay_color = overlay[y, x, :3]  # first three elements are the color (BGR, since OpenCV loads in BGR order)
        overlay_alpha = overlay[y, x, 3] / 255  # 4th element is the alpha channel, convert from 0-255 to 0.0-1.0

        # get the color from the background image
        background_color = background[y, x]

        # combine the background color and the overlay color weighted by alpha
        composite_color = background_color * (1 - overlay_alpha) + overlay_color * overlay_alpha

        # update the background image in place
        background[y, x] = composite_color

cv2.imwrite('combined.png', background)
result:
fast version
I stumbled across this question while trying to add a png overlay to a live video feed. The above solution is way too slow for that. We can make the algorithm significantly faster by using numpy's vector functions.
note: This was my first real foray into numpy so there may be better/faster methods than what I've come up with.
import cv2
import numpy as np
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
# separate the alpha channel from the color channels
alpha_channel = overlay[:, :, 3] / 255 # convert from 0-255 to 0.0-1.0
overlay_colors = overlay[:, :, :3]
# To take advantage of the speed of numpy and apply transformations to the entire image with a single operation
# the arrays need to be the same shape. However, the shapes currently look like this:
#   - overlay_colors shape: (height, width, 3) - 3 color values for each pixel (blue, green, red)
#   - alpha_channel shape:  (height, width)    - a single alpha value for each pixel
# We will construct an alpha_mask that has the same shape as overlay_colors by duplicating the
# alpha channel for each color, so there is a 1:1 alpha value for each color channel
alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))
# The background image is larger than the overlay so we'll take a subsection of the background that matches the
# dimensions of the overlay.
# NOTE: For simplicity, the overlay is applied to the top-left corner of the background(0,0). An x and y offset
# could be used to place the overlay at any position on the background.
h, w = overlay.shape[:2]
background_subsection = background[0:h, 0:w]
# combine the background with the overlay image weighted by alpha
composite = background_subsection * (1 - alpha_mask) + overlay_colors * alpha_mask
# overwrite the section of the background image that has been updated
background[0:h, 0:w] = composite
cv2.imwrite('combined.png', background)
How much faster? On my machine the slow method takes ~3 seconds and the optimized method takes ~30 ms. So about 100 times faster!
Wrapped up in a function
This function handles foreground and background images of different sizes and also supports negative and positive offsets that move the overlay across the bounds of the background image in any direction.
import cv2
import numpy as np
def add_transparent_image(background, foreground, x_offset=None, y_offset=None):
    bg_h, bg_w, bg_channels = background.shape
    fg_h, fg_w, fg_channels = foreground.shape

    assert bg_channels == 3, f'background image should have exactly 3 channels (RGB). found:{bg_channels}'
    assert fg_channels == 4, f'foreground image should have exactly 4 channels (RGBA). found:{fg_channels}'

    # center by default
    if x_offset is None: x_offset = (bg_w - fg_w) // 2
    if y_offset is None: y_offset = (bg_h - fg_h) // 2

    w = min(fg_w, bg_w, fg_w + x_offset, bg_w - x_offset)
    h = min(fg_h, bg_h, fg_h + y_offset, bg_h - y_offset)

    if w < 1 or h < 1: return

    # clip foreground and background images to the overlapping regions
    bg_x = max(0, x_offset)
    bg_y = max(0, y_offset)
    fg_x = max(0, x_offset * -1)
    fg_y = max(0, y_offset * -1)
    foreground = foreground[fg_y:fg_y + h, fg_x:fg_x + w]
    background_subsection = background[bg_y:bg_y + h, bg_x:bg_x + w]

    # separate alpha and color channels from the foreground image
    foreground_colors = foreground[:, :, :3]
    alpha_channel = foreground[:, :, 3] / 255  # 0-255 => 0.0-1.0

    # construct an alpha_mask that matches the image shape
    alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))

    # combine the background with the overlay image weighted by alpha
    composite = background_subsection * (1 - alpha_mask) + foreground_colors * alpha_mask

    # overwrite the section of the background image that has been updated
    background[bg_y:bg_y + h, bg_x:bg_x + w] = composite
example usage:
background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED) # IMREAD_UNCHANGED => open image with the alpha channel
x_offset = 0
y_offset = 0
print("arrow keys to move the dice. ESC to quit")
while True:
    img = background.copy()
    add_transparent_image(img, overlay, x_offset, y_offset)

    cv2.imshow("", img)
    key = cv2.waitKey()
    if key == 0: y_offset -= 10   # up
    if key == 1: y_offset += 10   # down
    if key == 2: x_offset -= 10   # left
    if key == 3: x_offset += 10   # right
    if key == 27: break           # escape
You need to open the transparent png image using the flag IMREAD_UNCHANGED
Mat overlay = cv::imread("dice.png", IMREAD_UNCHANGED);
Then split the channels, group the RGB channels back together and use the alpha channel as a mask, like this:
/**
 * @brief Draws a transparent image over a frame Mat.
 *
 * @param frame the frame where the transparent image will be drawn
 * @param transp the Mat image with transparency, read from a PNG image, with the IMREAD_UNCHANGED flag
 * @param xPos x position of the frame image where the image will start.
 * @param yPos y position of the frame image where the image will start.
 */
void drawTransparency(Mat frame, Mat transp, int xPos, int yPos) {
    Mat mask;
    vector<Mat> layers;

    split(transp, layers);                  // separate channels
    Mat rgb[3] = { layers[0], layers[1], layers[2] };
    mask = layers[3];                       // png's alpha channel used as mask
    merge(rgb, 3, transp);                  // put together the RGB channels, now transp isn't transparent
    transp.copyTo(frame.rowRange(yPos, yPos + transp.rows).colRange(xPos, xPos + transp.cols), mask);
}
Can be called like that:
drawTransparency(background, overlay, 10, 10);
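For anyone working from Python rather than C++, a rough equivalent of the same split/merge/mask idea might look like the sketch below (file names and offsets follow the question; note that, like the C++ version, this uses the alpha channel as a hard mask, so semi-transparent pixels are not blended):

import cv2

background = cv2.imread('field.jpg')
overlay = cv2.imread('dice.png', cv2.IMREAD_UNCHANGED)  # keep the alpha channel

b, g, r, a = cv2.split(overlay)        # separate the channels
overlay_bgr = cv2.merge((b, g, r))     # regroup the colour channels
mask = a > 0                           # alpha channel used as a boolean mask

x_pos, y_pos = 10, 10
h, w = overlay_bgr.shape[:2]
roi = background[y_pos:y_pos + h, x_pos:x_pos + w]
roi[mask] = overlay_bgr[mask]          # copy overlay pixels only where they are opaque

cv2.imwrite('combined.png', background)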
To overlay a PNG watermark over a normal 3-channel JPEG image:
import cv2
import numpy as np

def logoOverlay(image, logo, alpha=1.0, x=0, y=0, scale=1.0):
    (h, w) = image.shape[:2]
    image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])

    overlay = cv2.resize(logo, None, fx=scale, fy=scale)
    (wH, wW) = overlay.shape[:2]
    output = image.copy()
    # blend the two images together using transparent overlays
    try:
        if x < 0: x = w + x
        if y < 0: y = h + y
        if x + wW > w: wW = w - x
        if y + wH > h: wH = h - y
        print(x, y, wW, wH)
        overlay = cv2.addWeighted(output[y:y+wH, x:x+wW], alpha, overlay[:wH, :wW], 1.0, 0)
        output[y:y+wH, x:x+wW] = overlay
    except Exception as e:
        print("Error: Logo position is overshooting image!")
        print(e)

    output = output[:, :, :3]
    return output
Usage:
background = cv2.imread('image.jpeg')
overlay = cv2.imread('logo.png', cv2.IMREAD_UNCHANGED)

print(overlay.shape)     # must be (x, y, 4)
print(background.shape)  # must be (x, y, 3)

# downscale logo by half and position it relative to the bottom-right corner
out = logoOverlay(background, overlay, scale=0.5, y=-100, x=-100)

cv2.imshow("test", out)
cv2.waitKey(0)
import cv2
import numpy as np
background = cv2.imread('background.jpg')
overlay = cv2.imread('cloudy.png')
overlay = cv2.resize(overlay, (200,200))
# overlay = for_transparent_removal(overlay)
h, w = overlay.shape[:2]
shapes = np.zeros_like(background, np.uint8)
shapes[0:h, 0:w] = overlay
alpha = 0.8
mask = shapes.astype(bool)
# option first
background[mask] = cv2.addWeighted(shapes, alpha, shapes, 1 - alpha, 0)[mask]
cv2.imwrite('combined.png', background)
# option second
background[mask] = cv2.addWeighted(background, alpha, overlay, 1 - alpha, 0)[mask]
# NOTE : above both option will give you image overlays but effect would be changed
cv2.imwrite('combined.1.png', background)
Use this function to place your overlay on any background image. If you want to resize the overlay, use overlay = cv2.resize(overlay, (200, 200)) and then pass the resized overlay into the function.
import cv2
import numpy as np

def image_overlay_second_method(img1, img2, location, min_thresh=0, is_transparent=False):
    h, w = img1.shape[:2]
    h1, w1 = img2.shape[:2]
    x, y = location
    roi = img1[y:y + h1, x:x + w1]

    gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, min_thresh, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img_fg = cv2.bitwise_and(img2, img2, mask=mask)
    dst = cv2.add(img_bg, img_fg)

    if is_transparent:
        dst = cv2.addWeighted(img1[y:y + h1, x:x + w1], 0.1, dst, 0.9, None)

    img1[y:y + h1, x:x + w1] = dst
    return img1

if __name__ == '__main__':
    background = cv2.imread('background.jpg')
    overlay = cv2.imread('overlay.png')
    output = image_overlay_second_method(background, overlay, location=(800, 50), min_thresh=0, is_transparent=True)
    cv2.imwrite('output.png', output)
background.jpg
output.png

How do I create an image in PIL using a list of RGB tuples?

Suppose I have a list of pixels (each represented as a tuple of 3 RGB values) that looks like the output of list(im.getdata()), like this:
[(0,0,0),(255,255,255),(38,29,58)...]
How do I create a new image using RGB values (each tuple corresponds to a pixel) in this format?
Thanks for your help.
You can do it like this:
list_of_pixels = list(im.getdata())
# Do something to the pixels...
im2 = Image.new(im.mode, im.size)
im2.putdata(list_of_pixels)
You can also use scipy for that:
#!/usr/bin/env python
import scipy.misc
import numpy as np
# Image size
width = 640
height = 480
channels = 3
# Create an empty image
img = np.zeros((height, width, channels), dtype=np.uint8)
# Draw something (http://stackoverflow.com/a/10032271/562769)
xx, yy = np.mgrid[:height, :width]
circle = (xx - 100) ** 2 + (yy - 100) ** 2
# Set the RGB values
for y in range(img.shape[0]):
    for x in range(img.shape[1]):
        r, g, b = circle[y][x], circle[y][x], circle[y][x]
        img[y][x][0] = r
        img[y][x][1] = g
        img[y][x][2] = b

# Display the image
# (note: scipy.misc.imshow and scipy.misc.imsave were removed in newer SciPy releases,
# so this snippet needs an older SciPy, or an alternative library for display/saving)
scipy.misc.imshow(img)

# Save the image
scipy.misc.imsave("image.png", img)
gives
Here's a complete example since I didn't get the trick at first.
from PIL import Image
img = Image.new('RGB', [500,500], 255)
data = img.load()
for x in range(img.size[0]):
    for y in range(img.size[1]):
        data[x, y] = (
            x % 255,
            y % 255,
            (x**2 - y**2) % 255,
        )
img.save('image.png')
And if you're looking for grayscale only, you can do Image.new('L', [500,500], 255) and then data[x,y] = <your value between 0 and 255>
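For instance, a minimal grayscale sketch along those lines (the pixel formula here is just illustrative):

from PIL import Image

img = Image.new('L', [500, 500], 255)  # 'L' = 8-bit grayscale
data = img.load()
for x in range(img.size[0]):
    for y in range(img.size[1]):
        data[x, y] = (x + y) % 256  # any value between 0 and 255
img.save('image_grayscale.png')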
