PIL Resizes Image - python

So I have this code
from PIL import Image

def get_concat_h_cut(im1, im2):
    dst = Image.new('RGB', (im1.width + im2.width,
                            min(im1.height, im2.height)))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst

def get_concat_v_cut(im1, im2):
    dst = Image.new(
        'RGB', (min(im1.width, im2.width), im1.height + im2.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (0, im1.height))
    return dst

FileA = Image.open("dog.jpg")
FileB = Image.open("cat.jpg")
get_concat_v_cut(FileA, FileB).save('pillow_concat_v_cut.jpg')
But since the resolution of the cat image is small, the program cuts the output down to the cat's width, so I can't see most of the dog image. How do I make it so I can see both the cat and the dog?
Cat image: https://i.ibb.co/j58TRnt/cat.jpg
Dog image: https://i.ibb.co/d6jdsBC/dog.jpg
Image that the program generates: https://i.ibb.co/WkDdPTD/pillow-concat-v-cut.jpg
Thank you for your help.

If you wish to keep the width of the dog image the same, which will fill in black space to the right of the cat image on the output image, you can change the size argument for Image.new in your concat function to
(max(im1.width, im2.width), im1.height+im2.height)
The same would apply to your h-cut concat function, just applied to the height argument.
If you're trying to scale them to the same size, you can call the .resize() method on whichever image you want to scale, and pass it the other image's width and height as its size argument.
You can find more info here:
https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.resize
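For reference, a rough sketch of both options (the names get_concat_v_pad and get_concat_v_resize are just illustrative, not part of your original code):

from PIL import Image

def get_concat_v_pad(im1, im2):
    # max() keeps the wider image intact; the narrower one gets black padding
    dst = Image.new('RGB', (max(im1.width, im2.width), im1.height + im2.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (0, im1.height))
    return dst

def get_concat_v_resize(im1, im2):
    # Scale im2 to im1's size before stacking (this distorts im2's aspect ratio)
    im2 = im2.resize((im1.width, im1.height))
    return get_concat_v_pad(im1, im2)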

Related

How to generate an alpha image with a color range with PIL?

I have a grayscale image and I want to create an alpha layer based on a range of pixel values. I want to know how can I create a fall-off function to generate such image.
The original image is the following:
I can use the color range in photoshop to select the shadows with fuzziness of 20%
And the resultant alpha channel is the following:
With fuzziness of 100%:
How can I generate such alpha channels in python with PIL?
I thought that maybe a subtraction would work, but it does not generate a comparable result.
The code to generate the image with Numpy and PIL:
from PIL import Image
import numpy as np
img = np.arange(0,256, 0.1).astype(np.uint8)
img = np.reshape(img, (img.shape[0], 1))
img = np.repeat((img), 500, axis=1)
img = Image.fromarray(img.T)
I tried to create a fall-off function from the distance of the pixel values but it does not have the same gradient. Maybe there is a different way?
def gauss_falloff(distance, c=0.2, alpha=255):
    new_value = alpha * np.exp(-1 * ((distance) ** 2) / (c**2))
    new_value = new_value.clip(0, 255)
    return new_value.astype(np.uint8)

test = img.T / 255
test = np.abs(test - pixel)
test = gauss_falloff(test, c=0.2, alpha=255)
test = Image.fromarray(test)
With my code:
Here's how you could do that
from PIL import Image, ImageDraw

# Create a new image with a transparent background
width, height = 300, 300
image = Image.new('RGBA', (width, height), (255, 255, 255, 0))

# Create a drawing context for the image
draw = ImageDraw.Draw(image)

# Set the starting and ending colors for the gradient
start_color = (255, 0, 0)
end_color = (0, 0, 255)

# Draw a gradient line with the specified color range
for x in range(width):
    color = tuple(int(start_color[i] + (end_color[i] - start_color[i]) * x / width)
                  for i in range(3))
    draw.line((x, 0, x, height), fill=color)

# Save the image
image.save('gradient.png')
This code creates a new image with a transparent background and a drawing context for that image. Then it draws a gradient line on the image with the specified color range. Finally, it saves the image as a PNG file.
Note: The Python Imaging Library (PIL) has been superseded by Pillow, a fork of PIL. Pillow installs under the same PIL namespace, so the imports above (from PIL import Image, ImageDraw) work unchanged.

How to efficiently change colors on a lot of images?

I have a huge dataset of images like this:
I would like to change the colors on these. All white should stay white, all purple should turn white and everything else should turn black. The desired output would look like this:
I've made the code below and it does what I want, but it takes way too long to go through the number of pictures I have. Is there another, faster way of doing this?
path = r"C:path"
for f in os.listdir(path):
f_name = (os.path.join(path,f))
if f_name.endswith(".png"):
im = Image.open(f_name)
fn, fext = os.path.splitext(f_name)
print (fn)
im =im.convert("RGBA")
for x in range(im.size[0]):
for y in range(im.size[1]):
if im.getpixel((x, y)) == (255, 255, 255, 255):
im.putpixel((x, y),(255, 255, 255,255))
elif im.getpixel((x, y)) == (128, 64, 128, 255):
im.putpixel((x, y),(255, 255, 255,255))
else:
im.putpixel((x, y),(0, 0, 0,255))
im.show()
Your images seem to be palettised as they represent segmentations, or labelled classes and there are typically fewer than 256 classes. As such, each pixel is just a label (or class number) and the actual colours are looked up in a 256-element table, i.e. the palette.
Have a look here if you are unfamiliar with palettised images.
So, you don't need to iterate over all 12 million pixels, you can instead just iterate over the palette which is only 256 elements long...
#!/usr/bin/env python3
import sys
import numpy as np
from PIL import Image
# Load image
im = Image.open('image.png')
# Check it is palettised as expected
if im.mode != 'P':
    sys.exit("ERROR: Was expecting a palettised image")
# Get palette and make into Numpy array of 256 entries of 3 RGB colours
palette = np.array(im.getpalette(),dtype=np.uint8).reshape((256,3))
# Name our colours for readability
purple = [128,64,128]
white = [255,255,255]
black = [0,0,0]
# Go through palette, setting purple to white
palette[np.all(palette==purple, axis=-1)] = white
# Go through palette, setting anything not white to black
palette[~np.all(palette==white, axis=-1)] = black
# Apply our modified palette and save
im.putpalette(palette.ravel().tolist())
im.save('result.png')
That takes 290ms including loading and saving the image.
If you have many thousands of images to do, and you are on a decent OS, you can use GNU Parallel. Change the above code to accept a command-line parameter which is the name of the image, and save it as recolour.py then use:
parallel ./recolour.py {} ::: *.png
It will keep all CPU cores on your CPU busy till they are all processed.
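As a rough illustration (this exact argument handling and the in-place save are assumptions, not part of the original answer), recolour.py could read the filename from sys.argv:

#!/usr/bin/env python3
# recolour.py - hypothetical wrapper that takes the image filename on the command line
import sys
import numpy as np
from PIL import Image

filename = sys.argv[1]

im = Image.open(filename)
if im.mode != 'P':
    sys.exit("ERROR: Was expecting a palettised image")

# Same palette rewrite as above: purple -> white, everything else -> black
palette = np.array(im.getpalette(), dtype=np.uint8).reshape((256, 3))
palette[np.all(palette == [128, 64, 128], axis=-1)] = [255, 255, 255]
palette[~np.all(palette == [255, 255, 255], axis=-1)] = [0, 0, 0]

im.putpalette(palette.ravel().tolist())
im.save(filename)  # overwrite in place so GNU Parallel needs only the input name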
Keywords: Image processing, Python, Numpy, PIL, Pillow, palette, getpalette, putpalette, classes, classification, label, labels, labelled image.
If you're open to using NumPy, you can heavily speed up pixel manipulations:
from PIL import Image
import numpy as np
# Open PIL image
im = Image.open('path/to/your/image.png').convert('RGBA')
# Convert to NumPy array
pixels = np.array(im)
# Get logical indices of all white and purple pixels
idx_white = (pixels == (255, 255, 255, 255)).all(axis=2)
idx_purple = (pixels == (128, 64, 128, 255)).all(axis=2)
# Generate black image; set alpha channel to 255
out = np.zeros(pixels.shape, np.uint8)
out[:, :, 3] = 255
# Set white and purple pixels to white
out[idx_white | idx_purple] = (255, 255, 255, 255)
# Convert back to PIL image
im = Image.fromarray(out)
That code generates the desired output, and takes around 1 second on my machine, whereas your loop code needs 33 seconds.
Hope that helps!

Transparency not working consistently while saving GIF's in PIL

I am working on a script that writes over images and makes the background transparent. The output is supposed to be in GIF format.
The script works but for certain images the transparency is not working as expected.
Here is the script
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont

CANVAS_HEIGHT = 354
CANVAS_WIDTH = 344

def get_text_mask():
    font_style_path = 'Ultra-Regular.ttf'
    text_mask_base = Image.new('L', (CANVAS_WIDTH, CANVAS_HEIGHT), 255)
    text_mask = text_mask_base.copy()
    text_mask_draw = ImageDraw.Draw(text_mask)
    font = ImageFont.truetype(font_style_path, 94)
    text_mask_width, text_mask_height = text_mask_draw.multiline_textsize("1000\nUsers",
                                                                          font=font)
    text_mask_draw.multiline_text(((CANVAS_WIDTH - text_mask_width) / 2,
                                   (CANVAS_HEIGHT - text_mask_height) / 2),
                                  "1000\nUsers",
                                  font=font,
                                  fill=0,
                                  align='center')
    return text_mask

def run():
    images = ['image1.png', 'image2.png']
    for index, original_image in enumerate(images):
        image = Image.open(original_image)
        blank_canvas = Image.new('RGBA', (CANVAS_WIDTH, CANVAS_HEIGHT), (255, 255, 255, 0))
        text_mask = get_text_mask()
        final_canvas = blank_canvas.copy()
        for i in xrange(0, CANVAS_WIDTH, image.width):
            for j in xrange(0, CANVAS_HEIGHT, image.height):
                final_canvas.paste(image, (i, j))
        final_canvas.paste(text_mask, mask=text_mask)
        final_canvas.convert('P', palette=Image.ADAPTIVE)
        final_canvas.save("output-{}.gif".format(index), format="GIF", transparency=0)

run()
image1.png
image2.png
And the font is here
https://bipuljain.com/static/images/Ultra-Regular.ttf
And the output with issue.
And output working fine.
The problem is that your "original image" contains the same colour that the GIF uses to signify "this pixel is transparent".
GIFs are palette-based: one index into the palette is designated as "this is transparent" (see e.g. https://en.wikipedia.org/wiki/GIF).
So if you specify pure black or pure white as the transparent colour index, and your source image already contains pixels with that exact colour, those pixels become transparent as well.
To avoid this, you could sample your source background image and choose a "non-existent" colour as the transparency colour - one that never ends up in your resulting image.
You could also change your source images' pixel values: check all pixels and shift the "background" ones a tiny fraction off, so they do not become transparent.
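As a rough sketch of the first suggestion (the helper choose_unused_colour, the file names, and the extra 40px strip are illustrative assumptions, not the poster's script):

from PIL import Image

def choose_unused_colour(im):
    # Collect every RGB colour present and return one that does not occur
    used = set(im.convert('RGB').getdata())
    for r in range(256):
        for g in range(256):
            for b in range(256):
                if (r, g, b) not in used:
                    return (r, g, b)
    raise ValueError("no unused colour found")

image = Image.open('image1.png').convert('RGB')
sentinel = choose_unused_colour(image)

# Build a canvas with the sentinel colour as background, then paste as usual
canvas = Image.new('RGB', (image.width, image.height + 40), sentinel)
canvas.paste(image, (0, 0))

# Quantise and mark the sentinel's palette index as transparent
# (assumes the adaptive palette keeps the sentinel colour exactly,
# which it normally does for a large flat region)
paletted = canvas.convert('P', palette=Image.ADAPTIVE)
pal = paletted.getpalette()
triples = list(zip(pal[0::3], pal[1::3], pal[2::3]))
paletted.save('output.gif', transparency=triples.index(sentinel))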

Extending an image (PIL/Pillow)

I can use the following code to draw a black strip (or rectangle) atop a base image:
base_width, base_height = img.size
background = Image.new('RGBA', (base_width, base_height/3),(0,0,0,146))
offset = (0,base_height/2)
img.paste(background,offset,mask=background)
Result:
But how do I extend the height of the image such that the said black strip appears attached below the image's bottom border, outside the image itself?
If I move the offset in my code above, the black strip can't move beyond the borders of the base image, so that's not a viable solution.
Here's one way:
Create a new_img that is (base_width, base_height + background.height) in size
Paste the original img into new_img at (0, 0)
Paste the background into new_img at (0, base_height)
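A minimal sketch of those three steps, recreating img and background roughly as in the question (the file path is a placeholder):

from PIL import Image

img = Image.open('base.png').convert('RGBA')   # base image
base_width, base_height = img.size

# Semi-transparent black strip, as in the question
background = Image.new('RGBA', (base_width, base_height // 3), (0, 0, 0, 146))

# 1. New canvas tall enough for the image plus the strip
new_img = Image.new('RGBA', (base_width, base_height + background.height), (0, 0, 0, 0))

# 2. Original image at the top
new_img.paste(img, (0, 0))

# 3. Strip pasted below the original image's bottom border
new_img.paste(background, (0, base_height), mask=background)

new_img.save('extended.png')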

In Python, Python Image Library 1.1.6, how can I expand the canvas without resizing?

I am probably looking for the wrong thing in the handbook, but I am looking to take an image object and expand it without resizing (stretching/squishing) the original image.
Toy example: imagine a blue rectangle, 200 x 100, then I perform some operation and I have a new image object, 400 x 300, consisting of a white background upon which a 200 x 100 blue rectangle rests. Bonus if I can control in which direction this expands, or the new background color, etc.
Essentially, I have an image to which I will be adding iteratively, and I do not know what size it will be at the outset.
I suppose it would be possible for me to grab the original object, make a new, slightly larger object, paste the original on there, draw a little more, then repeat. It seems like it might be computationally expensive. However, I thought there would be a function for this, as I assume it is a common operation. Perhaps I assumed wrong.
The ImageOps.expand function will expand the image, but it adds the same amount of pixels in each direction.
The best way is simply to make a new image and paste:
newImage = Image.new(mode, (newWidth,newHeight))
newImage.paste(srcImage, (x1,y1,x1+oldWidth,y1+oldHeight))
If performance is an issue, make your original image bigger than needed and crop it after the drawing is done.
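A small sketch of that over-allocate-then-crop idea (the sizes here are placeholders):

from PIL import Image, ImageDraw

# Allocate generously up front so repeated additions don't need new canvases
work = Image.new('RGB', (4096, 4096), (255, 255, 255))
draw = ImageDraw.Draw(work)

# ... draw / paste onto `work` as the content grows, tracking the extent used ...
draw.rectangle((0, 0, 1300, 700), outline=(0, 0, 255))
used_width, used_height = 1300, 700

# Crop once at the end instead of re-allocating on every step
final = work.crop((0, 0, used_width, used_height))
final.save('result.png')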
Based on interjay's answer:
#!/usr/bin/env python
from PIL import Image
import math

def resize_canvas(old_image_path="314.jpg", new_image_path="save.jpg",
                  canvas_width=500, canvas_height=500):
    """
    Resize the canvas of old_image_path.

    Store the new image in new_image_path. Center the image on the new canvas.

    Parameters
    ----------
    old_image_path : str
    new_image_path : str
    canvas_width : int
    canvas_height : int
    """
    im = Image.open(old_image_path)
    old_width, old_height = im.size

    # Center the image
    x1 = int(math.floor((canvas_width - old_width) / 2))
    y1 = int(math.floor((canvas_height - old_height) / 2))

    mode = im.mode
    if len(mode) == 1:  # L, 1
        new_background = (255)
    if len(mode) == 3:  # RGB
        new_background = (255, 255, 255)
    if len(mode) == 4:  # RGBA, CMYK
        new_background = (255, 255, 255, 255)

    newImage = Image.new(mode, (canvas_width, canvas_height), new_background)
    newImage.paste(im, (x1, y1, x1 + old_width, y1 + old_height))
    newImage.save(new_image_path)

resize_canvas()
You might consider a rather different approach to your image... build it out of tiles of a fixed size. That way, as you need to expand, you just add new image tiles. When you have completed all of your computation, you can determine the final size of the image, create a blank image of that size, and paste the tiles into it. That should reduce the amount of copying you're looking at for completing the task.
(You'd likely want to encapsulate such a tiled image into an object that hid the tiling aspects from the other layers of code, of course.)
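A rough sketch of such a tiled wrapper (the class name, tile size, and compose step are illustrative assumptions):

from PIL import Image

class TiledCanvas:
    """Grows by whole tiles; composes a single image only when asked."""

    def __init__(self, tile_size=256, mode='RGB', background=(255, 255, 255)):
        self.tile_size = tile_size
        self.mode = mode
        self.background = background
        self.tiles = {}  # (col, row) -> Image tile

    def _tile(self, col, row):
        # Create tiles lazily as drawing reaches them
        if (col, row) not in self.tiles:
            self.tiles[(col, row)] = Image.new(self.mode, (self.tile_size,) * 2, self.background)
        return self.tiles[(col, row)]

    def paste(self, im, x, y):
        # Split the paste across every tile the image overlaps
        ts = self.tile_size
        for col in range(x // ts, (x + im.width - 1) // ts + 1):
            for row in range(y // ts, (y + im.height - 1) // ts + 1):
                self._tile(col, row).paste(im, (x - col * ts, y - row * ts))

    def compose(self):
        # The final size is only known now; paste all tiles into one image
        cols = max(c for c, _ in self.tiles) + 1
        rows = max(r for _, r in self.tiles) + 1
        ts = self.tile_size
        out = Image.new(self.mode, (cols * ts, rows * ts), self.background)
        for (col, row), tile in self.tiles.items():
            out.paste(tile, (col * ts, row * ts))
        return out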
This code will enlarge a smaller image, preserving aspect ratio, then center it on a standard sized canvas. Also preserves transparency, or defaults to gray background.
Tested with P mode PNG files.
final.show() and break are coded in for debugging; remove those lines and the hash on final.save(...) to loop over all images and save them.
Could parameterize canvas ratio and improve flexibility, but it served my purpose.
"""
Resize ... and reconfigures. images in a specified directory
Use case: Images of varying size, need to be enlarged to exaxtly 1200 x 1200
"""
import os
import glob
from PIL import Image
# Source directory plus Glob file reference (Windows)
source_path = os.path.join('C:', os.sep, 'path', 'to', 'source', '*.png')
# List of UNC Image File paths
images = glob.glob(source_path)
# Destination directory of modified image (Windows)
destination_path = os.path.join('C:', os.sep, 'path', 'to', 'destination')
for image in images:
original = Image.open(image)
# Retain original attributes (ancillary chunks)
info = original.info
# Retain original mode
mode = original.mode
# Retain original palette
if original.palette is not None:
palette = original.palette.getdata()[1]
else:
palette = False
# Match original aspect ratio
dimensions = original.getbbox()
# Identify destination image background color
if 'transparency' in info.keys():
background = original.info['transparency']
else:
# Image does not have transparency set
print(image)
background = (64)
# Get base filename and extension for destination
filename, extension = os.path.basename(image).split('.')
# Calculate matched aspect ratio
if dimensions[2] > dimensions[3]:
width = int(1200)
modifier = width / dimensions[2]
length = int(dimensions[3] * modifier)
elif dimensions[3] > dimensions[2]:
length = int(1200)
modifier = length / dimensions[3]
width = int(dimensions[2] * modifier)
else:
width, length = (1200, 1200)
size = (width, length)
# Set desired final image size
canvas = (1200, 1200)
# Calculate center position
position = (
int((1200 - width)/2),
int((1200 - length)/2),
int((1200 - width)/2) + width,
int((1200 - length)/2) + length
)
# Enlarge original image proportionally
resized = original.resize(size, Image.LANCZOS)
# Then create sized canvas
final = Image.new(mode, canvas, background)
# Replicate original properties
final.info = info
# Replicate original palatte
if palette:
final.putpalette(palette)
# Cemter paste resized image to final canvas
final.paste(resized, position)
# Save final image to destination directory
final.show()
#final.save("{}\\{}.{}".format(destination_path, filename, extension))
break
