I want to make a thumbnail and crop it to the size I need. It works fine, but if the thumbnail ends up smaller than the crop area, all the empty space is filled with black.
Code:
import os
from PIL import Image

def resize(file_path):
    file, ext = os.path.splitext(file_path)
    im = Image.open(file_path)
    size = (100, 'auto')
    new_path = file + "_.jpg"
    im.thumbnail(size, Image.ANTIALIAS)
    region = im.crop((0, 0, 100, 100))
    region.save(new_path, "JPEG")
Maybe there is some option like max_height for the crop method, or some other way to do this?
Thanks!
You will need to apply a simple algorithm there instead of a blind crop.
Take the largest square that fits inside the image, with its center aligned with the center of the image. The side of that square equals the smaller of the image's width and height.
After extracting the square, resample it down to your thumbnail dimensions (see the sketch below).
This should work fine for most images; however, if you are generating thumbnails of faces, it might not be a good method, and you might need some face-detection techniques for better output.
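A minimal sketch of that idea (the helper name square_thumbnail is mine, not from the answer; the 100x100 target matches the question):
from PIL import Image

def square_thumbnail(file_path, side=100):
    # hypothetical helper illustrating the centered-square crop described above
    im = Image.open(file_path)
    w, h = im.size
    edge = min(w, h)              # largest square that fits inside the image
    left = (w - edge) // 2
    top = (h - edge) // 2
    square = im.crop((left, top, left + edge, top + edge))
    # resample the square down to the thumbnail size
    return square.resize((side, side), Image.LANCZOS)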
Are you trying to crop the image only if it's LARGER than 100x100?
If so,
def resize(file_path):
    file, ext = os.path.splitext(file_path)
    im = Image.open(file_path)
    size = (100, 'auto')
    new_path = file + "_.jpg"
    im.thumbnail(size, Image.ANTIALIAS)
    if im.size[1] > 100:
        im = im.crop((0, 0, 100, 100))
    im.save(new_path, "JPEG")
I found a solution:
import os
from PIL import Image

def resize(file_path):
    file, ext = os.path.splitext(file_path)
    im = Image.open(file_path)
    size = (100, 'auto')
    new_path = file + "_.jpg"
    im.thumbnail(size)
    (width, height) = im.size
    if height >= width:
        box = (0, 0, 100, 100)
    else:
        box = (0, 0, 100, height)
    region = im.crop(box)
    region.save(new_path, "JPEG")
Thanks for your responses!
I would do it this way:
If the image is wide, then scale it to be 100px tall. If it's tall, scale it to be 100px wide.
Crop out the middle 100x100.
def resize(file_path):
    file, ext = os.path.splitext(file_path)
    im = Image.open(file_path)
    w, h = im.size
    size = (100, 'auto') if h > w else ('auto', 100)
    new_path = file + "_.jpg"
    im.thumbnail(size, Image.ANTIALIAS)
    w, h = im.size
    # integer division so the crop box has integer coordinates
    region = im.crop((w // 2 - 50, h // 2 - 50, w // 2 + 50, h // 2 + 50))
    region.save(new_path, "JPEG")
Related
How do I get ImageOps.fit(source28x32, (128, 128)) to fit without cropping off the top/bottom/sides? Do I really have to find the aspect ratio, resize accordingly so the enlarged version does not exceed 128x128, and then add border pixels (or center the image in a 128x128 canvas)? Mind you, the source can be of any ratio; the 28x32 is just an example.
source image (28x32)
fitted image (128x128)
This is my attempt so far, not particularly elegant
def fit(im):
    size = 128
    x, y = im.size
    ratio = float(x) / float(y)
    if x > y:
        x = size
        y = size * 1 / ratio
    else:
        y = size
        x = size * ratio
    x, y = int(x), int(y)
    im = im.resize((x, y))
    new_im = Image.new('L', (size, size), 0)
    # integer division so paste() gets an integer offset
    new_im.paste(im, ((size - x) // 2, (size - y) // 2))
    return new_im
New fitted image
Here is the function implemented in both PIL and cv2. The input can be of any size; the function finds the scale needed to fit the largest edge to the desired width, and then puts it onto a black square image of the desired width.
In PIL
from PIL import Image

def resize_PIL(im, output_edge):
    scale = output_edge / max(im.size)
    new = Image.new(im.mode, (output_edge, output_edge), (0, 0, 0))
    paste = im.resize((int(im.width * scale), int(im.height * scale)), resample=Image.NEAREST)
    new.paste(paste, (0, 0))
    return new
In cv2
import cv2
import numpy as np

def resize_cv2(im, output_edge):
    scale = output_edge / max(im.shape[:2])
    new = np.zeros((output_edge, output_edge, 3), np.uint8)
    paste = cv2.resize(im, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
    new[:paste.shape[0], :paste.shape[1], :] = paste
    return new
With a desired width of 128, each source image is scaled to fit and padded onto a 128x128 black square (the before/after example images are not reproduced here).
Not shown: these functions work on images larger than the desired size
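For reference, a hypothetical call pattern for both versions (the file names are placeholders, not from the answer):
from PIL import Image
import cv2

# placeholder file names for illustration only
pil_out = resize_PIL(Image.open("source.png"), 128)
pil_out.save("fitted_pil.png")

cv2_out = resize_cv2(cv2.imread("source.png"), 128)
cv2.imwrite("fitted_cv2.png", cv2_out)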
This works pretty well to fit the image to the size you want while filling the rest with black space:
from PIL import Image, ImageOps

def fit(im, width):
    border = int((max(im.width, im.height) - min(im.width, im.height)) / 2)
    im = ImageOps.expand(im, border)
    im = ImageOps.fit(im, (width, width))
    return im
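For example, assuming a local file (the paths are placeholders):
im = Image.open("photo.jpg")              # placeholder input path
fit(im, 128).save("photo_fitted.jpg")     # 128x128 result, padded with black where needed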
I have 100 images, 10 for each digit, and I am trying to convert them to MNIST-style images in Python, but I keep getting an error. The error is posted below.
from PIL import Image, ImageFilter
from os import listdir

def imageprepare(argv):
    """
    This function returns the pixel values.
    The input is a png file location.
    """
    imagesList = listdir(argv)
    for image in imagesList:
        im = Image.open(argv).convert('L')
        width = float(im.size[0])
        height = float(im.size[1])
        newImage = Image.new('L', (28, 28), (255))  # creates white canvas of 28x28 pixels

        if width > height:  # check which dimension is bigger
            # Width is bigger. Width becomes 20 pixels.
            nheight = int(round((20.0 / width * height), 0))  # resize height according to ratio width
            if (nheight == 0):  # rare case but minimum is 1 pixel
                nheight = 1
            # resize and sharpen
            img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wtop = int(round(((28 - nheight) / 2), 0))  # calculate vertical position
            newImage.paste(img, (4, wtop))  # paste resized image on white canvas
        else:
            # Height is bigger. Height becomes 20 pixels.
            nwidth = int(round((20.0 / height * width), 0))  # resize width according to ratio height
            if (nwidth == 0):  # rare case but minimum is 1 pixel
                nwidth = 1
            # resize and sharpen
            img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wleft = int(round(((28 - nwidth) / 2), 0))  # calculate horizontal position
            newImage.paste(img, (wleft, 4))  # paste resized image on white canvas

        # newImage.save("sample.png")

        tv = list(newImage.getdata())  # get pixel values
        # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.
        tva = [(255 - x) * 1.0 / 255.0 for x in tv]
        print(tva)
    return tva

argv = 'images/'
x = imageprepare(argv)  # file path here
print(len(x))  # MNIST images are 28x28 = 784 pixels
Error:
  File "C:/Users/lenovo/.spyder-py3/Project1/test12.py", line 47, in <module>
    x = imageprepare(argv)  # file path here
  File "C:/Users/lenovo/.spyder-py3/Project1/test12.py", line 14, in imageprepare
    im = Image.open(argv).convert('L')
  File "C:\Users\lenovo\Anaconda3\lib\site-packages\PIL\Image.py", line 2477, in open
    fp = builtins.open(filename, "rb")
PermissionError: [Errno 13] Permission denied: 'images/'
From the log above, Image.open is being called with 'images/', the folder itself, because argv is passed straight through inside the loop; opening a directory in binary mode is what raises Errno 13 here. Changing the access privileges of images/ or running the prompt as Administrator will not help with that. Instead, join the folder path with each file name before opening it (see the sketch below).
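A minimal sketch of that fix, assuming the rest of the per-image processing stays exactly as in the question (variable names follow the question's code):
from os import listdir
from os.path import join, isfile
from PIL import Image

def imageprepare(argv):
    for image in listdir(argv):
        path = join(argv, image)      # build the full file path, not the folder
        if not isfile(path):          # skip sub-folders and other non-files
            continue
        im = Image.open(path).convert('L')
        # ... resize, paste onto the 28x28 white canvas, and normalize as in the question ...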
What I want to do is crop out the white lines above a given Instagram screenshot. I tried to do that by finding the center of the image and going up, line by line, until I found the first line that is entirely white. Any idea why my code is not working?
from PIL import Image

image_file = "test.png"
im = Image.open(image_file)
width, height = im.size
centerLine = height // 2
entireWhiteLine = set()
entireWhiteLine.add(im.getpixel((0, 0)))
terminateUpperCrop = 1
while terminateUpperCrop != 2:
    for i in range(centerLine, 1, -1):
        entireLine = set()
        upperBorder = i - 1
        for j in range(0, width, 1):
            entireLine.add((im.getpixel((i, j))))
        if entireLine == im.getpixel((0, 0)):
            box = (0, upperBorder, width, height)
            crop = im.crop((box))
            crop.save('test2.png')
            terminateUpperCrop = 2
Your getpixel() call is actually searching with the coordinates the wrong way around, so in effect you were scanning for the left edge. You could use the following approach. This creates a row of data containing only white pixels. If the length of the row equals your width, then you know they are all white.
from PIL import Image

image_file = "test.png"
im = Image.open(image_file)
width, height = im.size
centerLine = height // 2
white = (255, 255, 255)

for y in range(centerLine, 0, -1):
    # the whole row is white when the count of white pixels equals the width
    if len([1 for x in range(width) if im.getpixel((x, y)) == white]) == width:
        box = (0, y, width, height)
        crop = im.crop((box))
        crop.save('test2.png')
        break
I would like to crop an image using PIL, although it could be some other module. I need the method to crop with a scale factor, i.e. 1.5, meaning the output would be zoomed in 1.5x. Additionally, I need to set the center it zooms towards. This means setting (x/2, y/2) as the center would zoom straight into the center, but other (x, y) values would zoom into those pixels.
If anyone knows how to do this I would really appreciate any help.
Right now I have some cropping working with ims = im.crop((int((x-x/i)/2), int((y-y/i)/2), int((x+(x/i))/2), int((y+(y/i))/2)))
but that only zooms into the center, and "i" doesn't give a nice scale factor.
Again, thank you for your help.
It is just a matter of getting the center and the sizes right.
Determine the center of the spot where you want to crop
Determine the new size using the scale factor
Determine the bounding box of the cropped image
The following script should do the trick.
import os.path
from PIL import Image

def get_img_dir():
    src_dir = os.path.dirname(__file__)
    img_dir = os.path.join(src_dir, '..', 'img')
    return img_dir

def open_img():
    img_dir = get_img_dir()
    img_name = 'amsterdam.jpg'
    full_img_path = os.path.join(img_dir, img_name)
    img = Image.open(full_img_path)
    return img

def crop_image(img, xy, scale_factor):
    '''Crop the image around the tuple xy

    Inputs:
    -------
    img: Image opened with PIL.Image
    xy: tuple with relative (x, y) position of the center of the cropped image
        x and y shall be between 0 and 1
    scale_factor: the ratio between the original image's size and the cropped image's size
    '''
    center = (img.size[0] * xy[0], img.size[1] * xy[1])
    new_size = (img.size[0] / scale_factor, img.size[1] / scale_factor)
    left = max(0, int(center[0] - new_size[0] / 2))
    right = min(img.size[0], int(center[0] + new_size[0] / 2))
    upper = max(0, int(center[1] - new_size[1] / 2))
    lower = min(img.size[1], int(center[1] + new_size[1] / 2))
    cropped_img = img.crop((left, upper, right, lower))
    return cropped_img

def save_img(img, img_name):
    img_dir = get_img_dir()
    full_img_path = os.path.join(img_dir, img_name)
    img.save(full_img_path)

if __name__ == '__main__':
    ams = open_img()

    crop_ams = crop_image(ams, (0.50, 0.50), 0.95)
    save_img(crop_ams, 'crop_amsterdam_01.jpg')

    crop_ams = crop_image(ams, (0.25, 0.25), 2.5)
    save_img(crop_ams, 'crop_amsterdam_02.jpg')

    crop_ams = crop_image(ams, (0.75, 0.45), 3.5)
    save_img(crop_ams, 'crop_amsterdam_03.jpg')
Original image:
crop_amsterdam_01.jpg:
crop_amsterdam_02.jpg:
crop_amsterdam_03.jpg:
How to cut off the blank border area of a PNG image and shrink it to its minimum size using Python?
NB: The border size is not a fixed value, but may vary per image.
PIL's getbbox is working for me
im.getbbox() => 4-tuple or None

Calculates the bounding box of the non-zero regions in the image. The bounding box is returned as a 4-tuple defining the left, upper, right, and lower pixel coordinate. If the image is completely empty, this method returns None.
A code sample that I tried; I tested it with a bmp, but it should work for png too.
from PIL import Image

im = Image.open("test.bmp")
im.size  # (364, 471)
im.getbbox()  # (64, 89, 278, 267)
im2 = im.crop(im.getbbox())
im2.size  # (214, 178)
im2.save("test2.bmp")
Here is a ready-to-use solution:
import numpy as np
from PIL import Image

def bbox(im):
    a = np.array(im)[:, :, :3]  # keep RGB only
    m = np.any(a != [255, 255, 255], axis=2)
    coords = np.argwhere(m)
    y0, x0, y1, x1 = *np.min(coords, axis=0), *np.max(coords, axis=0)
    return (x0, y0, x1 + 1, y1 + 1)

im = Image.open('test.png')
print(bbox(im))  # (33, 12, 223, 80)

im2 = im.crop(bbox(im))
im2.save('test_cropped.png')
Example input (download link if you want to try):
Output:
I had the same problem today. Here is my solution to crop the transparent borders. Just throw this script into the folder with your batch of .png files:
from PIL import Image
import numpy as np
from os import listdir

def crop(png_image_name):
    pil_image = Image.open(png_image_name)
    np_array = np.array(pil_image)
    blank_px = [255, 255, 255, 0]
    mask = np_array != blank_px
    coords = np.argwhere(mask)
    x0, y0, z0 = coords.min(axis=0)
    x1, y1, z1 = coords.max(axis=0) + 1
    cropped_box = np_array[x0:x1, y0:y1, z0:z1]
    pil_image = Image.fromarray(cropped_box, 'RGBA')
    print(pil_image.width, pil_image.height)
    pil_image.save(png_image_name)
    print(png_image_name)

for f in listdir('.'):
    if f.endswith('.png'):
        crop(f)
https://gist.github.com/3141140
from PIL import Image
import sys
import glob

# Trim all png images with alpha in a folder
# Usage: "python PNGAlphaTrim.py ../someFolder"

try:
    folderName = sys.argv[1]
except IndexError:
    print("Usage: python PNGAlphaTrim.py ../someFolder")
    sys.exit(1)

filePaths = glob.glob(folderName + "/*.png")  # search for all png images in the folder

for filePath in filePaths:
    image = Image.open(filePath)
    image.load()
    imageSize = image.size
    imageBox = image.getbbox()

    imageComponents = image.split()
    if len(imageComponents) < 4:
        continue  # don't process images without alpha

    rgbImage = Image.new("RGB", imageSize, (0, 0, 0))
    rgbImage.paste(image, mask=imageComponents[3])
    croppedBox = rgbImage.getbbox()

    if imageBox != croppedBox:
        cropped = image.crop(croppedBox)
        print(filePath, "Size:", imageSize, "New Size:", croppedBox)
        cropped.save(filePath)
You can use PIL to find rows and cols of your image that are made up purely of your border color.
Using this information, you can easily determine the extents of the inlaid image.
PIL again will then allow you to crop the image to remove the border.
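A minimal sketch of that row/column scan (the helper name crop_border and the assumption that the top-left pixel holds the border color are mine, not the answer's):
from PIL import Image

def crop_border(im):
    # assumption: the pixel at (0, 0) is the border color
    border = im.getpixel((0, 0))
    w, h = im.size
    px = im.load()
    # keep any row/column that contains at least one non-border pixel
    rows = [y for y in range(h) if any(px[x, y] != border for x in range(w))]
    cols = [x for x in range(w) if any(px[x, y] != border for y in range(h))]
    if not rows or not cols:  # the image is entirely border-colored
        return im
    return im.crop((cols[0], rows[0], cols[-1] + 1, rows[-1] + 1))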
I think it's necessary to supplement Frank Krueger's answer. He makes a good point, but it doesn't include how to properly crop extra border color out of an image. I found that approach elsewhere; specifically, I found this useful:
from PIL import Image, ImageChops

def trim(im):
    bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)

im = Image.open("bord3.jpg")
im = trim(im)
im.show()
The other answers did not work for me while writing a Blender script (cannot use PIL), so maybe someone else will find this useful.
import bpy
import numpy as np

def crop(crop_file):
    """Crop the image, removing invisible borders."""
    image = bpy.data.images.load(crop_file, check_existing=False)
    w, h = image.size
    print("Original size: " + str(w) + " x " + str(h))

    linear_pixels = image.pixels[:]
    pixels4d = np.reshape(linear_pixels, (h, w, 4))

    # rows/columns where the alpha channel is non-zero
    mask = pixels4d[:, :, 3] != 0.
    coords = np.argwhere(mask)
    y0, x0 = coords.min(axis=0)
    y1, x1 = coords.max(axis=0) + 1
    cropped_box = pixels4d[y0:y1, x0:x1, :]

    w1, h1 = x1 - x0, y1 - y0
    print("Crop size: " + str(w1) + " x " + str(h1))

    temp_image = bpy.data.images.new(crop_file, alpha=True, width=w1, height=h1)
    temp_image.pixels[:] = cropped_box.ravel()
    temp_image.filepath_raw = crop_file
    temp_image.file_format = 'PNG'
    temp_image.alpha_mode = 'STRAIGHT'
    temp_image.save()