padding image array with gray background - python

I am comparing thumbnail images by showing them side by side with Image.fromarray(np.hstack(<list of image arrays>)).show(). The problem is that the image arrays have different sizes. My solution is to pad each array with a gray background color (200, 200, 200) so that all arrays end up an equal size of 200x200.
My question: does numpy have a more direct way of doing this?
My solution:
def pad_with_gray_background(_array, size):
    # start from an all-gray canvas, then copy the source pixel by pixel
    array_padded = np.ones((size, size, 3), dtype=np.uint8) * 200
    for i in range(array_padded.shape[0]):
        for j in range(array_padded.shape[1]):
            try:
                array_padded[i, j] = _array[i, j]
            except IndexError:
                pass
    return array_padded
and to call this function:
import io

import numpy as np
from PIL import Image

image_arrays = []
for pic in pic_selection:
    pic_thumbnail = io.BytesIO(pic.thumbnail.encode('ISO-8859-1'))
    padded_image_array = pad_with_gray_background(
        np.array(Image.open(pic_thumbnail)), 200)
    image_arrays.append(padded_image_array)
Image.fromarray(np.hstack(image_arrays)).show()
Note: pic.thumbnail is the thumbnail data taken from the EXIF (an ISO-8859-1 string, hence the encode call).

Answer by Mark Setchell is to use slicing:
array_padded[0:height, 0:width, :] = image_array[:]
Just make sure that the shape of image_array is not bigger than that of array_padded.
import io

import numpy as np
from PIL import Image

image_arrays = []
for pic in pic_selection:
    pic_thumbnail = io.BytesIO(pic.thumbnail.encode('ISO-8859-1'))
    image_array = np.array(Image.open(pic_thumbnail))
    height, width = (200, 200)
    array_padded = np.ones((height, width, 3), dtype=np.uint8) * 200
    # clamp so the source never overruns the 200x200 canvas
    height = min(image_array.shape[0], height)
    width = min(image_array.shape[1], width)
    array_padded[0:height, 0:width, :] = image_array[0:height, 0:width, :]
    image_arrays.append(array_padded)
Image.fromarray(np.hstack(image_arrays)).show()
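To answer the original question more directly: numpy's np.pad can express the same gray fill in one call. A minimal sketch, assuming the source array is RGB and never larger than the target size:
import numpy as np

def pad_with_gray(image_array, size=200, gray=200):
    # pad on the bottom and right with a constant gray value;
    # assumes image_array has shape (h, w, 3) with h, w <= size
    h, w = image_array.shape[:2]
    return np.pad(image_array,
                  ((0, size - h), (0, size - w), (0, 0)),
                  mode='constant', constant_values=gray)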

Related

How to iterate over multiple images of different dimensions and stack them into a single picture horizontally?

I have multiple pictures with different dimensions. I have been trying to concatenate them horizontally using OpenCV.
The process is roughly the following:
Iterate over all the images to find the max width and total height.
Create a black mask with the max width and total height computed from all the images.
Stack all the images horizontally on that black mask.
I am not sure how to do this thing. Kindly help me!
Images are just 3D matrices, so you can do this very easily by creating a matrix of zeros (= black) of the desired size, then filling in your images.
I've created fake images here but you can use cv2 to read in your real images.
import numpy as np
import matplotlib.pyplot as plt

# create three images of different shapes and different shades of grey
img1 = np.ones((100, 200, 3), dtype=int) * 50
img2 = np.ones((200, 400, 3), dtype=int) * 100
img3 = np.ones((100, 300, 3), dtype=int) * 150
imgs = [img1, img2, img3]

# get max width and total height
max_width = 0
total_height = 0
for img in imgs:
    total_height += img.shape[0]
    max_width = max(max_width, img.shape[1])

# make black canvas of appropriate shape
canvas = np.zeros((total_height, max_width, 3), dtype=int)

# stack images on canvas, one below the other
start_height = 0
for img in imgs:
    print(img.shape)
    canvas[start_height:start_height + img.shape[0], 0:img.shape[1], :] = img
    start_height += img.shape[0]

# show results
plt.imshow(canvas)
This produces the following result:
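A shorter variant of the same idea (a sketch, reusing the imgs list and max_width from above): np.pad fills with zeros (black) by default, so each image can be padded on the right to max_width and the results stacked with np.vstack.
import numpy as np

# pad each image on the right up to max_width, then stack vertically
padded = [np.pad(img, ((0, 0), (0, max_width - img.shape[1]), (0, 0)))
          for img in imgs]
canvas = np.vstack(padded)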

How to slice and compile an image into a window effect using Python

I would like to slice up an image in Python and paste it back together again as a window.
The tiles measure 8 pixels by 9 pixels, and each row needs to skip 1 pixel.
I would then need to merge the tiles back together again with a 1-pixel padding around each tile to give a windowed effect.
The image is black and white, but for the example I have used color to show that the windowed effect would need to have a white background.
Input example
Desired output
Update: I changed the tile dimensions to larger values for illustration; you can adjust them to your needs.
Use this:
import cv2
import numpy as np

image = cv2.imread('test.jpg')
# tiles config
tiles_height = 50
tiles_width = 30
# white padding
padding_x = 10
padding_y = 20

num_y = int(image.shape[0] / tiles_height)
num_x = int(image.shape[1] / tiles_width)
# white canvas, enlarged to leave room for the padding between tiles
# (uint8 so cv2.imwrite accepts it)
new_img = np.full((image.shape[0] + num_y * padding_y,
                   image.shape[1] + num_x * padding_x, 3), 255, dtype=np.uint8)
for incre_i, i in enumerate(range(0, image.shape[0], tiles_height)):
    for incre_j, j in enumerate(range(0, image.shape[1], tiles_width)):
        new_img[i + incre_i * padding_y:i + tiles_height + incre_i * padding_y,
                j + incre_j * padding_x:j + tiles_width + incre_j * padding_x, :] = \
            image[i:i + tiles_height, j:j + tiles_width, :]
cv2.imwrite('res.jpg', new_img)
print(image.shape, new_img.shape)
Update 1:
Because you want to remove tiles later, I added code that can help you with that. Now all you have to do is change the variables in the tiles config, the white padding, and the tile indices to be removed:
import cv2
import numpy as np

image = cv2.imread('test.jpg')
# tiles config
tiles_height = 50
tiles_width = 30
# white padding
padding_x = 10
padding_y = 20
# tile indices to be removed
remove_indices = [(0, 0), (3, 6)]

num_y = int(image.shape[0] / tiles_height)
num_x = int(image.shape[1] / tiles_width)
new_img = np.full((image.shape[0] + num_y * padding_y,
                   image.shape[1] + num_x * padding_x, 3), 255, dtype=np.uint8)
for incre_i, i in enumerate(range(0, image.shape[0], tiles_height)):
    for incre_j, j in enumerate(range(0, image.shape[1], tiles_width)):
        if (incre_i, incre_j) in remove_indices:
            # blank out removed tiles with white
            new_img[i + incre_i * padding_y:i + tiles_height + incre_i * padding_y,
                    j + incre_j * padding_x:j + tiles_width + incre_j * padding_x, :] = 255
        else:
            new_img[i + incre_i * padding_y:i + tiles_height + incre_i * padding_y,
                    j + incre_j * padding_x:j + tiles_width + incre_j * padding_x, :] = \
                image[i:i + tiles_height, j:j + tiles_width, :]
cv2.imwrite('remove_tiles.jpg', new_img)
print(image.shape, new_img.shape)
test.jpg
res.jpg
remove_tiles.jpg
print(image.shape, new_img.shape) gives (952, 1429, 3) (1332, 1899, 3)
You can try skimage.util.view_as_windows from the scikit-image package:
from skimage.util import view_as_windows
import matplotlib.pyplot as plt
import numpy as np

img = np.random.rand(90, 90, 1)  # grey-scale image; change the channels accordingly
img[8::9, ] = 0
tiles = view_as_windows(img, (9, 9, 1), (9, 9, 1)).squeeze(2)  # squeeze out unneeded dim
tiles = tiles[:, :, :-1, :, :]  # remove the last row of each tile

# plot the original image
plt.axis("off")
plt.imshow(img.squeeze(2))
plt.show()

# plot the tiles
fig, axes = plt.subplots(10, 10)
for i in range(10):
    for j in range(10):
        axes[i, j].axis("off")
        axes[i, j].imshow(tiles[i, j, ...].squeeze(-1))
plt.show()
Here is the result:
Original
Sliced
The torch.Tensor.unfold operator from PyTorch could be an option too.
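For reference, a minimal unfold sketch (assuming a 90x90 single-channel image like the one above):
import torch

img = torch.rand(90, 90)  # hypothetical grey-scale image
# unfold rows, then columns, into non-overlapping 9x9 tiles (stride 9)
tiles = img.unfold(0, 9, 9).unfold(1, 9, 9)
print(tiles.shape)  # torch.Size([10, 10, 9, 9])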

Scaling an image makes no discernable change

I am using two different ways to resize an image, but all three displayed windows look exactly the same...
What am I doing wrong that no scaling occurs?
import cv2 as cv
import numpy as np
path = "resources/Shapes.png"
img = cv.imread(path)
cv.imshow("img", img)
res1 = cv.resize(img, None, fx = 2, fy = 2, interpolation = cv.INTER_CUBIC)
cv.imshow("res1", res1)
height, width = img.shape[:2]
res2 = cv.resize(img, (2 * width, 2 * height), interpolation = cv.INTER_CUBIC)
cv.imshow("res2", res2)
k = cv.waitKey(0)
Just putting this here for future reference:
The code above works; the issue was that imshow does not always show the true size of the image. By saving the different images, or simply examining them with res1.shape vs img.shape, you can see the true size of the image.
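For instance, a quick check along those lines (a sketch appended to the code above):
# compare array shapes instead of trusting the on-screen windows
print(img.shape, res1.shape, res2.shape)
# e.g. (300, 400, 3) (600, 800, 3) (600, 800, 3): both resizes did happen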

Using OpenCV, resize multiple same-size images at once in Python

A single image can be resized using OpenCV in Python 3:
import cv2
res_image = cv2.resize(image, dsize=(50, 100))
Multiple images can also be resized with a for loop:
for image in images:
    res_image = cv2.resize(image, dsize=(50, 100))
But I want to resize a whole array of same-size images at once instead of using a for loop.
>>> images.shape
(32, 64, 64, 3)
Please let me know if you have any solution to this problem (even if it doesn't use OpenCV).
Thank you.
Not possible. OpenCV only resizes one image at a time. Stick with the loop; it costs no meaningful time.
You can simplify the task:
assert original_tensor.shape == (32, 64, 64, 3)
resized_tensor = np.empty((32, 100, 50, 3), dtype=original_tensor.dtype)
for k, image in enumerate(original_tensor):
    resized_tensor[k] = cv2.resize(image, dsize=(50, 100))
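Equivalently, the loop can be folded into a comprehension (a sketch under the same shape assumptions):
resized_tensor = np.stack([cv2.resize(image, dsize=(50, 100))
                           for image in original_tensor])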
Maybe you want to resize your images once, outside of the program? I would recommend ImageMagick for that.
The following code could do the trick.
def resize_batch(img_batch, resize_width, resize_height):
    """
    Nearest-neighbour resize of a whole batch at once.

    :params
        img_batch: np.array of shape (batch, width, height, channels)
        resize_width: the target width.
        resize_height: the target height.
    :returns
        array of shape (batch, resize_width, resize_height, channels)
    """
    batch, original_width, original_height, channel = img_batch.shape
    # split the colour channels
    rd_ch = img_batch[:, :, :, 0]
    gr_ch = img_batch[:, :, :, 1]
    bl_ch = img_batch[:, :, :, 2]
    resized_images = np.zeros((batch, resize_width, resize_height, channel), dtype=np.uint8)
    x_scale = original_width / resize_width
    y_scale = original_height / resize_height
    # nearest source index for every target pixel, clamped to the last valid index
    resize_index_x = np.ceil(np.arange(0, original_width, x_scale)).astype(int)
    resize_index_y = np.ceil(np.arange(0, original_height, y_scale)).astype(int)
    resize_index_x[resize_index_x == original_width] -= 1
    resize_index_y[resize_index_y == original_height] -= 1
    # fancy-index each channel along both spatial axes
    resized_images[:, :, :, 0] = rd_ch[:, resize_index_x, :][:, :, resize_index_y]
    resized_images[:, :, :, 1] = gr_ch[:, resize_index_x, :][:, :, resize_index_y]
    resized_images[:, :, :, 2] = bl_ch[:, resize_index_x, :][:, :, resize_index_y]
    return resized_images
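For example, with the (32, 64, 64, 3) batch from the question (a sketch with random uint8 data):
images = np.random.randint(0, 256, (32, 64, 64, 3), dtype=np.uint8)
resized = resize_batch(images, resize_width=100, resize_height=50)
print(resized.shape)  # (32, 100, 50, 3)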

get ValueError: bad transparency mask when trying to paste, image and mask are the same

import numpy as np
from PIL import Image
img_orig = Image.open("me.jpg")
# convert pic to 3-D array
array_orig = np.array(img_orig)
# create R image
array_r = np.copy(array_orig)
array_r[:, :, 1:3] = 0
img_r = Image.fromarray(array_r)
# create GB image
array_gb = np.copy(array_orig)
array_gb[:, :, 0] = 0
img_gb = Image.fromarray(array_gb)
canvas_r = Image.new("RGB", img_orig.size, color=(0,0,0))
canvas_r.paste(img_r, (5, 5), img_r)  # this line raises the error
canvas_gb = Image.new("RGB", img_orig.size, color=(0,0,0))
canvas_gb.paste(img_gb, (0, 0), img_gb)
result_array = np.array(canvas_r) + np.array(canvas_gb)
result = Image.fromarray(result_array)
result.show()
The line that raises the error is marked in the code. I don't quite understand why PIL gives a transparency error for the same image. I believe there is no need to specify a mask mode because the mask and the image are the same.
I solved a similar problem with:
Replace img_orig = Image.open("me.jpg")
with img_orig = Image.open("me.jpg").convert('RGBA')
And replace canvas_gb = Image.new("RGB", img_orig.size, color=(0,0,0))
with canvas_gb = Image.new("RGBA", img_orig.size, color=(0,0,0))
The image lacks an alpha channel (paste accepts masks only in mode 1, L, LA, or RGBA, and a JPEG opens as plain RGB);
img_orig = Image.open("me.jpg")
img_orig.putalpha(255)
solved the issue.
