Resize image in python without using resize() - nearest neighbor - python

For an assignment I want to resize a .jpg image in Python, but without using the pil.image.resize() function or any similar one. I want to write the code myself but I can't figure out how. The image is RGB. I have found this can be solved by nearest-neighbour interpolation (other methods exist as well, but that one is fine for my specific assignment). Both the height and the width should be able to be made bigger or smaller. So far I only have this:
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
import math
import PIL
from PIL import Image
img = np.array(Image.open("foto1.jpg"))
height = img.shape[0]
width = img.shape[1]
dim = img.shape[2]
new_h = int(input("New height: "))
new_w = int(input("New width: "))
imgR = img[:,:,0] #red pixels
imgG = img[:,:,1] #green pixels
imgB = img[:,:,2] #blue pixels
newR = np.empty([new_h, new_w])
newG = np.empty([new_h, new_w])
newB = np.empty([new_h, new_w])
So now all three colours have a new array of the right dimensions. Unfortunately, on the web I can only find people who use resize() functions... Does anyone know how?
Thanks in advance!

The key to doing any image transformation like resizing is to have a mapping from output coordinates to input coordinates. Then you can simply iterate over the entire output and grab a pixel from the input. Nearest neighbor makes this particularly easy, because there's never a need to interpolate a pixel that doesn't lie exactly on integer coordinates - you simply round the coordinates to the nearest integer.
for new_y in range(new_h):
    # Map the output row back to an input row: scale by (old extent)/(new extent).
    # max(..., 1) guards the degenerate 1-pixel case against division by zero.
    old_y = int(round(new_y * (height - 1) / max(new_h - 1, 1)))
    if old_y < 0: old_y = 0
    if old_y >= height: old_y = height - 1
    for new_x in range(new_w):
        old_x = int(round(new_x * (width - 1) / max(new_w - 1, 1)))
        if old_x < 0: old_x = 0
        if old_x >= width: old_x = width - 1
        newR[new_y, new_x] = imgR[old_y, old_x]
        newG[new_y, new_x] = imgG[old_y, old_x]
        newB[new_y, new_x] = imgB[old_y, old_x]
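If you want to avoid the Python-level double loop, the same output-to-input mapping can be vectorized with NumPy fancy indexing. This is just a sketch under the question's setup (img, height, width, new_h, new_w as defined above), not part of the original answer:

ys = np.round(np.arange(new_h) * (height - 1) / max(new_h - 1, 1)).astype(int)
xs = np.round(np.arange(new_w) * (width - 1) / max(new_w - 1, 1)).astype(int)
new_img = img[ys[:, None], xs[None, :]]  # gathers all 3 channels at once, shape (new_h, new_w, 3)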

The following code could do the trick.
def resize_img(image, resize_width, resize_height):
    """
    :params
        image: array of shape (rows, cols, channels); here the first axis is
               called "width" and the second "height" for consistency below.
        resize_width: the target size for the first axis.
        resize_height: the target size for the second axis.
    :returns
        array of shape (resize_width, resize_height, channels)
    """
    original_width, original_height, channel = image.shape
    red_channel = image[:, :, 0]
    green_channel = image[:, :, 1]
    blue_channel = image[:, :, 2]
    resized_image = np.zeros((resize_width, resize_height, channel), dtype=np.uint8)
    x_scale = original_width / resize_width
    y_scale = original_height / resize_height
    # Nearest-neighbour source index for every output row/column; the slice
    # guards against a float-rounding extra element from np.arange.
    resize_index_x = np.ceil(np.arange(0, original_width, x_scale))[:resize_width].astype(int)
    resize_index_y = np.ceil(np.arange(0, original_height, y_scale))[:resize_height].astype(int)
    # Clamp indices that rounded up past the last valid row/column
    resize_index_x[resize_index_x == original_width] -= 1
    resize_index_y[resize_index_y == original_height] -= 1
    resized_image[:, :, 0] = red_channel[resize_index_x, :][:, resize_index_y]
    resized_image[:, :, 1] = green_channel[resize_index_x, :][:, resize_index_y]
    resized_image[:, :, 2] = blue_channel[resize_index_x, :][:, resize_index_y]
    return resized_image
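A minimal usage sketch (the file name and target sizes are placeholders, and note that this function's first dimension is the array's first axis, i.e. rows):

img = np.array(Image.open("foto1.jpg"))
resized = resize_img(img, 300, 200)  # -> shape (300, 200, 3)
Image.fromarray(resized).save("foto1_small.jpg")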

Related

Getting an incorrect picture output when running my self-made rotation algorithm

In order to better understand how image manipulation works, I've decided to create my own image rotation algorithm rather than using cv2.rotate(). However, I'm encountering weird cropping and pixel misplacement.
I think it may have something to do with my padding, but there may be other errors.
import cv2
import math
import numpy as np

# Load & Show original image
img = cv2.imread('Lena.png', 0)
cv2.imshow('Original', img)

# Variable declarations
h = img.shape[0]  # Also known as rows
w = img.shape[1]  # Also known as columns
cX = h / 2  # Image Center X
cY = w / 2  # Image Center Y
theta = math.radians(100)  # Change to adjust rotation angle
imgArray = np.array(img)
imgArray = np.pad(imgArray, pad_width=((100, 100), (100, 100)), mode='constant', constant_values=0)
# Add padding in an attempt to prevent image cropping

# Loop pixel by pixel in image
for x in range(h + 1):
    for y in range(w + 1):
        try:
            TX = int((x - cX) * math.cos(theta) + (y - cY) * math.sin(theta) + cX)  # Rotation formula
            TY = int(-(x - cX) * math.sin(theta) + (y - cY) * math.cos(theta) + cY)  # Rotation formula
            imgArray[x, y] = img[TX, TY]
        except IndexError as error:
            print(error)

cv2.imshow('Rotated', imgArray)
cv2.waitKey(0)
Edit:
I think the misplaced image position may have something to do with lack of proper origin point, however I cannot seem to find a functioning solution to that problem.
Though I didn't dive into the math side of the domain, based on the given information I think the matrix rotation formula should work like this:
UPDATE:
As I promised, I dived a bit into the domain and got to the solution you can see below. The main trick is that I swapped the source and destination indices in the loop as well, so the rounding never causes a problem:
import cv2
import math
import numpy as np

# Load & Show original image
img = cv2.imread('/home/george/Downloads/lena.png', 0)
cv2.imshow('Original', img)

# Variable declarations
h = img.shape[0]  # Also known as rows
w = img.shape[1]  # Also known as columns
p = 120
h += 2 * p
w += 2 * p
cX = h / 2  # Image Center X
cY = w / 2  # Image Center Y
theta = math.radians(45)  # Change to adjust rotation angle
imgArray = np.zeros_like(img)

# Add padding in an attempt to prevent image cropping
imgArray = np.pad(imgArray, pad_width=p, mode='constant', constant_values=0)
img = np.pad(img, pad_width=p, mode='constant', constant_values=0)

# Loop pixel by pixel over the *destination* image
for TX in range(h):
    for TY in range(w):
        try:
            x = int(+(TX - cX) * math.cos(theta) + (TY - cY) * math.sin(theta) + cX)  # Rotation formula
            y = int(-(TX - cX) * math.sin(theta) + (TY - cY) * math.cos(theta) + cY)  # Rotation formula
            imgArray[TX, TY] = img[x, y]
        except IndexError:
            pass

cv2.imshow('Rotated', imgArray)
cv2.waitKey(0)
exit()
Note: See usr2564301 comment too, if you want to dive deeper in the domain.

Implementing my own algorithm to scale and rotate images in python

I am trying to implement an algorithm in python to scale images by a factor or rotate them by a given angle (or both at the same time). I am using opencv to handle the images and I know opencv has these functions built in, however I want to do this myself to better understand image transformations. I believe I calculate the rotation matrix correctly. However, when I try to implement the affine transformation, it does not come out correctly.
import numpy as np
import cv2
import math as m
import sys

img = cv2.imread(sys.argv[1])
angle = sys.argv[2]

# Get rotation matrix
def getRMat(center, angle, scale):
    cx, cy = center
    a = scale * m.cos(angle * np.pi / 180)
    b = scale * m.sin(angle * np.pi / 180)
    u = (1 - a) * cx - b * cy
    v = b * cx + (1 - a) * cy
    return np.array([[a, b, u], [-b, a, v]])

# Determine shape and center of img
h, w = img.shape[:2]
cx, cy = (w / 2, h / 2)

# Calculate rotation matrix, then grab sine and cosine of the matrix
mat = getRMat((cx, cy), -int(angle), 1)
print(mat)
cos = np.abs(mat[0, 0])
sin = np.abs(mat[0, 1])

# Calculate new height and width to account for rotation
newWidth = int((h * sin) + (w * cos))
newHeight = int((h * cos) + (w * sin))
mat[0, 2] += (newWidth / 2) - cx
mat[1, 2] += (newHeight / 2) - cy

# This is how the image SHOULD look
dst = cv2.warpAffine(img, mat, (newWidth, newHeight))
cv2.imshow('dst', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Apply transform
# Attempt at my own warp affine function... still buggy though
def warpAff(image, matrix, size):
    width, height = size
    dst = np.zeros((width, height, 3), dtype=np.uint8)
    oldh, oldw = image.shape[:2]
    # Loop through old img and transform its coords
    for x in range(oldh):
        for y in range(oldw):
            # Transform the coordinates
            u = int(x * matrix[0, 0] + y * matrix[0, 1] + matrix[0, 2])
            v = int(x * matrix[1, 0] + y * matrix[1, 1] + matrix[1, 2])
            if (u >= 0 and u < height) and (v >= 0 and v < width):
                dst[u, v] = image[x, y]
    return dst

dst = warpAff(img, mat, (newWidth, newHeight))
cv2.imshow('dst', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
Image I am using for testing
You're applying the rotation backward.
This means that for an angle of 20, instead of rotating 20 degrees clockwise, you rotate 20 degrees counterclockwise. That on its own would be easy to fix—just negate the angle.
But it also means that, for each destination pixel, if no source pixel exactly rotates to it, you end up with an all-black pixel. You could solve that by using any interpolation algorithm, but it's making things more complicated.
If we instead just reverse the process, and instead of calculating the destination (u, v) for each (x, y), we calculate the source (x, y) for every destination (u, v), that solves both problems:
def warpAff(image, matrix, width, height):
    dst = np.zeros((width, height, 3), dtype=np.uint8)
    oldh, oldw = image.shape[:2]
    # Loop over the destination, not the source, to ensure that you cover
    # every destination pixel exactly 1 time, rather than 0-4 times.
    for u in range(width):
        for v in range(height):
            x = u * matrix[0, 0] + v * matrix[0, 1] + matrix[0, 2]
            y = u * matrix[1, 0] + v * matrix[1, 1] + matrix[1, 2]
            intx, inty = int(x), int(y)
            # We could interpolate here by using something like this linear
            # interpolation matrix, but let's keep it simple and not do that.
            # fracx, fracy = x % 1, y % 1
            # interp = np.array([[fracx * fracy, (1 - fracx) * fracy],
            #                    [fracx * (1 - fracy), (1 - fracx) * (1 - fracy)]])
            if 0 < x < oldw and 0 < y < oldh:
                dst[u, v] = image[intx, inty]
    return dst
Now the only remaining problem is that you didn't apply the shift backward, so we end up shifting the image in the wrong direction when we turn everything else around. That's trivial to fix:
mat[0,2] += cx - (newWidth / 2)
mat[1,2] += cy - (newHeight / 2)
You do have one more problem: your code (and this updated code) only works for square images. You're getting height and width backward multiple times, and they almost all cancel out, but apparently one of them doesn't. In general, you're treating your arrays as (width, height) rather than (height, width), but you end up comparing to (original version) or looping over (new version) (height, width). So, if height and width are different, you end up trying to write past the end of the array.
Trying to find all of these and fix them is probably as much work as just starting over and doing it consistently everywhere from the start:
mat = getRMat(cx, cy, int(angle), 1)
cos = np.abs(mat[0, 0])
sin = np.abs(mat[0, 1])
newWidth = int((h * sin) + (w * cos))
newHeight = int((h * cos) + (w * sin))
mat[0, 2] += cx - (newWidth / 2)
mat[1, 2] += cy - (newHeight / 2)

def warpAff2(image, matrix, width, height):
    dst = np.zeros((height, width, 3), dtype=np.uint8)
    oldh, oldw = image.shape[:2]
    for u in range(width):
        for v in range(height):
            x = u * matrix[0, 0] + v * matrix[0, 1] + matrix[0, 2]
            y = u * matrix[1, 0] + v * matrix[1, 1] + matrix[1, 2]
            intx, inty = int(x), int(y)
            if 0 < intx < oldw and 0 < inty < oldh:
                pix = image[inty, intx]
                dst[v, u] = pix
    return dst

dst = warpAff2(img, mat, newWidth, newHeight)
It's worth noting that there are much simpler (and more efficient) ways to implement this. If you build a 3x3 square matrix, you can vectorize the multiplication. Also, you can create the matrix more simply by just multiplying a shift matrix @ a rotation matrix @ an unshift matrix instead of manually fixing things up after the fact. But hopefully this version, since it's as close as possible to your original, should be easiest to understand.
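For illustration, here is a minimal sketch of that composition with 3x3 homogeneous matrices (the function name is mine, not from the answer; @ is matrix multiplication):

import numpy as np

def make_rot_mat(cx, cy, degrees, scale=1.0):
    # Compose: shift center to origin -> rotate (and scale) -> shift back.
    a = np.radians(degrees)
    unshift = np.array([[1, 0, -cx], [0, 1, -cy], [0, 0, 1.0]])
    rot = np.array([[scale * np.cos(a), scale * np.sin(a), 0],
                    [-scale * np.sin(a), scale * np.cos(a), 0],
                    [0, 0, 1.0]])
    shift = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1.0]])
    return shift @ rot @ unshift

The top two rows of the result match what getRMat builds by hand; points transform as M @ [x, y, 1], so a whole coordinate grid can be transformed in a single matrix product.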

Scipy rotate and zoom an image without changing its dimensions

For my neural network I want to augment my training data by adding small random rotations and zooms to my images. The issue I am having is that scipy is changing the size of my images when it applies the rotations and zooms. I just need to clip the edges if part of the image goes out of bounds. All of my images must be the same size.
def loadImageData(img, distort=False):
    c, fn = img
    img = scipy.ndimage.imread(fn, True)
    if distort:
        img = scipy.ndimage.zoom(img, 1 + 0.05 * rnd(), mode='constant')
        img = scipy.ndimage.rotate(img, 10 * rnd(), mode='constant')
        print(img.shape)
    img = img - np.min(img)
    img = img / np.max(img)
    img = np.reshape(img, (1, *img.shape))
    y = np.zeros(ncats)
    y[c] = 1
    return (img, y)
scipy.ndimage.rotate accepts a reshape= parameter:
reshape : bool, optional
If reshape is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
So to "clip" the edges you can simply call scipy.ndimage.rotate(img, ..., reshape=False).
from scipy.ndimage import rotate
from scipy.misc import face
from matplotlib import pyplot as plt
img = face()
rot = rotate(img, 30, reshape=False)
fig, ax = plt.subplots(1, 2)
ax[0].imshow(img)
ax[1].imshow(rot)
Things are more complicated for scipy.ndimage.zoom.
A naive method would be to zoom the entire input array, then use slice indexing and/or zero-padding to make the output the same size as your input. However, in cases where you're increasing the size of the image it's wasteful to interpolate pixels that are only going to get clipped off at the edges anyway.
Instead you could index only the part of the input that will fall within the bounds of the output array before you apply zoom:
import numpy as np
from scipy.ndimage import zoom

def clipped_zoom(img, zoom_factor, **kwargs):
    h, w = img.shape[:2]
    # For multichannel images we don't want to apply the zoom factor to the RGB
    # dimension, so instead we create a tuple of zoom factors, one per array
    # dimension, with 1's for any trailing dimensions after the width and height.
    zoom_tuple = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)
    # Zooming out
    if zoom_factor < 1:
        # Bounding box of the zoomed-out image within the output array
        zh = int(np.round(h * zoom_factor))
        zw = int(np.round(w * zoom_factor))
        top = (h - zh) // 2
        left = (w - zw) // 2
        # Zero-padding
        out = np.zeros_like(img)
        out[top:top+zh, left:left+zw] = zoom(img, zoom_tuple, **kwargs)
    # Zooming in
    elif zoom_factor > 1:
        # Bounding box of the zoomed-in region within the input array
        zh = int(np.round(h / zoom_factor))
        zw = int(np.round(w / zoom_factor))
        top = (h - zh) // 2
        left = (w - zw) // 2
        out = zoom(img[top:top+zh, left:left+zw], zoom_tuple, **kwargs)
        # `out` might still be slightly larger than `img` due to rounding, so
        # trim off any extra pixels at the edges
        trim_top = (out.shape[0] - h) // 2
        trim_left = (out.shape[1] - w) // 2
        out = out[trim_top:trim_top+h, trim_left:trim_left+w]
    # If zoom_factor == 1, just return the input array
    else:
        out = img
    return out
For example:
zm1 = clipped_zoom(img, 0.5)
zm2 = clipped_zoom(img, 1.5)
fig, ax = plt.subplots(1, 3)
ax[0].imshow(img)
ax[1].imshow(zm1)
ax[2].imshow(zm2)
I recommend using cv2.resize because it is way faster than scipy.ndimage.zoom, probably due to support for simpler interpolation methods.
For a 480x640 image :
cv2.resize takes ~2 ms
scipy.ndimage.zoom takes ~500 ms
scipy.ndimage.zoom(..., order=0) takes ~175 ms
If you are doing the data augmentation on the fly, this amount of speedup is invaluable because it means more experiments in less time.
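A rough way to check these numbers on your own machine (sizes and iteration counts here are arbitrary, and timings will vary by machine and build):

import timeit
import numpy as np
import cv2
from scipy.ndimage import zoom

img = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
t_cv = timeit.timeit(lambda: cv2.resize(img, (960, 720)), number=20) / 20
t_sp = timeit.timeit(lambda: zoom(img, (1.5, 1.5, 1)), number=3) / 3
print("cv2.resize: %.1f ms" % (t_cv * 1000))
print("scipy zoom: %.1f ms" % (t_sp * 1000))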
Here is a version of clipped_zoom using cv2.resize
def cv2_clipped_zoom(img, zoom_factor=0):
    """
    Center zoom in/out of the given image and return an enlarged/shrunken view
    of the image without changing its dimensions.
    ------
    Args:
        img : ndarray
            Image array
        zoom_factor : float
            amount of zoom as a ratio [0 to Inf). Default 0.
    ------
    Returns:
        result: ndarray
            numpy ndarray of the same shape as the input img, zoomed by the
            specified factor.
    """
    if zoom_factor == 0:
        return img
    height, width = img.shape[:2]  # It's also the final desired shape
    new_height, new_width = int(height * zoom_factor), int(width * zoom_factor)
    ### Crop only the part that will remain in the result (more efficient)
    # Centered bbox of the final desired size in resized (larger/smaller) image coordinates
    y1, x1 = max(0, new_height - height) // 2, max(0, new_width - width) // 2
    y2, x2 = y1 + height, x1 + width
    bbox = np.array([y1, x1, y2, x2])
    # Map back to original image coordinates
    bbox = (bbox / zoom_factor).astype(int)
    y1, x1, y2, x2 = bbox
    cropped_img = img[y1:y2, x1:x2]
    # Handle padding when downscaling
    resize_height, resize_width = min(new_height, height), min(new_width, width)
    pad_height1, pad_width1 = (height - resize_height) // 2, (width - resize_width) // 2
    pad_height2, pad_width2 = (height - resize_height) - pad_height1, (width - resize_width) - pad_width1
    pad_spec = [(pad_height1, pad_height2), (pad_width1, pad_width2)] + [(0, 0)] * (img.ndim - 2)
    result = cv2.resize(cropped_img, (resize_width, resize_height))
    result = np.pad(result, pad_spec, mode='constant')
    assert result.shape[0] == height and result.shape[1] == width
    return result
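For example, assuming img is an HxW or HxWxC array:

zoomed_in = cv2_clipped_zoom(img, 1.5)   # crop the center, then resize back up to HxW
zoomed_out = cv2_clipped_zoom(img, 0.7)  # resize down, then zero-pad back to HxW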

tensorflow: how to rotate an image for data augmentation?

In tensorflow, I would like to rotate an image from a random angle, for data augmentation. But I don't find this transformation in the tf.image module.
This can be done in tensorflow now:
tf.contrib.image.rotate(images, degrees * math.pi / 180, interpolation='BILINEAR')
Because I wanted to be able to rotate tensors I came up with the following piece of code, which rotates a [height, width, depth] tensor by a given angle:
def rotate_image_tensor(image, angle, mode='black'):
    """
    Rotates a 3D tensor (HWD), which represents an image, by the given radian angle.
    The new image has the same size as the input image.
    mode controls what happens to border pixels.
    mode = 'black' results in black bars (value 0 in unknown areas)
    mode = 'white' results in value 255 in unknown areas
    mode = 'ones' results in value 1 in unknown areas
    mode = 'repeat' keeps repeating the closest known pixel
    """
    s = image.get_shape().as_list()
    assert len(s) == 3, "Input needs to be 3D."
    assert (mode == 'repeat') or (mode == 'black') or (mode == 'white') or (mode == 'ones'), "Unknown boundary mode."
    image_center = [np.floor(x/2) for x in s]

    # Coordinates of new image
    coord1 = tf.range(s[0])
    coord2 = tf.range(s[1])

    # Create vectors of those coordinates in order to vectorize the image
    coord1_vec = tf.tile(coord1, [s[1]])
    coord2_vec_unordered = tf.tile(coord2, [s[0]])
    coord2_vec_unordered = tf.reshape(coord2_vec_unordered, [s[0], s[1]])
    coord2_vec = tf.reshape(tf.transpose(coord2_vec_unordered, [1, 0]), [-1])

    # Center coordinates since the rotation center is supposed to be in the image center
    coord1_vec_centered = coord1_vec - image_center[0]
    coord2_vec_centered = coord2_vec - image_center[1]
    coord_new_centered = tf.cast(tf.pack([coord1_vec_centered, coord2_vec_centered]), tf.float32)

    # Perform backward transformation of the image coordinates
    rot_mat_inv = tf.dynamic_stitch([[0], [1], [2], [3]], [tf.cos(angle), tf.sin(angle), -tf.sin(angle), tf.cos(angle)])
    rot_mat_inv = tf.reshape(rot_mat_inv, shape=[2, 2])
    coord_old_centered = tf.matmul(rot_mat_inv, coord_new_centered)

    # Find nearest neighbor in old image
    coord1_old_nn = tf.cast(tf.round(coord_old_centered[0, :] + image_center[0]), tf.int32)
    coord2_old_nn = tf.cast(tf.round(coord_old_centered[1, :] + image_center[1]), tf.int32)

    # Clip values to stay inside image coordinates
    if mode == 'repeat':
        coord_old1_clipped = tf.minimum(tf.maximum(coord1_old_nn, 0), s[0]-1)
        coord_old2_clipped = tf.minimum(tf.maximum(coord2_old_nn, 0), s[1]-1)
    else:
        outside_ind1 = tf.logical_or(tf.greater(coord1_old_nn, s[0]-1), tf.less(coord1_old_nn, 0))
        outside_ind2 = tf.logical_or(tf.greater(coord2_old_nn, s[1]-1), tf.less(coord2_old_nn, 0))
        outside_ind = tf.logical_or(outside_ind1, outside_ind2)
        coord_old1_clipped = tf.boolean_mask(coord1_old_nn, tf.logical_not(outside_ind))
        coord_old2_clipped = tf.boolean_mask(coord2_old_nn, tf.logical_not(outside_ind))
        coord1_vec = tf.boolean_mask(coord1_vec, tf.logical_not(outside_ind))
        coord2_vec = tf.boolean_mask(coord2_vec, tf.logical_not(outside_ind))
    coord_old_clipped = tf.cast(tf.transpose(tf.pack([coord_old1_clipped, coord_old2_clipped]), [1, 0]), tf.int32)

    # Coordinates of the new image
    coord_new = tf.transpose(tf.cast(tf.pack([coord1_vec, coord2_vec]), tf.int32), [1, 0])

    image_channel_list = tf.split(2, s[2], image)
    image_rotated_channel_list = list()
    for image_channel in image_channel_list:
        image_chan_new_values = tf.gather_nd(tf.squeeze(image_channel), coord_old_clipped)
        if (mode == 'black') or (mode == 'repeat'):
            background_color = 0
        elif mode == 'ones':
            background_color = 1
        elif mode == 'white':
            background_color = 255
        image_rotated_channel_list.append(tf.sparse_to_dense(coord_new, [s[0], s[1]], image_chan_new_values,
                                                             background_color, validate_indices=False))
    image_rotated = tf.transpose(tf.pack(image_rotated_channel_list), [1, 2, 0])
    return image_rotated
for tensorflow 2.0:
import tensorflow_addons as tfa
tfa.image.transform_ops.rotate(image, radian)
Rotation and cropping in TensorFlow
I personally needed functions for rotating an image and cropping out the black borders in TensorFlow, and implemented them as below.
def _rotate_and_crop(image, output_height, output_width, rotation_degree, do_crop):
    """Rotate the given image by the given rotation degree and crop for the black edges if necessary
    Args:
        image: A `Tensor` representing an image of arbitrary size.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
        rotation_degree: The degree of rotation on the image.
        do_crop: Do cropping if it is True.
    Returns:
        A rotated image.
    """
    # Rotate the given image with the given rotation degree
    if rotation_degree != 0:
        image = tf.contrib.image.rotate(image, math.radians(rotation_degree), interpolation='BILINEAR')

    # Center crop to omit the black noise on the edges
    if do_crop == True:
        lrr_width, lrr_height = _largest_rotated_rect(output_height, output_width, math.radians(rotation_degree))
        resized_image = tf.image.central_crop(image, float(lrr_height)/output_height)
        image = tf.image.resize_images(resized_image, [output_height, output_width],
                                       method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
    return image

def _largest_rotated_rect(w, h, angle):
    """
    Given a rectangle of size wxh that has been rotated by 'angle' (in
    radians), computes the width and height of the largest possible
    axis-aligned rectangle within the rotated rectangle.
    Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
    Converted to Python by Aaron Snoswell
    Source: http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
    """
    quadrant = int(math.floor(angle / (math.pi / 2))) & 3
    sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
    alpha = (sign_alpha % math.pi + math.pi) % math.pi
    bb_w = w * math.cos(alpha) + h * math.sin(alpha)
    bb_h = w * math.sin(alpha) + h * math.cos(alpha)
    gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
    delta = math.pi - alpha - gamma
    length = h if (w < h) else w
    d = length * math.cos(alpha)
    a = d * math.sin(alpha) / math.sin(delta)
    y = a * math.cos(gamma)
    x = y * math.tan(gamma)
    return (
        bb_w - 2 * x,
        bb_h - 2 * y
    )
If you need further implementation of example and visualization in TensorFlow, you can use this repository.
I hope this could be helpful to other people.
Update: see #astromme's answer below. Tensorflow now supports rotating images natively.
What you can do while there is no native method in tensorflow is something like this:
import numpy as np
import tensorflow as tf
from PIL import Image

sess = tf.InteractiveSession()

# Pass image tensor object to a PIL image
image = Image.fromarray(image.eval())

# Use PIL or another library of the sort to rotate
rotated = Image.Image.rotate(image, degrees)

# Convert the rotated image back to a tensor
rotated_tensor = tf.convert_to_tensor(np.array(rotated))
tf.contrib is not available in tensorflow 2.
For tensorflow >= 2.* the following can be used:
tf.keras.preprocessing.image.random_rotation(x, rg, row_axis=1,col_axis=2, channel_axis=0,fill_mode='nearest', cval=0., interpolation_order=1);
You can find the documentation here:
https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/random_rotation
Here's the #zimmermc answer updated to Tensorflow v0.12
Changes:
pack() is now stack()
order of split parameters reversed
def rotate_image_tensor(image, angle, mode='white'):
    """
    Rotates a 3D tensor (HWD), which represents an image, by the given radian angle.
    The new image has the same size as the input image.
    mode controls what happens to border pixels.
    mode = 'black' results in black bars (value 0 in unknown areas)
    mode = 'white' results in value 255 in unknown areas
    mode = 'ones' results in value 1 in unknown areas
    mode = 'repeat' keeps repeating the closest known pixel
    """
    s = image.get_shape().as_list()
    assert len(s) == 3, "Input needs to be 3D."
    assert (mode == 'repeat') or (mode == 'black') or (mode == 'white') or (mode == 'ones'), "Unknown boundary mode."
    image_center = [np.floor(x/2) for x in s]

    # Coordinates of new image
    coord1 = tf.range(s[0])
    coord2 = tf.range(s[1])

    # Create vectors of those coordinates in order to vectorize the image
    coord1_vec = tf.tile(coord1, [s[1]])
    coord2_vec_unordered = tf.tile(coord2, [s[0]])
    coord2_vec_unordered = tf.reshape(coord2_vec_unordered, [s[0], s[1]])
    coord2_vec = tf.reshape(tf.transpose(coord2_vec_unordered, [1, 0]), [-1])

    # Center coordinates since the rotation center is supposed to be in the image center
    coord1_vec_centered = coord1_vec - image_center[0]
    coord2_vec_centered = coord2_vec - image_center[1]
    coord_new_centered = tf.cast(tf.stack([coord1_vec_centered, coord2_vec_centered]), tf.float32)

    # Perform backward transformation of the image coordinates
    rot_mat_inv = tf.dynamic_stitch([[0], [1], [2], [3]], [tf.cos(angle), tf.sin(angle), -tf.sin(angle), tf.cos(angle)])
    rot_mat_inv = tf.reshape(rot_mat_inv, shape=[2, 2])
    coord_old_centered = tf.matmul(rot_mat_inv, coord_new_centered)

    # Find nearest neighbor in old image
    coord1_old_nn = tf.cast(tf.round(coord_old_centered[0, :] + image_center[0]), tf.int32)
    coord2_old_nn = tf.cast(tf.round(coord_old_centered[1, :] + image_center[1]), tf.int32)

    # Clip values to stay inside image coordinates
    if mode == 'repeat':
        coord_old1_clipped = tf.minimum(tf.maximum(coord1_old_nn, 0), s[0]-1)
        coord_old2_clipped = tf.minimum(tf.maximum(coord2_old_nn, 0), s[1]-1)
    else:
        outside_ind1 = tf.logical_or(tf.greater(coord1_old_nn, s[0]-1), tf.less(coord1_old_nn, 0))
        outside_ind2 = tf.logical_or(tf.greater(coord2_old_nn, s[1]-1), tf.less(coord2_old_nn, 0))
        outside_ind = tf.logical_or(outside_ind1, outside_ind2)
        coord_old1_clipped = tf.boolean_mask(coord1_old_nn, tf.logical_not(outside_ind))
        coord_old2_clipped = tf.boolean_mask(coord2_old_nn, tf.logical_not(outside_ind))
        coord1_vec = tf.boolean_mask(coord1_vec, tf.logical_not(outside_ind))
        coord2_vec = tf.boolean_mask(coord2_vec, tf.logical_not(outside_ind))
    coord_old_clipped = tf.cast(tf.transpose(tf.stack([coord_old1_clipped, coord_old2_clipped]), [1, 0]), tf.int32)

    # Coordinates of the new image
    coord_new = tf.transpose(tf.cast(tf.stack([coord1_vec, coord2_vec]), tf.int32), [1, 0])

    image_channel_list = tf.split(image, s[2], 2)
    image_rotated_channel_list = list()
    for image_channel in image_channel_list:
        image_chan_new_values = tf.gather_nd(tf.squeeze(image_channel), coord_old_clipped)
        if (mode == 'black') or (mode == 'repeat'):
            background_color = 0
        elif mode == 'ones':
            background_color = 1
        elif mode == 'white':
            background_color = 255
        image_rotated_channel_list.append(tf.sparse_to_dense(coord_new, [s[0], s[1]], image_chan_new_values,
                                                             background_color, validate_indices=False))
    image_rotated = tf.transpose(tf.stack(image_rotated_channel_list), [1, 2, 0])
    return image_rotated
For rotating an image or a batch of images counter-clockwise by multiples of 90 degrees, you can use tf.image.rot90(image, k=1, name=None).
k denotes the number of 90-degree rotations you want to make.
For a single image, image is a 3-D tensor of shape [height, width, channels]; for a batch of images, it is a 4-D tensor of shape [batch, height, width, channels].
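A minimal sketch:

import tensorflow as tf

image = tf.zeros([224, 224, 3])       # any [height, width, channels] image tensor
rotated = tf.image.rot90(image, k=2)  # two 90-degree rotations = 180 degrees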

overlay a smaller image on a larger image python OpenCv

Hi, I am creating a program that replaces a face in an image with someone else's face. However, I am stuck on trying to insert the new face into the original, larger image. I have researched ROI and cv2.addWeighted (which needs the images to be the same size), but I haven't found a way to do this in Python. Any advice is great. I am new to OpenCV.
I am using the following test images:
smaller_image:
larger_image:
Here is my code so far... a mix of other samples:
import cv2
import cv2.cv as cv
import sys
import numpy

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(10, 10), flags=cv.CV_HAAR_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

if __name__ == '__main__':
    if len(sys.argv) != 2:  ## Check for error in usage syntax
        print "Usage : python faces.py <image_file>"
    else:
        img = cv2.imread(sys.argv[1], cv2.CV_LOAD_IMAGE_COLOR)  ## Read image file
        if (img == None):
            print "Could not open or find the image"
        else:
            cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
            gray = cv2.cvtColor(img, cv.CV_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            rects = detect(gray, cascade)
            ## Extract face coordinates
            x1 = rects[0][3]
            y1 = rects[0][0]
            x2 = rects[0][4]
            y2 = rects[0][5]
            y = y2 - y1
            x = x2 - x1
            ## Extract face ROI
            faceROI = gray[x1:x2, y1:y2]
            ## Show face ROI
            cv2.imshow('Display face ROI', faceROI)
            small = cv2.imread("average_face.png", cv2.CV_LOAD_IMAGE_COLOR)
            print "here"
            small = cv2.resize(small, (x, y))
            cv2.namedWindow('Display image')  ## create window for display
            cv2.imshow('Display image', small)  ## Show image in the window
            print "size of image: ", img.shape  ## print size of image
            cv2.waitKey(1000)
A simple way to achieve what you want:
import cv2
s_img = cv2.imread("smaller_image.png")
l_img = cv2.imread("larger_image.jpg")
x_offset=y_offset=50
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
Update
I suppose you want to take care of the alpha channel too. Here is a quick and dirty way of doing so:
s_img = cv2.imread("smaller_image.png", -1)

y1, y2 = y_offset, y_offset + s_img.shape[0]
x1, x2 = x_offset, x_offset + s_img.shape[1]

alpha_s = s_img[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s

for c in range(0, 3):
    l_img[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +
                              alpha_l * l_img[y1:y2, x1:x2, c])
Using #fireant's idea, I wrote up a function to handle overlays. This works well for any position argument (including negative positions).
def overlay_image_alpha(img, img_overlay, x, y, alpha_mask):
    """Overlay `img_overlay` onto `img` at (x, y) and blend using `alpha_mask`.

    `alpha_mask` must have the same HxW as `img_overlay` and values in range [0, 1].
    """
    # Image ranges
    y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])
    x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])

    # Overlay ranges
    y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)
    x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)

    # Exit if nothing to do
    if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:
        return

    # Blend overlay within the determined ranges
    img_crop = img[y1:y2, x1:x2]
    img_overlay_crop = img_overlay[y1o:y2o, x1o:x2o]
    alpha = alpha_mask[y1o:y2o, x1o:x2o, np.newaxis]
    alpha_inv = 1.0 - alpha
    img_crop[:] = alpha * img_overlay_crop + alpha_inv * img_crop
Example usage:
import numpy as np
from PIL import Image
# Prepare inputs
x, y = 50, 0
img = np.array(Image.open("img_large.jpg"))
img_overlay_rgba = np.array(Image.open("img_small.png"))
# Perform blending
alpha_mask = img_overlay_rgba[:, :, 3] / 255.0
img_result = img[:, :, :3].copy()
img_overlay = img_overlay_rgba[:, :, :3]
overlay_image_alpha(img_result, img_overlay, x, y, alpha_mask)
# Save result
Image.fromarray(img_result).save("img_result.jpg")
Result:
If you encounter errors or unusual outputs, please ensure:
img should not contain an alpha channel. (e.g. If it is RGBA, convert to RGB first.)
img_overlay has the same number of channels as img.
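For instance, you can normalize both inputs up front with PIL (a sketch using the file names from the example above):

img = np.array(Image.open("img_large.jpg").convert("RGB"))                # drop any alpha channel
img_overlay_rgba = np.array(Image.open("img_small.png").convert("RGBA"))  # guarantee 4 channels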
Based on fireant's excellent answer above, here is the alpha blending written a bit more readably. You may need to swap 1.0-alpha and alpha depending on which direction you're merging (mine is swapped from fireant's answer). Here (ox, oy) is the offset into the overlay s_img, (bx, by) is the offset into the background l_img, and width and height are the size of the blended region:
for c in range(0, 3):
    alpha = s_img[oy:oy+height, ox:ox+width, 3] / 255.0
    color = s_img[oy:oy+height, ox:ox+width, c] * (1.0 - alpha)
    beta = l_img[by:by+height, bx:bx+width, c] * alpha
    l_img[by:by+height, bx:bx+width, c] = color + beta
Here it is:
def put4ChannelImageOn4ChannelImage(back, fore, x, y):
    rows, cols, channels = fore.shape
    trans_indices = fore[..., 3] != 0  # Where not transparent
    overlay_copy = back[y:y+rows, x:x+cols]
    overlay_copy[trans_indices] = fore[trans_indices]
    back[y:y+rows, x:x+cols] = overlay_copy

# Test
background = np.zeros((1000, 1000, 4), np.uint8)
background[:] = (127, 127, 127, 1)
overlay = cv2.imread('imagee.png', cv2.IMREAD_UNCHANGED)
put4ChannelImageOn4ChannelImage(background, overlay, 5, 5)
A simple function that blits an image front onto an image back and returns the result. It works with both 3 and 4-channel images and deals with the alpha channel. Overlaps are handled as well.
The output image has the same size as back, but always 4 channels.
The output alpha channel is given by (u+v)/(1+uv) where u,v are the alpha channels of the front and back image and -1 <= u,v <= 1. Where there is no overlap with front, the alpha value from back is taken.
import cv2

def merge_image(back, front, x, y):
    # Convert to BGRA if needed
    if back.shape[2] == 3:
        back = cv2.cvtColor(back, cv2.COLOR_BGR2BGRA)
    if front.shape[2] == 3:
        front = cv2.cvtColor(front, cv2.COLOR_BGR2BGRA)

    # Crop the overlapping region from both images
    bh, bw = back.shape[:2]
    fh, fw = front.shape[:2]
    x1, x2 = max(x, 0), min(x+fw, bw)
    y1, y2 = max(y, 0), min(y+fh, bh)
    front_cropped = front[y1-y:y2-y, x1-x:x2-x]
    back_cropped = back[y1:y2, x1:x2]

    alpha_front = front_cropped[:, :, 3:4] / 255
    alpha_back = back_cropped[:, :, 3:4] / 255

    # Replace the overlapping area in the result with the blended overlay
    result = back.copy()
    result[y1:y2, x1:x2, :3] = alpha_front * front_cropped[:, :, :3] + (1 - alpha_front) * back_cropped[:, :, :3]
    result[y1:y2, x1:x2, 3:4] = (alpha_front + alpha_back) / (1 + alpha_front * alpha_back) * 255

    return result
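Example usage (the file names and offsets are placeholders):

back = cv2.imread("background.png", cv2.IMREAD_UNCHANGED)
front = cv2.imread("overlay.png", cv2.IMREAD_UNCHANGED)
merged = merge_image(back, front, 50, 50)  # paste front with its top-left corner at (50, 50)
cv2.imwrite("merged.png", merged)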
To blend s_img with the region of l_img underneath it (a quick way to fake transparency), you can apply cv2.addWeighted before the line
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
as follows:
s_img = cv2.addWeighted(l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]], 0.5, s_img, 0.5, 0)
If, when attempting to write to the destination image using any of the answers above, you get the following error:
ValueError: assignment destination is read-only
a quick potential fix is to set the WRITEABLE flag to true:
img.setflags(write=1)
A simple 4-channel-onto-4-channel pasting function that works:
def paste(background, foreground, pos=(0, 0)):
    # Get position and crop the pasting area if needed
    x = pos[0]
    y = pos[1]
    bgWidth = background.shape[0]
    bgHeight = background.shape[1]
    frWidth = foreground.shape[0]
    frHeight = foreground.shape[1]
    width = bgWidth - x
    height = bgHeight - y
    if frWidth < width:
        width = frWidth
    if frHeight < height:
        height = frHeight

    # Normalize alpha channels from 0-255 to 0-1
    alpha_background = background[x:x+width, y:y+height, 3] / 255.0
    alpha_foreground = foreground[:width, :height, 3] / 255.0

    # Set adjusted colors
    for color in range(0, 3):
        fr = alpha_foreground * foreground[:width, :height, color]
        bg = alpha_background * background[x:x+width, y:y+height, color] * (1 - alpha_foreground)
        background[x:x+width, y:y+height, color] = fr + bg

    # Set adjusted alpha and denormalize back to 0-255
    background[x:x+width, y:y+height, 3] = (1 - (1 - alpha_foreground) * (1 - alpha_background)) * 255
    return background
I reworked #fireant's concept to allow for optional alpha masks and allow any x or y, including values outside of the bounds of the image. It will crop to the bounds.
def overlay_image_alpha(img, img_overlay, x, y, alpha_mask=None):
    """Overlay `img_overlay` onto `img` at (x, y) and blend using optional `alpha_mask`.

    `alpha_mask` must have the same HxW as `img_overlay` and values in range [0, 1].
    """
    if y < 0 or y + img_overlay.shape[0] > img.shape[0] or x < 0 or x + img_overlay.shape[1] > img.shape[1]:
        y_origin = 0 if y > 0 else -y
        y_end = img_overlay.shape[0] if y < 0 else min(img.shape[0] - y, img_overlay.shape[0])
        x_origin = 0 if x > 0 else -x
        x_end = img_overlay.shape[1] if x < 0 else min(img.shape[1] - x, img_overlay.shape[1])
        img_overlay_crop = img_overlay[y_origin:y_end, x_origin:x_end]
        alpha = alpha_mask[y_origin:y_end, x_origin:x_end] if alpha_mask is not None else None
    else:
        img_overlay_crop = img_overlay
        alpha = alpha_mask

    y1 = max(y, 0)
    y2 = min(img.shape[0], y1 + img_overlay_crop.shape[0])
    x1 = max(x, 0)
    x2 = min(img.shape[1], x1 + img_overlay_crop.shape[1])

    img_crop = img[y1:y2, x1:x2]
    img_crop[:] = alpha * img_overlay_crop + (1.0 - alpha) * img_crop if alpha is not None else img_overlay_crop
