Blur image using Python - errors

I need to blur an image by taking a kernel K, averaging the pixel values under the kernel's 2D window, and setting the center pixel to that average. Here is the code I have written to do so...
def Clamp(pix):
    pix = abs(pix)
    if pix > 255:
        pix = 255
    return pix
def Convolve2D(image1, K, image2):
    img = graphics.Image(graphics.Point(0, 0), image1)
    img.save(image2)
    secondimage = graphics.Image(graphics.Point(0, 0), image2)
    h = img.getHeight()
    w = img.getWidth()
    A = [[0]*h for y in range(w)]
    B = [[0]*w for x in range(h)]
    # iterate over all rows (ignore 1-pixel borders)
    for v in range(1, h-3):
        graphics.update()  # this updates the output for each row
        # for every row, iterate over all columns (again ignore 1-pixel borders)
        for u in range(1, w-3):
            #A[u][v] = 0
            #B[u][v] = 0
            # for every pixel, iterate over region of overlap between
            # input image and 3x3 kernel centered at current pixel
            for i in range(0, 3):
                for j in range(0, 3):
                    A[u][v] = A[u][v] + B[v+i][u+j] * K[i][j]
            r, g, b = img.getPixel(u, v)
            if (r * A[u][v] >= 255):
                Clamp(r)
            else:
                r = r * A[u][v]
            if (g * A[u][v] >= 255):
                Clamp(g)
            else:
                g = g * A[u][v]
            if (b * A[u][v] >= 255):
                Clamp(b)
            else:
                b = b * A[u][v]
            newcolor = graphics.color_rgb(r, g, b)
            secondimage.setPixel(u, v, newcolor)
    print("Not yet implemented")  # to be removed
    secondimage.save(image2)
    secondimage.move(secondimage.getWidth()/2, secondimage.getHeight()/2)
    win = graphics.GraphWin(secondimage, secondimage.getWidth(), secondimage.getHeight())
    secondimage.draw(win)

def Blur3(image1, image2):
    K = [[1/9, 1/9, 1/9], [1/9, 1/9, 1/9], [1/9, 1/9, 1/9]]
    return Convolve2D(image1, K, image2)
This is the image I am trying to blur.
This is what comes out of my code.
Is it possibly my if/else statements and the Clamp function that are doing this? I just want a blurred image to come out like this.

Do this:
for v in range(h):
    graphics.update()  # this updates the output for each row
    for u in range(w):
        for i in range(0, 3):
            for j in range(0, 3):
                if v-i >= 0 and u-j >= 0 and v+i <= 256 and u+j <= 256:
                    img[u][v] = img[u][v] + img[v-i][u-j] * K[i][j]
This should work!
Can you please tell me why you have two arrays A and B, and blur image A using B? I mean:
A[u][v] = A[u][v] + B[v+i][u+j] * K[i][j]
Here I add code which works for a grayscale image; you can expand it for your needs!
import numpy as np
import matplotlib.image as mpimg

Img = mpimg.imread('GrayScaleImg.jpg')
kernel = towDimGuassKernel(size)  # kernel builder and size assumed defined elsewhere

def conv2D(I, kernel):
    filterWidth = kernel.shape[0]
    half = filterWidth // 2
    bluredImg = np.zeros(I.shape)
    for imgRow in range(I.shape[0]):
        print(imgRow)
        for imgCol in range(I.shape[1]):
            for filterRow in range(filterWidth):
                for filterCol in range(filterWidth):
                    # bounds hard-coded for a 256x256 image
                    if imgRow-filterRow >= 0 and imgCol-filterCol >= 0 and imgRow+filterRow <= 256 and imgCol+filterCol <= 256:
                        bluredImg[imgRow, imgCol] += I[imgRow-filterRow, imgCol-filterCol] * kernel[filterRow, filterCol]
    return bluredImg

You've initialized A and B to empty lists with a size of 0. You need to initialize them instead to be the size of the image, in both dimensions.
A = [[0]*w for y in range(h)]
Edit: Your second problem is that you're defining the kernel with 1/9, which in Python 2 is an integer division yielding 0.
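For reference, here is a minimal corrected sketch of the intended 3x3 box blur, written with numpy instead of the graphics library (the numpy array layout is my assumption, not the original poster's setup):

import numpy as np

def blur3(img):
    # img: HxWx3 uint8 array; returns the box-blurred copy.
    # 1.0/9 keeps the kernel weights as floats (the integer-division bug above).
    K = [[1.0/9]*3 for _ in range(3)]
    out = img.astype(np.float64).copy()
    h, w = img.shape[:2]
    for v in range(1, h-1):            # skip the 1-pixel border
        for u in range(1, w-1):
            acc = np.zeros(3)
            for i in range(3):
                for j in range(3):
                    acc += img[v+i-1, u+j-1] * K[i][j]
            out[v, u] = acc
    return np.clip(out, 0, 255).astype(np.uint8)  # clamp to the valid range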


Alternate faster method for RGB to self-defined color-space image conversion in real-time in Python

Question:
I have defined my own colorspace (Yellow-Blue) using some loops, and want to convert a standard HD image from RGB to YB in real time, with some post-processing filters, but the method I wrote performs the task only at a slow speed.
Context:
I was wondering what colors would dogs see, and found that they cannot distinguish between green and red:
So I decided to define my own YB colorspace, as shown in this scheme:
calculating.py
bits = 8
values = 2 ** bits - 1
color_count = values * 6

def hues():
    lst = []
    for i in range(color_count):
        r = g = b = 0
        turn = (i // values) + 1
        if turn == 1:
            r = values
            g = i % values
            b = 0
        elif turn == 2:
            r = values - i % values
            g = values
            b = 0
        elif turn == 3:
            r = 0
            g = values
            b = i % values
        elif turn == 4:
            r = 0
            g = values - i % values
            b = values
        elif turn == 5:
            r = i % values
            g = 0
            b = values
        elif turn == 6:
            r = values
            g = 0
            b = values - i % values
        r = round(r / values * 255)
        g = round(g / values * 255)
        b = round(b / values * 255)
        lst.append((r, g, b))
    return lst
def dues():
    lst = []
    for i in range(color_count):
        r = g = b = 0
        turn = (i // values) + 1
        if turn == 1:
            r = values
            g = values
            b = round((values - i % values) / 2)
        elif turn == 2:
            r = values
            g = values
            b = round((i % values) / 2)
        elif turn == 3:
            if i % values < values / 2:
                r = values
                g = values
                b = round((values / 2 + i % values))
            else:
                r = round((3 / 2 * values - i % values))
                g = round((3 / 2 * values - i % values))
                b = values
        elif turn == 4:
            r = round((values - i % values) / 2)
            g = round((values - i % values) / 2)
            b = values
        elif turn == 5:
            r = round((i % values) / 2)
            g = round((i % values) / 2)
            b = values
        elif turn == 6:
            if i % values < values / 2:
                r = round((values / 2 + i % values))
                g = round((values / 2 + i % values))
                b = values
            else:
                r = values
                g = values
                b = round((3 / 2 * values - i % values))
        r = round(r / values * 255)
        g = round(g / values * 255)
        b = round(b / values * 255)
        lst.append((r, g, b))
    return lst
def rgb_to_hsl(color: tuple):
    r, g, b = color
    r /= 255
    g /= 255
    b /= 255
    cmax = max(r, g, b)
    cmin = min(r, g, b)
    delta = cmax - cmin
    h = 0
    l = (cmax + cmin) / 2
    if delta == 0:
        h = 0
    elif cmax == r:
        h = ((g - b) / delta) % 6
    elif cmax == g:
        h = ((b - r) / delta) + 2
    elif cmax == b:
        h = ((r - g) / delta) + 4
    h *= 60
    if delta == 0:
        s = 0
    else:
        s = delta / (1 - abs(2 * l - 1))
    return h, s, l
def hsl_to_rgb(color: tuple):
    h, s, l = color
    c = (1 - abs(2 * l - 1)) * s
    x = c * (1 - abs((h / 60) % 2 - 1))
    m = l - c / 2
    r = g = b = 0
    if 0 <= h < 60:
        r = c
        g = x
    elif 60 <= h < 120:
        r = x
        g = c
    elif 120 <= h < 180:
        g = c
        b = x
    elif 180 <= h < 240:
        g = x
        b = c
    elif 240 <= h < 300:
        r = x
        b = c
    elif 300 <= h < 360:
        r = c
        b = x
    r = round((r + m) * 255)
    g = round((g + m) * 255)
    b = round((b + m) * 255)
    return r, g, b
On saving the list values I obtained the expected Hues:
Now the main processing includes pixel-by-pixel conversion of color in this order:
Obtaining RGB
RGB --> HSL
Change value of hue to corresponding value in dues_hsl list
New HSL --> RGB
Set new RGB value at same coordinates in another array
This is repeated for every pixel in the image, and took about 58 seconds on a test image of dimensions 481 x 396 pixels
Input and output:
Code for the same:
defining.py
from PIL import Image
import numpy as np
from calculating import hues, dues
from calculating import rgb_to_hsl as hsl
from calculating import hsl_to_rgb as rgb
hues = hues()
dues = dues()
# Hues = human hues
# Dues = dog hues
hues_hsl = [hsl(i) for i in hues]
dues_hsl = [hsl(i) for i in dues]
img = np.array(Image.open('dog.png').convert('RGB'))
arr_blank = np.zeros(img.shape[0:3])
print(arr_blank.shape)
print(img.shape[0:3])
total = img.shape[0] * img.shape[1]
for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        hsl_val = hsl(tuple(img[i, j]))
        h = dues_hsl[hues_hsl.index(min(hues_hsl, key=lambda x: abs(x[0] - hsl_val[0])))][0]
        pixel = np.array(rgb((h, hsl_val[1], hsl_val[2])))
        arr_blank[i, j, :] = pixel
        print(f'{i * img.shape[1] + j} / {total} --- {(i * img.shape[1] + j)/total*100} %')
print(arr_blank)
data = Image.fromarray(arr_blank.astype('uint8'), 'RGB')
data.save('dog_color.png')
Conclusion:
After this I want to add a Gaussian blur filter too, post-conversion, in real time, but the conversion alone is already taking this long for just one frame. Is there a way the speed can be improved?
Machine info:
If this info is helpful: i7-10750H @ 2.6 GHz, SSD, 16 GB RAM
Thanks!
I had forgotten Pillow also does HSV just as well, so no need for OpenCV.
This executes in about 0.45 seconds on my machine.
from PIL import Image
import numpy as np
values = 2 ** 8 - 1
color_count = values * 6
def dog_hues():
    # ... from original post, removed for brevity ...
    return lst

# Convert the dog_hues() list into a color_count x 1 image, then shrink it to 256x1
hue_map_img = Image.new("RGB", (color_count, 1))
hue_map_img.putdata(dog_hues())
hue_map_img = hue_map_img.resize((256, 1), Image.LANCZOS)
# Get the hues out of it
hsv_array = np.array(hue_map_img.convert("HSV"))
hue_map = hsv_array[:, :, 0].flatten()
# Read in the dog, convert it to HSV
img = np.array(Image.open("dog.jpg").convert("HSV"))
# Remap hue
img[:, :, 0] = hue_map[img[:, :, 0]]
# Convert back to RGB and save
img = Image.fromarray(img, "HSV").convert("RGB")
img.save("dog_hsv.jpg")
1st remark: you can't really change colorspace like this. When you see a color that the human eye (and therefore human RGB image formats) interprets as yellow, like (255, 255, 0), you can't know whether it is made of a yellow frequency (570 nm for example) that excites both our red and green cones but not the blue ones, or of a mixture of red frequencies (690 nm for example) and green frequencies (530 nm), or of any other spectrum that leads to the same red and green cones saturated (255, 255) and the blue one untouched (0).
And you need that information to deduce how the two dog cones are impacted.
In other words, there isn't any mapping between human color and dog color. In math words, there is a projection between real color space (∞-dimensional, a spectrum) and human color space (3D, to simplify: r, g, and b). There is another projection between real color space and dog colorspace (2D, also to simplify). But those projection axes are not included one in the other. So there isn't any projection between the 3D human color space and the 2D dog colorspace. There is no way to know how a dog sees a color with only the knowledge of how a human sees it; you need to know the real color. You could do this with hyperspectral cameras (and apply both projections to compute both the human RGB image and the dog YB image). And that is assuming the quite naive (but correct to first approximation) idea that those colors follow elementary college-level linear algebra, which, in reality, they don't exactly.
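In symbols (a sketch of the same argument; the names S, P_h and P_d are introduced here for illustration, with S the space of spectra and P_h, P_d the human and dog responses):

\[
P_h : S \to \mathbb{R}^3, \qquad P_d : S \to \mathbb{R}^2
\]

and there is no map \( f : \mathbb{R}^3 \to \mathbb{R}^2 \) with \( f \circ P_h = P_d \), because metamers exist: \( P_h(s_1) = P_h(s_2) \) does not imply \( P_d(s_1) = P_d(s_2) \).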
That being said, PIL- or OpenCV-based solutions will work. But more generally speaking, if you don't trust PIL or OpenCV, or any existing library's color model, and really want to reinvent the wheel (I respect that; there is no better way to understand things than to reinvent the wheel), then one rule you must abide by is: never, ever iterate over pixels. If you do that, you have lost the performance match. Python is very, very slow. The only reason it is still a popular language, and why there are still fast programs made with Python, is that Python coders do whatever it takes so that the computation-heavy loops (in image processing, the loops over the pixels) are not actually executed in Python.
So you must rely on numpy to perform your operation on all pixels, and not write the for loops yourself.
For example, here is a rewrite of your rgb_to_hsl doing batch computation with numpy. That is, rgb_to_hsl is no longer called with a single color, but with a whole 2d-array of colors (that is, an image):
import numpy as np

def rgb_to_hsl(image):
    # rgb holds the r,g,b channels between 0 and 1 (as you did for individual
    # r,g,b variables), but it is easier (see below) to keep them as a single
    # array. rgb is not just one triplet (unlike your r,g,b) but a 2d-array of
    # triplets (so a 3d-array)
    rgb = image / 255
    # Likewise, cmax, cmin, delta are not scalars as in your code, but
    # 2d arrays of such scalars
    cmax = rgb.max(axis=2)  # axis=2 means that axes 0 and 1 are kept, and max
                            # is computed along axis 2, that is along the 3
                            # values of each triplet. So rgb is a HxWx3
                            # 3d-array (axis 0 = y, axis 1 = x, axis 2 = color
                            # channel); cmax is a HxW 2d-array
    cmin = rgb.min(axis=2)  # likewise
    delta = cmax - cmin     # same code, but done on all HxW cmax and cmin
    h = np.zeros_like(delta)  # 2d-array of 0
    l = (cmax + cmin) / 2     # 2d-array of (cmax+cmin)/2
    # Here comes the trickier part. We need to separate the cases, and do the
    # computation on each subset concerned by those cases. Excluding the
    # delta==0 pixels from the other cases also avoids dividing by zero.
    case1 = delta == 0
    h[case1] = 0  # in reality we could skip this, since h is already 0 everywhere
    case2 = (cmax == rgb[:, :, 0]) & ~case1
    h[case2] = (rgb[case2, 1] - rgb[case2, 2]) / delta[case2] % 6
    case3 = (cmax == rgb[:, :, 1]) & ~case1 & ~case2
    h[case3] = (rgb[case3, 2] - rgb[case3, 0]) / delta[case3] + 2
    case4 = (cmax == rgb[:, :, 2]) & ~case1 & ~case2 & ~case3
    h[case4] = (rgb[case4, 0] - rgb[case4, 1]) / delta[case4] + 4
    h *= 60  # same code, applies to all HxW values of h
    s = np.zeros_like(h)
    s[case1] = 0  # same remark; I just mimic your code as much as possible,
                  # but that is already the default value
    s[~case1] = delta[~case1] / (1 - abs(2 * l[~case1] - 1))
    # ~case1 is the opposite of case1, so the equivalent of the else in your code
    # returns 3 2d HxW arrays for h, s and l
    return h, s, l
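Called on a whole image at once, usage looks like this (a sketch; the file name is illustrative):

from PIL import Image

image = np.array(Image.open('dog.png').convert('RGB')).astype(np.float64)
h, s, l = rgb_to_hsl(image)   # three HxW arrays, no per-pixel Python loop
print(h.shape, s.mean(), l.max())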

How to redistort a single image point with a map? python opencv

I have used this method to create an inverse mapping to redistort an image, and it works fine. Here's what it looks like in code:
# invert the mapping
combined_map_inverted = invert_map(combined_map, shape)
# apply mapping to image
frame = cv2.remap(img, combined_map_inverted, None ,cv2.INTER_LINEAR)
Notice that it's a combined map, not separated into x and y. How can I take a single (x, y) point in the undistorted image and find the corresponding distorted point? I see this answer but I'm unsure how to apply it to my case.
The combined map is a simple lookup table, mapping from (u, v) to x and from (u, v) to y.
Assume (u, v) is the column, row coordinate of the undistorted image.
Then the coordinate in the distorted image is:
x = combined_map_inverted[v, u, 0]
y = combined_map_inverted[v, u, 1]
In more compact form:
x, y = combined_map_inverted[v, u].tolist()
In case we want to get the value at the (x, y) coordinate, we may use bi-linear interpolation as described in my following answer (or use another kind of interpolation).
I tried testing it using the code from your previous post:
import cv2
import glob
import numpy as np
import math
import os

if os.path.isfile('xymap_inverted.npy'):
    xymap_inverted = np.load('xymap_inverted.npy')
else:
    A = -1010
    B = -3.931
    C = 5.258
    D = 978.3
    M = -193.8
    N = 1740

    def get_tan_func_value(x):
        return A * math.tan((((x-N)/M)+B)/C) + D

    def get_inverse_tan_func_value(x):
        return M * (C*math.atan((x-D)/A) - B) + N

    # answer from linked post
    #def invert_map(F, shape):
    #    I = np.zeros_like(F)
    #    I[:,:,1], I[:,:,0] = np.indices(shape)
    #    P = np.copy(I)
    #    for i in range(10):
    #        P += I - cv2.remap(F, P, None, interpolation=cv2.INTER_LINEAR)
    #    return P

    # https://stackoverflow.com/a/72649764/4926757
    def invert_map(F):
        (h, w) = F.shape[:2]  # (h, w, 2), "xymap"
        I = np.zeros_like(F)
        I[:,:,1], I[:,:,0] = np.indices((h,w))  # identity map
        P = np.copy(I)
        for i in range(10):
            correction = I - cv2.remap(F, P, None, interpolation=cv2.INTER_LINEAR)
            P += correction * 0.5
        return P

    # import image
    #images = glob.glob('*.jpg')
    img = cv2.imread('image1.jpg')  #img = cv2.imread(images[0])
    h, w = img.shape[:2]

    map_x_tan = np.zeros((img.shape[0], img.shape[1]), dtype=np.float32)
    map_x_inverse_tan = np.zeros((img.shape[0], img.shape[1]), dtype=np.float32)
    map_y = np.zeros((img.shape[0], img.shape[1]), dtype=np.float32)

    # x tan function map
    for i in range(map_x_tan.shape[0]):
        map_x_tan[i,:] = [get_tan_func_value(x) for x in range(map_x_tan.shape[1])]

    # x inverse tan function map
    for i in range(map_x_inverse_tan.shape[0]):
        map_x_inverse_tan[i,:] = [get_inverse_tan_func_value(x) for x in range(map_x_inverse_tan.shape[1])]

    # default y map
    for j in range(map_y.shape[1]):
        map_y[:,j] = [y for y in range(map_y.shape[0])]

    # convert x tan map to 2 channel (x,y) map
    (xymap_tan, _) = cv2.convertMaps(map1=map_x_tan, map2=map_y, dstmap1type=cv2.CV_32FC2)

    # invert the 2 channel x tan map
    xymap_inverted = invert_map(xymap_tan)
    np.save('xymap_inverted.npy', xymap_inverted)

combined_map_inverted = xymap_inverted
u = 150
v = 120
x, y = combined_map_inverted[v, u].tolist()
The output is:
x = 278.2418212890625
y = 120.0
Bi-linear interpolation example:
x0 = int(x)
y0 = int(y)
x1 = int(x0 + 1)
y1 = int(y0 + 1)
dx = x - x0
dy = y - y0
new_pixel = np.round(img[y0,x0]*(1-dx)*(1-dy) + img[y1,x0]*(1-dx)*dy + img[y0,x1]*dx*(1-dy) + img[y1,x1]*dx*dy)
Testing by remapping an entire image, and comparing with cv2.remap:
def bilinear_interp(img, x, y):
    x0 = int(x)
    y0 = int(y)
    x1 = int(x0 + 1)
    y1 = int(y0 + 1)
    dx = x - x0
    dy = y - y0
    new_pixel = np.round(img[y0,x0]*(1-dx)*(1-dy) + img[y1,x0]*(1-dx)*dy + img[y0,x1]*dx*(1-dy) + img[y1,x1]*dx*dy)
    return new_pixel.astype(np.uint8)

img = cv2.imread('image1.jpg')
ref_img = cv2.remap(img, xymap_inverted, None, cv2.INTER_LINEAR)
cv2.imwrite('ref_img.jpg', ref_img)

new_img = np.zeros_like(img)
for v in range(img.shape[0]):
    for u in range(img.shape[1]):
        x, y = combined_map_inverted[v, u].tolist()
        if (x >= 0) and (y >= 0) and (x < img.shape[1]-1) and (y < img.shape[0]-1):
            new_img[v, u] = bilinear_interp(img, x, y)
cv2.imwrite('new_img.jpg', new_img)

abs_diff = cv2.absdiff(ref_img, new_img)
cv2.imshow('abs_diff', abs_diff)  # Display the absolute difference for testing
cv2.waitKey()
cv2.destroyAllWindows()
ref_img and new_img are almost the same.
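To put a number on "almost the same", one could also print the worst per-channel deviation (a small addition, not part of the original test):

print(np.max(abs_diff))  # expected to be 0 or very small, except possibly near borders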

fast <image,time> linear interpolation

I'm trying to achieve linear interpolation, where the data points are N images of shape: HxWx3 (stored in buf (NxHxWx3)), and the points to interpolate are specified in another (2D) grid (interp_values).
Non-vectorizable approach:
In principle I have made interp_values an HxW grid with values 0..N-1, indicating for each i,j element from which image (in buf) to read it, including fractional values meaning interpolation.
E.g.: a value of 3.6 means blend 40% (1-0.6) of image 3 with 60% (0.6) of image 4. However with this approach it is quite impossible to vectorize the code, and performance was poor.
One vectorization approach:
So I changed interp_values to be an NxHxWx3 grid with values 0..1. Each column :,i,j,c would specify blend coefficients for the N images, where only 1 or 2 elements are non-zero, e.g. for 3.6 we have: [0, 0, 0, 0.4, 0.6, 0, 0, ...]. I can convert interp_values from HxW to NxHxWx3 with:
def expand_interp_values(interp_values):
    r = np.zeros((N,) + interp_values.shape + (3,))
    for i in range(interp_values.shape[0]):
        for j in range(interp_values.shape[1]):
            v = interp_values[i, j]
            a, b, x = math.floor(v), math.ceil(v), math.fmod(v, 1)
            if int(a) == int(b):
                r[a, i, j, :] = 3 * [1]
            else:
                r[a, i, j, :] = 3 * [1 - x]
                r[b, i, j, :] = 3 * [x]
    return r
This representation is more sparse (many zeros) but now interpolation can be computed as element-wise multiplication between buf and interp_values (the multiplication part of the linear interpolation) followed by a sum(..., axis=0) (i.e. the addition part of the linear interpolation):
def linear_interp(data, interp_values):
    return np.sum(data * interp_values, axis=0)
With this approach there is some performance improvement; however, the CPU is still mostly busy computing x1*0, x2*0, ... or 0 + 0 + 0...
Can this be improved any better?
Additionally, the creation of the expanded interp_values grid is not vectorized, so perhaps performance would be bad if that grid has to be updated continuously.
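One alternative (a sketch, not from the original post): gather just the two relevant frames per pixel with integer fancy indexing and blend them directly, so neither the dense NxHxWx3 weight grid nor the N-way multiply-sum is needed:

import numpy as np

def linear_interp_gather(buf, interp_values):
    # buf: NxHxWx3, interp_values: HxW floats in [0, N-1]
    lo = np.floor(interp_values).astype(int)      # lower frame index per pixel
    hi = np.minimum(lo + 1, buf.shape[0] - 1)     # clamped upper frame index
    frac = (interp_values - lo)[..., None]        # HxWx1 blend weight
    rows, cols = np.indices(interp_values.shape)
    return buf[lo, rows, cols] * (1 - frac) + buf[hi, rows, cols] * frac

This would also remove the need for expand_interp_values entirely, since the floor, ceil and fractional parts are computed vectorized.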
Complete python+opencv code:
import cv2
import numpy as np
import math

vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# store last N images into a NxHxWx3 grid (circular buffer):
N = 25
buf = None
interp_values = None
DOWNSAMPLING = 6

def linear_interp(data, interp_values):
    return np.sum(data * interp_values / 256, axis=0)

def expand_interp_values(interp_values):
    r = np.zeros((N,) + interp_values.shape + (3,))
    for i in range(interp_values.shape[0]):
        for j in range(interp_values.shape[1]):
            v = interp_values[i, j]
            a, b, x = math.floor(v), math.ceil(v), math.fmod(v, 1)
            if int(a) == int(b):
                r[a, i, j, :] = 3 * [1]
            else:
                r[a, i, j, :] = 3 * [1 - x]
                r[b, i, j, :] = 3 * [x]
    return r

while True:
    ret, frame = vid.read()
    H, W, Ch = frame.shape
    frame = cv2.resize(frame, dsize=(W//DOWNSAMPLING, H//DOWNSAMPLING), interpolation=cv2.INTER_LINEAR)
    # circular buffer:
    if buf is None:
        buf = np.zeros((N,) + frame.shape, dtype=np.uint8)
    # there should be a simpler way to a FIFO-grid...
    for i in reversed(range(1, N)):
        buf[i] = buf[i - 1]
    buf[0] = frame
    if interp_values is None:
        # create a lookup pattern here:
        interp_values = np.zeros(frame.shape[:2])
        for i in range(frame.shape[0]):
            for j in range(frame.shape[1]):
                y = i / (frame.shape[0] - 1) * 2 - 1
                x = j / (frame.shape[1] - 1) * 2 - 1
                #interp_values[i, j] = (N - 1) * min(1, math.hypot(x, y))
                interp_values[i, j] = (N - 1) * (y + 1) / 2
        interp_values = expand_interp_values(interp_values)
    im = linear_interp(buf, interp_values)
    im = cv2.resize(im, dsize=(W, H), interpolation=cv2.INTER_LANCZOS4)
    cv2.imshow('image', im)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

vid.release()
cv2.destroyAllWindows()

Bilinear interpolation in Python

I'm trying to evaluate the quality of an image by implementing nearest-neighbour and bi-linear interpolation to resize it. Currently the two resized images look identical. I cannot seem to find the reason why the bi-linear method is not providing the smooth output picture it should. Below is nearest neighbour:
def scale_image_NN(image, scaling_factor):
    cv2.imshow('Original image', lena)
    cv2.waitKey(0)
    print 'Running'
    size = np.shape(image)
    scaled_image = np.zeros((size[0]*scaling_factor, size[1]*scaling_factor, 3), dtype=np.uint32)
    for i in range(0, scaling_factor*size[0]-3):
        for j in range(0, scaling_factor*size[1]-3):
            x = int(m.floor(i/scaling_factor))
            y = int(m.floor(j/scaling_factor))
            for k in range(0, 3):
                scaled_image[i+1, j+1, k] = image[x+1, y+1, k]
    cv2.imshow('Scaled image - NN', scaled_image)
    cv2.waitKey(0)
    cv2.imwrite('NN.jpg', scaled_image)
and subsequently bi-linear interpolation
def scale_image_BL(image, scaling_factor):
    cv2.imshow('Original image', lena)
    cv2.waitKey(0)
    print 'Running'
    orig_size = np.shape(image)
    h = orig_size[0]
    w = orig_size[1]
    c = orig_size[2]
    r = scaling_factor
    padded_image = np.zeros((h*scaling_factor, w*scaling_factor, c), dtype=np.uint8)
    for i in range(0, h*scaling_factor):
        x1 = int(m.floor(i/r))
        x2 = int(m.ceil(i/r))
        if x1 == 0:
            x1 = 1
        x = np.remainder(i/r, 1)
        for j in range(0, w*scaling_factor):
            y1 = int(m.floor(j/r))
            y2 = int(m.ceil(j/r))
            if y1 == 0:
                y1 = 1
            ctl = image[x1, y1, :]
            cbl = image[x2, y1, :]
            ctr = image[x1, y2, :]
            cbr = image[x2, y2, :]
            y = np.remainder(j/r, 1)
            tr = (ctr*y) + (ctl*(1-y))
            br = (cbr*y) + (cbl*(1-y))
            padded_image[i, j, :] = (br*x) + (tr*(1-x))
    scaledImage = padded_image.astype(np.uint8)
    cv2.imshow('Scaled image - BL', scaledImage)
    cv2.waitKey(0)
    cv2.imwrite('BL.jpg', scaledImage)
The problem was due to the issue described in "Why does Python return 0 for simple division calculation?".
During the calculation of the two positions to interpolate between, say x1 or x2 in the bi-linear interpolation, Python 2 was returning 0 for simple division such as 1/2 instead of 0.5, so there weren't always two distinct points to interpolate between, resulting in the NN-type output.
For scale_image_BL(image, scaling_factor) to work, simply include:
from __future__ import division
at the beginning of the script.
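A minimal demonstration of the Python 2 behaviour the import changes:

from __future__ import division
print(1 / 2)  # 0.5 with the import; plain Python 2 gives 0, so floor and ceil coincide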

HSV to RGB (and back) without floating point math in Python

Anyone know of a good python algorithm for converting HSV color to RGB (and vice versa) that doesn't depend on any external modules? I'm working on some animation generation code and want to support the HSV colorspace, but it's running on a Raspberry Pi and I'm trying to avoid any floating point.
This site here takes you through the steps, including how to do it using integer division. Here is a Python port of the RGB to HSV function described there:
def RGB_2_HSV(RGB):
    ''' Converts an integer RGB tuple (value range from 0 to 255) to an HSV tuple '''
    # Unpack the tuple for readability
    R, G, B = RGB
    # Compute the H value by finding the maximum of the RGB values
    RGB_Max = max(RGB)
    RGB_Min = min(RGB)
    # Compute the value
    V = RGB_Max
    if V == 0:
        H = S = 0
        return (H, S, V)
    # Compute the saturation value
    S = 255 * (RGB_Max - RGB_Min) // V
    if S == 0:
        H = 0
        return (H, S, V)
    # Compute the Hue
    if RGB_Max == R:
        H = 0 + 43*(G - B)//(RGB_Max - RGB_Min)
    elif RGB_Max == G:
        H = 85 + 43*(B - R)//(RGB_Max - RGB_Min)
    else:  # RGB_Max == B
        H = 171 + 43*(R - G)//(RGB_Max - RGB_Min)
    return (H, S, V)
Which gives correct results when compared to the colorsys functions
import colorsys
RGB = (127, 127, 127)
Converted_2_HSV = RGB_2_HSV(RGB)
Verify_RGB_2_HSV = colorsys.rgb_to_hsv(RGB[0], RGB[1], RGB[2])
print Converted_2_HSV
>>> (0, 0, 127)
print Verify_RGB_2_HSV # multiplied by 255 to bring it into the same scale
>>> (0.0, 0.0, 127.5)
And you can check that the output is still in fact an integer
type(Converted_2_HSV[0])
>>> <type 'int'>
Now for the reverse function. The original source can be found here, and here is the Python port.
def HSV_2_RGB(HSV):
    ''' Converts an integer HSV tuple (value range from 0 to 255) to an RGB tuple '''
    # Unpack the HSV tuple for readability
    H, S, V = HSV
    # Check if the color is Grayscale
    if S == 0:
        R = V
        G = V
        B = V
        return (R, G, B)
    # Make hue 0-5
    region = H // 43
    # Find remainder part, make it from 0-255
    remainder = (H - (region * 43)) * 6
    # Calculate temp vars, doing integer multiplication
    P = (V * (255 - S)) >> 8
    Q = (V * (255 - ((S * remainder) >> 8))) >> 8
    T = (V * (255 - ((S * (255 - remainder)) >> 8))) >> 8
    # Assign temp vars based on color cone region
    if region == 0:
        R = V
        G = T
        B = P
    elif region == 1:
        R = Q
        G = V
        B = P
    elif region == 2:
        R = P
        G = V
        B = T
    elif region == 3:
        R = P
        G = Q
        B = V
    elif region == 4:
        R = T
        G = P
        B = V
    else:
        R = V
        G = P
        B = Q
    return (R, G, B)
And we can verify the result in the same way as before
integer_HSV = (127, 127, 127)
Converted_2_RGB = HSV_2_RGB(integer_HSV)
Verify_HSV_2_RGB = colorsys.hsv_to_rgb(0.5, 0.5, 0.5)
print Converted_2_RGB
>>> (63, 127, 124)
print type(Converted_2_RGB[0])
>>> <type 'int'>
print Verify_HSV_2_RGB # multiplied these by 255 so they are on the same scale
>>> (63.75, 127.5, 127.5)
The integer arithmetic does introduce some errors, however depending on the application these might be ok.
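A quick way to gauge those errors is a round trip over a few sample colors (a sketch; the samples are arbitrary):

samples = [(0, 0, 0), (255, 0, 0), (12, 200, 100), (127, 127, 127)]
worst = max(abs(a - b)
            for rgb in samples
            for a, b in zip(rgb, HSV_2_RGB(RGB_2_HSV(rgb))))
print(worst)  # worst per-channel deviation over the samples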
