HSV to RGB (and back) without floating point math in Python

Anyone know of a good python algorithm for converting HSV color to RGB (and vice versa) that doesn't depend on any external modules? I'm working on some animation generation code and want to support the HSV colorspace, but it's running on a Raspberry Pi and I'm trying to avoid any floating point.

This site here takes you through the steps, including how to do it using integer division. Here is a Python port of the RGB to HSV function described there:
def RGB_2_HSV(RGB):
    ''' Converts an integer RGB tuple (value range from 0 to 255) to an HSV tuple '''

    # Unpack the tuple for readability
    R, G, B = RGB

    # Compute the H value by finding the maximum of the RGB values
    RGB_Max = max(RGB)
    RGB_Min = min(RGB)

    # Compute the value
    V = RGB_Max
    if V == 0:
        H = S = 0
        return (H, S, V)

    # Compute the saturation value
    S = 255 * (RGB_Max - RGB_Min) // V

    if S == 0:
        H = 0
        return (H, S, V)

    # Compute the Hue
    if RGB_Max == R:
        H = 0 + 43*(G - B)//(RGB_Max - RGB_Min)
    elif RGB_Max == G:
        H = 85 + 43*(B - R)//(RGB_Max - RGB_Min)
    else:  # RGB_Max == B
        H = 171 + 43*(R - G)//(RGB_Max - RGB_Min)

    return (H, S, V)
This gives correct results when compared to the colorsys functions:
import colorsys
RGB = (127, 127, 127)
Converted_2_HSV = RGB_2_HSV(RGB)
Verify_RGB_2_HSV = colorsys.rgb_to_hsv(RGB[0], RGB[1], RGB[2])
print Converted_2_HSV
>>> (0, 0, 127)
print Verify_RGB_2_HSV # multiplied by 255 to bring it into the same scale
>>> (0.0, 0.0, 127.5)
And you can check that the output is still in fact an integer
type(Converted_2_HSV[0])
>>> <type 'int'>
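As a quick sanity check of the 0-255 hue scale (which stands in for the usual 0-360 degrees), the three primaries land where you would expect. A minimal sketch using the function above:

# Primary colors land on the integer hue scale at 0, 85 and 171,
# roughly 0, 120 and 240 degrees on a conventional hue wheel.
print(RGB_2_HSV((255, 0, 0)))   # (0, 255, 255)
print(RGB_2_HSV((0, 255, 0)))   # (85, 255, 255)
print(RGB_2_HSV((0, 0, 255)))   # (171, 255, 255)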
Now for the reverse function. The original source can be found here, and here is the Python port.
def HSV_2_RGB(HSV):
    ''' Converts an integer HSV tuple (value range from 0 to 255) to an RGB tuple '''

    # Unpack the HSV tuple for readability
    H, S, V = HSV

    # Check if the color is Grayscale
    if S == 0:
        R = V
        G = V
        B = V
        return (R, G, B)

    # Make hue 0-5
    region = H // 43
    # Find remainder part, make it from 0-255
    remainder = (H - (region * 43)) * 6

    # Calculate temp vars, doing integer multiplication
    P = (V * (255 - S)) >> 8
    Q = (V * (255 - ((S * remainder) >> 8))) >> 8
    T = (V * (255 - ((S * (255 - remainder)) >> 8))) >> 8

    # Assign temp vars based on color cone region
    if region == 0:
        R = V
        G = T
        B = P
    elif region == 1:
        R = Q
        G = V
        B = P
    elif region == 2:
        R = P
        G = V
        B = T
    elif region == 3:
        R = P
        G = Q
        B = V
    elif region == 4:
        R = T
        G = P
        B = V
    else:
        R = V
        G = P
        B = Q

    return (R, G, B)
And we can verify the result in the same way as before
integer_HSV = (127, 127, 127)
Converted_2_RGB = HSV_2_RGB(integer_HSV)
Verify_HSV_2_RGB = colorsys.hsv_to_rgb(0.5, 0.5, 0.5)
print Converted_2_RGB
>>> (63, 127, 124)
print type(Converted_2_RGB[0])
>>> <type 'int'>
print Verify_HSV_2_RGB # multiplied these by 255 so they are on the same scale
>>> (63.75, 127.5, 127.5)
The integer arithmetic does introduce some errors, however depending on the application these might be ok.
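To get a feel for how large those errors are, here is a minimal round-trip sketch (assuming the RGB_2_HSV and HSV_2_RGB functions defined above); it just reports the largest per-channel difference it finds:

import random

# Round-trip RGB -> HSV -> RGB over random colors and record the worst
# per-channel deviation introduced by the integer arithmetic.
worst = 0
for _ in range(10000):
    rgb = tuple(random.randint(0, 255) for _ in range(3))
    rgb2 = HSV_2_RGB(RGB_2_HSV(rgb))
    worst = max(worst, max(abs(a - b) for a, b in zip(rgb, rgb2)))
print("worst per-channel round-trip error:", worst)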

Related

Alternate faster method for RGB to self-defined color-space image conversion in real-time in Python

Question:
I have defined my own colorspace (Yellow-Blue) using some loops, and I want to convert a standard HD image from RGB to YB in real time, with some post-processing filters, but the method I wrote is far too slow.
Context:
I was wondering what colors dogs would see, and found that they cannot distinguish between green and red. So I decided to define my own YB colorspace, as shown in this scheme:
calculating.py
bits = 8

values = 2 ** bits - 1
color_count = values * 6


def hues():
    lst = []
    for i in range(color_count):
        r = g = b = 0
        turn = (i // values) + 1

        if turn == 1:
            r = values
            g = i % values
            b = 0
        elif turn == 2:
            r = values - i % values
            g = values
            b = 0
        elif turn == 3:
            r = 0
            g = values
            b = i % values
        elif turn == 4:
            r = 0
            g = values - i % values
            b = values
        elif turn == 5:
            r = i % values
            g = 0
            b = values
        elif turn == 6:
            r = values
            g = 0
            b = values - i % values

        r = round(r / values * 255)
        g = round(g / values * 255)
        b = round(b / values * 255)

        lst.append((r, g, b))
    return lst


def dues():
    lst = []
    for i in range(color_count):
        r = g = b = 0
        turn = (i // values) + 1

        if turn == 1:
            r = values
            g = values
            b = round((values - i % values) / 2)
        elif turn == 2:
            r = values
            g = values
            b = round((i % values) / 2)
        elif turn == 3:
            if i % values < values / 2:
                r = values
                g = values
                b = round((values / 2 + i % values))
            else:
                r = round((3 / 2 * values - i % values))
                g = round((3 / 2 * values - i % values))
                b = values
        elif turn == 4:
            r = round((values - i % values) / 2)
            g = round((values - i % values) / 2)
            b = values
        elif turn == 5:
            r = round((i % values) / 2)
            g = round((i % values) / 2)
            b = values
        elif turn == 6:
            if i % values < values / 2:
                r = round((values / 2 + i % values))
                g = round((values / 2 + i % values))
                b = values
            else:
                r = values
                g = values
                b = round((3 / 2 * values - i % values))

        r = round(r / values * 255)
        g = round(g / values * 255)
        b = round(b / values * 255)

        lst.append((r, g, b))
    return lst


def rgb_to_hsl(color: tuple):
    r, g, b = color

    r /= 255
    g /= 255
    b /= 255

    cmax = max(r, g, b)
    cmin = min(r, g, b)
    delta = cmax - cmin

    h = 0
    l = (cmax + cmin) / 2

    if delta == 0:
        h = 0
    elif cmax == r:
        h = ((g - b) / delta) % 6
    elif cmax == g:
        h = ((b - r) / delta) + 2
    elif cmax == b:
        h = ((r - g) / delta) + 4

    h *= 60

    if delta == 0:
        s = 0
    else:
        s = delta / (1 - abs(2 * l - 1))

    return h, s, l


def hsl_to_rgb(color: tuple):
    h, s, l = color

    c = (1 - abs(2 * l - 1)) * s
    x = c * (1 - abs((h / 60) % 2 - 1))
    m = l - c / 2

    r = g = b = 0

    if 0 <= h < 60:
        r = c
        g = x
    elif 60 <= h < 120:
        r = x
        g = c
    elif 120 <= h < 180:
        g = c
        b = x
    elif 180 <= h < 240:
        g = x
        b = c
    elif 240 <= h < 300:
        r = x
        b = c
    elif 300 <= h < 360:
        r = c
        b = x

    r = round((r + m) * 255)
    g = round((g + m) * 255)
    b = round((b + m) * 255)

    return r, g, b
On saving the list values I obtained the expected Hues:
Now the main processing includes pixel-by-pixel conversion of color in this order:
Obtaining RGB
RGB --> HSL
Change value of hue to corresponding value in dues_hsl list
New HSL --> RGB
Set new RGB value at same coordinates in another array
This is repeated for every pixel in the image, and it took about 58 seconds on a test image of 481 x 396 pixels.
Input and output:
Code for the same:
defining.py
from PIL import Image
import numpy as np
from calculating import hues, dues
from calculating import rgb_to_hsl as hsl
from calculating import hsl_to_rgb as rgb

hues = hues()
dues = dues()

# Hues = human hues
# Dues = dog hues

hues_hsl = [hsl(i) for i in hues]
dues_hsl = [hsl(i) for i in dues]

img = np.array(Image.open('dog.png').convert('RGB'))
arr_blank = np.zeros(img.shape[0:3])
print(arr_blank.shape)
print(img.shape[0:3])

total = img.shape[0] * img.shape[1]

for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        hsl_val = hsl(tuple(img[i, j]))
        h = dues_hsl[hues_hsl.index(min(hues_hsl, key=lambda x: abs(x[0] - hsl_val[0])))][0]
        pixel = np.array(rgb((h, hsl_val[1], hsl_val[2])))
        arr_blank[i, j, :] = pixel

        print(f'{i * img.shape[1] + j} / {total} --- {(i * img.shape[1] + j)/total*100} %')

print(arr_blank)
data = Image.fromarray(arr_blank.astype('uint8'), 'RGB')
data.save('dog_color.png')
Conclusion:
After this I want to add a Gaussian blur filter as well, post-conversion, in real time, but this is taking too long for just one frame. Is there a way the speed can be improved?
Machine info:
If this info is helpful: i7-10750H @ 2.6GHz, SSD, 16 GB RAM
Thanks!
I had forgotten Pillow also does HSV just as well, so no need for OpenCV.
This executes in about 0.45 seconds on my machine.
from PIL import Image
import numpy as np
values = 2 ** 8 - 1
color_count = values * 6
def dog_hues():
    # ... dues() from the original post, removed for brevity ...
    return lst
# Convert the dog_hues() list into an image of size 256x1
hue_map_img = Image.new("RGB", (color_count, 1))
hue_map_img.putdata(dog_hues())
hue_map_img = hue_map_img.resize((256, 1), Image.LANCZOS)
# Get the hues out of it
hsv_array = np.array(hue_map_img.convert("HSV"))
hue_map = hsv_array[:, :, 0].flatten()
# Read in the dog, convert it to HSV
img = np.array(Image.open("dog.jpg").convert("HSV"))
# Remap hue
img[:, :, 0] = hue_map[img[:, :, 0]]
# Convert back to RGB and save
img = Image.fromarray(img, "HSV").convert("RGB")
img.save("dog_hsv.jpg")
1st remark: you can't really change colorspace like this. When you see a color that the human eye (and therefore human RGB image formats) interprets as yellow, like (255, 255, 0), you can't know whether it is made of a single yellow wavelength (570 nm for example) that excites both our red and green cones but not the blue ones, or of a mixture of red wavelengths (690 nm for example) and green wavelengths (530 nm), or of any other spectrum that leaves the red and green cones saturated (255, 255) and the blue ones untouched (0).
And you need that information to deduce how the two dog cones are impacted.
In other words, there isn't any mapping between human color and dog color. In math terms, there is a projection from the real color space (infinite-dimensional, a spectrum) onto the human color space (3D, to simplify: r, g and b). There is another projection from the real color space onto the dog colorspace (2D, also to simplify). But those projection axes are not contained one in the other, so there is no projection from the 3D human color space onto the 2D dog colorspace. There is no way to know how a dog sees a color knowing only how a human sees it; you need to know the real color. You could do this with hyperspectral cameras (and apply both projections, to compute both the human RGB image and the dog YB image). And that assumes the rather naive (but correct to a first approximation) idea that these colors follow elementary college-level linear algebra, which in reality they don't exactly.
That being said, PIL- or OpenCV-based solutions are an option. But more generally speaking, if you don't trust PIL or OpenCV, or any existing library's color model, and really want to reinvent the wheel (which I respect; there is no better way to understand things than to reinvent the wheel), then one rule you must abide by is: never, ever iterate over pixels. If you do that, you have lost the performance battle. Python is very, very slow. The only reason it is still a popular language, and the only reason there are still fast programs written in Python, is that Python coders do whatever it takes to ensure that the computation-heavy loops (in image processing, the loops over the pixels) are not actually executed in Python.
So you must rely on numpy to perform your operation on all pixels, and not write the for loops yourself.
For example, here is a rewrite of your rgb_to_hsl doing batch computation with numpy. That is, rgb_to_hsl is no longer meant to be called with a single color, but with a whole 2D array of colors, that is, an image:
import numpy as np

def rgb_to_hsl(image):
    # rgb holds the r, g, b channels between 0 and 1 (as you did for the
    # individual r, g, b variables, but it is easier, see below, to keep them
    # as a single array). rgb is not just a triplet (unlike your r, g, b) but
    # a 2d-array of triplets (so a 3d-array)
    rgb = image / 255

    # Likewise, cmax, cmin, delta are not scalars as in your code, but
    # 2d-arrays of such scalars
    cmax = rgb.max(axis=2)  # axis=2 means that axes 0 and 1 are kept, and max
                            # is computed along axis 2, that is along the 3
                            # values of each triplet. So rgb is a HxWx3
                            # 3d-array (axis 0 = y, axis 1 = x, axis 2 = color
                            # channel). cmax is a HxW 2d-array
    cmin = rgb.min(axis=2)  # likewise
    delta = cmax - cmin     # same code. But this is done on all HxW cmax and cmin

    h = np.zeros_like(delta)  # 2d-array of 0
    l = (cmax + cmin) / 2     # 2d-array of (cmax+cmin)/2

    # Here comes the trickier part. We need to separate the cases, and do the
    # computation in each subset concerned by each case
    case1 = delta == 0
    h[case1] = 0  # In reality, we could skip this, since h is already 0 everywhere

    # ~case1 excludes the delta==0 pixels, so we never divide by zero
    case2 = (cmax == rgb[..., 0]) & ~case1
    h[case2] = (rgb[case2, 1] - rgb[case2, 2]) / delta[case2] % 6
    case3 = (cmax == rgb[..., 1]) & ~case1
    h[case3] = (rgb[case3, 2] - rgb[case3, 0]) / delta[case3] + 2
    case4 = (cmax == rgb[..., 2]) & ~case1
    h[case4] = (rgb[case4, 0] - rgb[case4, 1]) / delta[case4] + 4

    h *= 60  # Same code, applies on all HxW values of h

    s = np.zeros_like(h)
    s[case1] = 0  # same remark. I just mimic your code as much as possible,
                  # but that is already the default value
    s[~case1] = delta[~case1] / (1 - abs(2 * l[~case1] - 1))
    # ~case1 is the opposite of case1. So, equivalent of the else in your code

    # returns 3 2d HxW arrays for h, s and l
    return h, s, l
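As a rough usage sketch (assuming the vectorised rgb_to_hsl above and the same 'dog.png' used in the question), the whole image is converted in one call instead of a double loop:

from PIL import Image
import numpy as np

# Load the whole image as an HxWx3 array and convert every pixel at once
img = np.array(Image.open('dog.png').convert('RGB'))
h, s, l = rgb_to_hsl(img)
print(h.shape, s.shape, l.shape)  # three HxW arrays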

how to get the pixel coordinate of image using python

So the question has changed a bit:
I have this piece of code:
import re
from PIL import Image

def rgb_to_hex(rgb_color):
    [r, g, b] = rgb_color
    assert 0 <= r <= 255
    assert 0 <= g <= 255
    assert 0 <= b <= 255
    r = hex(r).lstrip('0x')
    g = hex(g).lstrip('0x')
    b = hex(b).lstrip('0x')
    r = (2 - len(r)) * '0' + r
    g = (2 - len(g)) * '0' + g
    b = (2 - len(b)) * '0' + b
    hex_color = '#' + r + g + b
    return hex_color

img = Image.open('img.png')
pix_val = list(img.getdata())
x, y = img.size
a = 0
for element in pix_val:
    element = list(element)
    del element[-1]
    print(rgb_to_hex(element))
    a += 1
    if a == x:
        a = 0
        print("")
What this code does is open an image file and read its data, then, column by column, print the hex code of the pixel at a particular row and column.
What I want is to also print the coordinates of each pixel.
For example, I have this image:
So I want the coordinates of the pixel whose value I am printing.
Please help me
Thanks for answering in advance
You can also use f-strings, introduced in Python 3.6, like:
from PIL import Image

img = Image.open('img.png')
pixels = img.load()
width, height = img.size

for x in range(width):
    for y in range(height):
        r, g, b = pixels[x, y]
        # in case your image has an alpha channel
        # r, g, b, a = pixels[x, y]
        print(x, y, f"#{r:02x}{g:02x}{b:02x}")
which outputs:
0 0 #4777b9
0 1 #4878ba
0 2 #4a77ba
0 3 #4a75b9
0 4 #4b73b8
0 5 #4d75ba
...
Reference:
Converting a RGB color tuple to a six digit code, in Python - Stack Overflow
What’s New In Python 3.6 — Python 3.9.1 documentation
python - Getting list of pixel values from PIL - Stack Overflow
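One caveat about the snippet above: if the PNG carries an alpha channel, pixels[x, y] returns a 4-tuple and the three-value unpacking fails. A small workaround sketch is to force plain RGB when opening the image:

from PIL import Image

# Force a plain RGB image so pixels[x, y] is always an (r, g, b) triple,
# whether or not the source PNG carried an alpha channel.
img = Image.open('img.png').convert('RGB')
pixels = img.load()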
you can try this:
import re
from PIL import Image

def rgb_to_hex(rgb_color):
    [r, g, b] = rgb_color
    assert 0 <= r <= 255
    assert 0 <= g <= 255
    assert 0 <= b <= 255
    r = hex(r).lstrip('0x')
    g = hex(g).lstrip('0x')
    b = hex(b).lstrip('0x')
    r = (2 - len(r)) * '0' + r
    g = (2 - len(g)) * '0' + g
    b = (2 - len(b)) * '0' + b
    hex_color = '#' + r + g + b
    return hex_color

img = Image.open('img.png')
pix_val = list(img.getdata())
x, y = img.size
a = 0
for element in pix_val:
    element = list(element)
    del element[-1]
    print(rgb_to_hex(element))
    # this line of code here:
    print(f"x:{a%x} y:{int(a/x)}")
    a += 1
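Equivalently (a small variant, not from the original answer), divmod gives both coordinates from the running index a and the width x in one step:

# a // x is the row (y) and a % x is the column (x-coordinate)
row, col = divmod(a, x)
print(f"x:{col} y:{row}")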

Mapping polar coordinates to a color (RGB, etc.)

Say I have 80 (or n) polar coordinates that are pretty evenly distributed across a circular area. I want a unique color for each polar coordinate.
If you imagine a color wheel like this (though it could be a different transformation if you like), I'd like one of its colors given a polar coordinate.
At first I was not using the actual polar coordinates, and just scaled one of the channels by some even stride, like RGB (255, i * stride, 255). But now I'd like different colors from all over the spectrum (or at least more than a single color tone).
I thought of just using an image of a color wheel and then sampling it, but that seems kind of weak. Isn't there a formula I could use to convert the polar coordinates to some assumed/generated RGB, HSV, or CMYK space?
I'm working in Python 3, but I'm mostly interested in the formulas/algorithm. I'm not using any specific plotting API.
You could use a conversion from HSV or HSL to RGB; many packages such as Colour (NumPy-vectorised) or python-colormath (vanilla Python) have implementations:
From Colour, assuming you have Numpy and the tsplit and tstack definitions:
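If you don't have those two utilities at hand, here is a minimal stand-in sketch (simplified placeholders, not Colour's actual implementations) that is sufficient for the functions below:

import numpy as np

# Minimal stand-ins for Colour's tsplit / tstack utilities: split the last
# axis into separate arrays, and stack separate arrays back along a new
# last axis.
def tsplit(a):
    a = np.asarray(a)
    return np.array([a[..., i] for i in range(a.shape[-1])])

def tstack(a):
    return np.stack(a, axis=-1)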
def RGB_to_HSV(RGB):
    """
    Converts from *RGB* colourspace to *HSV* colourspace.

    Parameters
    ----------
    RGB : array_like
        *RGB* colourspace array.

    Returns
    -------
    ndarray
        *HSV* array.

    Notes
    -----
    -   Input *RGB* colourspace array is in domain [0, 1].
    -   Output *HSV* colourspace array is in range [0, 1].

    References
    ----------
    -   :cite:`EasyRGBj`
    -   :cite:`Smith1978b`
    -   :cite:`Wikipediacg`

    Examples
    --------
    >>> RGB = np.array([0.49019608, 0.98039216, 0.25098039])
    >>> RGB_to_HSV(RGB)  # doctest: +ELLIPSIS
    array([ 0.2786738...,  0.744     ,  0.98039216])
    """

    maximum = np.amax(RGB, -1)
    delta = np.ptp(RGB, -1)

    V = maximum

    R, G, B = tsplit(RGB)

    S = np.asarray(delta / maximum)
    S[np.asarray(delta == 0)] = 0

    delta_R = (((maximum - R) / 6) + (delta / 2)) / delta
    delta_G = (((maximum - G) / 6) + (delta / 2)) / delta
    delta_B = (((maximum - B) / 6) + (delta / 2)) / delta

    H = delta_B - delta_G
    H = np.where(G == maximum, (1 / 3) + delta_R - delta_B, H)
    H = np.where(B == maximum, (2 / 3) + delta_G - delta_R, H)

    H[np.asarray(H < 0)] += 1
    H[np.asarray(H > 1)] -= 1
    H[np.asarray(delta == 0)] = 0

    HSV = tstack((H, S, V))

    return HSV


def HSV_to_RGB(HSV):
    """
    Converts from *HSV* colourspace to *RGB* colourspace.

    Parameters
    ----------
    HSV : array_like
        *HSV* colourspace array.

    Returns
    -------
    ndarray
        *RGB* colourspace array.

    Notes
    -----
    -   Input *HSV* colourspace array is in domain [0, 1].
    -   Output *RGB* colourspace array is in range [0, 1].

    References
    ----------
    -   :cite:`EasyRGBn`
    -   :cite:`Smith1978b`
    -   :cite:`Wikipediacg`

    Examples
    --------
    >>> HSV = np.array([0.27867384, 0.74400000, 0.98039216])
    >>> HSV_to_RGB(HSV)  # doctest: +ELLIPSIS
    array([ 0.4901960...,  0.9803921...,  0.2509803...])
    """

    H, S, V = tsplit(HSV)

    h = np.asarray(H * 6)
    h[np.asarray(h == 6)] = 0

    i = np.floor(h)
    j = V * (1 - S)
    k = V * (1 - S * (h - i))
    l = V * (1 - S * (1 - (h - i)))  # noqa

    i = tstack((i, i, i)).astype(np.uint8)

    RGB = np.choose(
        i, [
            tstack((V, l, j)),
            tstack((k, V, j)),
            tstack((j, V, l)),
            tstack((j, k, V)),
            tstack((l, j, V)),
            tstack((V, j, k)),
        ],
        mode='clip')

    return RGB


def RGB_to_HSL(RGB):
    """
    Converts from *RGB* colourspace to *HSL* colourspace.

    Parameters
    ----------
    RGB : array_like
        *RGB* colourspace array.

    Returns
    -------
    ndarray
        *HSL* array.

    Notes
    -----
    -   Input *RGB* colourspace array is in domain [0, 1].
    -   Output *HSL* colourspace array is in range [0, 1].

    References
    ----------
    -   :cite:`EasyRGBl`
    -   :cite:`Smith1978b`
    -   :cite:`Wikipediacg`

    Examples
    --------
    >>> RGB = np.array([0.49019608, 0.98039216, 0.25098039])
    >>> RGB_to_HSL(RGB)  # doctest: +ELLIPSIS
    array([ 0.2786738...,  0.9489796...,  0.6156862...])
    """

    minimum = np.amin(RGB, -1)
    maximum = np.amax(RGB, -1)
    delta = np.ptp(RGB, -1)

    R, G, B = tsplit(RGB)

    L = (maximum + minimum) / 2

    S = np.where(L < 0.5, delta / (maximum + minimum),
                 delta / (2 - maximum - minimum))
    S[np.asarray(delta == 0)] = 0

    delta_R = (((maximum - R) / 6) + (delta / 2)) / delta
    delta_G = (((maximum - G) / 6) + (delta / 2)) / delta
    delta_B = (((maximum - B) / 6) + (delta / 2)) / delta

    H = delta_B - delta_G
    H = np.where(G == maximum, (1 / 3) + delta_R - delta_B, H)
    H = np.where(B == maximum, (2 / 3) + delta_G - delta_R, H)

    H[np.asarray(H < 0)] += 1
    H[np.asarray(H > 1)] -= 1
    H[np.asarray(delta == 0)] = 0

    HSL = tstack((H, S, L))

    return HSL
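If you only need the direct mapping and not a full colour-science library, a minimal standard-library sketch (not part of Colour; it assumes the angle is in radians and the radius has been normalised to 0-1) maps the angle to hue and the radius to saturation, which reproduces the colour-wheel layout described in the question:

import math
import colorsys

def polar_to_rgb(theta, radius):
    # The angle drives the hue around the wheel; the normalised radius drives
    # the saturation; value is held at full brightness.
    hue = (theta % (2 * math.pi)) / (2 * math.pi)
    r, g, b = colorsys.hsv_to_rgb(hue, radius, 1.0)
    return int(r * 255), int(g * 255), int(b * 255)

# Hypothetical list of (theta, radius) points with radius already scaled to 0-1
points = [(i * 2 * math.pi / 80, 0.5) for i in range(80)]
colors = [polar_to_rgb(t, r) for t, r in points]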

rgb_to_hsv and backwards using python and numpy

I tried to execute this code as described in this answer, but I can't seem to get away from dividing by zero values.
I tried to copy this code from CamanJS for transforming from RGB to HSV, but I get the same thing:
RuntimeWarning: invalid value encountered in divide
The Caman code is:
Convert.rgbToHSV = function(r, g, b) {
  var d, h, max, min, s, v;
  r /= 255;
  g /= 255;
  b /= 255;
  max = Math.max(r, g, b);
  min = Math.min(r, g, b);
  v = max;
  d = max - min;
  s = max === 0 ? 0 : d / max;
  if (max === min) {
    h = 0;
  } else {
    h = (function() {
      switch (max) {
        case r:
          return (g - b) / d + (g < b ? 6 : 0);
        case g:
          return (b - r) / d + 2;
        case b:
          return (r - g) / d + 4;
      }
    })();
    h /= 6;
  }
  return {
    h: h,
    s: s,
    v: v
  };
};
My code, based on the answer from here:
import Image
import numpy as np

def rgb_to_hsv(rgb):
    hsv = np.empty_like(rgb)
    hsv[..., 3] = rgb[..., 3]
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    maxc = np.amax(rgb[..., :3], axis=-1)
    print maxc
    minc = np.amin(rgb[..., :3], axis=-1)
    print minc
    hsv[..., 2] = maxc
    dif = (maxc - minc)
    hsv[..., 1] = np.where(maxc == 0, 0, dif / maxc)
    #rc = (maxc-r)/(maxc-minc)
    #gc = (maxc-g)/(maxc-minc)
    #bc = (maxc-b)/(maxc-minc)
    hsv[..., 0] = np.select([dif == 0, r == maxc, g == maxc, b == maxc],
                            [np.zeros(maxc.shape),
                             (g - b) / dif + np.where(g < b, 6, 0),
                             (b - r) / dif + 2,
                             (r - g) / dif + 4])
    hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
    idx = (minc == maxc)
    hsv[..., 0][idx] = 0.0
    hsv[..., 1][idx] = 0.0
    return hsv
I get the warning in both places where I divide by maxc or by dif (because they contain zero values).
I encounter the same RuntimeWarning with the original code by @unutbu. Caman seems to do this for every pixel separately, that is, for every r, g, b combination.
I also get a ValueError, "shape mismatch: objects cannot be broadcast to a single shape", when the select function is executed. But I double-checked the shapes of all the choices and they are all (256, 256).
Edit:
I corrected the function using this Wikipedia article and updated the code... now I only get the RuntimeWarning.
The error comes from the fact that numpy.where (and numpy.select) computes all its arguments, even if they aren't used in the output. So in your line hsv[...,1] = np.where(maxc==0, 0, dif/maxc), dif / maxc is computed even for elements where maxc == 0, but then only the ones where maxc != 0 are used. This means that your output is fine, but you still get the RuntimeWarning.
If you want to avoid the warning (and make your code a little faster), do something like:
nz = maxc != 0 # find the nonzero values
hsv[nz, 1] = dif[nz] / maxc[nz]
You'll also have to change the numpy.select statement, because it also evaluates all its arguments.
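For that numpy.select line, one possible rewrite sketch (reusing r, g, b, maxc, dif and hsv from the question's function) is a sequence of boolean-mask assignments, so each division only touches pixels where dif is non-zero:

# Hue via masked assignments instead of np.select, so every division is
# restricted to the pixels that actually use its result.
h = np.zeros(maxc.shape)
nz = dif != 0
m = nz & (r == maxc)
h[m] = (g[m] - b[m]) / dif[m] + np.where(g[m] < b[m], 6.0, 0.0)
m = nz & (g == maxc)
h[m] = (b[m] - r[m]) / dif[m] + 2.0
m = nz & (b == maxc)
h[m] = (r[m] - g[m]) / dif[m] + 4.0
hsv[..., 0] = (h / 6.0) % 1.0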

Blur image using Python-errors

I need to blur an image by taking a kernel K and averaging the values in the 2D array and setting the center value to the average of K. Here is the code I have written to do so...
def Clamp(pix):
    pix = abs(pix)
    if pix > 255:
        pix = 255
    return pix

def Convolve2D(image1, K, image2):
    img = graphics.Image(graphics.Point(0, 0), image1)
    img.save(image2)
    secondimage = graphics.Image(graphics.Point(0, 0), image2)

    h = img.getHeight()
    w = img.getWidth()

    A = [[0]*h for y in range(w)]
    B = [[0]*w for x in range(h)]

    # iterate over all rows (ignore 1-pixel borders)
    for v in range(1, h-3):
        graphics.update()  # this updates the output for each row
        # for every row, iterate over all columns (again ignore 1-pixel borders)
        for u in range(1, w-3):
            #A[u][v] = 0
            #B[u][v] = 0
            # for every pixel, iterate over region of overlap between
            # input image and 3x3 kernel centered at current pixel
            for i in range(0, 3):
                for j in range(0, 3):
                    A[u][v] = A[u][v] + B[v+i][u+j] * K[i][j]
            r, g, b = img.getPixel(u, v)
            if (r * A[u][v] >= 255):
                Clamp(r)
            else:
                r = r * A[u][v]
            if (g * A[u][v] >= 255):
                Clamp(g)
            else:
                g = g * A[u][v]
            if (b * A[u][v] >= 255):
                Clamp(b)
            else:
                b = b * A[u][v]
            newcolor = graphics.color_rgb(r, g, b)
            secondimage.setPixel(u, v, newcolor)

    print("Not yet implemented")  # to be removed
    secondimage.save(image2)
    secondimage.move(secondimage.getWidth()/2, secondimage.getHeight()/2)
    win = graphics.GraphWin(secondimage, secondimage.getWidth(), secondimage.getHeight())
    secondimage.draw(win)

def Blur3(image1, image2):
    K = [[1/9, 1/9, 1/9], [1/9, 1/9, 1/9], [1/9, 1/9, 1/9]]
    return Convolve2D(image1, K, image2)
This is the image I am trying to blur
This is what comes out of my code
Is it possibly my if and else statements and the Clamp function that are doing this? I just want a blurred image to come out, like this:
do this :
for v in range(h):
    graphics.update()  # this updates the output for each row
    for u in range(w):
        for i in range(0, 3):
            for j in range(0, 3):
                if v-i >= 0 and u-j >= 0 and v+i <= 256 and u+j <= 256:
                    img[u][v] = img[u][v] + img[v-i][u-j] * K[i][j]
this should work!
Can you please tell me why you have two images A, B and blur image A using B? I mean :
A[u][v] = A[u][v] + B[v+i][u+j] * K[i][j]
Here I add code which works for a grayscale image; you can expand it for your needs!
import matplotlib.image as mpimg
import numpy as np

Img = mpimg.imread('GrayScaleImg.jpg')
kernel = towDimGuassKernel(size)

def conv2D(I, kernel):
    filterWidth = kernel.shape[0]
    half = filterWidth / 2
    bluredImg = np.zeros(I.shape)

    for imgRow in range(I.shape[0]):
        print imgRow
        for imgCol in range(I.shape[1]):
            for filterRow in range(filterWidth):
                for filterCol in range(filterWidth):
                    if imgRow-filterRow >= 0 and imgCol-filterCol >= 0 and imgRow+filterRow <= 256 and imgCol+filterCol <= 256:
                        bluredImg[imgRow, imgCol] += I[imgRow-filterRow, imgCol-filterCol] * kernel[filterRow, filterCol]

    return bluredImg
You've initialized A and B to empty lists with a size of 0. You need to initialize them instead to be the size of the image, in both dimensions.
A = [[0]*w for y in range(h)]
Edit: Your second problem is that you're defining the kernel with 1/9 which is an integer division yielding 0.
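A minimal fix sketch for that kernel: under Python 2 semantics 1/9 truncates to 0, so force float division (in Python 3 the original literal already yields a float):

# Each kernel weight becomes 1/9 ~= 0.111 instead of 0
K = [[1.0 / 9] * 3 for _ in range(3)]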
