Histogram Equalization without using built-in histogram methods in Python

I wrote the code below and what I get is the output below. What I am supposed to do is write a histogram equalization function (without built-in methods). I get no error, but the output is not what it should be. I could not spot any logic mistakes in my code, although while writing the loops for calculating the CDF and the mapping I couldn't follow exactly what happens behind them; maybe the problem is there, but I am not sure.
def my_float2int(img):
    img = np.round(img * 255, 0)
    img = np.minimum(img, 255)
    img = np.maximum(img, 0)
    img = img.astype('uint8')
    return img
def equalizeHistogram(img):
    img_height = img.shape[0]
    img_width = img.shape[1]
    histogram = np.zeros([256], np.int32)
    # calculate histogram
    for i in range(0, img_height):
        for j in range(0, img_width):
            histogram[img[i, j]] += 1
    # calculate pdf of the image
    pdf_img = histogram / histogram.sum()
    ### calculate cdf
    # cdf initialize
    cdf = np.zeros([256], np.int32)
    # for loop for cdf
    for i in range(0, 256):
        for j in range(0, i+1):
            cdf[i] += pdf_img[j]
    cdf_eq = np.round(cdf * 255, 0)  # mapping, transformation function T(x)
    imgEqualized = np.zeros((img_height, img_width))
    # for mapping input image to s.
    for i in range(0, img_height):
        for j in range(0, img_width):
            r = img[i, j]           # feeding intensity levels of pixels into r.
            s = cdf_eq[r]           # finding value of s by finding r'th position in the cdf_eq list.
            imgEqualized[i, j] = s  # mapping s thus creating new output image.
    # calculate histogram equalized image here
    # imgEqualized = s # change this
    return imgEqualized
# end of function
# 2.2 obtain the histogram equalized images using the above function
img_eq_low = equalizeHistogram(img_low)
img_eq_high = equalizeHistogram(img_high)
img_eq_low = my_float2int(img_eq_low)
img_eq_high = my_float2int(img_eq_high)
# 2.3 calculate the pdf's of the histogram equalized images
hist_img_eq_low = calcHistogram(img_eq_low)
hist_img_eq_high = calcHistogram(img_eq_high)
pdf_eq_low = hist_img_eq_low / hist_img_eq_low.sum()
pdf_eq_high = hist_img_eq_high / hist_img_eq_high.sum()
# 2.4 display the histogram equalized images and their pdf's
plt.figure(figsize=(14,8))
plt.subplot(121), plt.imshow(img_eq_low, cmap = 'gray', vmin=0, vmax=255)
plt.title('Hist. Equalized Low Exposure Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img_eq_high, cmap = 'gray', vmin=0, vmax=255)
plt.title('Hist. Equalized High Exposure Image'), plt.xticks([]), plt.yticks([])
plt.show()
plt.close()
My output:
Expected output (obtained with the built-in methods):
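For reference, that expected output can be reproduced with OpenCV's built-in equalizer (a minimal sketch, assuming img_low and img_high are 8-bit grayscale arrays):

import cv2
# cv2.equalizeHist expects a single-channel uint8 image
img_eq_low_builtin = cv2.equalizeHist(img_low)
img_eq_high_builtin = cv2.equalizeHist(img_high)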

I found two minor bugs, and one efficiency issue:
Replace cdf = np.zeros([256], np.int32) with cdf = np.zeros([256], float)
In the loop, you are putting float elements in cdf, so the type should be float instead of int32.
Replace img = np.round(img * 255, 0) with img = np.round(img, 0) (in my_float2int).
You are scaling img by 255 twice (the first time is in cdf_eq = np.round(cdf * 255, 0)).
You may compute cdf more efficiently.
Your implementation:
for i in range(0, 256):
    for j in range(0, i+1):
        cdf[i] += pdf_img[j]
Suggested implementation (more efficient way for computing "accumulated sum"):
cdf[0] = pdf_img[0]
for i in range(1, 256):
    cdf[i] = cdf[i-1] + pdf_img[i]
This is not a bug, just an efficiency (complexity) issue.
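If NumPy helpers are allowed for this step, np.cumsum gives the same accumulated sum in a single call (a sketch using the pdf_img array from above):

cdf = np.cumsum(pdf_img)   # cdf[i] == pdf_img[0] + ... + pdf_img[i]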
Here is an example of the corrected code (it uses only img_low):
import numpy as np
import cv2

def my_float2int(img):
    # Don't scale by 255 twice
    # img = np.round(img * 255, 0)
    img = np.round(img, 0)
    img = np.minimum(img, 255)
    img = np.maximum(img, 0)
    img = img.astype('uint8')
    return img

def equalizeHistogram(img):
    img_height = img.shape[0]
    img_width = img.shape[1]
    histogram = np.zeros([256], np.int32)
    # calculate histogram
    for i in range(0, img_height):
        for j in range(0, img_width):
            histogram[img[i, j]] += 1
    # calculate pdf of the image
    pdf_img = histogram / histogram.sum()
    ### calculate cdf
    # cdf initialize
    # Why was the type np.int32?
    # cdf = np.zeros([256], np.int32)
    cdf = np.zeros([256], float)
    # for loop for cdf
    for i in range(0, 256):
        for j in range(0, i+1):
            cdf[i] += pdf_img[j]
    # You may implement the "accumulated sum" in a more efficient way:
    cdf = np.zeros(256, float)
    cdf[0] = pdf_img[0]
    for i in range(1, 256):
        cdf[i] = cdf[i-1] + pdf_img[i]
    cdf_eq = np.round(cdf * 255, 0)  # mapping, transformation function T(x)
    imgEqualized = np.zeros((img_height, img_width))
    # for mapping input image to s.
    for i in range(0, img_height):
        for j in range(0, img_width):
            r = img[i, j]           # feeding intensity levels of pixels into r.
            s = cdf_eq[r]           # finding value of s by finding r'th position in the cdf_eq list.
            imgEqualized[i, j] = s  # mapping s thus creating new output image.
    # calculate histogram equalized image here
    # imgEqualized = s # change this
    return imgEqualized
# end of function

# Read input image as grayscale
img_low = cv2.imread('img_low.png', cv2.IMREAD_GRAYSCALE)
# 2.2 obtain the histogram equalized images using the above function
img_eq_low = equalizeHistogram(img_low)
img_eq_low = my_float2int(img_eq_low)
# Use cv2.imshow (instead of plt.imshow) just for testing.
cv2.imshow('img_eq_low', img_eq_low)
cv2.waitKey()
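As a quick sanity check, you can compare the custom result against OpenCV's built-in equalizer (a sketch; the two will not match exactly because OpenCV normalizes the CDF slightly differently, but they should be close):

img_eq_builtin = cv2.equalizeHist(img_low)
diff = cv2.absdiff(img_eq_low, img_eq_builtin)
print('max abs difference vs cv2.equalizeHist:', diff.max())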
Result:

Related

Average color of multiple images merged together

I have all these layers in Photoshop:
I also have these images in Python. In Photoshop, I can get the average color of the entire document from the RGB channel of the document. In Python, I was thinking I would blend the images together using cv2.addWeighted, and then use the histograms of the three channels to get the average color. However, my average color does not match with the one obtained from Photoshop.
Here is my code so far:
im1 = cv2.imread('a1.png')
im2 = cv2.imread('a2.png')
im3 = cv2.imread('a3.png')
alpha = 0.5
beta = (1.0 - alpha)
blended = cv2.addWeighted(im1, alpha, im2, beta, 0.0)
blended = cv2.addWeighted(blended, alpha, im3, beta, 0.0)
# cv2.imwrite('blended.png', blended)
averageColor = []
chans = cv2.split(blended)
colors = ("b", "g", "r")
for (chan, color) in zip(chans, colors):
    # create a histogram for the current channel and plot it
    hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
    for i in range(len(hist)):
        if (hist[i]) != 0:
            averageColor.append(i)
            break
print("Average color: ", averageColor)
base_color = np.full((aw, ah, 3), averageColor)   # note: np.full (np.fill does not exist)
base_color_layer = Image.fromarray(base_color)
base_color_layer.save("Base Color.png")
im3 is the "dup" layer containing only one color (129, 127, 121). im2 is all the Mask layers summed together using np.add. im1 is the Background image.
Any thoughts?
# given
im1 = cv2.imread('a1.png')
im2 = cv2.imread('a2.png')
im3 = cv2.imread('a3.png')
# calculations
averageIm = np.mean([im1, im2, im3], axis=0)
averageColor = averageIm.mean(axis=(0,1))
Assuming the images are color without alpha, and assuming you want each picture to have equal weight.
This is not equivalent to stacking the layers and giving each layer the same transparency, because that amounts to exponentially decaying weights, i.e. alpha * im1 + (1-alpha) * (alpha * im2 + (1-alpha) * im3), which gives an image successively less weight the lower it is on the stack. The effective weights go like [0.5, 0.25, 0.25], or [0.5, 0.25, 0.125, 0.0625, ...] for more layers.
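A tiny numeric sketch of that difference, using hypothetical 1x1 "images" and the same chained addWeighted calls as in the question:

import numpy as np
import cv2

a = np.full((1, 1, 3), 90, np.uint8)
b = np.full((1, 1, 3), 150, np.uint8)
c = np.full((1, 1, 3), 210, np.uint8)

# chained addWeighted: effective weights are 0.25, 0.25, 0.5
chained = cv2.addWeighted(cv2.addWeighted(a, 0.5, b, 0.5, 0.0), 0.5, c, 0.5, 0.0)
# plain mean: weights are 1/3 each
plain = np.mean([a, b, c], axis=0)

print(chained[0, 0], plain[0, 0])   # [165 165 165] vs [150. 150. 150.]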
Disclaimer: I would highly recommend doing this with the CUDA API (either in C or with the Python bindings), but since the question is about pure Python, see below.
I'm not going to add all the cv2 reading code because I don't know that API well, but let's assume you have images 1...n in a list called images, each image is a 3D array of height x width x channels, and all images are the same size.
Image_t = list[list[list[int]]]

def get_average_colour(images: list[Image_t]) -> Image_t:
    new_image: Image_t = []
    image_width = len(images[0][0])            # length of the first row in the first image
    image_height = len(images[0])              # number of rows
    colour_channel_size = len(images[0][0][0])
    for h in range(image_height):
        row = []
        for w in range(image_width):
            curr_pixel = [0] * colour_channel_size
            for img in images:
                for chan, val in enumerate(img[h][w]):
                    curr_pixel[chan] += val
            # this will average out your pixel values
            row.append(list(map(lambda v: int(v / len(images)), curr_pixel)))
        new_image.append(row)
    return new_image
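A minimal usage sketch with two hypothetical one-row, two-pixel RGB "images" given as nested lists:

images = [
    [[[0, 0, 0], [100, 100, 100]]],     # image 1: one row of two RGB pixels
    [[[200, 200, 200], [50, 50, 50]]],  # image 2: one row of two RGB pixels
]
print(get_average_colour(images))       # [[[100, 100, 100], [75, 75, 75]]]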

Why is my code only working on part of my image?

I created code to equalize the luminosity values of pixels in an image so that when the image is further edited I do not have dark or light spots in my final image. However, the code seems to stop short and only equalize part of my image. Any ideas as to why the code is stopping early?
Here is my code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img = mpimg.imread('EXP_0159-2_8b.tif')
imgOut = img.copy()

for i in range(0, len(img[0, :])):
    imgLine1 = (img[:, i] < 165) * img[:, i]
    p = imgLine1.nonzero()
    if len(p[0]) < 1:
        imgOut[:, i] == 0
    else:
        imgLine2 = imgLine1[p[0]]

        def curvefitting(lineFunction):
            x = np.arange(0, len(lineFunction))
            y = lineFunction
            curve = np.polyfit(x, y, deg = 2)
            a = curve[0]
            b = curve[1]
            c = curve[2]
            curveEquation = (a*(x**2)) + (b*(x**1)) + (c)
            curveCorrected = lineFunction - curveEquation + 200
            return curveCorrected

        imgLine1[p[0]] = curvefitting(imgLine2)
        imgOut[:, i] = imgLine1

plt.imshow(imgOut, cmap = 'gray')
The for loop takes the individual columns of pixels in my image and restricts the values in that column to the range (0, 165), so that pixels outside of that range are turned into zero and ignored by the nonzero() function. The if condition just finalizes the conversion of values outside (0, 165) to zero. Additionally, I converted the image to gray so I would not have to deal with colors and could focus only on luminosity.
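For example, this is what that masking step does to a single made-up column of values (a minimal sketch):

import numpy as np

column = np.array([50, 170, 90, 200, 120])
masked = (column < 165) * column      # values >= 165 become 0
p = masked.nonzero()                  # indices of the surviving pixels
print(masked)                         # [ 50   0  90   0 120]
print(p[0])                           # [0 2 4]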
This is my corrected image. The program works to average the luminosity values across the entire surface. However, you can see that it stops before reaching the end. The initial image was darker on the sides and lighter in the middle, but the file is too large to upload.
Any help is greatly appreciated.
If you are not interested in color, you can convert the input image to grayscale. That simplifies the matrix operations. The simplified version would be:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

def rgb2gray(rgb):
    return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])

def curvefitting(lineFunction):
    x = np.arange(0, len(lineFunction))
    y = lineFunction
    curve = np.polyfit(x, y, deg = 2)
    a = curve[0]
    b = curve[1]
    c = curve[2]
    curveEquation = [(a*(x_**2)) + (b*(x_**1)) + (c) for x_ in x]
    curveCorrected = lineFunction - curveEquation + 200
    return curveCorrected

img = mpimg.imread('EXP_0159-2_8b.tif')
img = rgb2gray(img)
imgOut = img.copy()

for i in range(0, len(img[0, :])):
    imgLine1 = (img[:, i] < 165) * img[:, i]
    p = imgLine1.nonzero()
    if len(p[0]) < 1:
        imgOut[:, i] = 0   # assignment (== would only compare, not assign)
    else:
        imgLine2 = imgLine1[p]
        imgLine1[p] = curvefitting(imgLine2)
        imgOut[:, i] = imgLine1

plt.imshow(imgOut, cmap = 'gray')
plt.show()
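As a side note, the list comprehension that evaluates the fitted polynomial could also be replaced with np.polyval, which does the same evaluation; a small sketch with made-up data:

x = np.arange(5)
y = np.array([200.0, 180.0, 170.0, 180.0, 200.0])
curve = np.polyfit(x, y, deg=2)
print(np.polyval(curve, x))   # same values as [(a*x_**2) + (b*x_) + c for x_ in x]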

How to warp a document image in Python correctly?

By using this link, I made the deformed mesh:
inputs = cv2.imread("../datasets/images/0.jpg")
nh, nw = inputs.shape[0]//8, inputs.shape[1]//8
inputs = cv2.resize(inputs, dsize=(nh, nw), interpolation=cv2.INTER_AREA)

mr = nh
mc = nw
xx = np.arange(mr-1, -1, -1)
yy = np.arange(0, mc, 1)
[Y, X] = np.meshgrid(xx, yy)
ms = np.transpose(np.asarray([X.flatten('F'), Y.flatten('F')]), (1, 0))

perturbed_mesh = ms
nv = np.random.randint(20) - 1
for k in range(nv):
    # Choosing one vertex randomly
    vidx = np.random.randint(np.shape(ms)[0])
    vtex = ms[vidx, :]
    # Vector between all vertices and the selected one
    xv = perturbed_mesh - vtex
    # Random movement
    mv = (np.random.rand(1, 2) - 0.5) * 20
    hxv = np.zeros((np.shape(xv)[0], np.shape(xv)[1] + 1))
    hxv[:, :-1] = xv
    hmv = np.tile(np.append(mv, 0), (np.shape(xv)[0], 1))
    d = np.cross(hxv, hmv)
    d = np.absolute(d[:, 2])
    d = d / (np.linalg.norm(mv, ord=2))
    wt = d
    curve_type = np.random.rand(1)
    if curve_type > 0.3:
        alpha = np.random.rand(1) * 50 + 50
        wt = alpha / (wt + alpha)
    else:
        alpha = np.random.rand(1) + 1
        wt = 1 - (wt / 100)**alpha
    msmv = mv * np.expand_dims(wt, axis=1)
    perturbed_mesh = perturbed_mesh + msmv
So I got a mesh like this:
Then I tried to map the source image pixels onto the generated mesh.
img = cv2.copyMakeBorder(inputs, dh, dh, dw, dw, borderType=cv2.BORDER_CONSTANT, value=(0,0,0))
xs, ys = perturbed_mesh[:, 0], perturbed_mesh[:, 1]
xs = xs.reshape(nh, nw).astype(np.float32)
ys = ys.reshape(nh, nw).astype(np.float32)
dst = cv2.remap(img, xs, ys, cv2.INTER_CUBIC)
plt.imshow(dst)
Finally, I got the result:
But this image has the document in the corner, so I can't use it.
How can I map the document onto the center of the image?
Here is an example of what I did for a perspective warp in Python/OpenCV. It will show you how I achieved the expanded view of the output. Not only did I increase the output size, but I also shifted the output control points. I shifted by +500 px and doubled that to +1000 for the output size.
Input:
No Expand Case:
import numpy as np
import cv2
# read input
img = cv2.imread("building.jpg")
# resize
height,width = 1000,1500
img = cv2.resize(img, (width,height))
# specify conjugate coordinates and shift output on left and top
pts1 = np.float32([[ 250, 0],[1220, 300],[1300, 770],[ 250, 860]])
pts2 = np.float32([[0,0],[width,0],[width,height],[0,height]])
# compute perspective matrix
matrix = cv2.getPerspectiveTransform(pts1,pts2)
print(matrix.shape)
print(matrix)
# convert image to BGRA with opaque alpha
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# do perspective transformation setting area outside input to transparent
# output size is not extended in this case
imgOutput = cv2.warpPerspective(img, matrix, (width,height), cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# resize output, since it is too large to post
imgOutput = cv2.resize(imgOutput, (width,height))
# save the warped output
cv2.imwrite("building_warped_unexpanded.png", imgOutput)
# show the result
cv2.imshow("result", imgOutput)
cv2.waitKey(0)
cv2.destroyAllWindows()
No Expand Warped Result:
Expanded Case:
import numpy as np
import cv2
# read input
img = cv2.imread("building.jpg")
# resize
height,width = 1000,1500
img = cv2.resize(img, (width,height))
# specify conjugate coordinates and shift output on left and top
pts1 = np.float32([[ 250, 0],[1220, 300],[1300, 770],[ 250, 860]])
pts2 = np.float32([[+500,+500],[width+500,+500],[width+500,height+500],[+500,height+500]])
# compute perspective matrix
matrix = cv2.getPerspectiveTransform(pts1,pts2)
print(matrix.shape)
print(matrix)
# convert image to BGRA with opaque alpha
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# do perspective transformation setting area outside input to transparent
# extend output size so extended by 500 all around
imgOutput = cv2.warpPerspective(img, matrix, (width+1000,height+1000), cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# resize output, since it is too large to post
imgOutput = cv2.resize(imgOutput, (width,height))
# save the warped output
cv2.imwrite("building_warped.jpg", imgOutput)
# show the result
cv2.imshow("result", imgOutput)
cv2.waitKey(0)
cv2.destroyAllWindows()
Expanded Result:
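As a variation (not part of the code above), the shift and output size can also be computed automatically from where the transform sends the input corners, instead of hard-coding +500/+1000. A sketch reusing img, matrix, width and height from the expanded example:

# transform the input corners and size the output from their bounding box
corners = np.float32([[[0, 0], [width, 0], [width, height], [0, height]]])
warped = cv2.perspectiveTransform(corners, matrix)[0]
x_min, y_min = np.floor(warped.min(axis=0)).astype(int)
x_max, y_max = np.ceil(warped.max(axis=0)).astype(int)
# prepend a translation so the warped content starts at (0, 0)
shift = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]], dtype=np.float64)
imgOutput = cv2.warpPerspective(img, shift @ matrix, (x_max - x_min, y_max - y_min))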

How can I block wrong lines in Hough space?

I found a Hough line implementation on GitHub, and I tried the code on my computer.
When I plot the Hough space with matplotlib, there is a build-up as shown in the picture, and this causes wrong line detections in the image.
def hough_line(img, angle_step=1, lines_are_white=True, value_threshold=5):
    # Rho and Theta ranges
    thetas = np.deg2rad(np.arange(-90.0, 90.0, angle_step))
    width, height = img.shape
    diag_len = int(round(math.sqrt(width * width + height * height)))
    rhos = np.linspace(-diag_len, diag_len, diag_len * 2)

    # Cache some reusable values
    cos_t = np.cos(thetas)
    sin_t = np.sin(thetas)
    num_thetas = len(thetas)

    # Hough accumulator array of theta vs rho
    accumulator = np.zeros((2 * diag_len, num_thetas), dtype=np.uint8)

    # (row, col) indexes to edges
    are_edges = img > value_threshold if lines_are_white else img < value_threshold
    y_idxs, x_idxs = np.nonzero(are_edges)

    # Vote in the hough accumulator
    for i in range(len(x_idxs)):
        x = x_idxs[i]
        y = y_idxs[i]
        for t_idx in range(num_thetas):
            # Calculate rho. diag_len is added for a positive index
            rho = diag_len + int(round(x * cos_t[t_idx] + y * sin_t[t_idx]))
            accumulator[rho, t_idx] += 1
    return accumulator, thetas, rhos

def show_hough_line(img, accumulator, thetas, rhos, save_path=None):
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(1, 2, figsize=(10, 10))
    ax[0].imshow(img, cmap=plt.cm.gray)
    ax[0].set_title('Input image')
    ax[0].axis('image')
    ax[1].imshow(
        accumulator, cmap='jet',
        extent=[np.rad2deg(thetas[-1]), np.rad2deg(thetas[0]), rhos[-1], rhos[0]])
    ax[1].set_aspect('equal', adjustable='box')
    ax[1].set_title('Hough transform')
    ax[1].set_xlabel('Angles (degrees)')
    ax[1].set_ylabel('Distance (pixels)')
    ax[1].axis('image')
    # plt.axis('off')
    if save_path is not None:
        plt.savefig(save_path, bbox_inches='tight')
    plt.show()
Then I run this code:
img = cv2.imread('05102009081.png')
img_dark = cv2.imread('05102009081-1.png')
img1 = cv2.bitwise_and(img, img_dark)
gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(blur, 100, 200)
accumulator, thetas, rhos = hough_line(edges)
#Thresholding with 100
a = (accumulator > 100).astype(int)
accumulator = accumulator * a
show_hough_line(edges, accumulator, thetas, rhos)
This is the result without thresholding
This is the result after thresholding
As you can see, when I apply thresholding to these edges there are some peak points at approximately 55 degrees and between 10-50 pixels in the Hough space, which cause wrong lines in the image.
What is the problem, and how can I solve it?
Thanks in advance.
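As background (not a fix for the spurious peaks themselves), each accumulator peak maps back to a line in the image; a minimal sketch of that mapping using the strongest peak from the accumulator above:

rho_idx, theta_idx = np.unravel_index(accumulator.argmax(), accumulator.shape)
rho, theta = rhos[rho_idx], thetas[theta_idx]   # rhos[i] is approximately i - diag_len
a, b = np.cos(theta), np.sin(theta)
x0, y0 = a * rho, b * rho
pt1 = (int(x0 + 2000 * (-b)), int(y0 + 2000 * a))
pt2 = (int(x0 - 2000 * (-b)), int(y0 - 2000 * a))
cv2.line(img1, pt1, pt2, (0, 0, 255), 2)        # draw the detected line for inspection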

How to halftone a black and white picture?

I want to divide a picture into equally big squares, measure the average grayscale level in each, and replace it with a blob, aka halftoning. This code gives me a picture, but it doesn't look right. Any ideas what could be wrong?
im = scipy.misc.imread("uggla.tif")

def halftoning(im):
    im = im.astype('float64')
    width, height = im.shape
    halftone_pic = np.zeros((width, height))
    for x in range(width):
        for y in range(height):
            floating_matrix = im[x:x + 1, y:y + 1]
            sum = np.sum(floating_matrix)
            mean = np.mean(sum)
            round = (mean > 128) * 255
            halftone_pic[x, y] = round
    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(im, cmap="gray")
    ax[1].imshow(halftone_pic, cmap="gray")
    plt.show()

halftoning(im)
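For reference, here is a minimal sketch of the "average each square, then threshold" idea described above, with a hypothetical block_size (this is plain block thresholding, not the dot-based halftone shown in the answer below):

import numpy as np

def block_threshold(im, block_size=8):
    im = im.astype('float64')
    out = np.zeros_like(im)
    for x in range(0, im.shape[0], block_size):
        for y in range(0, im.shape[1], block_size):
            block = im[x:x + block_size, y:y + block_size]
            # replace the whole square with black or white based on its mean
            out[x:x + block_size, y:y + block_size] = (block.mean() > 128) * 255
    return out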
Here's something that does what you want. It's essentially a simplification of the code in the accepted answer to the related question How to create CMYK halftone Images from a color image?:
from PIL import Image, ImageDraw, ImageStat

# Adaption of answer https://stackoverflow.com/a/10575940/355230
def halftone(img, sample, scale, angle=45):
    ''' Returns a halftone image created from the given input image `img`.
    `sample` (in pixels), determines the sample box size from the original
    image. The maximum output dot diameter is given by `sample` * `scale`
    (which is also the number of possible dot sizes). So `sample` == 1 will
    preserve the original image resolution, but `scale` must be > 1 to allow
    variations in dot size.
    '''
    img_grey = img.convert('L')     # Convert to greyscale.
    channel = img_grey.split()[0]   # Get grey pixels.
    channel = channel.rotate(angle, expand=1)

    size = channel.size[0]*scale, channel.size[1]*scale
    bitmap = Image.new('1', size)
    draw = ImageDraw.Draw(bitmap)

    for x in range(0, channel.size[0], sample):
        for y in range(0, channel.size[1], sample):
            box = channel.crop((x, y, x+sample, y+sample))
            mean = ImageStat.Stat(box).mean[0]
            diameter = (mean/255) ** 0.5
            edge = 0.5 * (1-diameter)
            x_pos, y_pos = (x+edge) * scale, (y+edge) * scale
            box_edge = sample * diameter * scale
            draw.ellipse((x_pos, y_pos, x_pos+box_edge, y_pos+box_edge),
                         fill=255)

    bitmap = bitmap.rotate(-angle, expand=1)
    width_half, height_half = bitmap.size
    xx = (width_half - img.size[0]*scale) / 2
    yy = (height_half - img.size[1]*scale) / 2
    bitmap = bitmap.crop((xx, yy, xx + img.size[0]*scale,
                          yy + img.size[1]*scale))
    return Image.merge('1', [bitmap])

# Sample usage
img = Image.open('uggla.tif')
img_ht = halftone(img, 8, 1)
img_ht.show()
Here are the results from using this as the input image:
Halftoned result produced:
