Normalizing the brightness of an image - python

def normalize_brightness(img: Image) -> Image:
    """
    Normalize the brightness of the given Image img by:
    1. computing the average brightness of the picture:
       - this can be done by calculating the average brightness of each pixel
         in img (the average brightness of each pixel is the sum of the values
         of red, blue and green of the pixel, divided by 3 as a float division)
       - the average brightness of the picture is then the sum of all the
         pixel averages, divided by the product of the width and height of img
    2. find the factor, let's call it x, which we can multiply the
       average brightness by to get the value of 128
    3. multiply the colors in each pixel by this factor x
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    h = 0.0
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            avg = sum(pixels[i, j]) / 3
            h += avg
    total_avg = h / (img_width * img_height)
    x = int(128 // total_avg)
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            pixels[i, j] = (min(255, r * x), min(255, g * x), min(255, b * x))
    return img
So basically that is my take on the function described by the docstring, but for some reason it does not work. I believe my calculations follow the steps the docstring describes, but I am lost as to where to go from here.
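One likely culprit is the line x = int(128 // total_avg): it truncates the scale factor to an integer, so it becomes 0 whenever the average brightness is above 128 and loses most of its precision otherwise. A minimal sketch of the same steps with a float factor (assuming a PIL RGB image, as the docstring implies):

from PIL import Image

def normalize_brightness(img: Image.Image) -> Image.Image:
    img_width, img_height = img.size
    pixels = img.load()

    # 1. average brightness of the picture (float division throughout)
    total = 0.0
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            total += (r + g + b) / 3
    total_avg = total / (img_width * img_height)

    # 2. factor that maps the average brightness to 128 (keep it a float)
    x = 128 / total_avg

    # 3. scale every channel, clamping to the valid 0-255 range
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            pixels[i, j] = (min(255, int(r * x)),
                            min(255, int(g * x)),
                            min(255, int(b * x)))
    return img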

Related

Black pixels when saving MRI DICOM after applying LUT

I'm trying to apply a LUT to MR images using the following code. The original DICOM pixel intensities range from 0 to 4334, and after applying the LUT function they range from 0 to 254. I need to preserve the metadata for the resulting image. When I plot the result it displays correctly, but when I save the image it is all black pixels. I know I need to change some metadata tags, but I'm new to DICOM image processing and couldn't figure out what is causing the problem.
def make_lut(dcm_data, width, center, p_i):
    """
    LUT: look-up tables
    VOI: volume of interest
    """
    slope = 1.0
    intercept = 0.0
    min_px = int(np.amin(dcm_data))
    max_px = int(np.amax(dcm_data))
    lut = [0] * (max_px + 1)
    invert = False
    if p_i == "MONOCHROME1":
        invert = True
    else:
        center = (max_px - min_px) - center
    for px_value in range(min_px, max_px):
        lut_value = px_value * slope + intercept
        voi_value = (((lut_value - center) / width + 0.5) * 255.0)
        clamped_value = min(max(voi_value, 0), 255)
        if invert:
            lut[px_value] = round(255 - clamped_value)
        else:
            lut[px_value] = round(clamped_value)
    return lut

def apply_lut(pixels_in, lut):
    pixels = pixels_in.flatten()
    pixels_out = [0] * len(pixels)
    for i in range(0, len(pixels)):
        pixel = pixels[i]
        if pixel > 0:
            pixels_out[i] = int(lut[pixel])
    return np.reshape(pixels_out, (pixels_in.shape[0], pixels_in.shape[1]))

# apply the function
idx = 30
ds = pydicom.dcmread(dcm_files[idx])
raw_pixels = ds.pixel_array
if ds.WindowWidth != '' and ds.WindowCenter != '':
    window_width = ds.WindowWidth
    window_center = ds.WindowCenter
lut = make_lut(raw_pixels, window_width, window_center, ds.PhotometricInterpretation)
dcm_default_windowing = apply_lut(raw_pixels, lut)

# save the result
ds.PixelData = dcm_default_windowing.tobytes()
ds.save_as("test_luted.dcm")

Implementing my own algorithm to scale and rotate images in python

I am trying to implement an algorithm in python to scale images by a factor or rotate them by a given angle (or both at the same time). I am using OpenCV to handle the images, and I know OpenCV has these functions built in; however, I want to do this myself to better understand image transformations. I believe I calculate the rotation matrix correctly. However, when I try to implement the affine transformation, it does not come out correctly.
import numpy as np
import cv2
import math as m
import sys

img = cv2.imread(sys.argv[1])
angle = sys.argv[2]

#get rotation matrix
def getRMat((cx, cy), angle, scale):
    a = scale*m.cos(angle*np.pi/180)
    b = scale*(m.sin(angle*np.pi/180))
    u = (1-a)*cx-b*cy
    v = b*cx+(1-a)*cy
    return np.array([[a,b,u], [-b,a,v]])

#determine shape of img
h, w = img.shape[:2]
#print h, w
#determine center of image
cx, cy = (w / 2, h / 2)
#calculate rotation matrix
#then grab sine and cosine of the matrix
mat = getRMat((cx,cy), -int(angle), 1)
print mat
cos = np.abs(mat[0,0])
sin = np.abs(mat[0,1])
#calculate new height and width to account for rotation
newWidth = int((h * sin) + (w * cos))
newHeight = int((h * cos) + (w * sin))
#print newWidth, newHeight
mat[0,2] += (newWidth / 2) - cx
mat[1,2] += (newHeight / 2) - cy

#this is how the image SHOULD look
dst = cv2.warpAffine(img, mat, (newWidth, newHeight))
cv2.imshow('dst', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()

#apply transform
#attempt at my own warp affine function...still buggy tho
def warpAff(image, matrix, (width, height)):
    dst = np.zeros((width, height, 3), dtype=np.uint8)
    oldh, oldw = image.shape[:2]
    #print oldh, oldw
    #loop through old img and transform its coords
    for x in range(oldh):
        for y in range(oldw):
            #print y, x
            #transform the coordinates
            u = int(x*matrix[0,0]+y*matrix[0,1]+matrix[0,2])
            v = int(x*matrix[1,0]+y*matrix[1,1]+matrix[1,2])
            #print u, v
            #v -= width / 1.5
            if (u >= 0 and u < height) and (v >= 0 and v < width):
                dst[u,v] = image[x,y]
    return dst

dst = warpAff(img, mat, (newWidth, newHeight))
cv2.imshow('dst', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
Image I am using for testing
You're applying the rotation backward.
This means that for an angle of 20, instead of rotating 20 degrees clockwise, you rotate 20 degrees counterclockwise. That on its own would be easy to fix—just negate the angle.
But it also means that, for each destination pixel, if no source pixel rotates exactly onto it, you end up with an all-black pixel. You could solve that with an interpolation algorithm, but that makes things more complicated.
If we instead just reverse the process, and instead of calculating the destination (u, v) for each (x, y), we calculate the source (x, y) for every destination (u, v), that solves both problems:
def warpAff(image, matrix, width, height):
    dst = np.zeros((width, height, 3), dtype=np.uint8)
    oldh, oldw = image.shape[:2]
    # Loop over the destination, not the source, to ensure that you cover
    # every destination pixel exactly 1 time, rather than 0-4 times.
    for u in range(width):
        for v in range(height):
            x = u*matrix[0,0]+v*matrix[0,1]+matrix[0,2]
            y = u*matrix[1,0]+v*matrix[1,1]+matrix[1,2]
            intx, inty = int(x), int(y)
            # We could interpolate here by using something like this linear
            # interpolation matrix, but let's keep it simple and not do that.
            # fracx, fracy = x%1, y%1
            # interp = np.array([[fracx*fracy, (1-fracx)*fracy],
            #                    [fracx*(1-fracy), (1-fracx)*(1-fracy)]])
            if 0 < x < oldw and 0 < y < oldh:
                dst[u, v] = image[intx, inty]
    return dst
Now the only remaining problem is that you didn't apply the shift backward, so we end up shifting the image in the wrong direction when we turn everything else around. That's trivial to fix:
mat[0,2] += cx - (newWidth / 2)
mat[1,2] += cy - (newHeight / 2)
You do have one more problem: your code (and this updated code) only works for square images. You're getting height and width backward multiple times, and they almost all cancel out, but apparently one of them doesn't. In general, you're treating your arrays as (width, height) rather than (height, width), but you end up comparing to (original version) or looping over (new version) (height, width). So, if height and width are different, you end up trying to write past the end of the array.
Trying to find all of these and fix them is probably as much work as just starting over and doing it consistently everywhere from the start:
mat = getRMat(cx, cy, int(angle), 1)
cos = np.abs(mat[0,0])
sin = np.abs(mat[0,1])
newWidth = int((h * sin) + (w * cos))
newHeight = int((h * cos) + (w * sin))
mat[0,2] += cx - (newWidth / 2)
mat[1,2] += cy - (newHeight / 2)

def warpAff2(image, matrix, width, height):
    dst = np.zeros((height, width, 3), dtype=np.uint8)
    oldh, oldw = image.shape[:2]
    for u in range(width):
        for v in range(height):
            x = u*matrix[0,0]+v*matrix[0,1]+matrix[0,2]
            y = u*matrix[1,0]+v*matrix[1,1]+matrix[1,2]
            intx, inty = int(x), int(y)
            if 0 < intx < oldw and 0 < inty < oldh:
                pix = image[inty, intx]
                dst[v, u] = pix
    return dst
dst = warpAff2(img, mat, newWidth, newHeight)
It's worth noting that there are much simpler (and more efficient) ways to implement this. If you build a 3x3 square matrix, you can vectorize the multiplication. Also, you can create the matrix more simply by composing a shift matrix, a rotation matrix, and an unshift matrix instead of manually fixing things up after the fact. But hopefully this version, since it's as close as possible to your original, should be easiest to understand.
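For reference, a rough sketch of that matrix-composition idea (the helper name and the 3x3 layout below are my own illustration, not code from the answer above):

import numpy as np

def rotation_about_center(cx, cy, angle_deg, scale=1.0):
    """Shift the center to the origin, rotate/scale, then shift back."""
    a = np.radians(angle_deg)
    c, s = scale * np.cos(a), scale * np.sin(a)
    unshift = np.array([[1, 0, -cx], [0, 1, -cy], [0, 0, 1]], dtype=float)
    rotate = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=float)
    shift = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1]], dtype=float)
    return shift @ rotate @ unshift  # matrices apply right to left

With the full 3x3 matrix you can also map all destination coordinates back to source coordinates in one vectorized NumPy expression instead of a double Python loop.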

Error displaying image from array of pixels in python

I have a (numpy) array of pixels acquired as:
''' import numpy and matplotlib '''
image = Image.open('trollface.png', 'r')
width, height = image.size
pixel_values = list(image.getdata())
pixel_values = np.array(pixel_values).reshape((width, height, 3)) # 3 channels RGB
#height, width = len(pixel_values), len(pixel_values[0])
I need to compute the digital negative of this image:
for y in range(0, height):
    for x in range(0, width):
        R, G, B = pixel_values[x, y]
        pixel_values[x, y] = (255 - R, 255 - G, 255 - B)
I tried displaying an image from the above pixels with the help of this thread:
plt.imshow(np.array(pixel_values).reshape(width,height,3))
plt.show()
But it just displays a blank (white) window, with this error in CLI:
The aim here is to achieve a negative transformation of an image.
Pixel translations can be applied directly to the R, G, B bands using the Image.point method.
image = Image.open('trollface.png')
source = image.split()
r, g, b, a = 0, 1, 2, 3
negate = lambda i: 255 - i
transform = [source[band].point(negate) for band in (r, g, b)]
if len(source) == 4:  # should have 4 bands for images with alpha channel
    transform.append(source[a])  # add alpha channel
out = Image.merge(image.mode, transform)
out.save('negativetrollface.png')
EDIT: Using the OP's procedure, you have:
im = Image.open('trollface.png')
w, h = im.size
arr = np.array(im)
original_shape = arr.shape
arr_to_dim = arr.reshape((w, h, 4))
# Note that this loop is expensive.
# Always take advantage of array manipulation implemented in the C bindings.
for x in range(0, w):
    for y in range(0, h):
        r, g, b, a = arr_to_dim[x, y]
        arr_to_dim[x, y] = np.array([255 - r, 255 - g, 255 - b, a])
dim_to_arr = arr_to_dim.reshape(original_shape)
im = Image.fromarray(dim_to_arr)
im.save('negativetrollface.png')
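Following up on the comment about using the C-level array operations, a minimal vectorized sketch (assuming an RGBA array, as above) inverts every pixel in one step:

import numpy as np
from PIL import Image

im = Image.open('trollface.png')
arr = np.array(im)
# invert only the colour channels, leaving the alpha channel untouched
arr[..., :3] = 255 - arr[..., :3]
Image.fromarray(arr).save('negativetrollface.png')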

How to halftone a black and white picture?

I want to divide a picture into equally sized squares, measure the average grayscale level in each, and replace the square with a blob, a.k.a. halftoning. This code gives me a picture, but it doesn't look right. Any ideas what could be wrong?
im = scipy.misc.imread("uggla.tif")

def halftoning(im):
    im = im.astype('float64')
    width, height = im.shape
    halftone_pic = np.zeros((width, height))
    for x in range(width):
        for y in range(height):
            floating_matrix = im[x:x + 1, y:y + 1]
            sum = np.sum(floating_matrix)
            mean = np.mean(sum)
            round = (mean > 128) * 255
            halftone_pic[x, y] = round
    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(im, cmap="gray")
    ax[1].imshow(halftone_pic, cmap="gray")
    plt.show()
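As an aside, the most likely problem in the snippet above is that im[x:x + 1, y:y + 1] selects a single pixel rather than a square block, so the loop reduces to per-pixel thresholding. A minimal sketch of averaging over real blocks (the block size of 8 is an assumption) would be:

import numpy as np

def block_threshold(im, size=8):
    """Replace each size-by-size block with 0 or 255 based on its mean."""
    im = im.astype('float64')
    height, width = im.shape
    out = np.zeros_like(im)
    for y in range(0, height, size):
        for x in range(0, width, size):
            block = im[y:y + size, x:x + size]
            out[y:y + size, x:x + size] = 255 if block.mean() > 128 else 0
    return out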
Here's something that does what you want. It's essentially a simplification of the code in the accepted answer to the related question How to create CMYK halftone Images from a color image?:
from PIL import Image, ImageDraw, ImageStat

# Adaption of answer https://stackoverflow.com/a/10575940/355230
def halftone(img, sample, scale, angle=45):
    ''' Returns a halftone image created from the given input image `img`.
    `sample` (in pixels), determines the sample box size from the original
    image. The maximum output dot diameter is given by `sample` * `scale`
    (which is also the number of possible dot sizes). So `sample` == 1 will
    preserve the original image resolution, but `scale` must be > 1 to allow
    variations in dot size.
    '''
    img_grey = img.convert('L')  # Convert to greyscale.
    channel = img_grey.split()[0]  # Get grey pixels.
    channel = channel.rotate(angle, expand=1)
    size = channel.size[0]*scale, channel.size[1]*scale
    bitmap = Image.new('1', size)
    draw = ImageDraw.Draw(bitmap)

    for x in range(0, channel.size[0], sample):
        for y in range(0, channel.size[1], sample):
            box = channel.crop((x, y, x+sample, y+sample))
            mean = ImageStat.Stat(box).mean[0]
            diameter = (mean/255) ** 0.5
            edge = 0.5 * (1-diameter)
            x_pos, y_pos = (x+edge) * scale, (y+edge) * scale
            box_edge = sample * diameter * scale
            draw.ellipse((x_pos, y_pos, x_pos+box_edge, y_pos+box_edge),
                         fill=255)

    bitmap = bitmap.rotate(-angle, expand=1)
    width_half, height_half = bitmap.size
    xx = (width_half - img.size[0]*scale) / 2
    yy = (height_half - img.size[1]*scale) / 2
    bitmap = bitmap.crop((xx, yy, xx + img.size[0]*scale,
                          yy + img.size[1]*scale))
    return Image.merge('1', [bitmap])

# Sample usage
img = Image.open('uggla.tif')
img_ht = halftone(img, 8, 1)
img_ht.show()
Here are the results from using this as the input image:
Halftoned result produced:

Is there any function in openCV or another library that can tile squares within an arbitrary contour?

I have images where I've found some contours around dogs, e.g.:
I want to tile squares/rectangles inside of the contour. Is there an openCV (or other library) function for this? I'm using Python. I'd like it to look something like this:
I was able to solve this by first drawing rectangles over the entire image, then checking which ones were in the area with the dog:
# the image here is stored as the variable fg
# with b, g, r, and alpha channels
# the alpha channel is masking the dog part of the image
import cv2

b, g, r, a = cv2.split(fg)
fgcp = fg.copy()
h, w = fg.shape[:2]
h -= 1
w -= 1  # avoid indexing error
rectDims = [10, 10]  # w, h of rectangles
hRects = h // rectDims[0]  # integer division so range() gets ints
wRects = w // rectDims[1]
for i in range(wRects):
    for j in range(hRects):
        pt1 = (i * rectDims[0], j * rectDims[1])
        pt2 = ((i + 1) * rectDims[0], (j + 1) * rectDims[1])
        # alpha is 255 over the part of the dog
        if a[pt1[1], pt1[0]] == 255 and a[pt2[1], pt2[0]] == 255:
            cv2.rectangle(fgcp, pt1, pt2, [0, 0, 255], 2)
cv2.imshow('', fgcp), cv2.waitKey(0)
It's not necessarily the ideal implementation, but it works well enough.
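If the two-corner check lets too many edge rectangles through, a slightly stricter variant (my own suggestion, not part of the answer above) is to require the whole block of the alpha mask to be 255:

# inside the same double loop, replace the corner test with a full-block test
block = a[pt1[1]:pt2[1], pt1[0]:pt2[0]]
if block.size and block.min() == 255:
    cv2.rectangle(fgcp, pt1, pt2, [0, 0, 255], 2)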
