How to augment scanned document image with creases, folds and wrinkles? - python

I am creating a synthetic dataset to train a model that needs to find documents in an image. The documents will be far from perfect, i.e. they have been folded, creased, and crinkled.
I could find a few ways of doing this in Photoshop, but I was wondering if someone has a better idea for doing this augmentation in OpenCV, without trying to reverse engineer the Photoshop process.
For example, folds (from https://www.photoshopessentials.com/photo-effects/folds-creases/) or crinkles (from https://www.myjanee.com/tuts/crumple/crumple.htm):

I have tried to put all of your distortions together in one script in Python/OpenCV.
Input:
Wrinkles:
import cv2
import numpy as np
import math
import skimage.exposure
# read desert car image and convert to float in range 0 to 1
img = cv2.imread('desert_car.png').astype("float32") / 255.0
hh, ww = img.shape[:2]
# read wrinkle image as grayscale and convert to float in range 0 to 1
wrinkles = cv2.imread('wrinkles.jpg',0).astype("float32") / 255.0
# resize wrinkles to same size as desert car image
wrinkles = cv2.resize(wrinkles, (ww,hh), fx=0, fy=0)
# apply linear transform to stretch wrinkles to make shading darker
#wrinkles = skimage.exposure.rescale_intensity(wrinkles, in_range=(0,1), out_range=(0,1)).astype(np.float32)
# shift image brightness so mean is (near) mid gray
mean = np.mean(wrinkles)
shift = mean - 0.4
wrinkles = cv2.subtract(wrinkles, shift)
# create folds image as diagonal grayscale gradient as float as plus and minus equal amount
hh1 = math.ceil(hh/2)
ww1 = math.ceil(ww/3)
val = math.sqrt(0.2)
grady = np.linspace(-val, val, hh1, dtype=np.float32)
gradx = np.linspace(-val, val, ww1, dtype=np.float32)
grad1 = np.outer(grady, gradx)
# flip grad in different directions
grad2 = cv2.flip(grad1, 0)
grad3 = cv2.flip(grad1, 1)
grad4 = cv2.flip(grad1, -1)
# concatenate to form folds image
foldx1 = np.hstack([grad1-0.1,grad2,grad3])
foldx2 = np.hstack([grad2+0.1,grad3,grad1+0.2])
folds = np.vstack([foldx1,foldx2])
#folds = (1-val)*folds[0:hh, 0:ww]
folds = folds[0:hh, 0:ww]
# add the folds image to the wrinkles image
wrinkle_folds = cv2.add(wrinkles, folds)
# draw creases as blurred lines on black background
creases = np.full((hh,ww), 0, dtype=np.float32)
ww2 = 2*ww1
cv2.line(creases, (0,hh1), (ww-1,hh1), 0.25, 1)
cv2.line(creases, (ww1,0), (ww1,hh-1), 0.25, 1)
cv2.line(creases, (ww2,0), (ww2,hh-1), 0.25, 1)
# blur crease image
creases = cv2.GaussianBlur(creases, (3,3), 0)
# add crease to wrinkles_fold image
wrinkle_folds_creases = cv2.add(wrinkle_folds, creases)
# threshold wrinkles and invert
thresh = cv2.threshold(wrinkle_folds_creases,0.7,1,cv2.THRESH_BINARY)[1]
thresh = cv2.cvtColor(thresh,cv2.COLOR_GRAY2BGR)
thresh_inv = 1-thresh
# convert from grayscale to bgr
wrinkle_folds_creases = cv2.cvtColor(wrinkle_folds_creases,cv2.COLOR_GRAY2BGR)
# do hard light composite and convert to uint8 in range 0 to 255
# see CSS specs at https://www.w3.org/TR/compositing-1/#blendinghardlight
low = 2.0 * img * wrinkle_folds_creases
high = 1 - 2.0 * (1-img) * (1-wrinkle_folds_creases)
result = ( 255 * (low * thresh_inv + high * thresh) ).clip(0, 255).astype(np.uint8)
# save results
cv2.imwrite('desert_car_wrinkles_adjusted.jpg',(255*wrinkles).clip(0,255).astype(np.uint8))
cv2.imwrite('desert_car_wrinkles_folds.jpg', (255*wrinkle_folds).clip(0,255).astype(np.uint8))
cv2.imwrite('wrinkle_folds_creases.jpg', (255*wrinkle_folds_creases).clip(0,255).astype(np.uint8))
cv2.imwrite('desert_car_result.jpg', result)
# show results
cv2.imshow('wrinkles', wrinkles)
cv2.imshow('wrinkle_folds', wrinkle_folds)
cv2.imshow('wrinkle_folds_creases', wrinkle_folds_creases)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Wrinkles adjusted:
Wrinkles with folds:
Wrinkles with folds and creases:
Result:

The proper way to apply the wrinkles to the image is to use hardlight blending in Python/OpenCV.
Read the (cat) image as grayscale and convert to range 0 to 1
Read the wrinkles image as grayscale and convert to range 0 to 1
Resize the wrinkles image to the same dimensions as the cat image
Linearly stretch the wrinkles dynamic range to make the wrinkles more contrasted
Threshold the wrinkles image and also get its inverse
Shift the brightness of the wrinkles image so that the mean is mid-gray (important for hard light composition)
Convert the wrinkles image to 3 channel gray
Apply the hard light composition
Save the results.
Cat image:
Wrinkle image:
import cv2
import numpy as np
# read cat image and convert to float in range 0 to 1
img = cv2.imread('cat.jpg').astype("float32") / 255.0
hh, ww = img.shape[:2]
# read wrinkle image as grayscale and convert to float in range 0 to 1
wrinkles = cv2.imread('wrinkles.jpg',0).astype("float32") / 255.0
# resize wrinkles to same size as cat image
wrinkles = cv2.resize(wrinkles, (ww,hh), fx=0, fy=0)
# apply linear transform to stretch wrinkles to make shading darker
# C = A*x+B
# x=1 -> 1; x=0.25 -> 0
# 1 = A + B
# 0 = 0.25*A + B
# Solve simultaneous equations to get:
# A = 1.33
# B = -0.33
wrinkles = 1.33 * wrinkles - 0.33
# threshold wrinkles and invert
thresh = cv2.threshold(wrinkles,0.5,1,cv2.THRESH_BINARY)[1]
thresh = cv2.cvtColor(thresh,cv2.COLOR_GRAY2BGR)
thresh_inv = 1-thresh
# shift image brightness so mean is mid gray
mean = np.mean(wrinkles)
shift = mean - 0.5
wrinkles = cv2.subtract(wrinkles, shift)
# convert wrinkles from grayscale to bgr
wrinkles = cv2.cvtColor(wrinkles,cv2.COLOR_GRAY2BGR)
# do hard light composite and convert to uint8 in range 0 to 255
# see CSS specs at https://www.w3.org/TR/compositing-1/#blendinghardlight
low = 2.0 * img * wrinkles
high = 1 - 2.0 * (1-img) * (1-wrinkles)
result = ( 255 * (low * thresh_inv + high * thresh) ).clip(0, 255).astype(np.uint8)
# save results
cv2.imwrite('cat_wrinkled.jpg', result)
# show results
cv2.imshow('Wrinkles', wrinkles)
cv2.imshow('Result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Wrinkled Cat image:

This is not an answer to your question. It's more about using a blending mode suitable for your application. See more details about blending modes on the Wikipedia page. This might help you address the quality loss. The following code implements the first few blend modes under Multiply and Screen from the wiki page. It does not address the Plastic Wrap filter or the effects added using the brushes in the Photoshop tutorial you refer to.
You'll still have to generate the overlays (image b in the code), and I agree with Nelly's comment regarding augmentation.
import cv2 as cv
import numpy as np
a = cv.imread("image.jpg").astype(np.float32)/255.0
b = cv.imread("gradients.jpg").astype(np.float32)/255.0
multiply_blended = a*b
multiply_blended = (255*multiply_blended).astype(np.uint8)
screen_blended = 1 - (1 - a)*(1 - b)
screen_blended = (255*screen_blended).astype(np.uint8)
overlay_blended = 2*a*b*(a < 0.5).astype(np.float32) + (1 - 2*(1 - a)*(1 - b))*(a >= 0.5).astype(np.float32)
overlay_blended = (255*overlay_blended).astype(np.uint8)
photoshop_blended = (2*a*b + a*a*(1 - 2*b))*(b < 0.5).astype(np.float32) + (2*a*(1 - b) + np.sqrt(a)*(2*b - 1))*(b >= 0.5).astype(np.float32)
photoshop_blended = (255*photoshop_blended).astype(np.uint8)
pegtop_blended = (1 - 2*b)*a*a + 2*b*a
pegtop_blended = (255*pegtop_blended).astype(np.uint8)
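None of the blended results are written out above; to inspect them on disk, something like this works (a small addition, with filenames of my own choosing):
cv.imwrite("image_multiply.jpg", multiply_blended)
cv.imwrite("image_screen.jpg", screen_blended)
cv.imwrite("image_overlay.jpg", overlay_blended)
cv.imwrite("image_ps_softlight.jpg", photoshop_blended)
cv.imwrite("image_pegtop_softlight.jpg", pegtop_blended)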
Photoshop Soft Light:

Without too much work I came up with this result. It's far from perfect, but I think it's in the right direction.
from PIL import Image, ImageDraw, ImageFilter
import requests
from io import BytesIO
response = requests.get('https://icatcare.org/app/uploads/2018/07/Thinking-of-getting-a-cat.png')
img1 = Image.open(BytesIO(response.content))
response = requests.get('https://st2.depositphotos.com/5579432/8172/i/950/depositphotos_81721770-stock-photo-paper-texture-crease-white-paper.jpg')
img2 = Image.open(BytesIO(response.content)).resize(img1.size).convert(img1.mode)  # modes must match for Image.blend
final_img = Image.blend(img1, img2, 0.5)
From this:
And this:
We get this (blend 0.5):
Or this (blend 0.333):
Here is also one with folds:
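The folds version uses the same Image.blend call with a fold texture in place of the crumpled paper (a sketch; 'folds.jpg' is a placeholder for whatever fold texture you have on hand):
img3 = Image.open('folds.jpg').resize(img1.size).convert(img1.mode)  # hypothetical local fold texture
folds_img = Image.blend(img1, img3, 0.5)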

As you are creating a static synthetic dataset, a more realistic and possibly simpler solution is to use DocCreator to randomly generate the dataset for you.
With the given sample:
One can generate the following dataset
via Image > Degradation > Color Degradation > 3D distortion.
Then you choose the mesh (Load mesh...) and finally hit the Save random images... button and select the constraints.
Generating a dataset with more subtle distortions is possible by changing the Phy and Theta upper and lower bounds.
The project offers a demo that allows one to better assess whether it is applicable to your purposes.

Related

Subtract vignetting template from image in OpenCV Python

I have 750+ images, like this 'test.png', from which I need to subtract the vignetting in 'vig-raw.png'. I just started using opencv-python, so "I don't even know what I don't know".
Using GIMP, I desaturated 'vig-raw.png' to create 'vig-desat.png', which I then converted with Color to Alpha to create 'vig-alpha.png'.
This is my attempt to subtract 'vig-alpha.png' from 'test.png'.
import cv2 as cv
import numpy as np
img1 = cv.imread('test.png') # read as 3-channel BGR
img1 = cv.cvtColor(img1, cv.COLOR_BGR2BGRA) # add alpha channel to BGR image
print(img1[0][0]) # show alpha
img2 = cv.imread('vig-alpha.png',flags=cv.IMREAD_UNCHANGED) # read RGBA image
print(img2[0][0]) #show alpha
img3 = cv.subtract(img1, img2)
img3 = cv.resize(img3, (500,250))
print(img3[0][0]) # show alpha
cv.imshow('result',img3)
cv.waitKey()
cv.destroyAllWindows()
However, this is the 'result'. I need to produce uniform shading throughout the image while leaving the original colors intact. I don't know the correct terminology for this sort of thing, and it's hard to search for a solution with what I do know. Thanks in advance.
EDIT: As per Rotem's answer, the image file format matters. StackOverflow converted the PNG files I posted to JPEG, which did affect the results while checking their answer. See the comment I left on Rotem's answer below for more information.
The vignette template is not supposed to be subtracted; it is supposed to be used for scaling.
The vignette correction process is known as flat-field correction, which applies:
G = m / (F - D)
C = (R - D) * G
where D is the dark field (dark frame).
We don't have a dark-frame sample, so we may assume the dark frame is all zeros.
Assuming D = 0, the correction formula becomes:
G = m / F
C = R * G
where m = mean(F), F corresponds to vig-alpha, and R is test.png.
For computing G (named inv_vig_norm), we may use the following stages:
Read vig-alpha.png as grayscale, and convert it to float in the range [0, 1] (vig_norm corresponds to F):
vig = cv2.imread('vig-alpha.png', cv2.IMREAD_GRAYSCALE)
vig_norm = vig.astype(np.float32) / 255
Divide m by F:
vig_mean_val = cv2.mean(vig_norm)[0]
inv_vig_norm = vig_mean_val / vig_norm # Compute G = m/F
Compute C = R * G, i.e. scale img1 by inv_vig_norm:
inv_vig_norm = cv2.cvtColor(inv_vig_norm, cv2.COLOR_GRAY2BGR)
img2 = cv2.multiply(img1, inv_vig_norm, dtype=cv2.CV_8U) # Compute: C = R * G
For removing noise and artifacts, we may apply Median Blur and Gaussian Blur over vig (this may be required because the site converted vig-alpha.png to JPEG format).
Code sample:
import cv2
import numpy as np
img1 = cv2.imread('test.png')
vig = cv2.imread('vig-alpha.png', cv2.IMREAD_GRAYSCALE) # Read vignette template as grayscale
vig = cv2.medianBlur(vig, 15) # Apply median filter for removing artifacts and extreme pixels.
vig_norm = vig.astype(np.float32) / 255 # Convert vig to float32 in range [0, 1]
vig_norm = cv2.GaussianBlur(vig_norm, (51, 51), 30) # Blur the vignette template (because there are still artifacts, maybe because SO converted the image to JPEG).
#vig_max_val = vig_norm.max() # For avoiding "false colors" we may use the maximum instead of the mean.
vig_mean_val = cv2.mean(vig_norm)[0]
# vig_max_val / vig_norm
inv_vig_norm = vig_mean_val / vig_norm # Compute G = m/F
inv_vig_norm = cv2.cvtColor(inv_vig_norm, cv2.COLOR_GRAY2BGR) # Convert inv_vig_norm to 3 channels before using cv2.multiply. https://stackoverflow.com/a/48338932/4926757
img2 = cv2.multiply(img1, inv_vig_norm, dtype=cv2.CV_8U) # Compute: C = R * G
cv2.imshow('inv_vig_norm', cv2.resize(inv_vig_norm / inv_vig_norm.max(), (500, 250))) # Show inv_vig_norm for testing
cv2.imshow('img1', cv2.resize(img1, (500, 250)))
cv2.imshow('result', cv2.resize(img2, (500, 250)))
cv2.waitKey()
cv2.destroyAllWindows()
Results:
img1:
inv_vig_norm:
img2:

How do I develop a negative film image using Python

I have tried inverting a negative film image's colors with the bitwise_not() function in Python, but the result has a blue tint. I would like to know how I could develop a negative film image that looks reasonably good. Here's the outcome of what I did. (I just cropped the negative image for a new test I was doing, so don't mind that.)
If you don't use the exact maximum and minimum, but the 1st and 99th percentiles, or something nearby (0.1%?), you'll get some nicer contrast. It'll cut away outliers due to noise, compression, etc.
Additionally, you'll want to mess with gamma, or scale the values linearly, to achieve white balance.
I'll apply a "gray world assumption" and scale each plane so the mean is gray. I'll also mess with gamma, but that's just messing around.
And... all of that completely ignores gamma mapping, both of the "negative" and of the outputs.
import numpy as np
import cv2 as cv
import skimage.exposure
im = cv.imread("negative.png")
(bneg,gneg,rneg) = cv.split(im)
def stretch(plane):
    # take 1st and 99th percentiles
    imin = np.percentile(plane, 1)
    imax = np.percentile(plane, 99)
    # stretch the image
    plane = (plane - imin) / (imax - imin)
    return plane
b = 1 - stretch(bneg)
g = 1 - stretch(gneg)
r = 1 - stretch(rneg)
bgr = cv.merge([b,g,r])
cv.imwrite("positive.png", bgr * 255)
b = 1 - stretch(bneg)
g = 1 - stretch(gneg)
r = 1 - stretch(rneg)
# gray world
b *= 0.5 / b.mean()
g *= 0.5 / g.mean()
r *= 0.5 / r.mean()
bgr = cv.merge([b,g,r])
cv.imwrite("positive_grayworld.png", bgr * 255)
b = 1 - np.clip(stretch(bneg), 0, 1)
g = 1 - np.clip(stretch(gneg), 0, 1)
r = 1 - np.clip(stretch(rneg), 0, 1)
# goes in the right direction
b = skimage.exposure.adjust_gamma(b, gamma=b.mean()/0.5)
g = skimage.exposure.adjust_gamma(g, gamma=g.mean()/0.5)
r = skimage.exposure.adjust_gamma(r, gamma=r.mean()/0.5)
bgr = cv.merge([b,g,r])
cv.imwrite("positive_gamma.png", bgr * 255)
Here's what happens when gamma is applied to the inverted picture: a reasonably tolerable transfer function results from applying the same factor twice, instead of applying its inverse.
Trying to "undo" the gamma while ignoring that the values were inverted... causes serious distortions:
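As a minimal sketch of that difference (my own illustration; gamma = 2.2 is an assumed value, not from the answer), you can compare the two transfer curves directly:
import numpy as np
x = np.linspace(0.0, 1.0, 6)                # sample normalized negative intensities
gamma = 2.2                                  # assumed encoding gamma
inverted = 1.0 - x                           # simple inversion of the negative
print(np.round(inverted**gamma, 3))          # applying the same factor again
print(np.round(inverted**(1.0/gamma), 3))    # applying the inverse exponent instead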
And the min/max values for contrast stretching also affect the whole thing.
A simple photo of a negative simply won't do: it will include stray light that offsets the black point, at the very least. You need a proper scan of the negative.
Here is one simple way to do that in Python/OpenCV. Basically, one stretches each channel of the image to full dynamic range separately, then recombines the channels and inverts the result.
Input:
import cv2
import numpy as np
import skimage.exposure
# read image
img = cv2.imread('boys_negative.png')
# separate channels
b, g, r = cv2.split(img)   # note: OpenCV loads images in BGR channel order
# stretch each channel
r_stretch = skimage.exposure.rescale_intensity(r, in_range='image', out_range=(0,255)).astype(np.uint8)
g_stretch = skimage.exposure.rescale_intensity(g, in_range='image', out_range=(0,255)).astype(np.uint8)
b_stretch = skimage.exposure.rescale_intensity(b, in_range='image', out_range=(0,255)).astype(np.uint8)
# combine channels
img_stretch = cv2.merge([b_stretch, g_stretch, r_stretch])
# invert
result = 255 - img_stretch
cv2.imshow('input', img)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('boys_negative_inverted.jpg', result)
Result:
Caveat: This works for this image, but may not be a universal solution for all images.
ADDITION
In the above, I did not clip when stretching, as I wanted to preserve all information. But if one wants to clip and use skimage.exposure.rescale_intensity for stretching, it is easy enough, as follows:
import cv2
import numpy as np
import skimage.exposure
# read image
img = cv2.imread('boys_negative.png')
# separate channels
b, g, r = cv2.split(img)   # note: OpenCV loads images in BGR channel order
# compute clip points -- clip 1% only on high side
clip_rmax = np.percentile(r, 99)
clip_gmax = np.percentile(g, 99)
clip_bmax = np.percentile(b, 99)
clip_rmin = np.percentile(r, 0)
clip_gmin = np.percentile(g, 0)
clip_bmin = np.percentile(b, 0)
# stretch each channel
r_stretch = skimage.exposure.rescale_intensity(r, in_range=(clip_rmin,clip_rmax), out_range=(0,255)).astype(np.uint8)
g_stretch = skimage.exposure.rescale_intensity(g, in_range=(clip_gmin,clip_gmax), out_range=(0,255)).astype(np.uint8)
b_stretch = skimage.exposure.rescale_intensity(b, in_range=(clip_bmin,clip_bmax), out_range=(0,255)).astype(np.uint8)
# combine channels
img_stretch = cv2.merge([b_stretch, g_stretch, r_stretch])
# invert
result = 255 - img_stretch
cv2.imshow('input', img)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('boys_negative_inverted2.jpg', result)
Result:

Extract ridges and valleys from a finger image

For my class project I am trying to extract the ridges and valleys from a finger image. An example is given below.
#The code I am using
import cv2
import numpy as np
import math
import fingerprint_enhancer
clip_hist_percent=25
image = cv2.imread("")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Calculate grayscale histogram
hist = cv2.calcHist([gray],[0],None,[256],[0,256])
hist_size = len(hist)
# Calculate cumulative distribution from the histogram
accumulator = []
accumulator.append(float(hist[0]))
for index in range(1, hist_size):
    accumulator.append(accumulator[index - 1] + float(hist[index]))
# Locate points to clip
maximum = accumulator[-1]
clip_hist_percent *= (maximum/100.0)
clip_hist_percent /= 2.0
# Locate left cut
minimum_gray = 0
while accumulator[minimum_gray] < clip_hist_percent:
    minimum_gray += 1
# Locate right cut
maximum_gray = hist_size - 1
while accumulator[maximum_gray] >= (maximum - clip_hist_percent):
    maximum_gray -= 1
# Calculate alpha and beta values
alpha = 255 / (maximum_gray - minimum_gray)
beta = -minimum_gray * alpha
auto_result = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)
gray = cv2.cvtColor(auto_result, cv2.COLOR_BGR2GRAY)
# compute gamma = log(mid*255)/log(mean)
mid = 0.5
mean = np.mean(gray)
gamma = math.log(mid*255)/math.log(mean)
# do gamma correction
img_gamma1 = np.power(auto_result,gamma).clip(0,255).astype(np.uint8)
g1 = cv2.cvtColor(img_gamma1, cv2.COLOR_BGR2GRAY)
# blur = cv2.GaussianBlur(g1,(2,1),0)
thresh2 = cv2.adaptiveThreshold(g1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 199, 3)
# blur = cv2.GaussianBlur(thresh2,(2,1),0)
blur=((3,3),1)
erode_=(5,5)
dilate_=(3, 3)
dilate = cv2.dilate(cv2.erode(cv2.GaussianBlur(thresh2/255, blur[0], blur[1]),
                              np.ones(erode_)), np.ones(dilate_))*255
out = fingerprint_enhancer.enhance_Fingerprint(dilate)
I am having difficulty extracting the lines on the finger. I tried adjusting the brightness and contrast, applied calcHist, adaptive thresholding, applied blur, and then applied Gabor filters (as per the UTKARSH code). The result looks like the above.
We can clearly see that the lower part of the image has many spurious lines. My project requires getting clear lines from the RGB image. Could anyone help me with the steps and the code?
Thank you in advance
reference:
https://github.com/Utkarsh-Deshmukh/Fingerprint-Enhancement-Python
https://ieeexplore.ieee.org/abstract/document/7358782
There are several strange things (IMO) about your code.
First you do a contrast stretch that sets the 12.5% darkest pixels to black and the 12.5% brightest pixels to white. You probably already have this number of white pixels, so not much happens there, but you do remove all the information in the darkest region of the fingerprint.
Next you threshold. Here you remove most of the remaining information. Thresholding is something you should leave until the very last step of any processing. In particular, the algorithm implemented in fingerprint_enhancer.enhance_Fingerprint() takes a grayscale image as input. You should not binarize its input at all!
I would start with a local contrast stretch, then you can directly apply the enhancement algorithm:
import cv2
import fingerprint_enhancer
image = cv2.imread("zMxbO.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply local contrast stretch
se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25)) # larger than the width of the widest ridges
low = cv2.morphologyEx(gray, cv2.MORPH_OPEN, se) # locally lowest grayvalue
high = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, se) # locally highest grayvalue
gray = (gray.astype('float32') - low) / (high - low + 1e-6) # stretch each pixel between its local min (open) and max (close)
# Apply fingerprint enhancement
out = fingerprint_enhancer.enhance_Fingerprint(gray, resize=True)
The local contrast stretch yields this:
The finger print enhancement algorithm now yields this:
Note that things go wrong around the edges, where the background was cut out and replaced with white, as well as in the dark region, where the noise dominates and the enhancement algorithm hallucinates a bit. I don't think you can extract meaningful information from that area; better illumination would be necessary.

Difference of Gaussian filtering (DoG) doesn't give the expected results

I'm trying to implement the method from the original paper (Enhanced Local Texture Feature Sets for Face Recognition Under Difficult Lighting Conditions by Xiaoyang Tan and Bill Triggs) in Python 3.6 and OpenCV 4.2, but when I apply the preprocessing to an image it doesn't give the same results as in the paper, although I used the same parameters specified:
1- for the gamma correction, gamma=0.2
2- for the DoG, (sigma0=1, sigma1=2)
3- for the contrast equalization, tau=10 and alpha=0.1
Here are the original image, the result I got, and the expected result.
And here is the code that I used:
import cv2 as cv
import numpy as np

img_original = cv.imread('C:/Users/Ouss/Desktop/TP-LTP/face.jpg', cv.IMREAD_GRAYSCALE)
# gamma correction
lookUpTable = np.empty((1, 256), np.uint8)
for i in range(256):
    # calculating the new values
    lookUpTable[0, i] = np.clip(pow(i / 255.0, 2) * 255.0, 0, 255)
# mapping the new values with the original
gamma_corrected_img = cv.LUT(img_original, lookUpTable)
# DOG
blur1 = cv.GaussianBlur(gamma_corrected_img, (3, 3), 1, borderType=cv.BORDER_REPLICATE)
blur2 = cv.GaussianBlur(gamma_corrected_img, (7, 7), 2, borderType=cv.BORDER_REPLICATE)
dog_img = cv.subtract(blur1, blur2)
# contrast equalisation
# step 1
alpha = 0.1
tau = 10
temp1 = pow(np.abs(dog_img), alpha)
meanImg = np.mean(temp1)
Contrast_Equa_step01 = dog_img / pow(meanImg, 1/alpha)
# step 2
minMat = np.abs(Contrast_Equa_step01)
minMat[minMat > tau] = tau
temp2 = pow(minMat, alpha)
meanImg2 = np.mean(temp2)
Contrast_Equa_step02 = Contrast_Equa_step01 / pow(meanImg2, 1/alpha)
CEqualized_img = tau * np.tanh((Contrast_Equa_step02/tau))
I think your main issue is that tau=10.0 is too large. It seems to work for me with tau=3.0, with my images normalized to floats in the range 0 to 1. Then, at the end, multiply by 255 and convert to uint8.
Here is my Python/OpenCV code. I have saved versions of the gamma-corrected, DoG, and first-stage contrast-equalized images scaled by 255 to uint8 for viewing. I have also normalized the DoG (though this is not needed) by dividing by the largest absolute value, to stretch its values to the range -1 to 1; the normalization gives the DoG better contrast. I also swapped the order of the two blurred images in the DoG to match the paper's contrast polarity.
Input:
import cv2
import numpy as np
# Reference: Enhanced Local Texture Feature Sets for Face Recognition Under Difficult Lighting Conditions by Xiaoyang Tan and Bill Triggs
# https://lear.inrialpes.fr/pubs/2007/TT07/Tan-amfg07a.pdf
# read image as grayscale float in range 0 to 1
img = cv2.imread('face.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float64) / 255.0
# set arguments
gamma = 0.2
alpha = 0.1
tau = 3.0
# gamma correction
img_gamma = np.power(img, gamma)
img_gamma2 = (255.0 * img_gamma).clip(0,255).astype(np.uint8)
# DOG
blur1 = cv2.GaussianBlur(img_gamma, (0,0), 1, borderType=cv2.BORDER_REPLICATE)
blur2 = cv2.GaussianBlur(img_gamma, (0,0), 2, borderType=cv2.BORDER_REPLICATE)
img_dog = (blur1 - blur2)
# normalize by the largest absolute value so range is -1 to 1
img_dog = img_dog / np.amax(np.abs(img_dog))
img_dog2 = (255.0 * (0.5*img_dog + 0.5)).clip(0,255).astype(np.uint8)
# contrast equalization equation 1
img_contrast1 = np.abs(img_dog)
img_contrast1 = np.power(img_contrast1, alpha)
img_contrast1 = np.mean(img_contrast1)
img_contrast1 = np.power(img_contrast1,1.0/alpha)
img_contrast1 = img_dog/img_contrast1
# contrast equalization equation 2
img_contrast2 = np.abs(img_contrast1)
img_contrast2 = img_contrast2.clip(0,tau)
img_contrast2 = np.mean(img_contrast2)
img_contrast2 = np.power(img_contrast2,1.0/alpha)
img_contrast2 = img_contrast1/img_contrast2
img_contrast = tau * np.tanh((img_contrast2/tau))
# Scale results two ways back to uint8 in the range 0 to 255
img_contrastA = (255.0 * (img_contrast+0.5)).clip(0,255).astype(np.uint8)
img_contrastB = (255.0 * (0.5*img_contrast+0.5)).clip(0,255).astype(np.uint8)
# show results
cv2.imshow('Face', img)
cv2.imshow('Gamma', img_gamma2)
cv2.imshow('DoG', img_dog2)
cv2.imshow('CE1', img_contrast1)
cv2.imshow('CE_A', img_contrastA)
cv2.imshow('CE_B', img_contrastB)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('face_contrast_equalization_A.jpg', img_contrastA)
cv2.imwrite('face_contrast_equalization_B.jpg', img_contrastB)
One gets slightly different results depending upon how one scales the result from float back to uint8 in the range 0 to 255. The first method simply biases by 0.5 before multiplying by 255. The second multiplies by 0.5, then biases by 0.5 before multiplying by 255. Method A may be closer to what the authors of the reference achieved.
Scaling Method A:
Scaling Method B:
Adjust tau higher or lower to get the contrast you desire.
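If you want to compare several settings side by side, here is a quick sweep (a sketch reusing img_contrast1 and alpha from the script above; the loop and filenames are my own):
# sweep tau and save each result using scaling method B
for tau in (2.0, 3.0, 4.0):
    denom = np.power(np.mean(np.abs(img_contrast1).clip(0, tau)), 1.0/alpha)
    img_c = tau * np.tanh((img_contrast1 / denom) / tau)
    out = (255.0 * (0.5*img_c + 0.5)).clip(0, 255).astype(np.uint8)
    cv2.imwrite('face_CE_tau{:.0f}.jpg'.format(tau), out)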
There are several possible issues.
1) You should normalize your image to the range 0 to 1 as floats throughout all your operations, then scale back to ints in the range 0 to 255 for the final result.
2) You have used gamma=2, not gamma=0.2
3) When computing the Gaussian blur using sigma values, you should set the dimensions to 0. See https://docs.opencv.org/4.1.1/d4/d86/group__imgproc__filter.html#gaabe8c836e97159a9193fb0b11ac52cf1
4) Typically, the DoG is biased to 0.5
Here is a simple example of computing the Gamma enhanced image and separately of the DoG image from your original face image in Python/OpenCV.
Input:
import cv2
img = cv2.imread('face.jpg', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.0
# gamma correction
img_gamma = img**0.2
img_gamma = (255.0 * img_gamma).clip(0,255).astype('uint8')
# DOG
blur1 = cv2.GaussianBlur(img, (0,0), 1, borderType=cv2.BORDER_REPLICATE)
blur2 = cv2.GaussianBlur(img, (0,0), 2, borderType=cv2.BORDER_REPLICATE)
# compute difference and bias to 0.5
img_dog1 = blur2 - blur1 + 0.5
img_dog1 = (255.0 * img_dog1).clip(0,255).astype('uint8')
# Or compute difference and add back to image as band pass boost filter
img_dog2 = blur2 - blur1 + img
img_dog2 = (255.0 * img_dog2).clip(0,255).astype('uint8')
# show results
cv2.imshow('Face', img)
cv2.imshow('Gamma', img_gamma)
cv2.imshow('DOG1', img_dog1)
cv2.imshow('DOG2', img_dog2)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('face_gamma.jpg', img_gamma)
cv2.imwrite('face_dog1.jpg', img_dog1)
cv2.imwrite('face_dog2.jpg', img_dog2)
Gamma Enhance Result from input:
DoG Result1 from input:
DoG Result2 from input:
Perhaps these suggestions will help in your full processing.

Image de-blurring

This post is divided in two parts.
Part One
I have a little issue converting an image from grayscale back to RGB.
Image in question:
I use this code to convert it:
equ = cv2.cvtColor(equ, cv2.COLOR_GRAY2RGB)
without any success though...
Part Two
Moreover, I need to de-blur this image. Here I found some code that uses a Wiener filter to do so, but when I implement it, it doesn't seem to work effectively. Here is the code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
from skimage import restoration

psf = np.ones((5, 5)) / 25
img = convolve2d(equ, psf, 'same')
img += 0.1 * img.std() * np.random.standard_normal(img.shape)
#deconvolved_img = restoration.wiener(img, psf, 1100)
deconvolved = restoration.wiener(img, psf, 1, clip=False)
plt.imshow(deconvolved, cmap='gray')
and this is the output:
Any help for the two problems is greatly appreciated!
To equalize a color image, it seems the common thing to do is to:
convert the image to HSV or YUV
split the image into separate components (e.g. H, S, V)
equalize on Value channel (or all three if you want)
merge the channels back together
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
split = cv2.split(hsv) # split is a list of the three channel planes (H, S, V)
split[2] = cv2.equalizeHist(split[2])
hsv = cv2.merge(split)
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
For "deblurring", I sometimes use an unsharp mask. From the Wikipedia page on unsharp masking, the formula for this operation is
sharpened = original + (original − blurred) × amount
which can be rearranged to
sharpened = original×(1 + amount) + blurred×(-amount)
Wikipedia says a good starting point for amount is 0.5 to 1.5. In my app I have a spinbox that lets it vary between 0 and 10. For blurring I use a Gaussian blur with kernel size varying from 1 to 31 (must be odd and integer). To do the matrix math, I prefer to use OpenCV functions because they are often faster than NumPy and they will usually autoscale output to values between 0 and 255 (e.g. for 8-bit and 8-bit/3-channel images). Here we use addWeighted, which does
dst = src1*alpha + src2*beta + gamma;
amount = 1.5
ksize = (3, 3) # Gaussian kernel size (must be odd)
blur = cv2.GaussianBlur(img, ksize, 0, 0)
unsharp = cv2.addWeighted(img, 1 + amount, blur, -amount, 0)
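Assuming img is the BGR image from the equalization snippet above, cv2.imwrite('unsharp.jpg', unsharp) will save the sharpened result.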
