Chromatic Aberration Estimation in Python

Hi, this code estimates chromatic aberration in an image by finding the center of distortion (x, y) and the magnitude of distortion (alpha), both between the red and green channels and between the blue and green channels. I get an error in the warpRegion function:
File "CAfeb.py", line 217, in warpRegion
reg_w = sp.interpolate.interp2d(yrampf,xrampf,Cwarp, yramp1f, xramp1f,'cubic');
File "/usr/lib/python2.7/dist-packages/scipy/interpolate/interpolate.py", line 109, in __init__
'quintic' : 5}[kind]
TypeError: unhashable type: 'numpy.ndarray'
Below is the complete code. Any help will be greatly appreciated. Thank you, Areej
import math
from PIL import Image
import numpy as np
from decimal import Decimal
import scipy as sp
from scipy import interpolate
from scitools.std import ndgrid
from scipy import ogrid, sin, mgrid, ndimage, array

def ldimage():
    #load image
    global im
    im = Image.open("/home/areej/Desktop/mandril_color.tif")

def analyzeCA(mode, im):
    n_regions = 10;
    reg_size = [300, 300];
    overlap = 0.5;
    levels = 9;
    steps = 2;
    edge_width = 10;
    hist_sz = 128;
    # alpha_1 and alpha_2 are assumed to be between these values
    w_data = [0.9985, 1.0015];
    reg_list=[]
    #creating an array of pixels so that we can access them
    pix=im.load()
    #Analyze full image
    if mode=='full':
        print "Doing a full analysis"
        # mx_shift is the third argument in 'full' mode
        mx_shift = n_regions;
        # [ydim,xdim,zdim]= size(im);
        ydim=im.size[0]
        xdim=im.size[1]
        zdim=3
        print "Image dimensions: [ydim, xdim, zdim]= "+str([ydim,xdim,zdim])
        global alpha_mx, alpha_mn
        alpha_mx = 1 + 4*mx_shift / math.sqrt( xdim*xdim + ydim*ydim );
        alpha_mn = 1.0/alpha_mx;
        print "alpha_mx= "+str(alpha_mx)
        print "alpha_mn= "+str(alpha_mn)
        #recompute alpha_1 and alpha_2 to be between
        #these new values
        w_data = [alpha_mn, alpha_mx];
        ew = edge_width;
        #take the image minus a ew-wide edge
        roi = [ew+1, xdim-ew, ew+1, ydim-ew];
        print "edge_width= "+str(ew)
        print "roi= "+str(roi)
        #Analyze blue to green chromatic aberration
        bg_params = parameterSearch( im, [3, 2], roi, ew, hist_sz, w_data);
        # Analyze red to green chromatic aberration
        rg_params = parameterSearch( im, [1, 2], roi, ew, hist_sz, w_data );
    elif mode=='reg':
        print "we should do a regional analysis here"
    else:
        print "unsupported call"

#def estimateCARegions( im, [3, 2], reg_list, settings ):

def parameterSearch( im, colour_space, roi, ew, hist_sz, w_data):
    #levels is number of iterations
    levels = 8;
    steps = 2;
    #[ydim,xdim,zdim] = size(im);
    ydim=im.size[0]
    xdim=im.size[1]
    zdim= 3
    x_data = [1, xdim];
    y_data = [1, ydim];
    xlim = x_data;
    ylim = y_data;
    zlim = w_data;
    #work out which of height and width is the bigger
    dim = max(xdim,ydim)
    print "The highest dimension is : "+str(dim)
    #check that roi falls within expected boundries
    if ((roi[0] <= ew) or (roi[1] > xdim-ew) or (roi[2] <= ew) or (roi[3] > ydim-ew)):
        print "ROI is too close to image edges"
        return -1 # TODO: terminate here with an error
    #Get image regions
    source = im.split()
    Cfixed = source[2]
    Cwarp = source[1]
    #[ydim,xdim,zdim] = size(im);
    ydimCwarp=Cwarp.size[0]
    xdimCwarp=Cwarp.size[1]
    print 'xdimCwarp'+str(xdimCwarp)
    roi_pad = [roi[0]-ew, roi[1]+ew, roi[2]-ew, roi[3]+ew];
    for levels in range(1,8):
        #Guess at a center and then compute best warp
        #user defined function linear_space used to generate linearly spaced vectors
        x_coords = np.linspace(0,511,steps+2)
        y_coords = np.linspace(0,511,steps+2)
        z_coords = np.linspace(alpha_mn,alpha_mx,steps+2)
        step_x=(xlim[1]-xlim[0])/(steps+1)
        start_x=xlim[0]+step_x
        end_x=xlim[1]-step_x+0.5
        step_y=(ylim[1]-ylim[0])/(steps+1)
        start_y=ylim[0]+step_y
        end_y=ylim[1]-step_y+0.5
        step_z=(zlim[1]-zlim[0])/(steps+1)
        start_z=zlim[0]+step_z
        fudge_z=step_z/2.0
        end_z=zlim[1]-step_z+fudge_z
        #Do not include end points in search;
        centers_x, centers_y, warps= np.mgrid[start_x:end_x:step_x,start_y:end_y:step_y,start_z:end_z:step_z]
        centers_x=centers_x.flatten()
        centers_y=centers_y.flatten()
        warps=warps.flatten()
        mi = np.zeros(centers_x.size)
        for k in range(0,centers_x.size):
            cx = centers_x[k]
            cy = centers_y[k]
            wz = warps[k]
            #Warp the region
            temp_im = warpRegion(Cwarp, roi_pad, [cx, cy, wz])
            #correlation
            mi[k] = np.corrcoef(Cfixed, temp_im)
        #Now pick the best quadrant
        v, max_ix = math.max(mi)
        ix, jx, kx = arrayInd(mi.size, max_ix);
        ##The coordinates of err are off by 1 from x_coords and y_coords because
        ##we did not include the end point
        xlim = x_coords([jx, jx+2]);
        ylim = y_coords([ix, ix+2]);
        zlim = z_coords([kx, kx+2]);
        cx = math.mean(xlim);
        cy = math.mean(ylim);
        wz = math.mean(zlim);
        print "x= "+str(cx)
        print "y= "+str(cy)
        print "z= "+str(wz)

def warpRegion(Cwarp, roi_pad, (cx, cy, wz)):
    #Unpack region indices
    sx, ex, sy, ey = roi_pad
    xramp, yramp = np.mgrid[sx:ex+1, sy:ey+1]
    xrampc = xramp - cx;
    yrampc = yramp - cy;
    xramp1 = 1/wz*xrampc;
    yramp1 = 1/wz*yrampc;
    xrampf = xrampc.flatten()
    yrampf = yrampc.flatten()
    xramp1f = xramp1.flatten()
    yramp1f = yramp1.flatten()
    reg_w = sp.interpolate.interp2d(yrampf,xrampf,Cwarp, yramp1f, xramp1f,'cubic');

ldimage()
analyzeCA('full', im)

As DSM correctly states, this is not the correct calling syntax for interp2d, which you can check in the scipy.interpolate.interp2d documentation. If you read the calling syntax and then your error message again (or the module itself, whichever you prefer), you will see that you are passing an array where the kind keyword is expected, so it gets used as a key into a dictionary, which naturally throws an exception.
I think what you are trying to do is interpolate the grid given by the arrays xrampf, yrampf and evaluate it at the new positions xramp1f, yramp1f. The SciPy documentation gives exactly this usage example, which translates to your code as follows:
interp_func = sp.interpolate.interp2d(yrampf, xrampf, Cwarp, kind='cubic')
reg_w = interp_func(yramp1f, xramp1f)
I hope that is what you intended to do.
Kind regards
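As a side note, if interp2d turns out to be awkward for this kind of radial rescaling, the same warp can also be done with scipy.ndimage.map_coordinates. The sketch below is only an illustration of that alternative; the helper name warp_region_sketch and the nearest-edge padding are my own choices, not part of the code above:
import numpy as np
from scipy import ndimage

def warp_region_sketch(channel_img, roi_pad, params):
    # channel_img: one colour channel (PIL image or 2-D array); roi_pad: [sx, ex, sy, ey]
    cx, cy, alpha = params
    sx, ex, sy, ey = roi_pad
    chan = np.asarray(channel_img, dtype=np.float64)
    # coordinates of the output grid inside the padded ROI
    yy, xx = np.mgrid[sy:ey + 1, sx:ex + 1]
    # pull each output pixel from a position scaled by 1/alpha around the distortion centre
    src_y = (yy - cy) / alpha + cy
    src_x = (xx - cx) / alpha + cx
    # cubic interpolation of the channel at those positions
    return ndimage.map_coordinates(chan, [src_y, src_x], order=3, mode='nearest')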

Related

Doing naive affine_transforms (shear numpy image using numpy)

import matplotlib.pyplot as plt
from scipy import ndimage

# image is an (H, W, C) numpy array loaded beforehand
height, width, colors = image.shape
transform = [[1,   0, 0],
             [0.5, 1, 0],
             [0,   0, 1]]
sheared_array = ndimage.affine_transform(image,
                                         transform,
                                         offset=(0, -height*0.7, 0),
                                         output_shape=(height, width*2, colors))
plt.imshow(sheared_array)
My current code does this. My aim is to shear the image by any angle X.
I want to do the same thing with a naive approach, that is, without any pre-defined functions, just Python/NumPy code from scratch.
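For what it's worth, the existing scipy call can already be driven by an arbitrary angle if the matrix and the output width are derived from it. This is only a sketch for positive angles, assuming image is the loaded array; it is not the from-scratch version asked for, which the answer below provides:
import numpy as np
from math import tan, radians
from scipy import ndimage

def shear_with_scipy(image, shear_angle_degrees):
    # Sketch for positive shear angles only
    assert shear_angle_degrees >= 0
    s = tan(radians(shear_angle_degrees))   # x-shift per row of y, derived from the angle
    height, width, colors = image.shape
    extra = int(s * (height - 1))           # extra width needed to hold the sheared rows
    # affine_transform maps every output coordinate o to the input coordinate (matrix @ o + offset)
    transform = np.array([[1.0, 0.0, 0.0],
                          [s,   1.0, 0.0],
                          [0.0, 0.0, 1.0]])
    return ndimage.affine_transform(image, transform, offset=(0, -extra, 0),
                                    output_shape=(height, width + extra, colors))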
Given the image:
the following code should do what you want to achieve. It copies rows of pixels from the numpy array representing the source image into a newly created, wider image at x-offsets calculated from the given shear angle. The variable names in the code below are chosen so that they explain what they are used for, providing further detail about what the code does:
from PIL import Image
import numpy as np
shearAngleDegrees = 30
PILimg = Image.open('shearNumpyImageByAngle.jpg')
#PILimg.show()
npImg = np.asarray(PILimg)
def shearNpImgByAngle(numpyImageArray, shearAngleDegrees, maxShearAngle=75):
    import numpy as np
    from math import tan, radians
    assert -maxShearAngle <= shearAngleDegrees <= maxShearAngle
    ccw = True if shearAngleDegrees > 0 else False # shear counter-clockwise?
    imgH, imgW, imgRGBtplItems = npImg.shape
    shearAngleRadians = radians(shearAngleDegrees)
    imgWplus2imgH = abs(tan(shearAngleRadians)) # (plus in width)/(image height)
    imgWplus = int((imgH-1)*imgWplus2imgH) # image width increase in pixels
    npImgOut = np.zeros((imgH, imgW+imgWplus, imgRGBtplItems), dtype='uint8')
    Wplus, Wplus2H = (0, -imgWplus2imgH) if ccw else (imgWplus, imgWplus2imgH)
    for y in range(imgH):
        shiftX = Wplus-int(y*Wplus2H)
        npImgOut[y][shiftX:shiftX+imgW] = npImg[y]
    return npImgOut
#:def
npImgOut = shearNpImgByAngle(npImg, shearAngleDegrees)
PILout = Image.fromarray(npImgOut)
PILout.show()
PILout.save('shearNumpyImageByAngle_shearedBy30deg.jpg')
gives:
As a nice add-on to the above code, here is an extension that fills the black edges of the sheared image by mirroring the source picture around its sides:
def filledShearNpImgByAngle(npImg, angleDeg, fill=True, maxAngle=75):
    import numpy as np
    from math import tan, radians
    assert -maxAngle <= angleDeg <= maxAngle
    ccw = True if angleDeg > 0 else False # shear counter-clockwise?
    imgH, imgW, imgRGBtplItems = npImg.shape
    angleRad = radians(angleDeg)
    imgWplus2imgH = abs(tan(angleRad)) # (plus in width)/(image height)
    imgWplus = int((imgH-1)*imgWplus2imgH) # image add. width in pixels
    npImgOut = np.zeros((imgH, imgW+imgWplus, imgRGBtplItems),
                        dtype=npImg.dtype) # 'uint8'
    Wplus, Wplus2H = (0, -imgWplus2imgH) if ccw else (imgWplus, imgWplus2imgH)
    for y in range(imgH):
        shiftXy = Wplus-int(y*Wplus2H)
        npImgOut[y][shiftXy:shiftXy+imgW] = npImg[y]
        if fill:
            assert imgW > imgWplus
            npImgOut[y][0:shiftXy] = np.flip(npImg[y][0:shiftXy], axis=0)
            npImgOut[y][imgW+shiftXy:imgW+imgWplus] = np.flip(npImg[y][imgW-imgWplus-1+shiftXy:imgW-1], axis=0)
    return npImgOut
#:def
from PIL import Image
import numpy as np
PILimg = Image.open('shearNumpyImageByAngle.jpg')
npImg = np.asarray(PILimg)
shearAngleDegrees = 20
npImgOut = filledShearNpImgByAngle(npImg, shearAngleDegrees)#, fill=False)
shearAngleDegrees = 10
npImgOut = filledShearNpImgByAngle(npImgOut, shearAngleDegrees)#, fill=False)
PILout = Image.fromarray(npImgOut)
PILout.show()
PILout.save('shearNumpyImageByAngle_filledshearBy30deg.jpg')
gives:
or other way around:

How to identify dashed lines in an image?

I am trying to identify small dashed lines in an image. An example would be identifying copy area in an excel type of application.
I have tried this.
I am finding it difficult to choose the filter sizes. So I tried a different approach using the Fourier transform to check for repeatability.
Since I know the pixel repetition range of the dashed lines, I go row by row with a moving window and check for periodicity by finding the dominant frequency in that window.
If the dominant frequency falls within the range of the dashed-line period, I set the corresponding pixels in the mask image. I repeat the same for columns. However, this is still failing. Any suggestions or other techniques?
Here is the code:
import cv2
import numpy as np

img = cv2.imread('test.png')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('imgGray', imgGray)
rows, cols = imgGray.shape
maskImage = np.full((rows, cols), 0, dtype=np.uint8)
kernelL = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=np.float32)
imgLaplacian = cv2.filter2D(imgGray, cv2.CV_32F, kernelL)
imgResult = imgLaplacian
imgResult = np.clip(imgResult, 0, 255)
imgResult = imgResult.astype('uint8')
imgLaplacian = imgResult
cv2.imshow('imgLaplacian', imgLaplacian)
dashLineSearchInterval = 30
fmaxPixel = 9  # maximum interval (in pixels) of the dash repetition
fminPixel = 7  # minimum interval (in pixels) of the dash repetition
stride = 2
# scan rows
for y in range(0, rows-dashLineSearchInterval, stride):
    for x in range(0, cols-dashLineSearchInterval, stride):
        kX = (imgLaplacian[y, x:x+dashLineSearchInterval]).copy()
        kX = kX - np.mean(kX)
        N = dashLineSearchInterval
        freq = np.fft.fftfreq(N)
        ft = np.fft.fft(kX)  # fourier transform
        power = ft.real**2 + ft.imag**2  # power
        maxPowerFreq = np.argmax(power)  # dominant frequency
        domFreq = freq[maxPowerFreq]
        if (domFreq < 0):
            domFreq = -domFreq
        #print(domFreq)
        if float(1/fmaxPixel) <= domFreq <= float(1/fminPixel):
            maskImage[y, x:x+dashLineSearchInterval] = 255
# scan columns
for x in range(0, cols-dashLineSearchInterval, stride):
    for y in range(0, rows-dashLineSearchInterval, stride):
        kY = (imgLaplacian[y:y+dashLineSearchInterval, x]).copy()
        kY = kY - np.mean(kY)
        N = dashLineSearchInterval
        freq = np.fft.fftfreq(N)
        ft = np.fft.fft(kY)  # fourier transform
        power = ft.real**2 + ft.imag**2  # power
        maxPowerFreq = np.argmax(power)  # dominant frequency
        domFreq = freq[maxPowerFreq]
        if (domFreq < 0):
            domFreq = -domFreq
        #print(domFreq)
        if float(1/fmaxPixel) <= domFreq <= float(1/fminPixel):
            maskImage[y:y+dashLineSearchInterval, x] = 255
cv2.imshow('maskImage', maskImage)
cv2.waitKey()
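To make the dominant-frequency test concrete, here is a tiny standalone check of the idea on a synthetic window with an 8-pixel dash period (not part of the code above):
import numpy as np

# 32-sample window containing a dash pattern that repeats every 8 pixels
window = np.tile([255, 255, 255, 255, 0, 0, 0, 0], 4).astype(float)
window -= window.mean()                 # remove the DC component
freq = np.fft.fftfreq(window.size)
power = np.abs(np.fft.fft(window)) ** 2
domFreq = abs(freq[np.argmax(power)])
print(domFreq, 1 / domFreq)             # 0.125 -> period of 8 pixels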

How to correct color of an image based on a standard image

Please see the figure:
image1 is the image to be corrected, and image2 is the standard image taken in a black box.
Both images contain a triangle with a slightly different color. I want to correct image1 using the triangle, based on image2, so that the circle and the square in image1 are corrected as well.
How can I do that?
What I have tried:
get the B, G, R mean values of the triangle in image1 and image2, divide them respectively to get KB, KG, KR, then multiply the B, G, R channels of image1 by KB, KG, KR, and lastly merge the three channels to get the corrected image.
Demo code in python with OpenCV:
triangle_image1 = cv2.mean(image1, mask1)[:3]
triangle_image2 = cv2.mean(image2, mask2)[:3]
k_b, k_g, k_r = triangle_image2 / triangle_image1
b, g, r = cv2.split(image1)
corrected = b * k_b, g * k_g, r * k_r
corrected = np.clip(corrected, 0, 255)
corrected = cv2.merge(np.array(corrected, np.uint8))
The result image looks OK but is actually not right, because the color difference (delta E) between the triangle in the corrected image and in image2 is about 6.
I tried applying a chromatic adaptation transform, but I have no way of telling whether the result is correct.
Note that chromatic adaptation corrects the chrominance, but not the luminance (only the color, not the brightness).
The chromatic adaptation transform is used for color balancing (white balance), and I don't know whether it fits your case.
I reused a MATLAB implementation (I didn't look for Python examples).
Even if you don't know MATLAB, and the solution is not what you are looking for, you may learn from it (how to linearize the RGB values, for example).
Here is the code:
T = imread('image.png'); % Load input image (two images side by side).
image1 = T(:, 1:end/2, :); % Left side
image2 = T(:, end/2+1:end, :); % Right side
I = image1; % Source image is named I
% Use color components in range [0, 1] (colors were found by manual picking).
src_sRGB = [205, 232, 32]/255; %Triangle sRGB color from image 1 "source image"
dst_sRGB = [13, 133, 38]/255; %Triangle sRGB color from image 2 "destination image"
%Linearize gamma-corrected RGB values (image values are in sRGB color space, we need to Linearize them).
srcRGB = rgb2lin(src_sRGB)';
dstRGB = rgb2lin(dst_sRGB)';
linI = rgb2lin(double(I)/255); % I in linear RGB color space.
% Color correction by Chromatic Adaptation:
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
% Convert from XYZ D65 color space to Linear sRGB color space.
XYZD65_to_sRGB = [ 3.2404542 -1.5371385 -0.4985314
-0.9692660 1.8760108 0.0415560
0.0556434 -0.2040259 1.0572252];
% Convert from Linear sRGB color space to XYZ D65 color space.
sRGBtoXYZD65 = [0.4124564 0.3575761 0.1804375; ...
0.2126729 0.7151522 0.0721750; ...
0.0193339 0.1191920 0.9503041];
% Convert srcRGB and dstRGB to XYZ color space
srcXYZ = sRGBtoXYZD65 * srcRGB;
dstXYZ = sRGBtoXYZD65 * dstRGB;
% Convert srcXYZ and dstXYZ to xyY color space (get only xy out of xyY - xy applies chromaticity).
xySrc = XYZ2xy(srcXYZ);
xyDst = XYZ2xy(dstXYZ);
xyzSrc = xy2XYZ(xySrc, 1); %normalize Y to 1 so D65 luminance comparable
xyzDst = xy2XYZ(xyDst, 1); %normalize Y to 1 so D65 luminance comparable
% Chromatic adaptation transform
catType = 'bradford'; %Bradford transformation is recommended by Bruce Lindbloom http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
estMAT = cbCAT(xyzSrc, xyzDst, catType);
% Scale estMAT by XYZD65_to_sRGB before applying the color correction
M = estMAT * XYZD65_to_sRGB;
linI = cbreshape(linI);
% Destination image - apply color correction be multiplying by correction matrix M
linJ = M*linI;
linJ = cbunshape(linJ, size(I));
% Convert J from linear RGB to sRGB
J = lin2rgb(linJ);
% Convert from double to uint8 (multiply by 255).
J = im2uint8(J);
% Display result
figure;imshow(J);title('Corrected image1');impixelinfo
figure;imshow(image2);title('image2');impixelinfo
% Save result
imwrite(image2, 'image2.png');
imwrite(J, 'J.png');
function xy = XYZ2xy(xyz)
%xy = XYZ2xy(xyz)
% Converts CIE XYZ to xy chromaticity.
X = xyz(1, :);
Y = xyz(2, :);
s = sum(xyz);
xy = [X./s; Y./s];
end
function XYZ = xy2XYZ(xy,Y)
%XYZ = xy2XYZ(xy,Y)
% Converts xyY chromaticity to CIE XYZ.
x = xy(1); y = xy(2);
XYZ = [Y/y*x; Y; Y/y*(1-x-y)];
end
function outMat = cbCAT(xyz_src, xyz_dst, type)
%https://web.stanford.edu/~sujason/ColorBalancing/adaptation.html
%M = cbCAT(xyz_src, xyz_dst, type)
% Chromatic adaptation transform via von Kries's method.
% type chooses the LMS-like space to apply scaling in, valid options:
% 'vonKries', 'bradford', 'sharp', 'cmccat2000', 'cat02', 'xyz'
% See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
xyz_src = makecol(xyz_src);
xyz_dst = makecol(xyz_dst);
% the following are mostly taken from S. Bianco. "Two New von Kries Based
% Chromatic Adaptation Transforms Found by Numerical Optimization."
if strcmpi(type,'vonKries') %Hunt-Pointer-Estevez normalized to D65
Ma = [0.40024 0.7076 -0.08081; -0.2263 1.16532 0.0457; 0 0 0.91822];
elseif strcmpi(type,'bradford')
Ma = [0.8951 0.2664 -0.1614; -0.7502 1.7135 0.0367; 0.0389 -0.0685 1.0296];
elseif strcmpi(type,'sharp')
Ma = [1.2694 -0.0988 -0.1706; -0.8364 1.8006 0.0357; 0.0297 -0.0315 1.0018];
elseif strcmpi(type,'cmccat2000')
Ma = [0.7982 0.3389 -0.1371; -0.5918 1.5512 0.0406; 0.0008 0.239 0.9753];
elseif strcmpi(type,'cat02')
Ma = [0.7328 0.4296 -0.1624; -0.7036 1.6975 0.0061; 0.0030 0.0136 0.9834];
else
Ma = eye(3);
end
%Chromatic Adaptation Transforms:
%1. Transform from XYZ into a cone response domain (ro, gamma, beta)
%2. Scale the vector components by factors dependent upon both the source and destination reference whites.
%3. Transform from (ro, gamma, beta) back to XYZ using the inverse transform of step 1.
%D is diagonal matrix marked as inv(Ma)*diag(roD/roS, gammaD/gammaS, betaD/betaS)*Ma.
%Matrix D applies ratios in "cone response domain".
D = diag((Ma*xyz_dst)./(Ma*xyz_src));
%Transform back to XYZ domain:
M = Ma\D*Ma;
sRGBtoXYZ = [0.4124564 0.3575761 0.1804375; ...
0.2126729 0.7151522 0.0721750; ...
0.0193339 0.1191920 0.9503041];
outMat = sRGBtoXYZ\M*sRGBtoXYZ;
end
function x = makecol(x)
%x = makecol(x)
% returns x as a column vector
s = size(x);
if (length(s) == 2) && (s(1) < s(2))
x = x.';
end
end
function out = cbreshape(im)
%out = cbreshape(im)
% Takes a width x height x 3 RGB image and returns a matrix where each column is an RGB
% pixel.
if (size(im, 3) == 3)
out = reshape(permute(im, [3, 1, 2]), [3, numel(im)/3, 1]);
else
out = (im(:))';
end
end
function out = cbunshape(mat,s)
%out = cbunshape(im,[height, width])
% Takes a 3xn matrix of RGB pixels and returns a height x width x 3 RGB
% image
height = s(1); width = s(2);
if (size(mat,1) == 3)
%In case mat is 3 rows, convert to 3D matrix
out = reshape(mat,[3,height,width]);
out = permute(out,[2 3 1]);
else
%In case mat is 1 row, convert to 2D matrix
out = reshape(mat, [height, width]);
end
end
Result:
Update:
Same solution with luminance adjustment:
In case you need to correct the luminance, add the following code before "Color correction by Chromatic Adaptation":
% Scale the input so the mean of the triangle in image1 and image2 will be the same.
% The scaling is equivalent to adjusting the exposure level of the camera.
rgb_scale = mean(dstRGB) / mean(srcRGB);
srcRGB = srcRGB*rgb_scale;
linI = linI*rgb_scale;
Result:
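For readers who want to stay in Python, the core chromatic-adaptation step above translates roughly as follows. This is only a sketch: it uses a plain 2.2 gamma as a stand-in for the exact sRGB linearization, and the helper name cat_matrix is my own, not from the MATLAB code:
import numpy as np

# Bradford cone-response matrix and the linear sRGB -> XYZ (D65) matrix, as in the MATLAB code
MA = np.array([[ 0.8951,  0.2664, -0.1614],
               [-0.7502,  1.7135,  0.0367],
               [ 0.0389, -0.0685,  1.0296]])
RGB2XYZ = np.array([[0.4124564, 0.3575761, 0.1804375],
                    [0.2126729, 0.7151522, 0.0721750],
                    [0.0193339, 0.1191920, 0.9503041]])

def cat_matrix(src_rgb_lin, dst_rgb_lin):
    """Build a 3x3 correction matrix (linear RGB -> linear RGB) via a Bradford CAT."""
    src_xyz = RGB2XYZ @ src_rgb_lin
    dst_xyz = RGB2XYZ @ dst_rgb_lin
    # normalize to Y = 1 so that only chromaticity is adapted
    src_xyz = src_xyz / src_xyz[1]
    dst_xyz = dst_xyz / dst_xyz[1]
    gain = np.diag((MA @ dst_xyz) / (MA @ src_xyz))    # scaling in cone-response space
    m_xyz = np.linalg.inv(MA) @ gain @ MA              # XYZ -> XYZ adaptation
    return np.linalg.inv(RGB2XYZ) @ m_xyz @ RGB2XYZ    # back to linear RGB

# example with the triangle colours quoted in the MATLAB answer (gamma 2.2 as an approximation)
src = (np.array([205, 232, 32]) / 255.0) ** 2.2
dst = (np.array([13, 133, 38]) / 255.0) ** 2.2
M = cat_matrix(src, dst)
# apply to an image: lin = (img/255.0)**2.2; corrected = (lin.reshape(-1, 3) @ M.T).reshape(lin.shape)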

Compare the LBP in python

I generated a texture image like this
I have to compare two textures. I have used the histogram comparison method.
import cv2

image_file = 'output_ori.png'
img_bgr = cv2.imread(image_file)
height, width, channel = img_bgr.shape
hist_lbp = cv2.calcHist([img_bgr], [0], None, [256], [0, 256])
print("second started")
image_fileNew = 'output_scan.png'
img_bgr_new = cv2.imread(image_fileNew)
height_new, width_new, channel_new = img_bgr_new.shape
print("second lbp")
hist_lbp_new = cv2.calcHist([img_bgr_new], [0], None, [256], [0, 256])
print("compar started")
compare = cv2.compareHist(hist_lbp, hist_lbp_new, cv2.HISTCMP_CORREL)
print(compare)
But this method is not effective. It shows similar results for two different image textures. It also does not show enough variation to identify the print-and-scan effect. How do I compare the textures? I thought of analysing the GLCM characteristics.
import cv2
import numpy as np
from skimage.feature import greycomatrix, greycoprops
img = cv2.imread('images/noised_img1.jpg', 0)
image = np.array(img, dtype=np.uint8)
g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=4, normed=True, symmetric=True)
contrast = greycoprops(g, 'contrast')
print(contrast)
In this method, I am getting the output as a 2x2 matrix per property. How do I compare two such sets of matrices across several features like contrast, similarity, homogeneity, ASM, energy and correlation?
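One possible way to compare them (a sketch of my own, not taken from the answer below) is to flatten all the GLCM property matrices into a single feature vector per image and then compare the vectors with a distance measure:
import numpy as np
from skimage.feature import greycomatrix, greycoprops

def glcm_features(gray_img):
    # gray_img: 2-D uint8 array; quantize to 4 grey levels so it matches levels=4
    quantised = (gray_img // 64).astype(np.uint8)
    g = greycomatrix(quantised, [1, 2], [0, np.pi/2], levels=4,
                     symmetric=True, normed=True)
    props = ['contrast', 'dissimilarity', 'homogeneity', 'ASM', 'energy', 'correlation']
    # each greycoprops call returns a (distances x angles) matrix; flatten them all
    return np.hstack([greycoprops(g, p).ravel() for p in props])

# two textures can then be compared with, e.g., a Euclidean distance:
# dist = np.linalg.norm(glcm_features(img1) - glcm_features(img2))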
COMMENT CLARIFICATION
import numpy as np
from PIL import Image

class LBP:
    def __init__(self, input, num_processes, output):
        # Convert the image to grayscale
        self.image = Image.open(input).convert("L")
        self.width = self.image.size[0]
        self.height = self.image.size[1]
        self.patterns = []
        self.num_processes = num_processes
        self.output = output

    def execute(self):
        self._process()
        if self.output:
            self._output()

    def _process(self):
        pixels = list(self.image.getdata())
        pixels = [pixels[i * self.width:(i + 1) * self.width] for i in range(self.height)]
        # Calculate LBP for each non-edge pixel
        for i in range(1, self.height - 1):
            # Cache only the rows we need (within the neighborhood)
            previous_row = pixels[i - 1]
            current_row = pixels[i]
            next_row = pixels[i + 1]
            for j in range(1, self.width - 1):
                # Compare this pixel to its neighbors, starting at the top-left pixel and moving
                # clockwise, and use bit operations to efficiently update the feature vector
                pixel = current_row[j]
                pattern = 0
                pattern = pattern | (1 << 0) if pixel < previous_row[j-1] else pattern
                pattern = pattern | (1 << 1) if pixel < previous_row[j] else pattern
                pattern = pattern | (1 << 2) if pixel < previous_row[j+1] else pattern
                pattern = pattern | (1 << 3) if pixel < current_row[j+1] else pattern
                pattern = pattern | (1 << 4) if pixel < next_row[j+1] else pattern
                pattern = pattern | (1 << 5) if pixel < next_row[j] else pattern
                pattern = pattern | (1 << 6) if pixel < next_row[j-1] else pattern
                pattern = pattern | (1 << 7) if pixel < current_row[j-1] else pattern
                self.patterns.append(pattern)

    def _output(self):
        # Write the result to an image file
        result_image = Image.new(self.image.mode, (self.width - 2, self.height - 2))
        result_image.putdata(self.patterns)
        result_image.save("output.png")
I generated the texture with this code. I have the textures and methods to calculate their properties, but the question is how to identify the similarity between two textures.
Suppose you have two classes, for example couscous and knitwear, and you wish to classify an unknown color image as either couscous or knitwear. One possible method would be:
Converting the color images to grayscale.
Computing the local binary patterns.
Calculating the normalized histogram of local binary patterns.
The following snippet implements this approach:
import numpy as np
from skimage import io, color
from skimage.feature import local_binary_pattern
def lbp_histogram(color_image):
img = color.rgb2gray(color_image)
patterns = local_binary_pattern(img, 8, 1)
hist, _ = np.histogram(patterns, bins=np.arange(2**8 + 1), density=True)
return hist
couscous = io.imread('https://i.stack.imgur.com/u3xLI.png')
knitwear = io.imread('https://i.stack.imgur.com/Zj14J.png')
unknown = io.imread('https://i.stack.imgur.com/JwP3j.png')
couscous_feats = lbp_histogram(couscous)
knitwear_feats = lbp_histogram(knitwear)
unknown_feats = lbp_histogram(unknown)
Then you need to measure the similarity (or dissimilarity) between the LBP histogram of the unknown image and the histograms of the images that represent the two considered classes. Euclidean distance between histograms is a popular dissimilarity measure.
In [63]: from scipy.spatial.distance import euclidean
In [64]: euclidean(unknown_feats, couscous_feats)
Out[64]: 0.10165884804845844
In [65]: euclidean(unknown_feats, knitwear_feats)
Out[65]: 0.0887492936776889
In this example the unknown image will be classified as knitwear because the dissimilarity unknown-couscous is greater than the dissimilarity unknown-knitwear. This is in good agreement with the fact that the unknown image is actually a different type of knitwear.
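A compact way to turn this comparison into a decision, using the feature vectors defined above (just a sketch):
from scipy.spatial.distance import euclidean

classes = {'couscous': couscous_feats, 'knitwear': knitwear_feats}
label = min(classes, key=lambda name: euclidean(unknown_feats, classes[name]))
print(label)   # expected: 'knitwear'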
import matplotlib.pyplot as plt
hmax = max([couscous_feats.max(), knitwear_feats.max(), unknown_feats.max()])
fig, ax = plt.subplots(2, 3)
ax[0, 0].imshow(couscous)
ax[0, 0].axis('off')
ax[0, 0].set_title('Cous cous')
ax[1, 0].plot(couscous_feats)
ax[1, 0].set_ylim([0, hmax])
ax[0, 1].imshow(knitwear)
ax[0, 1].axis('off')
ax[0, 1].set_title('Knitwear')
ax[1, 1].plot(knitwear_feats)
ax[1, 1].set_ylim([0, hmax])
ax[1, 1].axes.yaxis.set_ticklabels([])
ax[0, 2].imshow(unknown)
ax[0, 2].axis('off')
ax[0, 2].set_title('Unknown (knitwear)')
ax[1, 2].plot(unknown_feats)
ax[1, 2].set_ylim([0, hmax])
ax[1, 2].axes.yaxis.set_ticklabels([])
plt.show(fig)

TypeError message in Python's PIL (v2.6): integer expected, got float

First time I'm posting in the forum. Hope I'm specific enough.
Using ImageChops inside PIL, I'm trying to multiply two images (both mode="L") but I always get the same error message. I've looked everywhere but couldn't find anything useful. I'd greatly appreciate any helpful ideas!
The relevant part of the code is attached.
def point(self, f, searchImage, technique): # technique - inpaint or bicubic
    dimx, dimy = searchImage.size
    reader = csv.reader(f)
    for line in reader: #f.readlines():
        coord = line
        print coord
        if searchImage.size[0] > float(coord[0])+95.5 and searchImage.size[1] \
                > float(coord[1])+95.5:
            box = (float(coord[0])-93.5, float(coord[1])-93.5,
                   float(coord[0])+95.5, float(coord[1])+95.5) # left upper right
        elif searchImage.size[0] < float(coord[0])+95.5 and searchImage.size[1] \
                > float(coord[1])+95.5:
            box = (float(coord[0])-93.5, float(coord[1])-93.5,
                   searchImage.size[0]-0.5, float(coord[1])+95.5)
            # size of box depends on pixel size. A pixel size of 14 micrometer
            # results in a cross size of 189 pixels
        else:
            box = (float(coord[0])-93.5, float(coord[1])-93.5,
                   float(coord[0])+95.5, searchImage.size[1]-0.5)
        box = (math.floor(box[0]), math.floor(box[1]), math.floor(box[2]),
               math.floor(box[3]))
        searchCrop = searchImage.crop(box)
        c_x = int(float(coord[1]))
        c_y = int(float(coord[0]))
        abst_y = c_x - int(math.floor(box[1])) - 1 # x shift
        center = num.asarray(searchImage)[c_x, c_y]
        if center == 0:
            center = center + 0.00001 # to avoid division by zero
        val = [num.asarray(searchImage)[c_x-1, c_y+1], num.asarray(searchImage)[c_x-1, c_y-1],
               num.asarray(searchImage)[c_x+1, c_y-1], num.asarray(searchImage)[c_x+1, c_y+1]]
        # ERDAS upper right, upper left, lower left, lower right
        val_dict = {0: 1, 1: -1, 2: -1, 3: 1}
        flag = val_dict[val.index(min(val))]
        if float(min(val))/center > 2. or min(val) > 100:
            flag = 0
        newima = num.zeros((searchCrop.size[1], searchCrop.size[0]),
                           dtype="float")
        Ayo = num.array(int(searchCrop.size[0])*[255])
        Ay = num.array((abst_y + flag)*[255] + 3*[0] + ((int(searchCrop.size[0])-3-abst_y)-flag)*[255])
        Ax = num.array(int(searchCrop.size[0])*[0])
        Kx = num.array(3*[Ayo] + ((int(searchCrop.size[1])-9)/2+flag)*[Ay] + 3*[Ax]
                       + ((int(searchCrop.size[1])-9)/2-flag)*[Ay] + 3*[Ayo])
        Kxlist = list(itertools.chain(*Kx))
        i = 0
        for y in range(int(searchCrop.size[1])):
            for x in range(int(searchCrop.size[0])):
                newima[y, x] = Kxlist[i+y+x]
            i = i+x
        kernel = Image.fromarray(newima)
        kernel = kernel.convert(mode="L")
        # -----
        modified = ImageChops.multiply(searchCrop, kernel) # Results in an image
        # where the pixels along the cross axes will get a value of 0
        # ---
The error message is the following:
File "D:\GIS_dbase\Data\hma_cci\hexagon\KH9_Python\interpolate_cross.py", line 58, in
crossInterpolation filledImage = self.p_model.point(f, searchImage, method)
File "D:\GIS_dbase\Data\hma_cci\hexagon\KH9_Python\interpolate_cross.py", line 207, in
point modified = ImageChops.multiply(searchCrop,kernel) # Results in an image where
the pixels along the cross axes will get a value of 0
File "C:\Python27\lib\site-packages\PIL\ImageChops.py", line 119, in multiply
image1.load()
File "C:\Python27\lib\site-packages\PIL\Image.py", line 1730, in load
self.im = self.im.crop(self.__crop)
TypeError: integer argument expected, got float
The issue is that PIL's crop method takes a tuple of 4 integer values but you are passing it floats. This should work:
box = tuple([int(math.floor(x)) for x in box])
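Applied in context, this goes right before the crop call. Note that under Python 2 math.floor returns a float, which is why flooring the values alone did not help:
# math.floor returns a float under Python 2, so cast explicitly to int
box = tuple(int(math.floor(x)) for x in box)
searchCrop = searchImage.crop(box)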
