from scipy import ndimage
height, width, colors = image.shape
transform = [[1, 0, 0],
             [0.5, 1, 0],
             [0, 0, 1]]
sheared_array = ndimage.affine_transform(image,
                                         transform,
                                         offset=(0, -height*0.7, 0),
                                         output_shape=(height, width*2, colors))
plt.imshow(sheared_array)
My current code does this. My aim is to shear the image by an arbitrary angle X.
I want to do the same thing with a naive approach: no pre-defined functions, just Python/NumPy code written from scratch.
Given the image:
the following code should do what you want to achieve. It copies rows of pixels from the NumPy array representing the source image into a newly created, wider image, at x-offsets calculated from the given shear angle. The variable names in the code below are chosen to explain what they are used for, providing further detail about what the code does:
from PIL import Image
import numpy as np
shearAngleDegrees = 30
PILimg = Image.open('shearNumpyImageByAngle.jpg')
#PILimg.show()
npImg = np.asarray(PILimg)
def shearNpImgByAngle(npImg, shearAngleDegrees, maxShearAngle=75):
    import numpy as np
    from math import tan, radians
    assert -maxShearAngle <= shearAngleDegrees <= maxShearAngle
    ccw = shearAngleDegrees > 0  # shear counter-clockwise?
    imgH, imgW, imgRGBtplItems = npImg.shape
    shearAngleRadians = radians(shearAngleDegrees)
    imgWplus2imgH = abs(tan(shearAngleRadians))  # (plus in width)/(image height)
    imgWplus = int((imgH-1)*imgWplus2imgH)  # image width increase in pixels
    npImgOut = np.zeros((imgH, imgW+imgWplus, imgRGBtplItems), dtype='uint8')
    Wplus, Wplus2H = (0, -imgWplus2imgH) if ccw else (imgWplus, imgWplus2imgH)
    for y in range(imgH):
        shiftX = Wplus - int(y*Wplus2H)
        npImgOut[y][shiftX:shiftX+imgW] = npImg[y]
    return npImgOut
#:def
npImgOut = shearNpImgByAngle(npImg, shearAngleDegrees)
PILout = Image.fromarray(npImgOut)
PILout.show()
PILout.save('shearNumpyImageByAngle_shearedBy30deg.jpg')
gives:
As a nice add-on to the above code, here is an extension that fills the black edges of the sheared image by mirroring the source picture around its sides:
def filledShearNpImgByAngle(npImg, angleDeg, fill=True, maxAngle=75):
    import numpy as np
    from math import tan, radians
    assert -maxAngle <= angleDeg <= maxAngle
    ccw = angleDeg > 0  # shear counter-clockwise?
    imgH, imgW, imgRGBtplItems = npImg.shape
    angleRad = radians(angleDeg)
    imgWplus2imgH = abs(tan(angleRad))  # (plus in width)/(image height)
    imgWplus = int((imgH-1)*imgWplus2imgH)  # image add. width in pixels
    npImgOut = np.zeros((imgH, imgW+imgWplus, imgRGBtplItems),
                        dtype=npImg.dtype)
    Wplus, Wplus2H = (0, -imgWplus2imgH) if ccw else (imgWplus, imgWplus2imgH)
    for y in range(imgH):
        shiftXy = Wplus - int(y*Wplus2H)
        npImgOut[y][shiftXy:shiftXy+imgW] = npImg[y]
        if fill:
            assert imgW > imgWplus
            # mirror the source row into the left black margin
            npImgOut[y][0:shiftXy] = np.flip(npImg[y][0:shiftXy], axis=0)
            # mirror the source row into the right black margin
            npImgOut[y][imgW+shiftXy:imgW+imgWplus] = np.flip(
                npImg[y][imgW-imgWplus-1+shiftXy:imgW-1], axis=0)
    return npImgOut
#:def
from PIL import Image
import numpy as np
PILimg = Image.open('shearNumpyImageByAngle.jpg')
npImg = np.asarray(PILimg)
shearAngleDegrees = 20
npImgOut = filledShearNpImgByAngle(npImg, shearAngleDegrees)#, fill=False)
shearAngleDegrees = 10
npImgOut = filledShearNpImgByAngle(npImgOut, shearAngleDegrees)#, fill=False)
PILout = Image.fromarray(npImgOut)
PILout.show()
PILout.save('shearNumpyImageByAngle_filledshearBy30deg.jpg')
gives:
or the other way around:
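Since every output row is just the source row copied at a per-row x-offset, the same idea can also be written without the per-row Python loop, using a single fancy-indexing assignment. This is only a sketch; shear_rows_vectorized is a hypothetical helper name, not part of the code above:
import numpy as np
from math import tan, radians

def shear_rows_vectorized(img, angle_deg):
    # Same row-shift shear as above, but as one vectorized assignment.
    h, w, ch = img.shape
    ratio = abs(tan(radians(angle_deg)))   # (extra width)/(image height)
    extra = int((h - 1) * ratio)
    out = np.zeros((h, w + extra, ch), dtype=img.dtype)
    ys = np.arange(h)
    if angle_deg > 0:                      # counter-clockwise
        shifts = (ys * ratio).astype(int)
    else:
        shifts = extra - (ys * ratio).astype(int)
    cols = shifts[:, None] + np.arange(w)[None, :]   # (h, w) target columns
    out[ys[:, None], cols] = img
    return out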
I am trying to identify small dashed lines in an image. An example would be identifying the copy area in an Excel-type application.
I have tried this.
I am finding it difficult to choose the filter sizes. So, I tried a different approach using the Fourier transform to check for repeatability.
Given that I know the dashed lines' pixel repetition range, I go row by row, using a moving window to check for periodicity by finding the dominant frequency in that window.
If the dominant frequency is in the range of the dashed lines' period, I set the mask in the mask image. I repeat the same for columns. However, this is still failing. Any suggestions or other techniques?
Here is the code:
import cv2
import numpy as np
img = cv2.imread('test.png')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('imgGray', imgGray)
rows,cols = imgGray.shape
maskImage = np.full((rows, cols), 0, dtype=np.uint8)
kernelL = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=np.float32)
imgLaplacian = cv2.filter2D(imgGray, cv2.CV_32F, kernelL)
imgResult = imgLaplacian
imgResult = np.clip(imgResult, 0, 255)
imgResult = imgResult.astype('uint8')
imgLaplacian = imgResult
cv2.imshow('imgLaplacian', imgLaplacian)
dashLineSearchInterval = 30
fmaxPixel = 9  # maximum interval (in pixels) for dash repetition
fminPixel = 7  # minimum interval (in pixels) for dash repetition
stride = 2
for y in range(0, rows-dashLineSearchInterval, stride):
    for x in range(0, cols-dashLineSearchInterval, stride):
        kX = imgLaplacian[y, x:x+dashLineSearchInterval].copy()
        kX = kX - np.mean(kX)
        N = dashLineSearchInterval
        freq = np.fft.fftfreq(N)
        ft = np.fft.fft(kX)  # Fourier transform
        power = ft.real**2 + ft.imag**2  # power spectrum
        maxPowerFreq = np.argmax(power)  # dominant frequency bin
        domFreq = freq[maxPowerFreq]
        if domFreq < 0:
            domFreq = -domFreq
        #print(domFreq)
        if 1.0/fmaxPixel <= domFreq <= 1.0/fminPixel:
            maskImage[y, x:x+dashLineSearchInterval] = 255
for x in range(0, cols-dashLineSearchInterval, stride):
    for y in range(0, rows-dashLineSearchInterval, stride):
        kY = imgLaplacian[y:y+dashLineSearchInterval, x].copy()
        kY = kY - np.mean(kY)
        N = dashLineSearchInterval
        freq = np.fft.fftfreq(N)
        ft = np.fft.fft(kY)  # Fourier transform
        power = ft.real**2 + ft.imag**2  # power spectrum
        maxPowerFreq = np.argmax(power)  # dominant frequency bin
        domFreq = freq[maxPowerFreq]
        if domFreq < 0:
            domFreq = -domFreq
        #print(domFreq)
        if 1.0/fmaxPixel <= domFreq <= 1.0/fminPixel:
            maskImage[y:y+dashLineSearchInterval, x] = 255
cv2.imshow('maskImage', maskImage)
cv2.waitKey()
I have an image of dimension 155 x 240, like the following:
I want to extract patches of a certain shape (25 x 25).
I don't want to extract patches from the whole image.
I want to extract N patches from the non-zero (non-background) area of the image. How can I do that? Any idea, suggestion or implementation will be appreciated. You can try with either Matlab or Python.
Note:
I have generated a random image so that you can test the patching. The variable image_process holds that image in this code.
import numpy as np
from scipy.ndimage import convolve
import matplotlib.pyplot as plt
background = np.ones((155,240))
background[78,120] = 2
n_d = 50
y,x = np.ogrid[-n_d: n_d+1, -n_d: n_d+1]
mask = x**2+y**2 <= n_d**2
mask = 254*mask.astype(float)
image_process = convolve(background, mask)-sum(sum(mask))+1
image_process[image_process==1] = 0
image_process[image_process==255] = 1
plt.imshow(image_process)
Let's assume that the pixel value you want to omit is 0.
In this case, what you could do is first find the indices of the non-zero values, then slice the image at the min/max positions to get only the desired area, and then simply apply extract_patches_2d with the desired window size and number of patches.
For example, given the dummy image you supplied:
import numpy as np
from scipy.ndimage import convolve
import matplotlib.pyplot as plt
background = np.ones((155,240))
background[78,120] = 2
n_d = 50
y,x = np.ogrid[-n_d: n_d+1, -n_d: n_d+1]
mask = x**2+y**2 <= n_d**2
mask = 254*mask.astype(float)
image_process = convolve(background, mask)-sum(sum(mask))+1
image_process[image_process==1] = 0
image_process[image_process==255] = 1
plt.figure()
plt.imshow(image_process)
plt.show()
from sklearn.feature_extraction.image import extract_patches_2d
x, y = np.nonzero(image_process)
xl,xr = x.min(),x.max()
yl,yr = y.min(),y.max()
only_desired_area = image_process[xl:xr+1, yl:yr+1]
window_shape = (25, 25)
B = extract_patches_2d(only_desired_area, window_shape, max_patches=100) # B shape will be (100, 25, 25)
If you plot only_desired_area, you will get the following image:
This is the main logic. If you wish an even tighter bound, you should adjust the slicing accordingly; one possible refinement is sketched below.
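As one such refinement, you can oversample candidate patches and keep only those that contain no background (zero) pixels at all. This is only a sketch, and keep_nonzero_patches is a hypothetical helper, not part of the snippet above:
import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d

def keep_nonzero_patches(image, window_shape, max_patches, random_state=None):
    # Draw more candidates than needed, then discard any patch that still
    # touches the background; may return fewer than max_patches if too few
    # candidates qualify.
    candidates = extract_patches_2d(image, window_shape,
                                    max_patches=max_patches * 10,
                                    random_state=random_state)
    fully_inside = candidates[np.all(candidates != 0, axis=(1, 2))]
    return fully_inside[:max_patches]

patches = keep_nonzero_patches(only_desired_area, window_shape, 100)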
I am using skimage. I need to create a mask equal in area to an image. The mask will have a region which will hide part of the image. I am building it as in the sample below, but this is very slow, and I am sure there is a more Pythonic way of doing it. Could anyone point it out, please?
The code I am using presently:
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import skimage as sk
from skimage import io
sourceimage = './sample.jpg'
img = np.copy(io.imread(sourceimage, as_gray=True))
mask = np.full(img.shape, 1)
maskpolygon = [(1,200),(300,644),(625,490),(625,1)]
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
pgon = Polygon(maskpolygon)
for r in range(mask.shape[0]):
    for c in range(mask.shape[1]):
        p = Point(r, c)
        if pgon.contains(p):
            mask[r, c] = 0
The expected result is like this (for a 9x9 image, but I am working on 700x700):
[1,1,1,1,1,1,1,1,1]
[1,1,1,1,1,1,1,1,1]
[1,1,0,0,1,1,1,1,1]
[1,1,0,0,1,1,1,1,1]
[1,1,0,0,0,0,1,1,1]
[1,1,0,0,0,0,0,1,1]
[1,1,1,0,0,0,0,1,1]
[1,1,1,1,0,0,1,1,1]
[1,1,1,1,1,1,1,1,1]
I can invert the 1s and 0s to show/hide the region.
Thank you.
I have been able to resolve this thanks to @HansHirse.
Below is how I worked it out:
sourceimage = './sample.jpg'
figuresize = (100, 100)
from skimage.draw import polygon
#open source and create a copy
img = np.copy(io.imread(sourceimage, as_gray=True))
mask = np.full(img.shape, 0)
maskpolygon = [(1,1), (280,1),(625, 280),(460, 621),(15, 625)]
maskpolygonr = [x[0] for x in maskpolygon]
maskpolygonc = [x[1] for x in maskpolygon]
rr, cc = polygon(maskpolygonr, maskpolygonc)
mask[rr, cc] = 1
masked_image = img * mask
# show step by step what is happening
fig, axs = plt.subplots(nrows=3, ncols=1, sharex=True, sharey=True, figsize=figuresize)
ax = axs.ravel()
ax[0].imshow(img)#, cmap=plt.cm.gray)
ax[1].imshow(mask)#, cmap=plt.cm.gray)
ax[2].imshow(masked_image)#, cmap=plt.cm.gray)
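For reference, recent scikit-image versions (0.16 and later) also provide polygon2mask, which wraps the same rasterization into a single call; a minimal sketch using the variables above:
from skimage.draw import polygon2mask

# maskpolygon holds (row, col) vertices, as above
mask = polygon2mask(img.shape, maskpolygon).astype(img.dtype)
masked_image = img * mask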
I generated a texture image like this:
I have to compare two textures. I have used the histogram comparison method.
import cv2
image_file = 'output_ori.png'
img_bgr = cv2.imread(image_file)
height, width, channel = img_bgr.shape
hist_lbp = cv2.calcHist([img_bgr], [0], None, [256], [0, 256])
print("second started")
image_fileNew = 'output_scan.png'
img_bgr_new = cv2.imread(image_fileNew)
height_new, width_new, channel_new = img_bgr_new.shape
print("second lbp")
hist_lbp_new = cv2.calcHist([img_bgr_new], [0], None, [256], [0, 256])
print("compar started")
compare = cv2.compareHist(hist_lbp, hist_lbp_new, cv2.HISTCMP_CORREL)
print(compare)
But this method is not effective. It shows similar results for two different image textures. Also, it does not show enough variation to identify the print-and-scan effect. How do I compare the textures? I thought of analysing the GLCM characteristics.
import cv2
import numpy as np
from skimage.feature import greycomatrix, greycoprops
img = cv2.imread('images/noised_img1.jpg', 0)
image = np.array(img, dtype=np.uint8)
g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=256, normed=True, symmetric=True)
contrast = greycoprops(g, 'contrast')
print(contrast)
In this method, I am getting the output as a 2x2 matrix. How do I compare two matrices on several features like contrast, similarity, homogeneity, ASM, energy and correlation?
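One common way to compare several GLCM properties at once is to flatten each property matrix into a single feature vector per image and then measure a distance between those vectors, analogous to comparing histograms. The following is only a sketch (glcm_features is a hypothetical helper) and assumes both images are quantized to the same number of grey levels:
import numpy as np
from skimage.feature import greycomatrix, greycoprops
from scipy.spatial.distance import euclidean

def glcm_features(image, props=('contrast', 'homogeneity', 'ASM',
                                'energy', 'correlation')):
    # One (2 distances x 2 angles) matrix per property, flattened and
    # stacked into a single feature vector.
    g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=256,
                     normed=True, symmetric=True)
    return np.hstack([greycoprops(g, p).ravel() for p in props])

# The properties have very different ranges (e.g. contrast vs. ASM), so in
# practice each dimension should be rescaled before taking the distance.
# dissimilarity = euclidean(glcm_features(img_a), glcm_features(img_b))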
COMMENT CLARIFICATION
import numpy as np
from PIL import Image
class LBP:
    def __init__(self, input, num_processes, output):
        # Convert the image to grayscale
        self.image = Image.open(input).convert("L")
        self.width = self.image.size[0]
        self.height = self.image.size[1]
        self.patterns = []
        self.num_processes = num_processes
        self.output = output

    def execute(self):
        self._process()
        if self.output:
            self._output()

    def _process(self):
        pixels = list(self.image.getdata())
        pixels = [pixels[i * self.width:(i + 1) * self.width] for i in range(self.height)]
        # Calculate LBP for each non-edge pixel
        for i in range(1, self.height - 1):
            # Cache only the rows we need (within the neighborhood)
            previous_row = pixels[i - 1]
            current_row = pixels[i]
            next_row = pixels[i + 1]
            for j in range(1, self.width - 1):
                # Compare this pixel to its neighbors, starting at the top-left
                # pixel and moving clockwise, and use bit operations to
                # efficiently update the feature vector
                pixel = current_row[j]
                pattern = 0
                pattern = pattern | (1 << 0) if pixel < previous_row[j-1] else pattern
                pattern = pattern | (1 << 1) if pixel < previous_row[j] else pattern
                pattern = pattern | (1 << 2) if pixel < previous_row[j+1] else pattern
                pattern = pattern | (1 << 3) if pixel < current_row[j+1] else pattern
                pattern = pattern | (1 << 4) if pixel < next_row[j+1] else pattern
                pattern = pattern | (1 << 5) if pixel < next_row[j] else pattern
                pattern = pattern | (1 << 6) if pixel < next_row[j-1] else pattern
                pattern = pattern | (1 << 7) if pixel < current_row[j-1] else pattern
                self.patterns.append(pattern)

    def _output(self):
        # Write the result to an image file
        result_image = Image.new(self.image.mode, (self.width - 2, self.height - 2))
        result_image.putdata(self.patterns)
        result_image.save("output.png")
I generated the texture with this code. I have the texture and I have methods to calculate the texture properties, but the question is how to identify the similarity between two textures.
Suppose you have two classes, for example couscous and knitwear, and you wish to classify an unknown color image as either couscous or knitwear. One possible method would be:
Converting the color images to grayscale.
Computing the local binary patterns.
Calculating the normalized histogram of local binary patterns.
The following snippet implements this approach:
import numpy as np
from skimage import io, color
from skimage.feature import local_binary_pattern
def lbp_histogram(color_image):
    img = color.rgb2gray(color_image)
    patterns = local_binary_pattern(img, 8, 1)
    hist, _ = np.histogram(patterns, bins=np.arange(2**8 + 1), density=True)
    return hist
couscous = io.imread('https://i.stack.imgur.com/u3xLI.png')
knitwear = io.imread('https://i.stack.imgur.com/Zj14J.png')
unknown = io.imread('https://i.stack.imgur.com/JwP3j.png')
couscous_feats = lbp_histogram(couscous)
knitwear_feats = lbp_histogram(knitwear)
unknown_feats = lbp_histogram(unknown)
Then you need to measure the similarity (or dissimilarity) between the LBP histogram of the unknown image and the histograms of the images that represent the two considered classes. Euclidean distance between histograms is a popular dissimilarity measure.
In [63]: from scipy.spatial.distance import euclidean
In [64]: euclidean(unknown_feats, couscous_feats)
Out[64]: 0.10165884804845844
In [65]: euclidean(unknown_feats, knitwear_feats)
Out[65]: 0.0887492936776889
In this example the unknown image will be classified as knitwear because the dissimilarity unknown-couscous is greater than the dissimilarity unknown-knitwear. This is in good agreement with the fact that the unknown image is actually a different type of knitwear.
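The same decision can be written as a minimal nearest-class rule over those distances, using the variables defined above:
classes = {'couscous': couscous_feats, 'knitwear': knitwear_feats}
label = min(classes, key=lambda c: euclidean(unknown_feats, classes[c]))
print(label)  # knitwear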
import matplotlib.pyplot as plt
hmax = max([couscous_feats.max(), knitwear_feats.max(), unknown_feats.max()])
fig, ax = plt.subplots(2, 3)
ax[0, 0].imshow(couscous)
ax[0, 0].axis('off')
ax[0, 0].set_title('Cous cous')
ax[1, 0].plot(couscous_feats)
ax[1, 0].set_ylim([0, hmax])
ax[0, 1].imshow(knitwear)
ax[0, 1].axis('off')
ax[0, 1].set_title('Knitwear')
ax[1, 1].plot(knitwear_feats)
ax[1, 1].set_ylim([0, hmax])
ax[1, 1].axes.yaxis.set_ticklabels([])
ax[0, 2].imshow(unknown)
ax[0, 2].axis('off')
ax[0, 2].set_title('Unknown (knitwear)')
ax[1, 2].plot(unknown_feats)
ax[1, 2].set_ylim([0, hmax])
ax[1, 2].axes.yaxis.set_ticklabels([])
plt.show()
Hi, this code estimates chromatic aberration in an image by giving the center of distortion (x, y) and the magnitude of distortion (alpha) between the red and green channels, and also between the blue and green channels. I have an error in the warpRegion function:
File "CAfeb.py", line 217, in warpRegion
reg_w = sp.interpolate.interp2d(yrampf,xrampf,Cwarp, yramp1f, xramp1f,'cubic');
File "/usr/lib/python2.7/dist-packages/scipy/interpolate/interpolate.py", line 109, in __init__
'quintic' : 5}[kind]
TypeError: unhashable type: 'numpy.ndarray'
Below is the complete code. Any help will be greatly appreciated. Thank you, Areej.
import math
from PIL import Image
import numpy as np
from decimal import Decimal
import scipy as sp
from scipy import interpolate
from scitools.std import ndgrid
from scipy import ogrid, sin, mgrid, ndimage, array
def ldimage():
    #load image
    global im
    im = Image.open("/home/areej/Desktop/mandril_color.tif")

def analyzeCA(mode, im):
    n_regions = 10;
    reg_size = [300, 300];
    overlap = 0.5;
    levels = 9;
    steps = 2;
    edge_width = 10;
    hist_sz = 128;
    # alpha_1 and alpha_2 are assumed to be between these values
    w_data = [0.9985, 1.0015];
    reg_list = []
    #creating an array of pixels so that we can access them
    pix = im.load()
    #
    #Analyze full image
    if mode == 'full':
        print "Doing a full analysis"
        # mx_shift is the third argument in 'full' mode
        mx_shift = n_regions;
        # [ydim,xdim,zdim] = size(im);
        ydim = im.size[0]
        xdim = im.size[1]
        zdim = 3
        print "Image dimensions: [ydim, xdim, zdim]= "+str([ydim, xdim, zdim])
        global alpha_mx, alpha_mn
        alpha_mx = 1 + 4*mx_shift / math.sqrt( xdim*xdim + ydim*ydim );
        alpha_mn = 1.0/alpha_mx;
        print "alpha_mx= "+str(alpha_mx)
        print "alpha_mn= "+str(alpha_mn)
        #recompute alpha_1 and alpha_2 to be between
        #these new values
        w_data = [alpha_mn, alpha_mx];
        ew = edge_width;
        #take the image minus a ew-wide edge
        roi = [ew+1, xdim-ew, ew+1, ydim-ew];
        print "edge_width= "+str(ew)
        print "roi= "+str(roi)
        #Analyze blue to green chromatic aberration
        bg_params = parameterSearch( im, [3, 2], roi, ew, hist_sz, w_data);
        # Analyze red to green chromatic aberration
        rg_params = parameterSearch( im, [1, 2], roi, ew, hist_sz, w_data );
    elif mode == 'reg':
        print "we should do a regional analysis here"
    else:
        print "unsupported call"

#def estimateCARegions( im, [3, 2], reg_list, settings ):

def parameterSearch( im, colour_space, roi, ew, hist_sz, w_data):
    #levels is number of iterations
    levels = 8;
    steps = 2;
    #[ydim,xdim,zdim] = size(im);
    ydim = im.size[0]
    xdim = im.size[1]
    zdim = 3
    x_data = [1, xdim];
    y_data = [1, ydim];
    xlim = x_data;
    ylim = y_data;
    zlim = w_data;
    #work out which of height and width is the bigger
    dim = max(xdim, ydim)
    print "The highest dimension is : "+str(dim)
    #check that roi falls within expected boundaries
    if ((roi[0] <= ew) or (roi[1] > xdim-ew) or (roi[2] <= ew) or (roi[3] > ydim-ew)):
        print "ROI is too close to image edges"
        return -1  # TODO: terminate here with an error
    #Get image regions
    source = im.split()
    Cfixed = source[2]
    Cwarp = source[1]
    #[ydim,xdim,zdim] = size(im);
    ydimCwarp = Cwarp.size[0]
    xdimCwarp = Cwarp.size[1]
    print 'xdimCwarp'+str(xdimCwarp)
    roi_pad = [roi[0]-ew, roi[1]+ew, roi[2]-ew, roi[3]+ew];
    for levels in range(1, 8):
        #Guess at a center and then compute best warp
        #user defined function linear_space used to generate linearly spaced vectors
        x_coords = np.linspace(0, 511, steps+2)
        y_coords = np.linspace(0, 511, steps+2)
        z_coords = np.linspace(alpha_mn, alpha_mx, steps+2)
        step_x = (xlim[1]-xlim[0])/(steps+1)
        start_x = xlim[0]+step_x
        end_x = xlim[1]-step_x+0.5
        step_y = (ylim[1]-ylim[0])/(steps+1)
        start_y = ylim[0]+step_y
        end_y = ylim[1]-step_y+0.5
        step_z = (zlim[1]-zlim[0])/(steps+1)
        start_z = zlim[0]+step_z
        fudge_z = step_z/2.0
        end_z = zlim[1]-step_z+fudge_z
        #Do not include end points in search;
        centers_x, centers_y, warps = np.mgrid[start_x:end_x:step_x, start_y:end_y:step_y, start_z:end_z:step_z]
        centers_x = centers_x.flatten()
        centers_y = centers_y.flatten()
        warps = warps.flatten()
        mi = np.zeros(centers_x.size)
        for k in range(0, centers_x.size):
            cx = centers_x[k]
            cy = centers_y[k]
            wz = warps[k]
            #Warp the region
            temp_im = warpRegion(Cwarp, roi_pad, [cx, cy, wz])
            #correlation
            mi[k] = np.corrcoef(Cfixed, temp_im)
        #Now pick the best quadrant
        v, max_ix = math.max(mi)
        ix, jx, kx = arrayInd(mi.size, max_ix);
        ##The coordinates of err are off by 1 from x_coords and y_coords because
        ##we did not include the end point
        xlim = x_coords([jx, jx+2]);
        ylim = y_coords([ix, ix+2]);
        zlim = z_coords([kx, kx+2]);
        cx = math.mean(xlim);
        cy = math.mean(ylim);
        wz = math.mean(zlim);
        print "x= "+str(cx)
        print "y= "+str(cy)
        print "z= "+str(wz)

def warpRegion(Cwarp, roi_pad, (cx, cy, wz)):
    #Unpack region indices
    sx, ex, sy, ey = roi_pad
    xramp, yramp = np.mgrid[sx:ex+1, sy:ey+1]
    xrampc = xramp - cx;
    yrampc = yramp - cy;
    xramp1 = 1/wz*xrampc;
    yramp1 = 1/wz*yrampc;
    xrampf = xrampc.flatten()
    yrampf = yrampc.flatten()
    xramp1f = xramp1.flatten()
    yramp1f = yramp1.flatten()
    reg_w = sp.interpolate.interp2d(yrampf, xrampf, Cwarp, yramp1f, xramp1f, 'cubic');
ldimage()
analyzeCA('full', im)
As DSM correctly states, this is not the correct calling syntax for interp2d, which is documented at scipy.interpolate.interp2d. If you read the calling syntax and then your error message again (or the module itself, whichever you prefer), you will recognize that you are trying to use an array as the index into a dictionary, which naturally throws an exception.
I think what you are trying to do is an interpolation of the grid given by the arrays xrampf, yrampf at the new positions xramp1f, yramp1f. The scipy documentation also gives an exact usage example, which translates to your code as follows:
interp_func = sp.interpolate.interp2d(yrampf, xrampf, Cwarp, kind='cubic')
reg_w = interp_func(yramp1f, xramp1f)
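For reference, here is a minimal, self-contained sketch of this construct-then-evaluate pattern on a small synthetic grid (note that interp2d has since been deprecated in recent SciPy releases, where RegularGridInterpolator is the usual replacement):
import numpy as np
from scipy import interpolate

x = np.arange(6.0)                             # grid x-coordinates
y = np.arange(6.0)                             # grid y-coordinates
z = np.sin(x[None, :]) * np.cos(y[:, None])    # values on the grid, shape (len(y), len(x))

f = interpolate.interp2d(x, y, z, kind='cubic')            # build the interpolant first
z_new = f(np.linspace(0, 5, 11), np.linspace(0, 5, 11))    # then evaluate at new points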
I hope that was your intention.
Kind regards