Using a Gabor kernel to extract vertical lines results in a black image - Python

I understand the concept of the Gabor kernel and how it can be used to identify directional edges, so I want to use it to identify barcode lines in images.
However, when I filter an image with a Gabor kernel I always get a blank/black result. Can you provide feedback on what I need to do to get Gabor to identify the vertical lines in an image, i.e., produce a result that is white where the vertical edges are?
Input image:
Result:
import cv2
import numpy as np

def deginrad(degree):
    radiant = 2 * np.pi / 360 * degree
    return radiant

def main():
    src = cv2.imread('./images/barcode1.jpg', cv2.IMREAD_GRAYSCALE)

    # Introduce consistency in width
    const_width = 300
    aspect = float(src.shape[0]) / float(src.shape[1])
    src = cv2.resize(src, (const_width, int(const_width * aspect)))
    src = cv2.GaussianBlur(src, (7, 7), 0)

    # Apply Gabor kernel to identify vertical edges
    g_kernel = cv2.getGaborKernel((9, 9), 8, deginrad(0), 5, 0.5, 0, ktype=cv2.CV_32F)
    gabor = cv2.filter2D(src, cv2.CV_8UC3, g_kernel)

    # Visualize the Gabor kernel
    h, w = g_kernel.shape[:2]
    g_kernel = cv2.resize(g_kernel, (20 * w, 20 * h), interpolation=cv2.INTER_CUBIC)

    cv2.imshow('src', src)
    cv2.imshow('gabor', gabor)  # gabor is just black
    cv2.imshow('gabor kernel', g_kernel)
    cv2.waitKey(0)

if __name__ == "__main__":
    main()

You need to play with the parameters to view it properly. The parameters are as follows:
cv2.getGaborKernel(ksize, sigma, theta, lambda, gamma, psi, ktype)
with
g_kernel = cv2.getGaborKernel((31,31), 4, deginrad(0), 5, 0.5, 0, ktype=cv2.CV_32F)
the result is -
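For reference, here is a minimal end-to-end sketch with the adjusted kernel. The image path and the filter2D call are taken from the question; the cv2.normalize step is an optional addition of mine (not in the original answer) that stretches weak responses to the full display range:

import cv2
import numpy as np

def deginrad(degree):
    return 2 * np.pi / 360 * degree

src = cv2.imread('./images/barcode1.jpg', cv2.IMREAD_GRAYSCALE)
# Larger kernel (31x31) and smaller sigma (4), as suggested above
g_kernel = cv2.getGaborKernel((31, 31), 4, deginrad(0), 5, 0.5, 0, ktype=cv2.CV_32F)
gabor = cv2.filter2D(src, cv2.CV_8UC3, g_kernel)
# Optional: stretch the filter response to the full 0-255 range before display
gabor = cv2.normalize(gabor, None, 0, 255, cv2.NORM_MINMAX)
cv2.imshow('gabor', gabor)
cv2.waitKey(0)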

Related

Image processing for blur detection

I am trying to detect blurred images. Thanks to this post, I managed to create a script using the Fast Fourier Transform, and so far it has worked quite well. But for some photos, I am not able to get correct results.
When the background is almost the same color as the objects in the foreground, I think my script is not able to give a good result.
Do you have any leads to correct this?
import cv2
import imutils
from PIL import Image as pilImg
from IPython.display import display
import numpy as np
from matplotlib import pyplot as plt

def detect_blur_fft(image, size=60, thresh=17, vis=False):
    """
    Detects blur by measuring how much high-frequency content remains
    once the low frequencies are removed from the image's FFT
    :param image: The image to detect blur in
    :param size: the dimension of the smaller square extracted from the image, defaults to 60 (optional)
    :param thresh: the lower this value, the more blur is acceptable, defaults to 17 (optional)
    :param vis: Whether or not to show a visualization of the magnitude spectrum, defaults to False
        (optional)
    """
    # grab the dimensions of the image and use the dimensions to
    # derive the center (x, y)-coordinates
    (h, w) = image.shape
    (cX, cY) = (int(w / 2.0), int(h / 2.0))

    # compute the FFT to find the frequency transform, then shift
    # the zero frequency component (i.e., DC component located at
    # the top-left corner) to the center where it will be easier
    # to analyze
    fft = np.fft.fft2(image)
    fftShift = np.fft.fftshift(fft)

    # check to see if we are visualizing our output
    if vis:
        # compute the magnitude spectrum of the transform
        magnitude = 20 * np.log(np.abs(fftShift))
        # display the original input image
        (fig, ax) = plt.subplots(1, 2)
        ax[0].imshow(image, cmap="gray")
        ax[0].set_title("Input")
        ax[0].set_xticks([])
        ax[0].set_yticks([])
        # display the magnitude image
        ax[1].imshow(magnitude, cmap="gray")
        ax[1].set_title("Magnitude Spectrum")
        ax[1].set_xticks([])
        ax[1].set_yticks([])
        # show our plots
        plt.show()

    # zero-out the center of the FFT shift (i.e., remove low
    # frequencies), apply the inverse shift such that the DC
    # component once again becomes the top-left, and then apply
    # the inverse FFT
    fftShift[cY - size:cY + size, cX - size:cX + size] = 0
    fftShift = np.fft.ifftshift(fftShift)
    recon = np.fft.ifft2(fftShift)

    # compute the magnitude spectrum of the reconstructed image,
    # then compute the mean of the magnitude values
    magnitude = 20 * np.log(np.abs(recon))
    mean = np.mean(magnitude)

    # the image will be considered "blurry" if the mean value of the
    # magnitudes is less than the threshold value
    return (mean, mean <= thresh)
pathImg = "path to the image"
image = cv2.imread(pathImg)
# Resizing the image to 500 pixels in width.
image = imutils.resize(image, width= 500)
# Converting the image to gray scale.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Using the FFT to detect blur.
(mean, blurry) = detect_blur_fft(gray, size=60)
image = np.dstack([gray] * 3)
# Set the annotation color to red if the image is blurry, or green otherwise.
color = (0, 0, 255) if blurry else (0, 255, 0)
text = "Blurry ({:.4f})" if blurry else "Not Blurry ({:.4f})"
text = text.format(mean)
# Adding text to the image.
cv2.putText(image, text, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
print("[INFO] {}".format(text))
# show the output image
display(pilImg.fromarray(image))
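One additional lead, not from the original post: a common complementary blur measure is the variance of the Laplacian, which quantifies how much sharp edge response an image has, and which can be compared against the FFT score on the problematic photos. A minimal sketch, assuming the same gray image as above (the threshold of 100 is illustrative and image-dependent):

import cv2

def laplacian_blur_score(gray, thresh=100.0):
    # Low variance of the Laplacian means few sharp edges,
    # which usually indicates blur
    score = cv2.Laplacian(gray, cv2.CV_64F).var()
    return score, score < thresh

(score, blurry) = laplacian_blur_score(gray)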

Approximating edge with rough outline - OpenCV

I've been researching and trying a couple of functions to get what I want, and I feel like I might be overthinking it.
One version of my code is below. The sample image is here.
My end goal is to find the angle (yellow) of the approximated line with respect to the frame (green line): Final
I haven't even got to the angle portion of the program yet.
The results I was obtaining from the code below were as follows: Canny, Closed, Small Removed.
Does anybody have a better way of creating the difference and establishing the estimated line?
Any help is appreciated.
import cv2
import numpy as np
pX = int(512)
pY = int(768)
img = cv2.imread('IMAGE LOCATION', cv2.IMREAD_COLOR)
imgS = cv2.resize(img, (pX, pY))
aimg = cv2.imread('IMAGE LOCATION', cv2.IMREAD_GRAYSCALE)
# Blur image to reduce noise and resize for viewing
blur = cv2.medianBlur(aimg, 5)
rblur = cv2.resize(blur, (384, 512))
canny = cv2.Canny(rblur, 120, 255, 1)
cv2.imshow('canny', canny)
kernel = np.ones((2, 2), np.uint8)
#fringeMesh = cv2.dilate(canny, kernel, iterations=2)
#fringeMesh2 = cv2.dilate(fringeMesh, None, iterations=1)
#cv2.imshow('fringeMesh', fringeMesh2)
closing = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernel)
cv2.imshow('Closed', closing)
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(closing, connectivity=8)
#connectedComponentswithStats yields every separated component with information on each of them, such as size
sizes = stats[1:, -1]; nb_components = nb_components - 1
min_size = 200 #num_pixels
fringeMesh3 = np.zeros(output.shape)
for i in range(0, nb_components):
    if sizes[i] >= min_size:
        fringeMesh3[output == i + 1] = 255
#contours, _ = cv2.findContours(fringeMesh3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#cv2.drawContours(fringeMesh3, contours, -1, (0, 255, 0), 1)
cv2.imshow('final', fringeMesh3)
#cv2.imshow("Natural", imgS)
#cv2.imshow("img", img)
cv2.imshow("aimg", aimg)
cv2.imshow("Blur", rblur)
cv2.waitKey()
cv2.destroyAllWindows()
You can fit a straight line to the first white pixel you encounter in each column, starting from the bottom.
I had to trim your image because you shared a screen grab of it with a window decoration, title and frame rather than your actual image:
import cv2
import math
import numpy as np
# Load image as greyscale
im = cv2.imread('trimmed.jpg', cv2.IMREAD_GRAYSCALE)
# Get index of first white pixel in each column, starting at the bottom
yvals = (im[::-1,:]>200).argmax(axis=0)
# Make the x values 0, 1, 2, 3...
xvals = np.arange(0,im.shape[1])
# Fit a line of the form y = mx + c
z = np.polyfit(xvals, yvals, 1)
# Convert the slope to an angle
angle = np.arctan(z[0]) * 180/math.pi
Note 1: The value of z (the result of fitting) is:
array([ -0.74002694, 428.01463745])
which means the equation of the line you are looking for is:
y = -0.74002694 * x + 428.01463745
i.e. the y-intercept is at row 428 from the bottom of the image.
Note 2: Try to avoid JPEG as an intermediate format in image processing. It is lossy and changes your pixel values, so where you have thresholded and applied morphology you expect values of exactly 255 and 0, but JPEG will alter those values and you end up having to test for a range or threshold again.
Your 'Closed' image seems to segment the two regions quite clearly, so I'd suggest you focus on turning that boundary into a line you can work with. Connected component analysis and contour detection don't really provide any useful information here, so they aren't necessary.
One quite simple approach to finding the line angle is to find the first white pixel in each row. To keep only the rows that are part of your diagonal, exclude rows where that pixel is too close to either side (e.g. within 5%). That gives you a set of points (pixel locations) on the boundary between your two types of grass.
From there you can either do a linear regression to get an equation for the straight line, or you can get two points by averaging the x values for the top and bottom halves of the rows, and then calculate the gradient angle from those.
An alternative approach would be another morphological close with a very large kernel, to end up with just a solid white region and a solid black region, which you could turn into a line with Canny or findContours. From there you could get some points by averaging, use the endpoints, or, given a smooth enough result from a large enough kernel, detect the line with Hough lines.
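For concreteness, here is a minimal sketch of the simpler row-scan approach described above (my own illustration, not tested on the original image; 'closed.png' stands in for the morphologically closed result):

import cv2
import numpy as np

im = cv2.imread('closed.png', cv2.IMREAD_GRAYSCALE)
h, w = im.shape
xvals = (im > 200).argmax(axis=1)                # first white pixel in each row
rows = np.arange(h)
keep = (xvals > 0.05 * w) & (xvals < 0.95 * w)   # drop rows hugging either side
slope, intercept = np.polyfit(rows[keep], xvals[keep], 1)
angle = np.degrees(np.arctan(slope))
print(f"boundary angle: {angle:.1f} degrees")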

Extract a fixed number of squares from an image with Python/OpenCV

I have several scanned images I would like to process with Python/OpenCV. Each of these images (see an example below) contains n rows of coloured squares. Each of these squares has the same size. The goal is to crop each of these squares and to extract the data from it.
I have found there a piece of code which is able to extract squares from an image.
Here is my code where I have used it:
import numpy as np
import cv2
import cv2 as cv  # the find_squares code below uses the cv alias
from matplotlib import pyplot as plt

def angle_cos(p0, p1, p2):
    d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))

def find_squares(img):
    img = cv.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv.Canny(gray, 0, 50, apertureSize=5)
                bin = cv.dilate(bin, None)
            else:
                _retval, bin = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY)
            contours, _hierarchy = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv.arcLength(cnt, True)
                cnt = cv.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4]) for i in range(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    print(len(squares))
    return squares
img = cv2.imread("test_squares.jpg",1)
plt.axis("off")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
squares = find_squares(img)
cv2.drawContours( img, squares, -1, (0, 255, 0), 1 )
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
However, it finds too many squares (100 instead of 15!). Looking at the image, it seems that OpenCV finds a lot of contours for each square.
I'm pretty sure this can be optimized, since the squares have more or less the same size and are far from each other. As a very beginner in OpenCV, I haven't yet found a way to give more criteria to the find_squares function so that I get only 15 squares at the end of the routine. Maybe a maximum contour area can be imposed?
I have also found there a more detailed piece of code (very close to the previous one), but it seems to have been developed for an old version of OpenCV. I haven't managed to make it work (and so to modify it).
Here is another, more robust method.
I used this code to find the contours in the image (the full code can be found in this gist):
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Define square size
min_square_size = 987
# Read Image
img = cv2.imread('/home/stephen/Desktop/3eY0k.jpg')
# Threshold and find edges
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Threshold the image - segment white background from post it notes
_, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV)
# Find the contours
_, contours, _ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
I iterated through the contours. I only looked at the contours that were a reasonable size. I found the four corners of each contour.
# Create a list for post-it images
images = []
# Iterate through the contours in the image
for contour in contours:
    area = cv2.contourArea(contour)
    # If the contour is not really small, or really big
    h, w = img.shape[0], img.shape[1]
    if area > min_square_size and area < h * w - (2 * (h + w)):
        # Get the four corners of the contour
        epsilon = .1 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)
        # Draw the corner points
        for point in approx:
            cv2.circle(img, tuple(point[0]), 2, (255, 0, 0), 2)
        # Warp it to a square
        pts1 = np.float32(approx)
        pts2 = np.float32([[0, 0], [300, 0], [300, 300], [0, 300]])
        M = cv2.getPerspectiveTransform(pts1, pts2)
        dst = cv2.warpPerspective(img, M, (300, 300))
        # Add the square to the list of images
        images.append(dst.copy())
The post-it notes are squares, but because the camera warps the objects in the image they do not appear as squares. I used warpPerspective to make the post-it notes square shapes. Only a few of them are shown in this plot (there are more that didn't fit):
If your problem is that too many contours (edges) are found in the image, my suggestion is to modify the edge-finding part first. It'll be by far the easiest modification to make.
In particular, you'll need to change this call:
bin = cv.Canny(gray, 0, 50, apertureSize=5)
The cv.Canny() function takes as arguments two threshold values, the aperture size, and a boolean indicating whether a more precise form of the gradient is used. Play with those parameters, and my guess is you'll get much better results.
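For instance, a hedged sketch of what such a tweak might look like (the specific values are illustrative, not tuned for the posted image):

# Raise the lower threshold and use the more precise L2 gradient so
# fewer spurious edges survive
bin = cv.Canny(gray, 50, 150, apertureSize=5, L2gradient=True)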

Sobel filter implementation in scipy

I tried to implement the Sobel_X filter in SciPy with the convolve2d function.
I compared it with the results from this function:
from scipy.signal import convolve2d
from scipy import misc
from skimage.exposure import rescale_intensity
import cv2
import numpy as np

# https://www.pyimagesearch.com/2016/07/25/convolutions-with-opencv-and-python/
def convolve(image, kernel):
    # grab the spatial dimensions of the image, along with
    # the spatial dimensions of the kernel
    (iH, iW) = image.shape[:2]
    (kH, kW) = kernel.shape[:2]

    # allocate memory for the output image, taking care to
    # "pad" the borders of the input image so the spatial
    # size (i.e., width and height) are not reduced
    pad = (kW - 1) // 2
    image = cv2.copyMakeBorder(image, pad, pad, pad, pad,
                               cv2.BORDER_REPLICATE)
    output = np.zeros((iH, iW), dtype="float32")

    # loop over the input image, "sliding" the kernel across
    # each (x, y)-coordinate from left-to-right and top to
    # bottom
    for y in np.arange(pad, iH + pad):
        for x in np.arange(pad, iW + pad):
            # extract the ROI of the image by extracting the
            # *center* region of the current (x, y)-coordinates
            # dimensions
            roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]

            # perform the actual convolution by taking the
            # element-wise multiplication between the ROI and
            # the kernel, then summing the matrix
            k = (roi * kernel).sum()

            # store the convolved value in the output (x, y)-
            # coordinate of the output image
            output[y - pad, x - pad] = k

    # rescale the output image to be in the range [0, 255]
    output = rescale_intensity(output, in_range=(0, 255))
    output = (output * 255).astype("uint8")

    # return the output image
    return output
Here are the Sobel_X kernel and the code to compare:
sobelX = np.array((
    [-1, 0, 1],
    [-2, 0, 2],
    [-1, 0, 1]), dtype="int")
testim=misc.face(gray=True)
convolved_func=convolve(testim, sobelX)
convolved_np=convolve2d(testim, sobelX, boundary='symm', mode='same')
cv2.imshow("Face", np.hstack((convolved_func,np.array(convolved_np, dtype="uint8"))))
cv2.waitKey(0)
cv2.destroyAllWindows()
As you can see here, the results are entirely different.
I can't work out how to implement these filters so they produce the same results.
Should I somehow change my filter function, or is there something special in NumPy needed to implement it correctly?
I tried to adapt the function for SciPy as in this and that example, but the results were the same or worse (I got a black image).
You will get slightly different results.
Threshold to remove all values that are less than 0:
convolved_np[convolved_np < 0] = 0
That will give you something similar, though still not the same; some artifacts appear.
I think these functions differ, which is why I got somewhat different results. Maybe there are some mistakes, so if you can add to this answer, I would appreciate it.
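As a further hedged suggestion (my own, not from the answer above): two concrete sources of the mismatch are that scipy's convolve2d performs true convolution (it flips the kernel), while the manual loop computes correlation, and that rescale_intensity with in_range=(0, 255) clips negative responses. A sketch that accounts for both (border handling still differs slightly: 'symm' reflects edge pixels, whereas cv2.BORDER_REPLICATE repeats them):

import numpy as np
from scipy.signal import convolve2d

def convolve2d_like_manual(image, kernel):
    # flip the kernel so true convolution matches the sliding-window correlation
    flipped = np.flipud(np.fliplr(kernel))
    out = convolve2d(image, flipped, boundary='symm', mode='same')
    # clip to [0, 255], mirroring rescale_intensity(in_range=(0, 255))
    return np.clip(out, 0, 255).astype('uint8')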

Filling holes in image with OpenCV or Skimage

I'm trying to fill holes in a chessboard image for a stereo application. The chessboard is at micro scale, so it is complicated to avoid dust, as you can see:
Thus, corner detection is impossible. I tried SciPy's binary_fill_holes and similar approaches, but I get a fully black image, which I don't understand.
Here is a function that replaces the color of each pixel with the color that the majority of its neighbouring pixels have.
import numpy as np
import cv2

def remove_noise(gray, num):
    Y, X = gray.shape
    nearest_neighbours = [[
        np.argmax(
            np.bincount(
                gray[max(i - num, 0):min(i + num, Y), max(j - num, 0):min(j + num, X)].ravel()))
        for j in range(X)] for i in range(Y)]
    result = np.array(nearest_neighbours, dtype=np.uint8)
    cv2.imwrite('result2.jpg', result)
    return result
Demo:
img = cv2.imread('mCOFl.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
remove_noise(gray, 10)
Input image:
Output:
Note: Since this function replaces the color of corner pixels too, you can use the cv2.goodFeaturesToTrack function to find the corners and exclude those pixels from the denoising:
corners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 30)
corners = np.squeeze(np.int0(corners))
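One hedged way to apply that restriction (my own sketch, not from the original answer): denoise a copy, then write the original values back at the detected corner locations, where each row of corners is an (x, y) pair:

denoised = remove_noise(gray, 10)
for x, y in corners:
    denoised[y, x] = gray[y, x]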
You can use morphology: dilate, and then erode with the same kernel size.
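A minimal sketch of that suggestion, assuming a binary image im and an illustrative 5x5 kernel:

import cv2
import numpy as np

kernel = np.ones((5, 5), np.uint8)
closed = cv2.erode(cv2.dilate(im, kernel, iterations=1), kernel, iterations=1)
# Equivalent one-liner: cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)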
A faster, more accurate way is to use skimage.morphology.remove_small_objects (docs):
import cv2
from skimage import morphology

im = cv2.imread('a.png', cv2.IMREAD_GRAYSCALE)
im = im == 255
cleaned = morphology.remove_small_objects(im, 200)
