I'm trying to extract the pixels outside and inside the edges, each within its own area. Currently I am applying the scipy Sobel filter like this:
import numpy as np
from scipy import ndimage
import imageio.v2 as imageio  # scipy.misc.imread/imsave were removed from SciPy

im = imageio.imread(filename).astype('int32')
dx = ndimage.sobel(im, axis=0)  # derivative along axis 0
dy = ndimage.sobel(im, axis=1)  # derivative along axis 1
mag = np.hypot(dx, dy)          # gradient magnitude
mag *= 255.0 / np.max(mag)      # normalize to the 0-255 range
imageio.imwrite('sobel.jpg', mag.astype('uint8'))
Currently the results are:
The idea is to get the pixels outside the detected edges, for example these areas:
How can I extract arrays of the areas outside and inside the detected edges?
Here's an approach using interactive image segmentation. In this approach, you must manually label some of the foreground pixels and some of the background pixels, like this:
(I did the labeling in MS Paint.) The code below uses the function skimage.segmentation.random_walker to do the image segmentation, and produces this segmented image:
(This approach can also handle images with much more complicated background regions.) Here's the code:
import skimage.segmentation
import skimage.io
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
img = skimage.io.imread("D:/Users/Pictures/img.jpg")
imgLabeled = skimage.io.imread("D:/Users/Pictures/imgLabeled.jpg")
redChannel = imgLabeled[:,:,0]
greenChannel = imgLabeled[:,:,1]
blueChannel = imgLabeled[:,:,2]
markers = np.zeros(img.shape[:2], dtype=np.uint8)  # 2D marker array, one label per pixel
markers[(redChannel < 20) & (greenChannel > 210) & (blueChannel < 20)] = 1  # green strokes -> label 1
markers[(redChannel < 20) & (greenChannel < 20) & (blueChannel > 210)] = 2  # blue strokes -> label 2
plt.imshow(markers)
labels = skimage.segmentation.random_walker(img, markers, beta=1000, mode='cg',
                                            channel_axis=-1)  # use multichannel=True on skimage < 0.19
seg1 = np.copy(img)
seg1[labels == 2] = 0  # zero out the background -> foreground segment
seg2 = np.copy(img)
seg2[labels == 1] = 0  # zero out the foreground -> background segment
# plt.imsave("D:/Users/Pictures/imgSeg.png",seg1)
plt.figure()
plt.imshow(seg1)
plt.figure()
plt.imshow(seg2)
plt.show()
So far I have been able to perform median blur and edge detection. Now I want to further remove noise from the image. In MATLAB, the regionprops function was used to remove all white regions with a total pixel area less than the mean pixel area. How can I implement this in Python?
import cv2
import numpy as np
from matplotlib import pyplot as plt

image = cv2.imread('small_farms.JPG')  # change this to the file path of the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
print(gray.shape)
blur = cv2.bilateralFilter(gray, 9, 75, 75)
median = cv2.medianBlur(gray, 5)

# display input and output image
titles = ["bilateral smoothing", "median blur"]
images = [blur, median]
plt.figure(figsize=(20, 20))
for i in range(2):
    plt.subplot(1, 2, i + 1)
    plt.imshow(images[i])
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()

sobelX = cv2.Sobel(median, cv2.CV_64F, 1, 0)
sobelY = cv2.Sobel(median, cv2.CV_64F, 0, 1)
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
SobelCombined = cv2.bitwise_or(sobelX, sobelY)
cv2.imshow('img', SobelCombined)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is MATLAB code that works for the same task.
close all
%upload image of farm
figure,
farm = imread('small_farms.JPG');%change this to the file path of image
imshow(farm);%this shows the original image
%convert the image to grayscale for 2D manipulation
gfarm = rgb2gray(farm);
figure,
imshow(gfarm);%show grayscaled image
%median filters take a m*n area around a coordinate and
%find the median pixel value and set that coordinate to that
%pixel value. It's a method of removing noise or details in an
%image. may want to tune dimensions of filter.
A = medfilt2(gfarm,[4 4]);
figure,
imshow(A);
%perform a logarithmic edge detection filter,
%this picks out the edges of the image, log setting
%was found to work best, although 'Sobel' can also be tried
B = edge(A,'log');
%show results of the edge filter
figure,
imshow(B,[]);
%find the areas of the lines made
areas = regionprops(B,'Area');
%find the mean area (the std multiplier is 0 here; raise it to filter more aggressively)
men = mean([areas.Area])+0*std([areas.Area]);
%find max pixel area
big = max([areas.Area]);
%remove regions that are too small
C = bwpropfilt(B,'Area',[men big]);
%perform a dilation on the remaining pixels, this
%helps fill in gaps. The size and shape of the dilation
%can be tuned below.
SE = strel('square',4);
C = imdilate(C,SE);
areas2 = regionprops(C,'Area');
%place white border around image to find areas of farms
%that go off the picture
[h,w] = size(C);
C(1,:) = 1;
C(:,1) = 1;
C(h,:) = 1;
C(:,w) = 1;
C = C<1;
%fill in holes
C = imfill(C,'holes');
%show final processed image
figure,imshow(C);
%the section below is for display purpose
%it creates the boundaries of the image and displays them
%in a rainbow fashion
figure,
[B,L,n,A] = bwboundaries(C,'noholes');
imshow(label2rgb(L, @jet, [.5 .5 .5]))
hold on
for k = 1:length(B)
boundary = B{k};
plot(boundary(:,2), boundary(:,1), 'w', 'LineWidth', 2)
end
%The section below prints out the areas of each found
%region by pixel values. These values need to be scaled
%by the real measurements of the images to get relevant
%metrics
centers = regionprops(C,'Centroid','Area');
for k=1:length(centers)
if(centers(k).Area > mean([centers.Area])-std([areas.Area]))
text(centers(k).Centroid(1),centers(k).Centroid(2),string(centers(k).Area));
end
end
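For the Python side, here is a minimal sketch of the same mean-area filtering using skimage.measure, assuming SobelCombined from the code above thresholded to a binary image (the threshold and connectivity are assumptions to tune; skimage.morphology.remove_small_objects offers a similar shortcut):
import numpy as np
from skimage import measure

B = SobelCombined > 0                         # binary edge image (assumption)
labeled = measure.label(B, connectivity=2)    # label connected white regions
regions = measure.regionprops(labeled)        # Python counterpart of regionprops
mean_area = np.mean([r.area for r in regions])
# keep only regions at least as large as the mean area (like bwpropfilt)
keep = [r.label for r in regions if r.area >= mean_area]
C = np.isin(labeled, keep)                    # boolean image of the surviving regions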
I'm facing an issue and would like some input from the community on how to improve the disparity map. I'm following this tutorial for calculating the disparity map between two images. The code I have is as follows:
import cv2
import numpy as np
import sys
from matplotlib import pyplot as plt
num_disparities = 64 # number of disparities to check
block = 9 # block size to match
def preprocess_frame(path):
    image = cv2.imread(path, 0)
    image = cv2.equalizeHist(image)
    image = cv2.GaussianBlur(image, (5, 5), 0)
    return image

def calculate_disparity_matrix(args):
    left_image = preprocess_frame(args[1])
    right_image = preprocess_frame(args[2])
    rows, cols = left_image.shape
    kernel = np.ones([block, block]) / block
    disparity_maps = np.zeros(
        [left_image.shape[0], left_image.shape[1], num_disparities])
    for d in range(0, num_disparities):
        # shift the right image d pixels to the right
        translation_matrix = np.float32([[1, 0, d], [0, 1, 0]])
        shifted_image = cv2.warpAffine(
            right_image, translation_matrix,
            (right_image.shape[1], right_image.shape[0]))
        # calculate absolute differences
        SAD = abs(np.float32(left_image) - np.float32(shifted_image))
        # convolve with kernel to get the SAD over each block
        filtered_image = cv2.filter2D(SAD, -1, kernel)
        disparity_maps[:, :, d] = filtered_image
    # for each pixel, pick the disparity with the smallest SAD
    disparity = np.argmin(disparity_maps, axis=2)
    disparity = np.uint8(disparity * 255 / num_disparities)
    disparity = cv2.equalizeHist(disparity)
    plt.imshow(disparity, cmap='gray', vmin=0, vmax=255)
    plt.show()

def calculate_disparity_inbuilt(args):
    left_image = preprocess_frame(args[1])
    right_image = preprocess_frame(args[2])
    stereo = cv2.StereoBM_create(numDisparities=num_disparities,
                                 blockSize=block)
    disparity = stereo.compute(left_image, right_image)
    plt.imshow(disparity, cmap='gray', vmin=0, vmax=255)
    plt.show()

if __name__ == '__main__':
    calculate_disparity_matrix(sys.argv)
    calculate_disparity_inbuilt(sys.argv)
The problem is that the output I get from the inbuilt OpenCV function is hardly similar to the one from my implementation. I was expecting at least a slight similarity between the two. Is this expected, or am I doing something wrong here?
Implemented Algorithm
OpenCV Algorithm
I'm trying to find the tilt angle in a series of images which look like the example data created below. There should be a clear edge which is visible by eye. However, I'm struggling to extract that edge so far. Is Canny the right way of finding the edge here, or is there a better way?
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter  # scipy.ndimage.filters is deprecated
# create data
xvals = np.arange(0,2000)
yvals = 10000 * np.exp((xvals - 1600)/200) + 100
yvals[1600:] = 100
blurred = gaussian_filter(yvals, sigma=20)
# create image
img = np.tile(blurred,(2000,1))
img = np.swapaxes(img,0,1)
# rotate image
rows,cols = img.shape
M = cv.getRotationMatrix2D((cols/2,rows/2),3.7,1)
img = cv.warpAffine(img,M,(cols,rows))
# convert to uint8 for Canny
img_8 = cv.convertScaleAbs(img,alpha=(255.0/65535.0))
fig,ax = plt.subplots(3)
ax[0].plot(xvals,blurred)
ax[1].imshow(img)
# find edge
ax[2].imshow(cv.Canny(img_8, 20, 100, apertureSize=5))
plt.show()
You can find the angle by converting your image to binary (cv2.threshold with cv2.THRESH_BINARY) and then searching for contours.
When you locate your contour (the line), you can fit a line to it with cv2.fitLine() and get two points on that line. In the linear equation f(x) = k*x + n, the slope follows from those two points as k = (y2-y1)/(x2-x1), and the angle is phi = arctan(k).
You can also use the rotated bounding rectangle - cv2.minAreaRect() - which already returns the angle of the rectangle (rect = cv2.minAreaRect(c) --> rect[2]). Hope it helps. Cheers!
Here is some example code:
import cv2
import numpy as np
import math

img = cv2.imread('angle.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 170, 255, cv2.THRESH_BINARY)
# OpenCV 3.x returns three values here; on OpenCV 4.x drop the leading `im`
im, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

for c in contours:
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, False)
    if area < 10001 and 100 < perimeter < 1000:
        # first approach - fit a line and use y = k*x + n --> angle = tan^(-1)(k)
        rows, cols = img.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
        lefty = int((-x * vy / vx) + y)
        righty = int(((cols - x) * vy / vx) + y)
        cv2.line(img, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
        (x1, y1) = (cols - 1, righty)
        (x2, y2) = (0, lefty)
        k = (y2 - y1) / (x2 - x1)
        angle = math.atan(k) * 180 / math.pi
        print(angle)
        # second approach - cv2.minAreaRect returns (center(x, y), (width, height), angle)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.intp(box)  # np.int0 was removed in NumPy 2.0
        cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
        print(rect[2])

cv2.imshow('img2', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Original image:
Output:
-3.8493663478518627
-3.7022125720977783
tribol, it seems like you can take the gradient image G = |Gx| + |Gy| (normalized to some known range), compute its histogram, and take its top bins. That gives you an approximate mask of the line, on which you can then do line fitting. It'll give you a good initial guess.
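A minimal sketch of that idea, assuming img_8 is the 8-bit image from the question and the 99th-percentile cutoff is a tunable guess:
import numpy as np
import cv2

gx = cv2.Sobel(img_8, cv2.CV_64F, 1, 0)
gy = cv2.Sobel(img_8, cv2.CV_64F, 0, 1)
g = np.abs(gx) + np.abs(gy)          # gradient image G = |Gx| + |Gy|
g = np.uint8(255 * g / g.max())      # normalize to 0-255
mask = g > np.percentile(g, 99)      # top bins of the histogram -> approximate line mask
ys, xs = np.nonzero(mask)
k, n = np.polyfit(xs, ys, 1)         # least-squares line fit y = k*x + n
print(np.degrees(np.arctan(k)))      # initial guess for the tilt angle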
A very simple way of doing it is as follows... adjust my numbers to suit your knowledge of the data.
Normalise your image to a scale of 0-255.
Choose two points A and B, where A is 10% of the image width in from the left side and B is 10% in from the right side. The distance AB is now 0.8 x 2000, or 1600 px.
Go North from point A sampling your image till you exceed some sensible threshold that means you have met the tilted line. Note the Y value at this point, as YA.
Do the same, going North from point B till you meet the tilted line. Note the Y value at this point, as YB.
The angle you seek is:
angle = arctan((YB - YA) / 1600)
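A minimal sketch of that recipe, assuming a 2D uint8 image in which crossing the tilted line pushes the pixel value above some threshold (both assumptions; adjust to your data):
import numpy as np

def tilt_angle(img, threshold=128):
    w = img.shape[1]
    xa, xb = int(0.1 * w), int(0.9 * w)        # points A and B
    # go north (bottom row upwards) until the threshold is exceeded
    ya = np.argmax(img[::-1, xa] > threshold)  # YA, measured from the bottom
    yb = np.argmax(img[::-1, xb] > threshold)  # YB
    return np.degrees(np.arctan((yb - ya) / (xb - xa)))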
Thresholding as suggested by kavko didn't work that well, as the intensity varied from image to image (I could of course consider the histogram of each image to improve this approach). I ended up taking the maximum of the gradient in the y-direction:
import numpy as np
from scipy import ndimage, stats

def rotate_image(image):
    blur = ndimage.gaussian_filter(image, sigma=10)  # blur the image first
    grad = np.gradient(blur, axis=0)                 # take the gradient along the y-axis
    grad[grad > 10000] = 0                           # filter unreasonably high values
    idx_maxline = np.argmax(grad, axis=0)            # y-indices of max slope = indices of the edge
    mean = np.mean(idx_maxline)
    std = np.std(idx_maxline)
    idx = np.arange(idx_maxline.shape[0])
    # filter positions where the highest slope is at a different position (blobs)
    idx_filtered = idx[(idx_maxline < mean + std) & (idx_maxline > mean - std)]
    slope, intercept, r_value, p_value, std_err = stats.linregress(
        idx_filtered, idx_maxline[idx_filtered])
    angle = np.degrees(np.arctan(slope))             # convert the fitted slope to an angle
    out = ndimage.rotate(image, angle, reshape=False)
    return out

out = rotate_image(img)
plt.imshow(out)
plt.show()
I've asked this question already, but I've given it a go since. Here's my code, with a docstring to show what I'm trying to do.
"""
w1_w2_filter_mask2.py
Use xbulge-mask.fits image file as a mask for W1 and W2 bands
and compute the median in patches where the image is not masked
"""
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from scipy.ndimage import generic_filter as gf  # scipy.ndimage.filters is deprecated
# Open data files
hdulist = fits.open('xbulge-w1.fits')
w1data = hdulist[0].data
hdulist2 = fits.open('xbulge-w2.fits')
w2data = hdulist2[0].data
hdulistmask = fits.open('xbulge-mask.fits')
maskdata = hdulistmask[0].data
# Define physical shape of filter patch
def patch_filter(image_data, radius):
    kernel = np.zeros((2*radius+1, 2*radius+1))
    y, x = np.ogrid[-radius:radius+1, -radius:radius+1]
    patch = x**2 + y**2 <= radius**2  # circular footprint of the given radius
    kernel[patch] = 1
    filtered_image = gf(image_data, np.median, footprint=kernel)
    return filtered_image
# Apply mask to image files
mx1 = np.ma.masked_array(w1data, mask=maskdata)
mx2 = np.ma.masked_array(w2data, mask=maskdata)
# Pass median filtering patch across masked image
radius = 25
w1masked_filtered = patch_filter(mx1, radius)
w2masked_filtered = patch_filter(mx2, radius)
When I print or imshow the masked, filtered array, it has the same effect as when I don't apply the mask image, i.e. (load mask -> load image -> apply mask -> filter in patches) has the same effect as (load image -> filter in patches).
Can anyone see what I'm missing?
I am scanning old photos, so I have the image and a white background from the scanner. My aim is to extract the picture, removing the white background. How can I do that?
An example picture is the following:
My simple approach:
import os
import csv
from PIL import Image
from collections import Counter

def get_cropped_image(image, crop_folder, threshold):
    image_name = image.split("\\")[-1]
    im = Image.open(image)
    pixels = im.load()
    width, height = im.size

    # fraction of pure-white pixels in each row
    rows = []
    for h_index in range(height):
        row = [pixels[w_index, h_index] for w_index in range(width)]
        color_count = Counter(row)[(255, 255, 255)] / float(len(row))
        rows.append([h_index, color_count])

    # fraction of pure-white pixels in each column
    columns = []
    for w_index in range(width):
        column = [im.getpixel((w_index, h_index)) for h_index in range(height)]
        color_count = Counter(column)[(255, 255, 255)] / float(len(column))
        columns.append([w_index, color_count])

    csv.writer(open("image_data.csv", "w", newline="")).writerows(zip(rows, columns))

    # keep only rows/columns that are not mostly white, then crop to their extent
    rows_indexes = [i[0] for i in rows if i[1] < threshold]
    columns_indexes = [i[0] for i in columns if i[1] < threshold]
    x1, y1, x2, y2 = columns_indexes[0], rows_indexes[0], columns_indexes[-1], rows_indexes[-1]
    im.crop((x1, y1, x2, y2)).save(os.path.join(crop_folder, "c_" + image_name))
In the example below, I create a mask by selecting all pixels that are close to white (close, because the values right outside the area of interest are not exactly white). I then invert the mask to find pixels potentially belonging to the image. I then calculate the bounding box of those pixels, and use it to extract the region of interest.
from skimage import io, img_as_float
import matplotlib.pyplot as plt
import numpy as np
image = img_as_float(io.imread('universe.jpg'))
# Select all pixels almost equal to white
# (almost, because there are some edge effects in jpegs
# so the boundaries may not be exactly white)
white = np.array([1, 1, 1])
mask = np.abs(image - white).sum(axis=2) < 0.05
# Find the bounding box of those pixels
coords = np.array(np.nonzero(~mask))
top_left = np.min(coords, axis=1)
bottom_right = np.max(coords, axis=1)
out = image[top_left[0]:bottom_right[0],
            top_left[1]:bottom_right[1]]
plt.imshow(out)
plt.show()