I am trying to figure out how to make a script that cuts out images from a sheet of images. I can't understand what to do after I get the contours of the images. My train of thought is to load a sheet, convert it to grayscale, find the contours, use them to cut out the images from the original colored image and save them individually.
import numpy as np
from sys import argv
from PIL import Image
from skimage import measure
# Initialization
spritesToFind = argv[1]
spriteSize = argv[2]
sheet = Image.open(argv[3])
# To grayscale, so contour finding is easy
grayscale = sheet.convert('L')
# Let numpy do the heavy lifting for converting pixels to black or white
data = np.asarray(grayscale).copy()
# Find the contours we need
contours = measure.find_contours(data, 0.8)
# Now we put it back in PIL land
sprite = Image.fromarray(data)
sprite.save(str(spritesToFind), "PNG")
If you just want to cut out the smallest rectangle containing the contour you can use the (x,y) coordinates in the contour to create a bounding box going from (min(x), min(y)) to (max(x),max(y)).
If you want to zero out everything not inside a contour, you should look into how to determine whether a point lies within a polygon, and then zero out every point that is not inside the contour.
In the code below, contours is a list of contours found using measure.find_contours and x is your image. It shows both how to extract a rectangular bounding-box patch (image_patch) and how to extract just the pixels that are part of the polygon (new_image):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import path
contour = contours[0]
polygon = path.Path(contour)
# Bounding box of the contour; find_contours returns float coordinates,
# so cast to int before using them as array indices
top = int(min(contour[:, 0]))
bottom = int(max(contour[:, 0]))
left = int(min(contour[:, 1]))
right = int(max(contour[:, 1]))
# Copy only the pixels that fall inside the contour polygon
new_image = np.zeros([bottom - top, right - left])
for xr in range(new_image.shape[0]):
    for yr in range(new_image.shape[1]):
        if polygon.contains_point([top + xr, left + yr]):
            new_image[xr, yr] = x[top + xr, left + yr]
# Rectangular bounding-box crop
image_patch = x[top:bottom, left:right]
plt.imshow(image_patch)
plt.show()
plt.imshow(new_image)
plt.show()
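To save each cut-out individually, as the question asks, you could loop over all the contours; a small sketch under the same assumptions (x is the image as a uint8 array, the filenames are illustrative):
from PIL import Image
for idx, contour in enumerate(contours):
    top, bottom = int(min(contour[:, 0])), int(max(contour[:, 0]))
    left, right = int(min(contour[:, 1])), int(max(contour[:, 1]))
    patch = x[top:bottom, left:right]
    Image.fromarray(patch).save('sprite_%d.png' % idx)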
So far I have been able to perform medianBlur and edge detection. Now I want to further remove noise from the image. In MATLAB, the regionprops family of functions was used to remove all white regions whose total pixel area was less than the mean pixel area. How can I implement this in Python?
import numpy as np
import cv2
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
# Load the input image; mpimg reads JPEGs as RGB
# (the filename here is a placeholder for your own image)
image = mpimg.imread('farms.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
print(gray.shape)
blur = cv2.bilateralFilter(gray, 9, 75, 75)
median = cv2.medianBlur(gray, 5)
# display input and output image
titles = ["bilateral smoothing", "median blur"]
images = [blur, median]
plt.figure(figsize=(20, 20))
for i in range(2):
    plt.subplot(1, 2, i + 1)
    plt.imshow(images[i], cmap='gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
# Sobel edge detection in x and y, combined with bitwise OR
sobelX = cv2.Sobel(median, cv2.CV_64F, 1, 0)
sobelY = cv2.Sobel(median, cv2.CV_64F, 0, 1)
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
SobelCombined = cv2.bitwise_or(sobelX, sobelY)
cv2.imshow('img', SobelCombined)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is MATLAB code that works for the same task.
close all
%upload image of farm
figure,
farm = imread('small_farms.JPG');%change this to the file path of image
imshow(farm);%this shows the original image
%convert the image to grayscale for 2D manipulation
gfarm = rgb2gray(farm);
figure,
imshow(gfarm);%show grayscaled image
%median filters take a m*n area around a coordinate and
%find the median pixel value and set that coordinate to that
%pixel value. It's a method of removing noise or details in an
%image. may want to tune dimensions of filter.
A = medfilt2(gfarm,[4 4]);
figure,
imshow(A);
%perform a logarithmic edge detection filter,
%this picks out the edges of the image, log setting
%was found to work best, although 'Sobel' can also be tried
B = edge(A,'log');
%show results of the edge filter
figure,
imshow(B,[]);
%find the areas of the lines made
areas = regionprops(B,'Area');
%find the mean (plus a tunable multiple of the standard deviation)
men = mean([areas.Area]) + 0*std([areas.Area]);
%find max pixel area
big = max([areas.Area]);
%remove regions that are too small
C = bwpropfilt(B,'Area',[men big]);
%perform a dilation on the remaining pixels, this
%helps fill in gaps. The size and shape of the dilation
%can be tuned below.
SE = strel('square',4);
C = imdilate(C,SE);
areas2 = regionprops(C,'Area');
%place white border around image to find areas of farms
%that go off the picture
[h,w] = size(C);
C(1,:) = 1;
C(:,1) = 1;
C(h,:) = 1;
C(:,w) = 1;
C = C<1;
%fill in holes
C = imfill(C,'holes');
%show final processed image
figure,imshow(C);
%the section below is for display purpose
%it creates the boundaries of the image and displays them
%in a rainbow fashion
figure,
[B,L,n,A] = bwboundaries(C,'noholes');
imshow(label2rgb(L, @jet, [.5 .5 .5]))
hold on
for k = 1:length(B)
    boundary = B{k};
    plot(boundary(:,2), boundary(:,1), 'w', 'LineWidth', 2)
end
%The section below prints out the areas of each found
%region by pixel values. These values need to be scaled
%by the real measurements of the images to get relevant
%metrics
centers = regionprops(C,'Centroid','Area');
for k = 1:length(centers)
    if (centers(k).Area > mean([centers.Area]) - std([areas.Area]))
        text(centers(k).Centroid(1), centers(k).Centroid(2), string(centers(k).Area));
    end
end
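To answer the Python side of the question: here is a minimal sketch of the regionprops/bwpropfilt step with scikit-image, assuming B is the binary edge image (for example, the Sobel result above thresholded to a boolean array); the names and the 4x4 footprint mirror the MATLAB code and are illustrative, not a definitive implementation:
import numpy as np
from skimage import measure, morphology
labels = measure.label(B)  # label connected white regions
areas = np.array([r.area for r in measure.regionprops(labels)])
# keep only regions at least as large as the mean area,
# mirroring bwpropfilt(B, 'Area', [men big])
C = morphology.remove_small_objects(labels, min_size=areas.mean()) > 0
# dilate with a 4x4 square, like imdilate(C, strel('square', 4))
C = morphology.binary_dilation(C, np.ones((4, 4), dtype=bool))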
I have this image of an eye where I want to get the center of the pupil:
Original Image
I applied adaptive threshold as well as laplacian to the image using this code:
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread(r'C:\Users\User\Documents\module4\input\left.jpg', 0)  # raw string so backslashes aren't treated as escapes
image = cv2.medianBlur(img,5)
th = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,11,2)
laplacian = cv2.Laplacian(th,cv2.CV_64F)
cv2.imshow('output', laplacian)
cv2.imwrite(r'C:\Users\User\Documents\module4\output\output.jpg', laplacian)
cv2.waitKey(0)
cv2.destroyAllWindows()  # note the parentheses; without them the function is never called
and the resulting image looks like this: Resulting image by applying adaptive threshold
I want to draw a circle around the smaller inner circle and get its center. I've tried using contours and circular hough transform but it does not correctly detect any circles in the image.
Here is my code for Circular Hough Transform:
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread(r'C:\Users\User\Documents\module4\output\output.jpg', 0)
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(img, (i[0], i[1]), i[2], (255, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(img, (i[0], i[1]), 2, (255, 0, 255), 3)
cv2.imshow('detected circles',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
And here is the code for applying contour:
import cv2
import numpy as np
img = cv2.imread(r'C:\Users\User\Documents\module4\output\output.jpg', 0)
_, contours,hierarchy = cv2.findContours(img, 1, 2)
cnt = contours[0]
(x,y),radius = cv2.minEnclosingCircle(cnt)
center = (int(x),int(y))
radius = int(radius)
img = cv2.circle(img,center,radius,(0,255,255),2)
cv2.imshow('contour', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
The resulting image of this code looks exactly like the image to which I applied the adaptive threshold. I would really appreciate it if anyone can help me solve my problem; I've been stuck on this for a while now. Also, if anyone can suggest a better way to detect the center of the pupil besides this method, I would really appreciate it.
Try applying edge detection instead of thresholding after filtering the original image, and then apply the Hough circle transform.
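A rough sketch of that suggestion, reusing image, the median-blurred grayscale from the question's code; the Canny thresholds and Hough parameters here are illustrative and will need tuning (note that cv2.HoughCircles runs its own internal Canny edge detector, controlled by param1, so it is given the filtered grayscale image directly):
import cv2
import numpy as np
edges = cv2.Canny(image, 50, 150)  # inspect the edge map to tune the thresholds
circles = cv2.HoughCircles(image, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                           param1=50, param2=30, minRadius=5, maxRadius=60)
if circles is not None:
    cx, cy, r = np.uint16(np.around(circles))[0, 0]  # strongest detected circle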
My thought would be to use the Hough transform like you're doing, but another method might be template matching, like this. If you know the approximate radius of the pupil in the image, you can try to build a template.
import skimage.io
import skimage.draw
import skimage.feature
import numpy as np
import matplotlib.pyplot as plt
img = skimage.io.imread('Wjioe.jpg')
#just use grayscale, but you could make separate template for each r,g,b channel
img = np.mean(img, axis=2)
(M,N) = img.shape
mm = M-20
nn = N-20
template = np.zeros([mm,nn])
## Create template ##
#darkest inner circle (pupil)
(rr,cc) = skimage.draw.circle(mm/2, nn/2, 4.5, shape=template.shape)  # renamed draw.disk((r, c), radius) in newer scikit-image
template[rr,cc]=-2
#iris (circle surrounding pupil)
(rr,cc) = skimage.draw.circle(mm/2,nn/2,8, shape=template.shape)
template[rr,cc] = -1
#Optional - pupil reflective spot (if centered)
(rr,cc) = skimage.draw.circle(mm/2,nn/2,1.5, shape=template.shape)
template[rr,cc] = 1
plt.imshow(template)
normccf = skimage.feature.match_template(img, template,pad_input=True)
#center pixel
(i,j) = np.unravel_index( np.argmax(normccf), normccf.shape)
plt.imshow(img)
plt.plot(j,i,'r*')
You're defining a 3-channel color for a grayscale image. Based on my test it will only read the first value in that tuple. Because the first value in your other colors (in the middle code) is 255, it draws a fully white circle, and because the first value in your last color (in your last code) is 0, it draws a fully black circle, which you can't see.
Just change your color values to a single-channel color (an int between 0 and 255) and you'll be fine.
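For example, the drawing calls in the Hough code above could pass single intensity values instead of tuples (the exact values here are just illustrative):
cv2.circle(img, (i[0], i[1]), i[2], 255, 2)  # white outer circle
cv2.circle(img, (i[0], i[1]), 2, 128, 3)     # gray centre dot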
So I have this map:
I've already done some filtering and now I have the following image:
Finally, I want to find the coordinates for each of the polygons in the image, but using the find_contours function is not giving me good results, as you can see on the following image.
On the left side, you can see all of the contours (polygons) found (one color for each), and on the right side, it's an example of a polygon it has found (which is clearly wrong).
Having the black and white image, how can I find the coordinates for each of the polygons in it?
When I downloaded the image, it was very disjoint, so the first thing I did was dilate it.
import numpy as np
from skimage import io, measure, morphology
img = io.imread('img.png', as_gray=True)  # spelled as_grey in older scikit-image releases
img = morphology.binary_dilation(img, selem=np.ones((5,5)))
Then I inverted the image, so the white lines become black and the enclosed areas become white:
img_inverted = np.invert(img)
Once you have the inverted image, label it for easy processing.
img_inverted_labeled = measure.label(img_inverted)
n_lbls = np.unique(img_inverted_labeled)[1:]
Now collect each labeled region's polygon:
pols = []
for i in n_lbls:
    img_part = (img_inverted_labeled == i)
    pols.append(measure.find_contours(img_part, level=0)[0])
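To sanity-check the result, you could overlay the recovered polygons on the image; a small usage sketch (find_contours returns (row, col) coordinates, so the second column is plotted as x):
import matplotlib.pyplot as plt
plt.imshow(img, cmap='gray')
for pol in pols:
    plt.plot(pol[:, 1], pol[:, 0], linewidth=2)
plt.show()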
Hope this helps.
Here is an example of the kinds of images I'll be dealing with:
(source: csverma at pages.cs.wisc.edu)
There is one bright spot on each ball. I want to locate the coordinates of the centre of the bright spot. How can I do it in Python or MATLAB? The problem I'm having right now is that more than one point on the spot has the same (or roughly the same) white colour, but what I need is to find the centre of this 'cluster' of white points.
Also, for the leftmost and rightmost images, how can I find the centre of the whole circular object?
You can simply threshold the image and find the average coordinates of what remains. This handles the case where multiple pixels share the same intensity: after thresholding there will obviously be more than one bright white pixel, so to bring them all together, find the centroid (the average of the coordinates) to determine the centre of all of these white pixels. There isn't a need to filter in this particular case. Here's something to go with in MATLAB.
I've read in that image directly, converted to grayscale and cleared off the white border that surrounds each of the images. Next, I split up the image into 5 chunks, threshold the image, find the average coordinates that remain and place a dot on where each centre would be:
im = imread('http://pages.cs.wisc.edu/~csverma/CS766_09/Stereo/callight.jpg');
im = rgb2gray(im);
im = imclearborder(im);
%// Split up images and place into individual cells
split_point = floor(size(im,2) / 5);
images = mat2cell(im, size(im,1), split_point*ones(5,1));
%// Show image to place dots
imshow(im);
hold on;
%// For each image...
for idx = 1 : 5
    %// Get image
    img = images{idx};
    %// Threshold
    thresh = img > 200;
    %// Find coordinates of thresholded image
    [y,x] = find(thresh);
    %// Find average
    xmean = mean(x);
    ymean = mean(y);
    %// Place dot at centre
    %// Make sure you offset by the right number of columns
    plot(xmean + (idx-1)*split_point, ymean, 'r.', 'MarkerSize', 18);
end
I get this:
If you want a Python solution, I recommend using scikit-image combined with numpy and matplotlib for plotting. Here's the above code transcribed in Python. Note that I saved the image referenced by the link manually on disk and named it balls.jpg:
import skimage.io
import skimage.segmentation
import numpy as np
import matplotlib.pyplot as plt
# Read in the image
# Note - intensities are floating point from [0,1]
im = skimage.io.imread('balls.jpg', True)
# Threshold the image first then clear the border
im_clear = skimage.segmentation.clear_border(im > (200.0/255.0))
# Determine where to split up the image
split_point = int(im.shape[1]/5)
# Show image in figure and hold to place dots in
plt.figure()
plt.imshow(np.dstack([im,im,im]))
# For each image...
for idx in range(5):
    # Extract sub image
    img = im_clear[:, idx*split_point:(idx+1)*split_point]
    # Find coordinates of thresholded image
    y, x = np.nonzero(img)
    # Find average
    xmean = x.mean()
    ymean = y.mean()
    # Plot on figure
    plt.plot(xmean + idx*split_point, ymean, 'r.', markersize=14)
# Show image and make sure axis is removed
plt.axis('off')
plt.show()
We get this figure:
Small sidenote
I could have totally skipped the above code and used regionprops (MATLAB link, scikit-image link). You could simply threshold the image, then apply regionprops to find the centroids of each cluster of white pixels, but I figured I'd show you a more manual way so you can appreciate the algorithm and understand it for yourself.
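For completeness, a minimal sketch of that regionprops route in Python, reusing the thresholded image im_clear from the code above:
from skimage import measure
labels = measure.label(im_clear)  # label each cluster of white pixels
for region in measure.regionprops(labels):
    cy, cx = region.centroid  # centroid is (row, col)
    plt.plot(cx, cy, 'r.', markersize=14)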
Hope this helps!
Use a 2D convolution and then find the point with the highest intensity in the result. You can apply a convex nonlinear function (such as exp) to the intensity values before the 2D convolution, to amplify the bright spots relative to the dimmer parts of the image; something like conv2(exp(img), ker).
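A minimal Python sketch of that idea, assuming img is a 2D grayscale array scaled to [0, 1] and using a flat averaging kernel as a stand-in for ker:
import numpy as np
from scipy.signal import convolve2d
ker = np.ones((15, 15)) / 225.0  # averaging kernel; size is illustrative
response = convolve2d(np.exp(img), ker, mode='same')
i, j = np.unravel_index(np.argmax(response), response.shape)  # centre of the brightest blob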
I have a set of boundary points of an object.
I want to draw it using opencv as contour.
I have no idea how to convert my points to a contour representation, i.e. the same representation that is obtained by the following call:
contours,_ = cv2.findContours(image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
Any ideas?
Thanks
By looking at the format of the contours I would think something like this should be sufficient:
contours = [numpy.array([[1,1],[10,50],[50,50]], dtype=numpy.int32) , numpy.array([[99,99],[99,60],[60,99]], dtype=numpy.int32)]
This small program gives a running example:
import numpy
import cv2
contours = [numpy.array([[1,1],[10,50],[50,50]], dtype=numpy.int32),
            numpy.array([[99,99],[99,60],[60,99]], dtype=numpy.int32)]
drawing = numpy.zeros([100, 100], numpy.uint8)
for cnt in contours:
    cv2.drawContours(drawing, [cnt], 0, (255, 255, 255), 2)
cv2.imshow('output', drawing)
cv2.waitKey(0)
To create your own contour from a Python list of points L:
L=[[x1,y1],[x2,y2],[x3,y3],[x4,y4],[x5,y5],[x6,y6],[x7,y7],[x8,y8],[x9,y9],...[xn,yn]]
Create a numpy array ctr from L, reshape it and force its type
ctr = numpy.array(L).reshape((-1,1,2)).astype(numpy.int32)
ctr is our new contour; let's draw it on an existing image:
cv2.drawContours(image,[ctr],0,(255,255,255),1)
A contour is simply a curve joining continuous points, so to create your own contour you can create an np.array() with your (x,y) points in clockwise order:
points = np.array([[25,25], [70,10], [150,50], [250,250], [100,350]])
That's it!
There are two methods to draw the contour onto an image depending on what you need:
Contour outline
If you only need the contour outline, use cv2.drawContours()
cv2.drawContours(image,[points],0,(0,0,0),2)
Filled contour
To get a filled contour, you can either use cv2.fillPoly() or cv2.drawContours() with thickness=-1
cv2.fillPoly(image, [points], [0,0,0]) # OR
# cv2.drawContours(image,[points],0,(0,0,0),-1)
Full example code for completeness
import cv2
import numpy as np
# Create blank white image
image = np.ones((400,400), dtype=np.uint8) * 255
# List of (x,y) points in clockwise order
points = np.array([[25,25], [70,10], [150,50], [250,250], [100,350]])
# Draw points onto image
cv2.drawContours(image,[points],0,(0,0,0),2)
# Fill points onto image
# cv2.fillPoly(image, [points], [0,0,0])
cv2.imshow('image', image)
cv2.waitKey()
To add to Cherif KAOUA's answer, I found I had to convert to list and zip my numpy array. Reading in an array of points from a text file:
contour = []
with open(array_of_points, 'r') as f:
    next(f)  # line one of my file gives the number of points
    for l in f:
        row = l.split()
        numbers = [int(n) for n in row]
        contour.append(numbers)
ctr = np.array(contour).reshape((-1, 1, 2)).astype(np.int32)
ctr = ctr.tolist()
ctr = zip(*[iter(ctr)] * len(contour))  # group the points into tuples of len(contour) elements