The requirement is to crop the region of interest from a binary image.
I need a rectangular image from a binary image, with the extra space around the region of interest removed.
For example:
From this original image I want only the region of interest marked with the yellow rectangle.
Note: the yellow rectangle is just for reference; it is not present in the image that will be processed.
I tried the following Python code, but it does not give the required output.
from PIL import Image
from skimage.io import imread
from skimage.morphology import convex_hull_image
import numpy as np
from matplotlib import pyplot as plt
from skimage import io
from skimage.color import rgb2gray
im = imread('binaryImageEdited.png')
plt.imshow(im)
plt.title('input image')
plt.show()
# create a binary image
im1 = 1 - rgb2gray(im)
threshold = 0.8
im1[im1 <= threshold] = 0
im1[im1 > threshold] = 1
chull = convex_hull_image(im1)
plt.imshow(chull)
plt.title('convex hull in the binary image')
plt.show()
imageBox = Image.fromarray((chull*255).astype(np.uint8)).getbbox()
cropped = Image.fromarray(im).crop(imageBox)
cropped.save('L_2d_cropped.png')
plt.imshow(cropped)
plt.show()
Thank you.
Your image is not actually binary on account of two things:
firstly, it has 26 colours, and
secondly it has an (entirely unnecessary) alpha channel.
You can trim it like this:
#!/usr/bin/env python3
from PIL import Image, ImageOps
# Open image, ensure greyscale, and discard the useless alpha channel
im = Image.open("thing.png").convert('L')
# Threshold and invert image as not actually binary
thresh = im.point(lambda p: p < 64 and 255)
# Get bounding box of thresholded image
bbox1 = thresh.getbbox()
crop1 = thresh.crop(bbox1)
# Invert and crop again
crop1n = ImageOps.invert(crop1)
bbox2 = crop1n.getbbox()
crop2 = crop1.crop(bbox2) # You don't actually need this - it's just for debug
# Trim original, unthresholded, uninverted image to the two bounding boxes
result = im.crop(bbox1).crop(bbox2)
result.save('result.png')
I have a similar problem. It would also be helpful if the saved image were 32x32 px.
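A minimal sketch of that extra step, assuming the result.png written by the answer above: PIL's Image.resize can scale the final crop to a fixed 32x32 size, and NEAREST resampling keeps a binary image crisp.
from PIL import Image
# Resize the cropped result from the answer above to 32x32 pixels
result = Image.open('result.png')
small = result.resize((32, 32), Image.NEAREST)
small.save('result_32x32.png')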
I want to crop an image automatically, meaning I don't want to specify the pixels or coordinates each time; the module/code should detect the object (i.e. the area of interest) and extract it from the image.
Or is there an algorithm to crop the desired part (area of interest) from an image?
[For example, my image contains two parts, one white and the other in RGB form, but I want only the RGB part of the complete image.]
I have also tried the code below, but it was not helpful.
from PIL import Image
from skimage.io import imread
from skimage.color import rgb2gray
from skimage.morphology import convex_hull_image
import numpy as np
from matplotlib import pyplot as plt
im = imread('L_2d.jpg')
plt.imshow(im)
plt.title('input image')
plt.show()
# create a binary image
im1 = 1 - rgb2gray(im)
threshold = 0.5
im1[im1 <= threshold] = 0
im1[im1 > threshold] = 1
chull = convex_hull_image(im1)
plt.imshow(chull)
plt.title('convex hull in the binary image')
plt.show()
imageBox = Image.fromarray((chull*255).astype(np.uint8)).getbbox()
cropped = Image.fromarray(im).crop(imageBox)
cropped.save('L_2d_cropped.jpg')
plt.imshow(cropped)
plt.show()
import numpy as np
from imageio import imread, imwrite
im1 = imread('https://api.sofascore.app/api/v1/team/2697/image')[...,:3]
im2 = imread('https://api.sofascore.app/api/v1/team/2692/image')[...,:3]
result = np.hstack((im1,im2))
imwrite('result.jpg', result)
The original images, opened directly from the URLs (I'm trying to concatenate the two images into one and keep the background white):
As can be seen, both have no background, but when joining the two via Python the resulting background becomes this moss green:
I tried modifying the channel slicing:
im1 = imread('https://api.sofascore.app/api/v1/team/2697/image')[...,:1]
im2 = imread('https://api.sofascore.app/api/v1/team/2692/image')[...,:1]
But the result is black and white, with the background still looking like it was converted from the previous green, even though the PNGs don't have such a background colour.
How should I proceed to solve this?
There is a 4th channel in your images: transparency. You are discarding that channel with [...,:3] (and all but one channel with [...,:1]). This is a mistake.
If you retain the alpha channel this will work fine:
import numpy as np
from imageio import imread, imwrite
im1 = imread('https://api.sofascore.app/api/v1/team/2697/image')
im2 = imread('https://api.sofascore.app/api/v1/team/2692/image')
result = np.hstack((im1,im2))
imwrite('result.png', result)
However, if you try to make a jpg, you will have a problem:
>>> imwrite('test.jpg', result)
OSError: JPEG does not support alpha channel.
This is correct, as JPGs do not do transparency. If you would like to use transparency and also have your output be a JPG, I suggest a priest.
You can replace the transparent pixels by using np.where and looking for places that the alpha channel is 0:
result = np.hstack((im1,im2))
result[np.where(result[...,3] == 0)] = [255, 255, 255, 255]
imwrite('result.png', result)
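If a JPG is still needed, one hedged workaround (handling only fully transparent pixels, as in the snippet above) is to composite onto white first and then drop the now-uniform alpha channel before saving:
import numpy as np
from imageio import imread, imwrite
im1 = imread('https://api.sofascore.app/api/v1/team/2697/image')
im2 = imread('https://api.sofascore.app/api/v1/team/2692/image')
result = np.hstack((im1, im2))
# Whiten the fully transparent pixels, then discard alpha for JPG output
result[np.where(result[..., 3] == 0)] = [255, 255, 255, 255]
imwrite('result.jpg', result[..., :3])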
If you want to improve image quality, here is a solution. @Brondy
# External libraries used for
# Image IO
from PIL import Image
# Morphological filtering
from skimage.morphology import opening
from skimage.morphology import disk
# Data handling
import numpy as np
# Connected component filtering
import cv2
black = 0
white = 255
threshold = 160
# Open input image in grayscale mode and get its pixels.
img = Image.open("image.jpg").convert("LA")
pixels = np.array(img)[:,:,0]
# Threshold: pixels above the threshold become white, the rest black
pixels[pixels > threshold] = white
pixels[pixels <= threshold] = black
# Morphological opening
blobSize = 1 # Select the maximum radius of the blobs you would like to remove
structureElement = disk(blobSize) # you can define different shapes, here we take a disk shape
# We need to invert the image such that black is background and white foreground to perform the opening
pixels = np.invert(opening(np.invert(pixels), structureElement))
# Create and save new image.
newImg = Image.fromarray(pixels).convert('RGB')
newImg.save("newImage1.PNG")
# Find the connected components (black objects in your image)
# Because the function searches for white connected components on a black background, we need to invert the image
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(np.invert(pixels), connectivity=8)
# For every connected component in your image, you can obtain the number of pixels from the stats variable in the last
# column. We remove the first entry from sizes, because this is the entry of the background connected component
sizes = stats[1:,-1]
nb_components -= 1
# Define the minimum size (number of pixels) a component should consist of
minimum_size = 100
# Create a new image
newPixels = np.ones(pixels.shape, dtype=np.uint8) * 255
# Iterate over all components in the image, only keep the components larger than minimum size
for i in range(nb_components):
    if sizes[i] > minimum_size:
        newPixels[output == i+1] = 0
# Create and save new image.
newImg = Image.fromarray(newPixels).convert('RGB')
newImg.save("new_img.PNG")
If you want to change the background of an image, pixellib is a good solution, as it seemed the most reasonable and easiest library to use.
import pixellib
from pixellib.tune_bg import alter_bg
change_bg = alter_bg()
change_bg.load_pascalvoc_model("deeplabv3_xception_tf_dim_ordering_tf_kernels.h5")
change_bg.color_bg("sample.png", colors=(255,255,255), output_image_name="colored_bg.png")
This code requires pixellib version 0.6.1 or higher.
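A quick way to confirm which version is installed, using the standard library rather than any pixellib-specific API (a sketch, not part of the original answer):
from importlib.metadata import version
# Check that the installed pixellib is at least 0.6.1
print(version("pixellib"))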
I have thousands of scale images from which I would like to extract the readings of the scale. However, Tesseract gives wrong values. I tried several kinds of processing on the image but still run into the same issue. From my understanding so far, after defining the region of interest in the image, it has to be converted to white text on a black background. However, I am new to Python; I tried some functions to do so but still run into the same issue. I would appreciate it if someone could help me with this. The following link is for the image, as I couldn't upload it here because it is more than 2 MiB:
https://mega.nz/file/fZMUDRbL#tg4Tc2VmGMMdEpnZzt7blxZjVLdlhMci9jll0FLnIGI
import cv2
import pytesseract
import matplotlib.pyplot as plt
import numpy as np
import imutils
## Reading Image File
Filename = 'C:\\Users\\Abdullah\\Desktop\\Scale Reading\\' #File Path For Images
IName = 'Disk_Test_1_09_07-00000_0.tif' # Image Name
Image = cv2.imread(Filename + IName,0)
## Image Processing
Image_Crop = Image[1680:1890, 550:1240] # Define Region of Interest of the image
#cv2.imshow("cropped", Image_Crop) # Show Cropped Image
#cv2.waitKey(0) # Show Cropped Image
Mask = Image_Crop > 10 # Threshold image at a value of 10
Mask = np.array(Mask, dtype=np.uint8)
plt.imshow(Mask, alpha=1) # Set Opacity (Max 1)
ret,Binary = cv2.threshold(Mask,0,255,cv2.THRESH_BINARY)
#plt.imshow(Image_Crop, cmap="gray") # Transform Image to Gray
#plt.show()
plt.imshow(Binary,'gray',vmin=0,vmax=255)
plt.show()
## Number Recognition
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' # Call Location of Tesseract-OCR
data = pytesseract.image_to_string(Binary, lang='eng',config='--psm 6')
print(data)
Here is the image after processing
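One thing worth trying, as a hedged sketch assuming the Binary image from the code above: Tesseract is generally reported to work best with dark text on a light background, so feeding it the inverted image as well may help.
import cv2
import pytesseract
# Invert the binary image so the text is dark on a light background
Inverted = cv2.bitwise_not(Binary)
data_inv = pytesseract.image_to_string(Inverted, lang='eng', config='--psm 6')
print(data_inv)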
So far I have been able to perform medianBlur and edge detection. Now I want to further remove noise from the image. In MATLAB, the regionprops function was used to remove all white regions that had a total pixel area less than the mean pixel area value. How can I implement this in Python? (A Python sketch of one possible translation follows the MATLAB code below.)
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
import math
from collections import defaultdict
from matplotlib import pyplot as plt
import imutils
#import generalized_hough
image = mpimg.imread('small_farms.JPG')  # load the input image (filename assumed from the MATLAB code below)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
print(gray.shape)
blur = cv2.bilateralFilter(gray,9,75,75)
median = cv2.medianBlur(gray,5)
# display input and output images
titles = ["bilateral smoothing", "median blur"]
images = [blur, median]
plt.figure(figsize=(20, 20))
for i in range(2):
    plt.subplot(1,2,i+1)
    plt.imshow(images[i])
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
sobelX = cv2.Sobel(median, cv2.CV_64F, 1, 0)
sobelY = cv2.Sobel(median, cv2.CV_64F, 0, 1)
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
SobelCombined = cv2.bitwise_or(sobelX,sobelY)
cv2.imshow('img', SobelCombined)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is a Matlab code that works for the same task.
close all
%upload image of farm
figure,
farm = imread('small_farms.JPG');%change this to the file path of image
imshow(farm);%this shows the original image
%convert the image to grayscale for 2D manipulation
gfarm = rgb2gray(farm);
figure,
imshow(gfarm);%show grayscaled image
%median filters take a m*n area around a coordinate and
%find the median pixel value and set that coordinate to that
%pixel value. It's a method of removing noise or details in an
%image. may want to tune dimensions of filter.
A = medfilt2(gfarm,[4 4]);
figure,
imshow(A);
%perform a logarithmic edge detection filter,
%this picks out the edges of the image, log setting
%was found to work best, although 'Sobel' can also be tried
B = edge(A,'log');
%show results of the edge filter
figure,
imshow(B,[]);
%find the areas of the lines made
areas = regionprops(B,'Area');
%find the mean and one standard deviation
men = mean([areas.Area])+0*std([areas.Area]);
%find max pixel area
big = max([areas.Area]);
%remove regions that are too small
C = bwpropfilt(B,'Area',[men big]);
%perform a dilation on the remaining pixels, this
%helps fill in gaps. The size and shape of the dilation
%can be tuned below.
SE = strel('square',4);
C = imdilate(C,SE);
areas2 = regionprops(C,'Area');
%place white border around image to find areas of farms
%that go off the picture
[h,w] = size(C);
C(1,:) = 1;
C(:,1) = 1;
C(h,:) = 1;
C(:,w) = 1;
C = C<1;
%fill in holes
C = imfill(C,'holes');
%show final processed image
figure,imshow(C);
%the section below is for display purpose
%it creates the boundaries of the image and displays them
%in a rainbow fashion
figure,
[B,L,n,A] = bwboundaries(C,'noholes');
imshow(label2rgb(L, @jet, [.5 .5 .5]))
hold on
for k = 1:length(B)
    boundary = B{k};
    plot(boundary(:,2), boundary(:,1), 'w', 'LineWidth', 2)
end
%The section below prints out the areas of each found
%region by pixel values. These values need to be scaled
%by the real measurements of the images to get relevant
%metrics
centers = regionprops(C,'Centroid','Area');
for k=1:length(centers)
    if(centers(k).Area > mean([centers.Area])-std([areas.Area]))
        text(centers(k).Centroid(1),centers(k).Centroid(2),string(centers(k).Area));
    end
end
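As promised above, here is a hedged Python sketch of the MATLAB regionprops/bwpropfilt step, using skimage.measure on a boolean edge image (assumed here to be a thresholded SobelCombined from the Python code above):
import numpy as np
from skimage import measure
# Keep only white regions whose pixel area is at least the mean region area
binary = SobelCombined > 0                      # boolean mask of white pixels
labels = measure.label(binary, connectivity=2)  # 8-connected components
props = measure.regionprops(labels)
mean_area = np.mean([p.area for p in props])
keep = [p.label for p in props if p.area >= mean_area]
filtered = np.isin(labels, keep)
skimage.morphology.remove_small_objects offers the same effect in one call if a fixed size threshold is acceptable.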
I need help with image segmentation. I have an MRI image of a brain with a tumor. I need to remove the cranium (skull) from the MRI and then segment only the tumor object. How could I do that in Python, with image processing? I have tried making contours, but I don't know how to find and remove the largest contour to get only the brain without the skull.
Thanks a lot.
import numpy as np
from sklearn.cluster import KMeans
from skimage import morphology
def get_brain(img):
    row_size = img.shape[0]
    col_size = img.shape[1]
    # Normalise the image to zero mean and unit variance
    mean = np.mean(img)
    std = np.std(img)
    img = img - mean
    img = img / std
    # Use the central region to estimate the intensity clusters
    middle = img[int(col_size / 5):int(col_size / 5 * 4), int(row_size / 5):int(row_size / 5 * 4)]
    mean = np.mean(middle)
    img_max = np.max(img)
    img_min = np.min(img)
    img[img == img_max] = mean
    img[img == img_min] = mean
    # K-means with two clusters separates tissue from background
    kmeans = KMeans(n_clusters=2).fit(np.reshape(middle, [np.prod(middle.shape), 1]))
    centers = sorted(kmeans.cluster_centers_.flatten())
    threshold = np.mean(centers)
    thresh_img = np.where(img < threshold, 1.0, 0.0)  # threshold the image
    # Clean the mask: erosion removes specks, dilation restores the brain outline
    eroded = morphology.erosion(thresh_img, np.ones([3, 3]))
    dilation = morphology.dilation(eroded, np.ones([5, 5]))
    return dilation
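A minimal usage sketch of the snippet above (with the return dilation at the end); 'brain_slice.png' is a hypothetical filename for one grayscale MRI slice:
import cv2
import matplotlib.pyplot as plt
# Read one slice in grayscale, mask it, and display the result
img = cv2.imread('brain_slice.png', cv2.IMREAD_GRAYSCALE).astype(float)
mask = get_brain(img)
plt.imshow(mask * img, cmap='gray')
plt.title('masked brain')
plt.show()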
These images are similar to the ones I'm looking at:
Thanks for answers.
Preliminaries
Some preliminary code:
%matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
from skimage.morphology import extrema
from skimage.morphology import watershed as skwater
def ShowImage(title,img,ctype):
    plt.figure(figsize=(10, 10))
    if ctype=='bgr':
        b,g,r = cv2.split(img)       # get b,g,r
        rgb_img = cv2.merge([r,g,b]) # switch it to rgb
        plt.imshow(rgb_img)
    elif ctype=='hsv':
        rgb = cv2.cvtColor(img,cv2.COLOR_HSV2RGB)
        plt.imshow(rgb)
    elif ctype=='gray':
        plt.imshow(img,cmap='gray')
    elif ctype=='rgb':
        plt.imshow(img)
    else:
        raise Exception("Unknown colour type")
    plt.axis('off')
    plt.title(title)
    plt.show()
For reference, here's one of the brain+skulls you linked to:
#Read in image
img = cv2.imread('brain.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ShowImage('Brain with Skull',gray,'gray')
Extracting a Mask
If the pixels in the image can be classified into two different intensity classes, that is, if they have a bimodal histogram, then Otsu's method can be used to threshold them into a binary mask. Let's check that assumption.
#Make a histogram of the intensities in the grayscale image
plt.hist(gray.ravel(),256)
plt.show()
Okay, the data is nicely bimodal. Let's apply the threshold and see how we do.
#Threshold the image to binary using Otsu's method
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_OTSU)
ShowImage('Applying Otsu',thresh,'gray')
Things are easier to see if we overlay our mask onto the original image
colormask = np.zeros(img.shape, dtype=np.uint8)
colormask[thresh!=0] = np.array((0,0,255))
blended = cv2.addWeighted(img,0.7,colormask,0.1,0)
ShowImage('Blended', blended, 'bgr')
Extracting the Brain
The overlap of the brain (shown in red) with the mask is so perfect, that we'll stop right here. To do so, let's extract the connected components and find the largest one, which will be the brain.
ret, markers = cv2.connectedComponents(thresh)
#Get the area taken by each component. Ignore label 0 since this is the background.
marker_area = [np.sum(markers==m) for m in range(1, np.max(markers)+1)]
#Get label of largest component by area
largest_component = np.argmax(marker_area)+1 #Add 1 since labels start at 1
#Get pixels which correspond to the brain
brain_mask = markers==largest_component
brain_out = img.copy()
#In a copy of the original image, clear those pixels that don't correspond to the brain
brain_out[brain_mask==False] = (0,0,0)
ShowImage('Connected Components',brain_out,'rgb')
Considering the Second Brain
Running this again with your second image produces a mask with many holes:
We can close many of these holes using a closing transformation:
brain_mask = np.uint8(brain_mask)
kernel = np.ones((8,8),np.uint8)
closing = cv2.morphologyEx(brain_mask, cv2.MORPH_CLOSE, kernel)
ShowImage('Closing', closing, 'gray')
We can now extract the brain:
brain_out = img.copy()
#In a copy of the original image, clear those pixels that don't correspond to the brain
brain_out[closing==False] = (0,0,0)
ShowImage('Connected Components',brain_out,'rgb')
If you need to cite this for some reason:
Richard Barnes. (2018). Using Otsu's method for skull-brain segmentation (v1.0.1). Zenodo. https://doi.org/10.5281/zenodo.6042312
Have you perhaps tried running python skull_stripping.py?
You can modify the parameters, but it normally works well.
There are some new studies using deep learning for skull stripping which I found it interesting:
https://github.com/mateuszbuda/brain-segmentation/tree/master/skull-stripping
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 17:10:56 2021
@author: K Somasundaram, ka.somasundaram@gmail.com
"""
import numpy as npy
from skimage.filters import threshold_otsu
from skimage import measure
# import image reading module image from matplotlib
import matplotlib.image as img
#import image ploting module pyplot from matplotlib
import matplotlib.pyplot as plt
inim=img.imread('015.bmp')
#Find the dimension of the input image
dimn=inim.shape
print('dim=',dimn)
plt.figure(1)
plt.imshow(inim)
#-----------------------------------------------
# Find a threshold for the image using Otsu method in filters
th=threshold_otsu(inim)
print('Threshold = ',th)
# Binarize using threshold th
binim1=inim>th
plt.figure(2)
plt.imshow(binim1)
#--------------------------------------------------
# Erode the binary image with a structuring element
from skimage.morphology import disk
import skimage.morphology as morph
#Erode it with a disk of radius 3
eroded_image=morph.erosion(binim1,disk(3))
plt.figure(3)
plt.imshow(eroded_image)
#---------------------------------------------
#------------------------------------------------
# label the binary image
labelimg=measure.label(eroded_image,background=0)
plt.figure(4)
plt.imshow(labelimg)
#--------------------------------------------------
# Find the areas of the connected regions
prop=measure.regionprops(labelimg)
# Find the number of objects in the image
ncount=len(prop)
print('Number of regions=',ncount)
#-----------------------------------------------------
# Find the index of the LCC (largest connected component)
argmax=0
maxarea=0
# Find the largest connected region
for i in range(ncount):
    if(prop[i].area > maxarea):
        maxarea=prop[i].area
        argmax=i
print('max area=',maxarea,'arg max=',argmax)
print('values=',[region.area for region in prop])
# Take only the largest connected region
# Generate a mask the size of the input image, with all zeros
bmask=npy.zeros(inim.shape,dtype=npy.uint8)
# Set the pixels belonging to the LCC (label argmax+1) to 1
bmask[labelimg == (argmax+1)] =1
plt.figure(5)
plt.imshow(bmask)
#------------------------------------------------
#Dilate the isolated region to recover the pixels lost in erosion
dilated_mask=morph.dilation(bmask,disk(6))
plt.figure(6)
plt.imshow(dilated_mask)
#---------------------------------------
# Extract the brain using the brain mask
brain=inim*dilated_mask
plt.figure(7)
plt.imshow(brain)
Input Image