I have the following binary image and I want to remove the spots with value 0 inside the area of pixels with value 1.
I tried the following code from the skimage package:
im1 = morphology.remove_small_objects(img_test, 500, connectivity=1)
But the code does not do anything. The picture stays the same, no matter how I change the parameters.
Where is my mistake?
import numpy as np
import matplotlib.pyplot as plt
# image posted by OP
URL = "https://i.stack.imgur.com/Pa7Io.png"
# Read image
from skimage import io
from skimage.filters import threshold_otsu
from skimage.color import rgb2gray
image = rgb2gray(io.imread(URL)[21:899, 555:1125, :3])  # indices crop out the needed part of the posted image
# Convert to True-False
_2color = image > threshold_otsu(image)
# accumulate True from left to right: True if any pixel to the left is True
trueLeft = np.logical_or.accumulate(_2color, axis=1)
# accumulate True from right to left: True if any pixel to the right is True
trueRight = np.logical_or.accumulate(_2color[:, ::-1], axis=1)[:, ::-1]
# True only where there is a True pixel both to the left and to the right
orImage = trueLeft * trueRight
plt.imshow(orImage)
plt.show()
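As a side note on the original question: remove_small_objects removes small connected regions of True pixels, while the 0-valued spots inside the 1-valued area are holes, which skimage.morphology.remove_small_holes is meant for. A minimal sketch, assuming a boolean array such as the _2color computed above (or a boolean version of img_test):
from skimage import morphology
# fill enclosed background (False) regions smaller than 500 pixels;
# the second argument is the maximum hole area to fill
filled = morphology.remove_small_holes(_2color, 500)
plt.imshow(filled)
plt.show()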
I am trying to remove the black spots from the face in this image using erosion methods.
I have implemented:
import skimage.io
import skimage.color
import skimage.exposure
import skimage.morphology
import matplotlib.pyplot

img = skimage.io.imread('blemish.jpeg')
img = skimage.color.rgb2gray(img)
img_inten = skimage.exposure.rescale_intensity(img, in_range=(50, 100))
dilation_seed = img_inten.copy()
dilation_seed[1:-1, 1:-1] = img_inten.min()
mask = img_inten
eroded_img = skimage.morphology.reconstruction(dilation_seed, mask, method='dilation')
matplotlib.pyplot.imshow(eroded_img, cmap='gray')
My output is always a black image in both cases. What is going wrong here?
rgb2gray outputs the image as a matrix of floats, with values in [0, 1].
So rescale_intensity just outputs a matrix of zeros, since you ask for values between 50 and 100 and there are none in the grayscale image.
You can fix it like this:
import matplotlib.pyplot as plt
from skimage import io, color, exposure
from skimage.morphology import reconstruction

img = io.imread('blemish.jpeg')
gray_img = 255 * color.rgb2gray(img)  # multiply by 255 to get back into the [0, 255] range
img_inten = exposure.rescale_intensity(gray_img, in_range=(50, 100))

dilation_seed = img_inten.copy()
dilation_seed[1:-1, 1:-1] = img_inten.min()
mask = img_inten
eroded_img = reconstruction(dilation_seed, mask, method='dilation')

plt.imshow(eroded_img, cmap='gray')
plt.show()
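Alternatively (a sketch, not part of the original answer), you can keep the [0, 1] float image and express the same window on the float scale, since rescale_intensity also accepts float bounds for in_range:
from skimage import io, color, exposure
img = color.rgb2gray(io.imread('blemish.jpeg'))  # floats in [0, 1]
# the same 50-100 window, expressed on the [0, 1] scale
img_inten = exposure.rescale_intensity(img, in_range=(50 / 255, 100 / 255))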
Is it possible to rotate an image and keep the true values? When I rotate a black and white image I get back grey values. Can I rotate without averaging the pixel values? I can almost do it manually by using np.where() but it gets difficult when there are more than 2 pixel values.
Code:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2
filename = 'rot square.png'
img = cv2.imread('square test.png')
img_rot = ndimage.rotate(img, 10, reshape = False)
cv2.imwrite(filename, img_rot)
(Images in the original post: the original image, the rotated image with averaged values, and the desired result with true values.)
Here, using PIL: Image.rotate defaults to nearest-neighbour resampling, so the original pixel values are preserved:
from PIL import Image
img = Image.open('original.png')
rotated = img.rotate(45)
rotated.save('rotated.png')
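If you prefer to stay with scipy.ndimage as in the question, the grey values come from the default spline interpolation; order=0 selects nearest-neighbour resampling, which keeps the original pixel values. A sketch based on the question's code (not part of the original answer):
from scipy import ndimage
import cv2
img = cv2.imread('square test.png')
# order=0 = nearest-neighbour: no interpolation, so no averaged grey values
img_rot = ndimage.rotate(img, 10, reshape=False, order=0)
cv2.imwrite('rot square.png', img_rot)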
The requirement is to crop a region of interest from a binary image.
I need a rectangular image, obtained by removing the extra space around the region of interest.
For example:
From this original image I want only the region of interest marked with the yellow rectangle.
Note: the yellow rectangle is just for reference and is not present in the image that will be processed.
I tried the following Python code, but it does not give the required output.
from PIL import Image
from skimage.io import imread
from skimage.morphology import convex_hull_image
from skimage.color import rgb2gray
import numpy as np
from matplotlib import pyplot as plt
im = imread('binaryImageEdited.png')
plt.imshow(im)
plt.title('input image')
plt.show()
# create a binary image
im1 = 1 - rgb2gray(im)
threshold = 0.8
im1[im1 <= threshold] = 0
im1[im1 > threshold] = 1
chull = convex_hull_image(im1)
plt.imshow(chull)
plt.title('convex hull in the binary image')
plt.show()
imageBox = Image.fromarray((chull*255).astype(np.uint8)).getbbox()
cropped = Image.fromarray(im).crop(imageBox)
cropped.save('L_2d_cropped.png')
plt.imshow(cropped)
plt.show()
Thank you.
Your image is not actually binary on account of two things:
firstly, it has 26 colours, and
secondly it has an (entirely unnecessary) alpha channel.
You can trim it like this:
#!/usr/bin/env python3
from PIL import Image, ImageOps
# Open image, ensure greyscale, and discard the useless alpha channel
im = Image.open("thing.png").convert('L')
# Threshold and invert image as not actually binary
thresh = im.point(lambda p: p < 64 and 255)
# Get bounding box of thresholded image
bbox1 = thresh.getbbox()
crop1 = thresh.crop(bbox1)
# Invert and crop again
crop1n = ImageOps.invert(crop1)
bbox2 = crop1n.getbbox()
crop2 = crop1.crop(bbox2) # You don't actually need this - it's just for debug
# Trim original, unthresholded, uninverted image to the two bounding boxes
result = im.crop(bbox1).crop(bbox2)
result.save('result.png')
I have a similar problem. Also, it would be helpful if the saved image were 32×32 px.
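For the 32×32 request, a minimal sketch building on the result image from the answer above (the output file name is just an example); Image.NEAREST avoids introducing grey values:
from PIL import Image
# resize the doubly-cropped result to 32x32 with nearest-neighbour resampling
result_32 = result.resize((32, 32), Image.NEAREST)
result_32.save('result_32x32.png')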
Hey, I am trying to resize an image without stretching it, adding white pixels instead. I looked around but found nothing specifying how that can be achieved from within skimage. So instead I used numpy to add the extra pixels as arrays of float(255) before the resize.
from skimage.io import imread
from skimage.io import imsave
from skimage.transform import resize
from matplotlib import pyplot as plt
from pylab import cm
import numpy as np
from skimage import morphology
from skimage import measure
from scipy import misc
def process(file_):
    im = imread(file_, as_grey=True)
    #im = misc.imread(file_)
    #im = np.fromfile(file_, dtype=np.int64)

    # Filler to avoid stretching
    orig_rows, orig_cols = im.shape
    print(orig_rows, orig_cols)

    if orig_rows < orig_cols:
        for addition in range(0, orig_cols - orig_rows):
            # adding white rows
            lst = np.array(list(float(255) for x in range(0, orig_cols)))
            im = np.vstack((im, lst))

    if orig_rows > orig_cols:
        for addition in range(0, orig_rows - orig_cols):
            # adding white columns
            lst = np.array(list([float(255)] for x in range(0, orig_rows)))
            im = np.hstack((im, lst))

    image = resize(im, (48, 48))
    imsave('test.jpg', im)
    imsave('test1.jpg', image)
    plt.imshow(im, cmap=cm.gray)
    plt.show()
When I view the image with pyplot it looks like this:
We can see that the columns have been added, but after I save the image with
image = resize(im, (48, 48))
imsave('test.jpg', im)
imsave('test1.jpg', image)
the images look like negatives, and the resized image looks completely white (next to the dark background of the site it is invisible). Any ideas?
The code below should work. Note that the color of the padded areas is not exactly white, so that the image boundaries are visible in the uploaded image. For white padding, set fill_cval = np.max(img).
def resize_padded(img, new_shape, fill_cval=None, order=1):
    import numpy as np
    from skimage.transform import resize

    if fill_cval is None:
        fill_cval = np.max(img)

    # scale the image so that it fits inside new_shape, keeping the aspect ratio
    ratio = np.min([n / i for n, i in zip(new_shape, img.shape)])
    interm_shape = np.rint([s * ratio for s in img.shape]).astype(int)
    interm_img = resize(img, interm_shape, order=order, cval=fill_cval)

    # paste the resized image into the centre of a canvas filled with fill_cval
    new_img = np.empty(new_shape, dtype=interm_img.dtype)
    new_img.fill(fill_cval)
    pad = [(n - s) >> 1 for n, s in zip(new_shape, interm_shape)]
    new_img[tuple(slice(p, p + s) for p, s in zip(pad, interm_shape))] = interm_img

    return new_img
import numpy as np
from skimage.data import astronaut
from skimage.color import rgb2gray  # using luminance
from skimage.io import imsave

img = rgb2gray(astronaut())

# set desired image size
out_size = (30, 100)  # height, width

# set the color of the padded area. Here: "95% luminance"
fill_cval = np.max(img) * 0.95

resized_img = resize_padded(img, out_size, fill_cval=fill_cval)

imsave('img.png', img)
imsave('img_res.png', resized_img)
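For completeness, a shorter sketch of the question's original goal. It assumes a current skimage where imread(..., as_gray=True) returns floats in [0, 1], which is also why padding with 255.0 in the question made the rest of the image look black; 'input.png' is a placeholder file name:
import numpy as np
from skimage import img_as_ubyte
from skimage.io import imread, imsave
from skimage.transform import resize
im = imread('input.png', as_gray=True)  # floats in [0, 1]
rows, cols = im.shape
side = max(rows, cols)
pad_r, pad_c = side - rows, side - cols
# pad with 1.0 (white on the [0, 1] float scale), keeping the picture centred
im_sq = np.pad(im, ((pad_r // 2, pad_r - pad_r // 2),
                    (pad_c // 2, pad_c - pad_c // 2)),
               mode='constant', constant_values=1.0)
imsave('test1.jpg', img_as_ubyte(resize(im_sq, (48, 48))))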
I'm working on pictures that have been converted to grayscale with:
Image.open('image.png').convert('LA')
I add a mask and I plot my picture with it, but while I expect to get grayscale values between 0 and 255, the values are very low as you can see below. There must be something wrong with the format. What do I have to do to get values between 0 and 255?
import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy.ma as ma

mask = plt.imread("mask.png")
test = plt.imread("1.png")

# I have to force the dimension...
tab = mask[::, ::, 0]
test = test[::, ::, 0]

width, height = tab.shape
matrix = np.reshape(tab, (width, height))

test_mask = np.ma.array(ma.masked_array(test, tab.max() - tab))
And this is the plot:
By using OpenCV it works:
img = cv2.imread('test.png',0)
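If you want to stay with PIL/matplotlib rather than OpenCV: plt.imread returns floats in [0, 1] for PNG files, which is why the values looked so low. A minimal sketch using the file names from the question (the [:, :, 0] indexing assumes mask.png has color channels, as in the question's code):
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# PIL route: convert('L') gives uint8 values in the familiar 0-255 range
test = np.array(Image.open('1.png').convert('L'))
# matplotlib route: rescale the [0, 1] floats returned for PNGs
mask = (plt.imread('mask.png')[:, :, 0] * 255).astype(np.uint8)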