I want to use SSIM to compare the similarity of two images.
I'm getting this error: window_shape is incompatible with arr_in.shape.
Why? (What does it mean?)
from skimage.measure import structural_similarity as ssim
from skimage import io
img1 = io.imread('http://pasteio.com/m85cc2eed18c661bf8a0ea7e43779e742')
img2 = io.imread('http://pasteio.com/m1d45b9c70afdb576f1e3b33d342bf7d0')
ssim( img1, img2 )
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/var/www/wt/local/lib/python2.7/site-packages/skimage/measure/_structural_similarity.py", line 58, in structural_similarity
    XW = view_as_windows(X, (win_size, win_size))
  File "/var/www/wt/local/lib/python2.7/site-packages/skimage/util/shape.py", line 221, in view_as_windows
    raise ValueError("window_shape is incompatible with arr_in.shape")
ValueError: window_shape is incompatible with arr_in.shape
I get the same error even when I feed it the same file twice: ssim(img1, img1).
You need to make sure your images are the same size before comparing them with scikit-image's SSIM:
from skimage.measure import compare_ssim
from skimage.transform import resize
from scipy.ndimage import imread
import numpy as np

# common size to resize both images to
height = 2**10
width = 2**10

# load both images as grayscale (flatten=True)
a = imread('a.jpg', flatten=True).astype(np.uint8)
b = imread('b.jpg', flatten=True).astype(np.uint8)

# bring them to the same shape so SSIM can compare them window by window
a = resize(a, (height, width))
b = resize(b, (height, width))

sim, diff = compare_ssim(a, b, full=True)
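Note that compare_ssim was later renamed; in recent scikit-image releases the equivalent call (a sketch, assuming a and b are floats in [0, 1] after the resize above) is:

from skimage.metrics import structural_similarity

# a and b must share the same shape; data_range=1.0 because they are floats in [0, 1]
sim, diff = structural_similarity(a, b, data_range=1.0, full=True)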
I have tried to convert an RGB image to an Otsu binary (grayscale) image, but that doesn't seem to work, as I get the error mentioned below.
from cv2 import cv2
import numpy as np
from skimage.color import rgb2gray
from skimage.filters import threshold_otsu
from skimage.io import imread
from skimage.morphology import skeletonize
from skimage.util import invert
import matplotlib.pyplot as plt
img = rgb2gray(imread('Ared.png'))
binary = img > threshold_otsu(img)
np.unique(binary)
skeleton = skeletonize(invert(binary))
cv2.imshow('original', img)
cv2.imshow('skeleton', skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result in terminal:
Traceback (most recent call last):
  File "preprocessing.py", line 16, in <module>
    cv2.imshow('skeleton', skeleton)
TypeError: Expected Ptr<cv::UMat> for argument '%s'
Your format is wrong: skeletonize gives you a boolean array, which cv2.imshow can't display. That's a common error with OpenCV. You can change this line to convert the skeleton to float32 and it should work fine:
cv2.imshow('skeleton', np.float32(skeleton))
Bad exception messages are bad...
cv2.imshow does not handle binary arrays. The acceptable types according to this answer are uint8, uint16, int, float, and double.
You should be able to convert the array to uint8 with:
skeleton.astype('u1')
This will leave you with values of 0 and 1 though, which all look very dark. If you then multiply the array by 255, the colors will be black and white as expected:
skeleton.astype('u1') * 255
Full example, using a sample image from skimage.data:
from cv2 import cv2
from skimage.filters import threshold_otsu
from skimage.morphology import skeletonize
from skimage.util import invert
from skimage.data import camera
img = camera()
binary = img > threshold_otsu(img)
skeleton = skeletonize(invert(binary))
cv2.imshow('original', img)
cv2.imshow('skeleton', skeleton.astype('u1')*255)
cv2.waitKey(0)
cv2.destroyAllWindows()
I applied image deconvolution using the unsupervised Wiener algorithm and increased the sharpness and contrast on a specific dataset, but I get an error when I run the code: AttributeError: 'numpy.ndarray' object has no attribute 'convert'. How do I fix it? My code is given below.
import cv2
import glob
from matplotlib import pyplot as plt
from skimage import io, color, restoration, img_as_float
import scipy.stats as st
import numpy as np
from PIL import Image
from PIL import ImageEnhance
all_img = glob.glob('input/*.png')
other_dir = 'output/'

for img_id, img_path in enumerate(all_img):
    img = img_as_float(io.imread(img_path, 0))

    def gkern(kernlen=21, nsig=2):
        lim = kernlen//2 + (kernlen % 2)/2
        x = np.linspace(-lim, lim, kernlen+1)
        kern1d = np.diff(st.norm.cdf(x))
        kern2d = np.outer(kern1d, kern1d)
        return kern2d/kern2d.sum()

    psf = gkern(5, 3)
    deconvolved, _ = restoration.unsupervised_wiener(img, psf)

    # Applied sharpness and contrast
    enhancer_object = ImageEnhance.Contrast(deconvolved)
    out = enhancer_object.enhance(1.4)
    enhancer = ImageEnhance.Sharpness(out)
    enhanced_im = enhancer.enhance(8.0)

    enhanced_cv_im = np.array(enhanced_im)
    cl2 = cv2.resize(enhanced_cv_im, (512, 512), interpolation=cv2.INTER_CUBIC)
    plt.imsave(f"output/unsupervised_wiener_{img_id}.png", cl2, cmap='gray')
It shows this error:
runfile('C:/Users/Junaed/.spyder-py3/unsupervised_wiener.py', wdir='C:/Users/Junaed/.spyder-py3')
Traceback (most recent call last):
File "C:\Users\Junaed\.spyder-py3\unsupervised_wiener.py", line 37, in <module>
enhancer_object = ImageEnhance.Contrast(deconvolved)
AttributeError: 'numpy.ndarray' object has no attribute 'convert'
ImageEnhance.Contrast() is expecting a PIL image, which it can run image.convert on, but you passed it a numpy array. To convert to PIL you can do this:
from PIL import Image
import numpy
im = Image.fromarray(numpy.uint8(deconvolved))
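One caveat (just a sketch, assuming the skimage result stays roughly in the 0-1 range): restoration.unsupervised_wiener works on float images, so casting the result straight to uint8 would round almost everything down to 0. Rescaling to 0-255 first keeps the image visible:

import numpy as np
from PIL import Image, ImageEnhance

# assumes `deconvolved` is a float array roughly in [0, 1]
pil_im = Image.fromarray(np.uint8(np.clip(deconvolved, 0, 1) * 255))
out = ImageEnhance.Contrast(pil_im).enhance(1.4)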
I'm trying to apply the flood_fill method to a certain image. Unfortunately, even though it works on an example image, it doesn't work on mine, which is already binarized.
The code that works:
from skimage import data, filters
from skimage.segmentation import flood, flood_fill
import cv2 as cv
cameraman = data.camera()
flooded = flood_fill(cameraman, (200, 100), 255, tolerance=10)
cv.imshow("aaa",flooded)
cv.waitKey()
And the code that does not:
from skimage import data, filters
from skimage.segmentation import flood, flood_fill
import cv2 as cv
import numpy as np
img = cv.imread("Tubka_binar.png")
flooded = flood_fill(img, (200, 100), 100, tolerance = 10)
cv.imshow("aaa",flooded)
cv.waitKey()
And the errors I get:
Traceback (most recent call last):
File "C:/Users/User/Documents/PW/MAGISTERSKIE/__PRACA/Python/Grubość Tuby.py", line 8, in <module>
flooded = flood_fill(img, (200, 100), 100, tolerance = 10)
File "C:\Users\User\Desktop\PROJEKT_PYTHONOWY\venv\lib\site-packages\skimage\morphology\_flood_fill.py", line 104, in flood_fill
tolerance=tolerance)
File "C:\Users\User\Desktop\PROJEKT_PYTHONOWY\venv\lib\site-packages\skimage\morphology\_flood_fill.py", line 235, in flood
working_image.shape, order=order)
File "<__array_function__ internals>", line 6, in ravel_multi_index
ValueError: parameter multi_index must be a sequence of length 3
Process finished with exit code 1
The image variables in both cases seem to be the same type. The image that I read in the second case is a binarized photo that takes only two values: 0 and 255.
What is causing this?
Best regards
It looks to me like your second image is not actually grayscale but rather saved (or loaded) as a 3-channel RGB image. If you print img.shape, I bet it’ll be something like (512, 512, 3). You can fix this by changing your reading code to:
img = cv.imread("Tubka_binar.png")[..., 0]
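Alternatively (a sketch of the same idea), you can ask OpenCV to load the file as a single-channel image in the first place:

img = cv.imread("Tubka_binar.png", cv.IMREAD_GRAYSCALE)
flooded = flood_fill(img, (200, 100), 100, tolerance=10)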
I'm trying to take my HSV values and make an image out of it. Here is my code:
from __future__ import division
from PIL import Image
import numpy as np
import colorsys
fp = open('pixels.txt', 'w')
fp2 = open('hsv.txt', 'w')
im = Image.open('colorimage.png')
imrgb = im.convert("RGB")
scale = 255.0
pixels = list(imrgb.getdata())
width, height = im.size
pixels = [pixels[i * width:(i + 1) * width] for i in xrange(height)]
for pixel in pixels:
    for x in pixel:
        print>>fp, x
        x = [x[0]/255, x[1]/255, x[2]/255]
        y = colorsys.rgb_to_hsv(*x)
        w = [y[0]*360, y[1]*100, y[2]*100]
        h, s, v = [y[0]*360, y[1]*100, y[2]*100]
        print>>fp2, w

newimg = Image.new("HSV", im.size)
print "done"
Image.new is supposed to accept the modes listed here: http://pillow.readthedocs.io/en/4.0.x/handbook/concepts.html#concept-modes
But it doesn't accept "HSV" as a mode. This is the error it gives:
Traceback (most recent call last):
File "RGBtoHIS.py", line 25, in <module>
newimg = Image.new("HSV", im.size)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/PIL-1.1.7-py2.7-macosx-10.6-x86_64.egg/PIL/Image.py", line 1763, in new
return Image()._new(core.fill(mode, size, color))
ValueError: unrecognized mode
Has anyone else had this issue with the Image module?
Other:
I would like to create a Hue image and a Saturation image. Is there a way to do this with the hue and saturation values I have?
You're referencing the Pillow docs, but you're not using Pillow -- you're using the original PIL version 1.1.7, as shown by your error message:
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/
site-packages/PIL-1.1.7-py2.7-macosx-10.6-x86_64.egg/PIL/Image.py", line 1763, in new
and according to its documentation, it doesn't support HSV as a mode (see here).
Uninstall PIL, install Pillow, and then you should be able to do this:
In [12]: PIL.__version__
Out[12]: '3.4.2'
In [13]: Image.new("HSV", (100,100))
Out[13]: <PIL.Image.Image image mode=HSV size=100x100 at 0x7F4FA00F4F60>
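For the second part of your question (a Hue image and a Saturation image): once you're on Pillow, a minimal sketch, assuming your Pillow build supports RGB-to-HSV conversion, is to convert the image to HSV mode and split the channels instead of recomputing them with colorsys:

from PIL import Image

im = Image.open('colorimage.png').convert("RGB")
hsv = im.convert("HSV")    # per-pixel H, S, V scaled to 0-255
h, s, v = hsv.split()      # three single-band ("L") images
h.save('hue.png')
s.save('saturation.png')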
I've tried to resize an image with scipy, and everything seems to work fine until I try to save the image. When I try to save it I get the error that you can see in the title. The full traceback is available below.
import numpy as np
import scipy.misc
from PIL import Image

image_path = "img0.jpg"

def load_image(img_path):
    img = Image.open(img_path)
    img.load()
    data = np.asarray(img, dtype="int32")
    return data

def save_image(npdata, outfilename):
    img = Image.fromarray(np.asarray(np.clip(npdata, 0, 255), dtype="uint8"), "L")
    img.save(outfilename)

array_image = load_image(image_path)
array_resized_image = scipy.misc.imresize(array_image, (320, 240), interp='nearest', mode=None)
save_image(array_resized_image, "i1.jpg")
Full traceback of the error:
Traceback (most recent call last):
File "D:/Python/Playground/resize image with scipy.py", line 26, in <module>
save_image(array_resized_image, "i1.jpg")
File "D:/Python/Playground/resize image with scipy.py", line 16, in save_image
img = Image.fromarray(np.asarray(np.clip(npdata, 0, 255), dtype="uint8"), "L")
File "C:\Anaconda2\lib\site-packages\PIL\Image.py", line 2154, in fromarray
raise ValueError("Too many dimensions: %d > %d." % (ndim, ndmax))
ValueError: Too many dimensions: 3 > 2.
Don't you need to convert it to a two-dimensional array before calling fromarray(..., 'L')?
You can do that with a scipy function or, actually quicker, by weighting the RGB channels directly. Like this:
npdata = (npdata[:,:,:3] * [0.2989, 0.5870, 0.1140]).sum(axis=2)
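Applied to your example (a sketch that keeps your save_image helper), the conversion goes between the resize and the save:

# collapse RGB to a single luminance channel (ITU-R BT.601 weights), then save as "L"
gray = (array_resized_image[:, :, :3] * [0.2989, 0.5870, 0.1140]).sum(axis=2)
save_image(gray, "i1.jpg")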
array_resized_image has a shape of (320, 240, 3) - it is three-dimensional because the red, green and blue components are stored along the last axis. You can use scipy.misc.imread and scipy.misc.imsave for easier file loading and saving, so your example boils down to this:
import scipy.misc
image_path = "img0.jpg"
array_image = scipy.misc.imread(image_path)
array_resized_image = scipy.misc.imresize(array_image, (320, 240), interp='nearest', mode=None)
scipy.misc.imsave("i1.jpg", array_resized_image)
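Note that scipy.misc.imread, imresize and imsave have been removed from newer SciPy releases. A rough Pillow-only equivalent (just a sketch; PIL's resize takes (width, height), so the size tuple is flipped):

from PIL import Image

# load, resize to 240 wide x 320 tall with nearest-neighbour, and save
img = Image.open("img0.jpg")
resized = img.resize((240, 320), Image.NEAREST)
resized.save("i1.jpg")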