Output Shape not correct - python

A week ago this code ran perfectly, but today I am getting a runtime error: output shape not correct.
from PIL import Image
import glob
import numpy as np
import scipy.ndimage.filters
import matplotlib.pyplot as plt

image_list_Brownspot = []
for filename in glob.glob('./dataset/BrownSpot/*.jpg'):
    im = Image.open(filename)
    image_list_Brownspot.append(im.copy())
    im.close()
len(image_list_Brownspot)

lap = scipy.ndimage.filters.laplace(image_list_Brownspot[0])
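No answer is shown for this question, but a likely cause, given that the code ran a week earlier, is a package update (this is my assumption, not a confirmed diagnosis): with some Pillow/SciPy combinations, passing a PIL Image directly to scipy.ndimage yields a zero-dimensional object array, which triggers "RuntimeError: output shape not correct". Converting the image to a NumPy array explicitly before filtering avoids the problem:

import numpy as np
import scipy.ndimage

# Convert the PIL Image to a float array before filtering.
arr = np.asarray(image_list_Brownspot[0], dtype=float)
lap = scipy.ndimage.laplace(arr)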

Related

Read dataset images with TensorFlow

I am a beginner in deep learning.
I'm working from an article that was posted on GitHub. I downloaded the dataset, which contains 6 types of images: glass, paper, cardboard, plastic, metal and trash.
My problem is that I want to read the dataset the way the author did, but I can't, even though I believe I gave the correct path.
Below is the author's code for reading, for example, the cardboard images, followed by my code, written on Google Colab.
import copy
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow as tf

cardboard = []
for i in range(1, 404):
    temp = tf.keras.preprocessing.image.load_img(
        path="trashnet/cardboard/cardboard" + str(i) + ".jpg",
        grayscale=False, color_mode='rgb', target_size=(227, 227))
    X = np.array(temp)
    cardboard.append(X)
cardboard = np.array(cardboard)
cardboard = np.take(cardboard, np.random.permutation(cardboard.shape[0]), axis=0)
print(cardboard.shape)
My code:

import copy
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow as tf

cardboard = []
for i in range(1, 404):
    temp = tf.keras.preprocessing.image.load_img(
        path="dataset-resized.zip/dataset-resized/cardboard" + str(i) + ".jpg",
        grayscale=False, color_mode='rgb', target_size=(227, 227))
    X = np.array(temp)
    cardboard.append(X)
cardboard = np.array(cardboard)
cardboard = np.take(cardboard, np.random.permutation(cardboard.shape[0]), axis=0)
print(cardboard.shape)
/usr/local/lib/python3.7/dist-packages/keras_preprocessing/image/utils.py in load_img(path, grayscale, color_mode, target_size, interpolation)
    111         raise ImportError('Could not import PIL.Image. '
    112                           'The use of `load_img` requires PIL.')
--> 113     with open(path, 'rb') as f:
    114         img = pil_image.open(io.BytesIO(f.read()))
    115         if color_mode == 'grayscale':

NotADirectoryError: [Errno 20] Not a directory: 'dataset-resized.zip/dataset-resized/cardboard1.jpg'
I was able to replicate the issue using the Garbage Classification dataset here. Please find the working code below, and try passing the correct path to tf.keras.preprocessing.image.load_img.
import copy
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow as tf

cardboard = []
for i in range(1, 404):
    temp = tf.keras.preprocessing.image.load_img(
        path="/content/garbage classification/Garbage classification/cardboard/cardboard" + str(i) + ".jpg",
        grayscale=False, color_mode='rgb', target_size=(227, 227))
    X = np.array(temp)
    cardboard.append(X)
cardboard = np.array(cardboard)
cardboard = np.take(cardboard, np.random.permutation(cardboard.shape[0]), axis=0)
print(cardboard.shape)
Output:
(403, 227, 227, 3)
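For completeness, the NotADirectoryError in the question arises because the failing path treats dataset-resized.zip as if it were a directory; a zip archive has to be extracted before the files inside it can be opened. A minimal sketch (the extraction target directory is an assumption):

import zipfile

# Extract the archive once; the images can then be read from the
# resulting dataset-resized/ directory.
with zipfile.ZipFile("dataset-resized.zip", "r") as zf:
    zf.extractall(".")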

From numpy array to DICOM

My code reads a DICOM file, takes the pixel information into a NumPy array, and then modifies the array. It uses lists because I'm trying to operate on multiple DICOM files at the same time.
I haven't found any information on how to take my modified NumPy array and make it a DICOM file again so I can use it outside Python.
# IMPORTS
import cv2
import numpy as np
import matplotlib.pyplot as plt
import SimpleITK as sitk
from glob import glob
import pydicom as dicom

data_path = "C:\\Users\\oliva\\Desktop\\Py tesis\\dicom\\"
output_path = working_path = "C:\\Users\\oliva\\Desktop\\Py tesis\\dicom1\\"
path = glob(data_path + '/*.dcm')

# Check that we are in the correct path
print("Total of %d DICOM images.\nFirst 5 filenames:" % len(path))
print('\n'.join(path[:5]))

# Functions
def equal(data):
    data_set_eq = []
    for element in data:
        imagen_array_eq = cv2.equalizeHist(element)
        data_set_eq.append(imagen_array_eq)
    return data_set_eq

def median(data):
    data_set_m = []
    for element in data:
        imagen_array_m = cv2.medianBlur(element, 5)
        data_set_m.append(imagen_array_m)
    return data_set_m

data_set = []
for element in path:
    imagen = sitk.ReadImage(element)
    #imagen = cv2.imread(element)
    array_imagen = sitk.GetArrayViewFromImage(imagen)
    array2_imagen = array_imagen[0]
    imagen_array_norm = np.uint8(cv2.normalize(array2_imagen, None, 0, 255, cv2.NORM_MINMAX))
    data_set.append(imagen_array_norm)

# Check
print(len(data_set))
print(type(data_set[1]))
plt.imshow(data_set[4], cmap=plt.cm.gray)

# Equalization
data_set_eq = equal(data_set)
print(len(data_set_eq))
print(type(data_set_eq[6]))
plt.imshow(data_set_eq[7], cmap=plt.cm.gray)

# Filtering
data_set_m = median(data_set)
print(len(data_set_m))
print(type(data_set_m[6]))
plt.imshow(data_set_m[8], cmap=plt.cm.gray)
I would like some enlightenment on how to produce a DICOM file from my modified numpy array.
You can convert the numpy array back to a SimpleITK image, and then write it out as Dicom. The code would look something like this:
for i, x in enumerate(data_set):
    img = sitk.GetImageFromArray(x)
    # Give each file a distinct name so the images are not overwritten.
    sitk.WriteImage(img, "your_image_name_here%03d.dcm" % i)
From the file name suffix, SimpleITK knows to write Dicom.
Note that the filtering you are doing can be accomplished within SimpleITK; you don't really need to use OpenCV. Check out the following filters in SimpleITK: IntensityWindowingImageFilter, AdaptiveHistogramEqualizationImageFilter, and MedianImageFilter.
https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1IntensityWindowingImageFilter.html
https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1AdaptiveHistogramEqualizationImageFilter.html
https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1MedianImageFilter.html
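As a rough illustration of that suggestion, here is a minimal sketch that performs the median filtering and histogram equalization entirely in SimpleITK through its procedural interface; the file names and filter parameters below are placeholders, not values from the question:

import SimpleITK as sitk

# Read one DICOM file and take its first 2-D slice.
img = sitk.ReadImage("input.dcm")[:, :, 0]

img = sitk.Median(img, [2, 2])                 # MedianImageFilter
img = sitk.AdaptiveHistogramEqualization(img)  # AdaptiveHistogramEqualizationImageFilter

# DICOM stores integer pixel types, so cast before writing.
img = sitk.Cast(img, sitk.sitkInt16)
sitk.WriteImage(img, "filtered.dcm")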

How do I resolve "Use scipy.optimize.linear_sum_assignment instead"

I am using a Python script for people detection.
I have the following lines in my script:
import time
import cv2 as cv
import glob
import argparse
import sys
import numpy as np
import os.path
from imutils.video import FPS
from collections import deque
from sklearn.utils.linear_assignment_ import linear_assignment
When I run my script, I get the following deprecation warning:
/home/user/.local/lib/python3.6/site-packages/sklearn/utils/linear_assignment_.py:127:
DeprecationWarning: The linear_assignment function is deprecated in 0.21 and will be removed from 0.23. Use scipy.optimize.linear_sum_assignment instead.
DeprecationWarning)
Please advise me on how to solve this.
You need to replace the sklearn.utils.linear_assignment_.linear_assignment function with the scipy.optimize.linear_sum_assignment function.
The difference is in the return format: linear_assignment() returns a single numpy array, while linear_sum_assignment() returns a tuple of numpy arrays. You obtain the same output by converting the output of linear_sum_assignment() to an array and transposing it.
Your script should look like this:
import time
import cv2 as cv
import glob
import argparse
import sys
import numpy as np
import os.path
from imutils.video import FPS
from collections import deque
from scipy.optimize import linear_sum_assignment
# compute your cost matrix, then:
indices = linear_sum_assignment(cost_matrix)
indices = np.asarray(indices)
indices = np.transpose(indices)
Alternatively, replace linear_assignment with linear_sum_assignment and convert the result:
# from sklearn.utils.linear_assignment_ import linear_assignment
from scipy.optimize import linear_sum_assignment
cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
# result = linear_assignment(cost)
result = linear_sum_assignment(cost)
result = np.array(list(zip(*result)))
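For the cost matrix above, both versions produce the same assignment, array([[0, 1], [1, 0], [2, 2]]): rows 0, 1 and 2 are matched to columns 1, 0 and 2, for a total cost of 1 + 2 + 2 = 5.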
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html

PyTorch: TypeError: Cannot handle this data type

I'm doing an image processing task, and I want to use torch.cat to concatenate pictures belonging to two different folders. The images in folder 1 are 224*224*3, and the images in folder 2 are 224*224*1. After that, I want to save the generated image. I used the save function, but I got the error "TypeError: Cannot handle this data type". Please help me find the solution.
Here is my code:
import glob
import os
import torch
import torchvision.transforms.functional as TF
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc

f1_folder = 'F:\\picture1'
f2_folder = 'F:\\picture2'
f1_images = glob.glob(os.path.join(f1_folder, '*.jpg'))
f2_images = glob.glob(os.path.join(f2_folder, '*.jpg'))

for f1_img, f2_img in zip(f1_images, f2_images):
    img1 = Image.open(f1_img)
    img2 = Image.open(f2_img)
    cat_img = torch.cat((TF.to_tensor(img1), TF.to_tensor(img2)), dim=1)  # for 'CHW'
    cat_img = cat_img.numpy()
    cat_img = np.transpose(cat_img, (1, 2, 0))
    im = Image.fromarray(cat_img)
    im.save("file.jpeg")
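No answer is shown for this question, but two problems are visible in the code (my reading, not a confirmed answer): the tensors have shapes (3, 224, 224) and (1, 224, 224), and torch.cat requires every dimension except the concatenation axis to match; and TF.to_tensor returns float32 values in [0, 1], which Image.fromarray cannot handle. A sketch of one possible fix, converting the grayscale image to RGB and scaling back to uint8 before saving:

import torch
import torchvision.transforms.functional as TF
from PIL import Image
import numpy as np

img1 = Image.open("picture1.jpg")                  # 224x224, 3 channels
img2 = Image.open("picture2.jpg").convert("RGB")   # replicate the single channel to 3

# With matching channel counts, dim=1 stacks the two images vertically (CHW layout).
cat_img = torch.cat((TF.to_tensor(img1), TF.to_tensor(img2)), dim=1)

# to_tensor yields float32 in [0, 1]; PIL expects uint8 in [0, 255].
arr = (cat_img.numpy().transpose(1, 2, 0) * 255).astype(np.uint8)
Image.fromarray(arr).save("file.jpeg")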

How to create a TFRecord from a NumPy array

I'd like to 1. iterate through a directory of images and turn each image into a NumPy array. I think I have accomplished this with the following code:
import tensorflow as tf
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import os

myimages = []
arrays = []
path_to_images = 'images_animation'
filenum = len([name for name in os.listdir(path_to_images)
               if os.path.isfile(os.path.join(path_to_images, name))])

# Loop through the available PNGs
for p in range(1, filenum):
    # Read in the picture
    fname = "images_animation/image%03d.png" % p
    img = mpimg.imread(fname)
    imgplot = plt.imshow(img)
    # Append the AxesImage object to the list
    myimages.append([imgplot])
    # Convert the same file to a grayscale NumPy array
    arr = np.array(Image.open(fname).convert("L"))
    arrays.append(arr)
    print(arr)
If I can make this code better or more efficient, please feel free to tell me how.
Now, I'd like to 2. turn these NumPy arrays into TFRecords. What is the best way to do so? I'm nearly clueless about how to do this and haven't made much progress on my own, so I'm looking for a solution.
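No answer is shown here, but the standard approach is to serialize each array into a tf.train.Example and write it with tf.io.TFRecordWriter. Below is a minimal sketch; the feature names and output file name are my own choices, not from the question:

import tensorflow as tf
import numpy as np

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

with tf.io.TFRecordWriter("images.tfrecord") as writer:
    for arr in arrays:  # each arr is a 2-D uint8 array from the loop above
        example = tf.train.Example(features=tf.train.Features(feature={
            "image_raw": _bytes_feature(arr.tobytes()),
            "height": _int64_feature(arr.shape[0]),
            "width": _int64_feature(arr.shape[1]),
        }))
        writer.write(example.SerializeToString())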
