How to import a trained model to predict a single image? - python

I trained a CNN model with Keras and saved it with model.save('model.h5').
Now I want to test the model on a single image, but I don't know how to feed my own image to it.
# Image generators
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    shuffle=True,
    batch_size=batch_size,
    class_mode='categorical'
)

validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(image_size, image_size),
    batch_size=batch_size,
    shuffle=True,
    class_mode='categorical'
)
# Fit model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=(nb_train_samples // batch_size),
    epochs=nb_epoch,
    validation_data=validation_generator,
    callbacks=[early_stopping],  # save_best_model],
    validation_steps=(nb_validation_samples // batch_size)
)
# Save model
model.save_weights('full_model_weights.h5')
model.save('model.h5')
I am new to Keras. How can I feed an image to my model and classify it into a certain class?
The input shape:
if K.image_data_format() == 'channels_first':
    input_shape = (3, image_size, image_size)
else:
    input_shape = (image_size, image_size, 3)
My code for loading an image:
from keras.models import load_model
m = load_model("model.h5")

if K.image_data_format() == 'channels_first':
    input_shape = (3, image_size, image_size)
else:
    input_shape = (image_size, image_size, 3)

cloudy_pic = "./Weather/weather_database/cloudy/4152.jpg"
im = Image.open(cloudy_pic).convert('RGB')
data = np.array(im, dtype=np.float32)
data = np.reshape(500, 500, 3)
pre = m.predict_classes(data)
pre
And the error:
AttributeError: 'int' object has no attribute 'reshape'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-30-ebc72e185819> in <module>()
10 im=Image.open(cloudy_pic).convert('RGB')
11 data=np.array(im,dtype=np.float32)
---> 12 data=np.reshape(500, 500,3)
13 pre=m.predict_classes(data)
14 pre
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in reshape(a, newshape, order)
230 [5, 6]])
231 """
--> 232 return _wrapfunc(a, 'reshape', newshape, order=order)
233
234
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
65 # a downstream library like 'pandas'.
66 except (AttributeError, TypeError):
---> 67 return _wrapit(obj, method, *args, **kwds)
68
69
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in _wrapit(obj, method, *args, **kwds)
45 except AttributeError:
46 wrap = None
---> 47 result = getattr(asarray(obj), method)(*args, **kwds)
48 if wrap:
49 if not isinstance(result, mu.ndarray):
ValueError: cannot reshape array of size 1 into shape (500,)

You can resize your image before converting it to a NumPy array. Note also that np.reshape(500, 500, 3) passes the integer 500 as the array to be reshaped (hence the error); you would want data.reshape(500, 500, 3), but resizing is what actually guarantees the shape matches the model:
from PIL import Image
import numpy as np

img = Image.open(img_path)
img = img.resize((image_size, image_size))  # match the size the model was trained on
img = np.array(img)
img = img / 255.0  # same rescaling as the training generators
img = img.reshape(1, image_size, image_size, 3)  # add the batch dimension
m.predict_classes(img)
The input shape of your model must be [None,image_size,image_size,3], or [None,3,image_size,image_size] if channels_first.
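To turn the predicted index back into a label, the training generator already keeps the mapping. A minimal sketch, assuming the train_generator from the question is still in scope and the model is Sequential (predict_classes only exists on Sequential models):
# class_indices maps class name -> index; invert it to recover the name
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}
pred_idx = m.predict_classes(img)[0]
print(idx_to_class[pred_idx])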

You can do something like this:
import cv2
import numpy as np
from keras.models import load_model

model = load_model('model.h5')

def predict_image(model, img, image_x=32, image_y=32):
    # img: YOUR IMAGE (let's say it's 32x32x1)
    img = cv2.resize(img, (image_x, image_y))
    img = np.array(img, dtype=np.float32)
    img = np.reshape(img, (-1, image_x, image_y, 1))  # add the batch dimension
    pred_probab = model.predict(img)[0]
    pred_class = list(pred_probab).index(max(pred_probab))  # index of highest probability
    return max(pred_probab), pred_class
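Equivalently, the index of the highest probability can be found with np.argmax:
pred_class = int(np.argmax(pred_probab))  # same result as list(pred_probab).index(max(pred_probab))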

# Code for predicting a locally stored image against a trained model.
# My local image is 28 x 28 already.
import numpy as np
from PIL import Image
from keras.preprocessing import image

img = image.load_img('file path include full file name')  # , target_size=(32,32))
img = image.img_to_array(img)
img = img.reshape((1,) + img.shape)  # add the batch dimension
# img = img/255
img = img.reshape(-1, 784)  # flatten for a dense-input model
img_class = model.predict_classes(img)
# The model above was already trained; code adapted from
# https://machinelearningmastery.com/handwritten-digit-recognition-using-convolutional-neural-networks-python-keras/
classname = img_class[0]
print("Class: ", classname)

Related

What is Dimensions 37 and 50 in Tensorflow?

Hello, I am working on semantic segmentation with the DeepLabV3Plus architecture and TensorFlow (Keras). It worked well with another dataset, but now I want to use my own. In the first step, loading the data, I get a strange error. The function is
tf.data.Dataset.from_tensor_slices
and the error is:
ValueError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_20192\306109049.py in <module>
57
58 train_dataset = data_generator(train_images, train_masks)
---> 59 val_dataset = data_generator(val_images, val_masks)
60
61 print("Train Dataset:", train_dataset)
~\AppData\Local\Temp\ipykernel_20192\306109049.py in data_generator(image_list, mask_list)
50
51 def data_generator(image_list, mask_list):
---> 52 dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
53 dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
54 dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in from_tensor_slices(tensors, name)
812 Dataset: A `Dataset`.
813 """
--> 814 return TensorSliceDataset(tensors, name=name)
815
816 class _GeneratorState(object):
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in __init__(self, element, is_files, name)
4720 batch_dim.assert_is_compatible_with(
4721 tensor_shape.Dimension(
-> 4722 tensor_shape.dimension_value(t.get_shape()[0])))
4723
4724 variant_tensor = gen_dataset_ops.tensor_slice_dataset(
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\framework\tensor_shape.py in assert_is_compatible_with(self, other)
298 if not self.is_compatible_with(other):
299 raise ValueError("Dimensions %s and %s are not compatible" %
--> 300 (self, other))
301
302 def merge_with(self, other):
ValueError: Dimensions 37 and 50 are not compatible
The error is "Dimensions 37 and 50 are not compatible". I searched for this but could not find a solution.
Code:
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = r'C:/Users/Joshi/Desktop/CARLA_0.9.13/WindowsNoEditor/PythonAPI/examples/out'
NUM_TRAIN_IMAGES = 250
NUM_VAL_IMAGES = 50

train_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[:NUM_TRAIN_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[:NUM_TRAIN_IMAGES]
val_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
val_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]

def read_image(image_path, mask=False):
    image = tf.io.read_file(image_path)
    if mask:
        image = tf.image.decode_png(image, channels=1)
        image.set_shape([None, None, 1])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
    else:
        image = tf.image.decode_png(image, channels=3)
        image.set_shape([None, None, 3])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
        image = image / 127.5 - 1
    return image

def load_data(image_list, mask_list):
    image = read_image(image_list)
    mask = read_image(mask_list, mask=True)
    return image, mask

def data_generator(image_list, mask_list):
    dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
    dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
    return dataset

train_dataset = data_generator(train_images, train_masks)
val_dataset = data_generator(val_images, val_masks)

print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)
It turned out the dataset was simply the wrong size: the validation image and mask lists had different lengths (37 vs. 50), and from_tensor_slices requires the slices to line up.
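Since from_tensor_slices slices both lists in parallel along the first dimension, a quick sanity check before building the dataset catches the mismatch early. A small sketch, using the variable names from the question:
# Both lists are sliced in lock-step, so they must have the same length.
assert len(train_images) == len(train_masks), \
    f"{len(train_images)} train images vs {len(train_masks)} masks"
assert len(val_images) == len(val_masks), \
    f"{len(val_images)} val images vs {len(val_masks)} masks"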

Why am I getting attributeerror as ' object has no attribute 'ravel' when I try to interpret image classification model using shap framework?

I'm trying to interpret the inference result of an image classification model. I tried using Grad-CAM, but it wasn't much help, since the heatmap doesn't give pixel-level information. I found a library called shap; according to their documentation (link), it gives pixel-level interpretation. I followed one of their image classification examples and adapted it to my own problem, as shown below:
model = load_model(r'Model.h5', compile=False)

image = cv2.imread(r'img.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test_img = [image]
test_img = [np.expand_dims(img, axis=0) for img in test_img]
test_img = tf.concat(test_img, axis=0)
test_img = tf.image.resize(test_img, [224, 224])
test_img = tf.cast(test_img, tf.float32) / 255.0

def f(X):
    tmp = X.copy()
    # preprocess_input(tmp)
    return model(tmp)

masker = shap.maskers.Image("inpaint_telea", test_img.shape)
explainer = shap.Explainer(f, masker, output_names=class_label_mapping)
shap_values = explainer(test_img, outputs=shap.Explanation.argsort.flip[:1])
shap.image_plot(shap_values)
When I executed the above code I got the error below:
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
c:\research\visualize_cnn\visualize_shap.ipynb Cell 8 in <cell line: 1>()
----> 1 shap_values = explainer(test_img, outputs=shap.Explanation.argsort.flip[:1])
      2 shap.image_plot(shap_values)

File ~\AppData\Roaming\Python\Python38\site-packages\shap\explainers\_partition.py:136, in Partition.__call__(self, max_evals, fixed_context, main_effects, error_bounds, batch_size, outputs, silent, *args)
    132 def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size="auto",
    133              outputs=None, silent=False):
    134     """ Explain the output of the model on the given arguments. """
--> 136     return super().__call__(
    137         *args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds, batch_size=batch_size,
    138         outputs=outputs, silent=silent
    139     )

File ~\AppData\Roaming\Python\Python38\site-packages\shap\explainers\_explainer.py:266, in Explainer.__call__(self, max_evals, main_effects, error_bounds, batch_size, outputs, silent, *args, **kwargs)
    264 feature_names = [[] for _ in range(len(args))]
    265 for row_args in show_progress(zip(*args), num_rows, self.__class__.__name__+" explainer", silent):
--> 266     row_result = self.explain_row(
    267         *row_args, max_evals=max_evals, main_effects=main_effects, error_bounds=error_bounds,
    268         batch_size=batch_size, outputs=outputs, silent=silent, **kwargs
    269     )
    270 values.append(row_result.get("values", None))
    271 output_indices.append(row_result.get("output_indices", None))
...
---> 91 x = x.ravel()
     93 # if mask is not given then we mask the whole image
     94 if mask is None:

AttributeError: 'tensorflow.python.framework.ops.EagerTensor' object has no attribute 'ravel'
The way I do inference is as follows:
image = cv2.imread('img.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test_img = [image]
test_img = [np.expand_dims(img, axis=0) for img in test_img]
test_img = tf.concat(test_img, axis=0)
test_img = tf.image.resize(test_img, [224, 224])
test_img = tf.cast(test_img, tf.float32) / 255.0
y_pred_on_hot = model.predict(test_img) # , batch_size=1)
y_pred = np.argmax(y_pred_on_hot, axis=1)
print(y_pred)
print(class_label_mapping[y_pred[0]])
This will print the class label of testing image.
How can I interpret the test image with the shap library without hitting this error?
Any help or suggestion would be appreciated.
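The traceback suggests shap's image masker calls .ravel() on the input, which NumPy arrays have but TensorFlow EagerTensors do not. A minimal sketch of a workaround, assuming the rest of the setup from the question: convert the tensor to NumPy before handing it to the explainer.
# EagerTensor lacks .ravel(); .numpy() gives a plain ndarray that shap can handle
test_img_np = test_img.numpy()

# shap's documented examples pass the shape of a single image (H, W, C), not the batch shape
masker = shap.maskers.Image("inpaint_telea", test_img_np[0].shape)
explainer = shap.Explainer(f, masker, output_names=class_label_mapping)
shap_values = explainer(test_img_np, outputs=shap.Explanation.argsort.flip[:1])
shap.image_plot(shap_values)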

Error: index 255 is out of bounds for axis 1 with size 2 error in preprocessing

I am applying semantic segmentation and encounter the error "index 255 is out of bounds for axis 1 with size 2" in preprocessing. I have two classes: a building class and background.
Running "x, y = train_img_gen.__next__()" throws this error.
Can anybody help me? I have tried a few things but nothing is working.
"""
Define Generator for images and masks so we can read them directly from the drive.
seed=24
batch_size= 16
n_classes=2
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
from tensorflow.keras.utils import to_categorical
#Use this to preprocess input for transfer learning
BACKBONE = 'resnet34'
preprocess_input = sm.get_preprocessing(BACKBONE)
def preprocess_data(img, mask, num_class):
img = scaler.fit_transform(img.reshape(-1, img.shape[-1])).reshape(img.shape)
img = preprocess_input(img)
mask = to_categorical(mask, num_class)
return (img,mask)
#Define the generator.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def trainGenerator(train_img_path, train_mask_path, num_class):
img_data_gen_args = dict(horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect')
image_datagen = ImageDataGenerator(**img_data_gen_args)
mask_datagen = ImageDataGenerator(**img_data_gen_args)
image_generator = image_datagen.flow_from_directory(
train_img_path,
class_mode = None,
batch_size = batch_size,
seed = seed)
mask_generator = mask_datagen.flow_from_directory(
train_mask_path,
class_mode = None,
color_mode = 'grayscale',
batch_size = batch_size,
seed = seed)
train_generator = zip(image_generator, mask_generator)
for (img, mask) in train_generator:
img, mask = preprocess_data(img, mask, num_class)
yield (img, mask)
train_img_path = "/content/AerialImageDataset/train/data_for_training_and_testing/train_images/"
train_mask_path = "/content/AerialImageDataset/train/data_for_training_and_testing/train_masks/"
train_img_gen = trainGenerator(train_img_path, train_mask_path, num_class=2)
val_img_path = "/content/AerialImageDataset/train/data_for_training_and_testing/val_images/"
val_mask_path = "/content/AerialImageDataset/train/data_for_training_and_testing/val_masks/"
val_img_gen = trainGenerator(val_img_path, val_mask_path, num_class=2)
x, y = train_img_gen.__next__()
"""

ValueError: Failed to find data adapter that can handle input:

I am applying the following code to predict whether an image is cancerous, using a merged model (GoogleNet and ResNet) built with the concatenate function.
However, I get an error at the line validation_steps = len(test_set) in model.fit, even though there are images in both the test set and the training set.
Please help me solve this issue.
from keras.models import load_model
merged_model = load_model('googleResNet.h5')

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory('Images/Train',
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')
test_set = test_datagen.flow_from_directory('Images/Test',
                                            target_size=(224, 224),
                                            batch_size=32,
                                            class_mode='categorical')

r = merged_model.fit(
    [training_set, training_set2],
    validation_data=[test_set, test_set2],
    epochs=5,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)
# loss
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()
plt.savefig('LossVal_loss')

# accuracies
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()
plt.savefig('AccVal_acc')

# Test the model
new_image = plt.imread('img_004.jpg')  # read in the image (3,14,20)

# show the uploaded image
img = plt.imshow(new_image)

from tensorflow.keras.preprocessing import image
img = image.load_img('img_004.jpg', target_size=(224, 224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
predictions = model.predict(img)

list_index = [0, 1]
x = predictions
for i in range(2):
    for j in range(2):
        if x[0][list_index][i] > x[0][list_index][j]:
            temp = list_index[i]
            list_index[i] = list_index[j]
            list_index[j] = temp

# Show the sorted labels in order from highest probability to lowest
print(list_index)
print('')

classification = ['mass', 'calcifications']
i = 0
for i in range(3):
    print(classification[list_index[i]], ';', round(predictions[0][list_index[i]]*100, 2), '%')
Please find the full error trace below:
ValueError                                Traceback (most recent call last)
<ipython-input-21-1294c8191a37> in <module>()
     33     epochs=5,
     34     steps_per_epoch = len(training_set),
---> 35     validation_steps = len(test_set)
     36 )
     37

3 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in select_data_adapter(x, y)
    969       "Failed to find data adapter that can handle "
    970       "input: {}, {}".format(
--> 971           _type_name(x), _type_name(y)))
    972 elif len(adapter_cls) > 1:
    973   raise RuntimeError(

ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'tensorflow.python.keras.preprocessing.image.DirectoryIterator'>"}), <class 'NoneType'>
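The message says Keras has no data adapter for a Python list of DirectoryIterators: fit accepts a single generator or iterator, not a list of them, and passing x=[...] with no y is what produces the NoneType in the error. A hedged sketch of one way around this for a two-input model, zipping the two iterators into a single generator (names taken from the question; training_set2 and test_set2 are assumed to be defined elsewhere and to yield matching labels):
def combine_gen(gen_a, gen_b):
    # Yield ([input_1, input_2], labels) batches, the structure a
    # two-input functional model expects.
    while True:
        x1, y1 = next(gen_a)
        x2, _ = next(gen_b)
        yield [x1, x2], y1

r = merged_model.fit(
    combine_gen(training_set, training_set2),
    validation_data=combine_gen(test_set, test_set2),
    epochs=5,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)
Since the combined generator is infinite, steps_per_epoch and validation_steps are what tell fit where each epoch ends.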

Getting 'list index out of range' error when using a flow_from_dataframe method

I am using Python and TensorFlow, and I created a flow_from_dataframe method (based on open-source code from Kaggle) to build training and validation data generators. My issue arises when I run the code that creates the test_X, test_Y sets using the same flow_from_dataframe method.
Originally this code worked, as the Kaggle kernels show, but for some reason it no longer does for me.
I've checked many Kaggle kernels, including:
https://www.kaggle.com/digitalchaos666/simple-vgg16/notebook and
https://www.kaggle.com/kmader/attention-on-pretrained-vgg16-for-bone-age
Both contain the same code for this problem, but it no longer seems to run. Even if you fork the kernel and run the code as-is, without changing anything, it fails at that point.
train_datagen = ImageDataGenerator(samplewise_center=False,
                                   samplewise_std_normalization=False,
                                   horizontal_flip=True,
                                   vertical_flip=False,
                                   height_shift_range=0.2,
                                   width_shift_range=0.2,
                                   rotation_range=5,
                                   shear_range=0.01,
                                   fill_mode='nearest',
                                   zoom_range=0.25,
                                   preprocessing_function=preprocess_input)

train_gen = flow_from_dataframe(train_datagen, df_train,
                                path_col='path',
                                y_col='bone_age_zscore',
                                target_size=IMG_SIZE,
                                color_mode='rgb',
                                batch_size=32)

valid_gen = flow_from_dataframe(train_datagen, df_valid,
                                path_col='path',
                                y_col='bone_age_zscore',
                                target_size=IMG_SIZE,
                                color_mode='rgb',
                                batch_size=256)

# used a fixed dataset for evaluating the algorithm; the issue lies here
test_X, test_Y = next(flow_from_dataframe(train_datagen,
                                          df_valid,
                                          path_col='path',
                                          y_col='bone_age_zscore',
                                          target_size=IMG_SIZE,
                                          color_mode='rgb',
                                          batch_size=512))
The error message is as follows:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-53-10b59388b3f6> in <module>
36 target_size = IMG_SIZE,
37 color_mode = 'rgb',
---> 38 batch_size = 512)) # one big batch
39
40 print('Complete')
/opt/conda/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py in __next__(self, *args, **kwargs)
102
103 def __next__(self, *args, **kwargs):
--> 104 return self.next(*args, **kwargs)
105
106 def next(self):
/opt/conda/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py in next(self)
114 # The transformation of images is not under thread lock
115 # so it can be done in parallel
--> 116 return self._get_batches_of_transformed_samples(index_array)
117
118 def _get_batches_of_transformed_samples(self, index_array):
/opt/conda/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py in _get_batches_of_transformed_samples(self, index_array)
225 filepaths = self.filepaths
226 for i, j in enumerate(index_array):
--> 227 img = load_img(filepaths[j],
228 color_mode=self.color_mode,
229 target_size=self.target_size,
IndexError: list index out of range
I solved my issue by creating separate dataframes for each generator.
The train_flow and valid_flow both use the flow_from_dataframe method, which takes a seed value. This keeps my training and validation sets the same whenever I run the code, which was a preference I needed.
My test flows, on the other hand, did not take a seed value, so I created a new method for those.
train_idg = ImageDataGenerator(zoom_range=0.2,
                               fill_mode='nearest',
                               rotation_range=25,
                               width_shift_range=0.25,
                               height_shift_range=0.25,
                               vertical_flip=False,
                               horizontal_flip=True,
                               shear_range=0.2,
                               samplewise_center=False,
                               samplewise_std_normalization=False)

val_idg = ImageDataGenerator(width_shift_range=0.25,
                             height_shift_range=0.25,
                             horizontal_flip=True)

test_idg = ImageDataGenerator()

####
def flow_from_dataframe(imgDatGen, df, batch_size, seed, img_size):
    gc.collect()
    gen_img = imgDatGen.flow_from_dataframe(dataframe=df,
                                            x_col='path', y_col='boneage_zscore',
                                            batch_size=batch_size, seed=seed, shuffle=True, class_mode='other',
                                            target_size=img_size, color_mode='rgb',
                                            drop_duplicates=False)
    gen_gender = imgDatGen.flow_from_dataframe(dataframe=df,
                                               x_col='path', y_col='gender',
                                               batch_size=batch_size, seed=seed, shuffle=True, class_mode='other',
                                               target_size=img_size, color_mode='rgb',
                                               drop_duplicates=False)
    while True:
        X1i = gen_img.next()
        X2i = gen_gender.next()
        gc.collect()
        yield [X1i[0], X2i[1]], X1i[1]
####
train_flow = flow_from_dataframe(train_idg, train_df, BATCH_SIZE_TRAIN, SEED, IMG_SIZE)
valid_flow = flow_from_dataframe(val_idg, valid_df, BATCH_SIZE_VAL, SEED, IMG_SIZE)
####
def test_gen_2inputs(imgDatGen, df, batch_size, img_size):
    gc.collect()
    gen_img = imgDatGen.flow_from_dataframe(dataframe=df,
                                            x_col='path', y_col='boneage_zscore',
                                            batch_size=batch_size, shuffle=False, class_mode='other',
                                            target_size=img_size, color_mode='rgb',
                                            drop_duplicates=False)
    gen_gender = imgDatGen.flow_from_dataframe(dataframe=df,
                                               x_col='path', y_col='gender',
                                               batch_size=batch_size, shuffle=False, class_mode='other',
                                               target_size=img_size, color_mode='rgb',
                                               drop_duplicates=False)
    while True:
        X1i = gen_img.next()
        X2i = gen_gender.next()
        gc.collect()
        yield [X1i[0], X2i[1]], X1i[1]

test_flow = test_gen_2inputs(test_idg, test_df, 789, IMG_SIZE)
male_test_flow = test_gen_2inputs(test_idg, male_df, 789, IMG_SIZE)
female_test_flow = test_gen_2inputs(test_idg, female_df, 789, IMG_SIZE)
Thereafter, the code below runs successfully:
train_X, train_Y = next(my_train_flow)
test_X, test_Y = next(test_flow)
male_test_X, male_test_Y = next(male_test_flow)
female_test_X, female_test_Y = next(female_test_flow)
