ValueError: Failed to find data adapter that can handle input

I am applying the following code to predict whether an image is cancerous, using a merged model (GoogLeNet and ResNet). I used the concatenate function to merge the two models.
However, I am getting an error for the line validation_steps = len(test_set) used in model.fit, even though there are images in both the test set and the training set.
Please help me solve this issue.
from keras.models import load_model
merged_model = load_model('googleResNet.h5')

from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('Images/Train',
                                                 target_size = (224, 224),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('Images/Test',
                                            target_size = (224, 224),
                                            batch_size = 32,
                                            class_mode = 'categorical')
r = merged_model.fit(
    [training_set, training_set2],
    validation_data = [test_set, test_set2],
    epochs = 5,
    steps_per_epoch = len(training_set),
    validation_steps = len(test_set)
)
# loss
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()
plt.savefig('LossVal_loss')
# accuracies
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()
plt.savefig('AccVal_acc')
#Test the model
new_image = plt.imread('img_004.jpg') #read in the image (3,14,20)
#show the uploaded image
img = plt.imshow(new_image)
from tensorflow.keras.preprocessing import image
img = image.load_img('img_004.jpg',target_size=(224,224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
predictions = merged_model.predict(img)
list_index = [0, 1]
x = predictions
for i in range(2):
    for j in range(2):
        if x[0][list_index[i]] > x[0][list_index[j]]:
            temp = list_index[i]
            list_index[i] = list_index[j]
            list_index[j] = temp
# Show the sorted labels in order from highest probability to lowest
print(list_index)
print('')
classification = ['mass', 'calcifications']
for i in range(2):
    print(classification[list_index[i]], ';', round(predictions[0][list_index[i]] * 100, 2), '%')
Please find the full error trace below:
ValueError                                Traceback (most recent call last)
<ipython-input-21-1294c8191a37> in <module>()
     33     epochs=5,
     34     steps_per_epoch = len(training_set),
---> 35     validation_steps = len(test_set)
     36 )
     37

3 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in select_data_adapter(x, y)
    969         "Failed to find data adapter that can handle "
    970         "input: {}, {}".format(
--> 971             _type_name(x), _type_name(y)))
    972   elif len(adapter_cls) > 1:
    973     raise RuntimeError(

ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'tensorflow.python.keras.preprocessing.image.DirectoryIterator'>"}), <class 'NoneType'>
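The error itself is raised because model.fit is handed a plain Python list of DirectoryIterator objects, which no Keras data adapter knows how to unpack (the trailing <class 'NoneType'> is the missing y, since generators already carry their labels). For a two-input model, a common workaround is a wrapper generator that yields ([x1, x2], y) batches. A minimal, untested sketch (combine_generators is a hypothetical helper; it assumes training_set/training_set2 and test_set/test_set2 were built with the same seed and shuffle settings so the labels stay aligned):

def combine_generators(gen_a, gen_b):
    # Yield ([input_1, input_2], labels) tuples in the shape model.fit expects.
    while True:
        x1, y1 = next(gen_a)
        x2, _ = next(gen_b)  # labels assumed identical to y1
        yield [x1, x2], y1

train_combined = combine_generators(training_set, training_set2)
val_combined = combine_generators(test_set, test_set2)

r = merged_model.fit(
    train_combined,
    validation_data = val_combined,
    epochs = 5,
    steps_per_epoch = len(training_set),
    validation_steps = len(test_set)
)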

Related

Keras ImageDataGenerator flow_from_dataframe: Found 0 validated image filenames belonging to 0 classes

I am working on 3D image data stored as .npy files with shape (128,128,128). When I try to use the Keras ImageDataGenerator, it throws the following error:
Found 0 validated image filenames belonging to 0 classes.
My code is as follows:
processed_data = r'C:/Users/dush/Desktop/Project/preprocessed'

from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_size = 64
batch_size = 32

data_gen = ImageDataGenerator(horizontal_flip = True,
                              validation_split = 0.2,
                              fill_mode = "nearest",
                              zoom_range = 0.3,
                              width_shift_range = 0.1,
                              height_shift_range = 0.1,
                              rotation_range = 30)

train_gen = data_gen.flow_from_dataframe(
    dataframe = train,
    directory = processed_data,
    x_col = 'id',
    y_col = 'category',
    target_size = (img_size, img_size, img_size),
    batch_size = batch_size,
    class_mode = "binary",
    #validate_filenames = False,
    subset = 'training', seed = 23)  # image_generator.flow_from_dataframe

valid_gen = data_gen.flow_from_dataframe(
    dataframe = train,
    directory = processed_data,
    x_col = 'id',
    y_col = 'category',
    target_size = (img_size, img_size, img_size),
    #validate_filenames = False,
    batch_size = batch_size,
    class_mode = "binary",
    subset = 'validation', shuffle = False, seed = 23)
If I set validate_filenames=False, it finds the filenames, but then I get an error when fitting the model. What am I doing wrong?
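ImageDataGenerator is built for 2D images: flow_from_dataframe validates filenames against PIL-readable image extensions (png, jpg, bmp, ...), so .npy volumes are rejected and you get "0 validated image filenames"; likewise target_size only accepts a 2D (height, width) pair, not a 3D shape. For volumetric data, a custom tf.keras.utils.Sequence is the usual route. A minimal sketch, assuming hypothetical filepaths and labels lists built from your dataframe's id and category columns:

import numpy as np
import tensorflow as tf

class NpyVolumeSequence(tf.keras.utils.Sequence):
    # Serves batches of (128, 128, 128) volumes stored as .npy files.
    def __init__(self, filepaths, labels, batch_size=8):
        self.filepaths = filepaths
        self.labels = labels
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.filepaths) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        x = np.stack([np.load(p) for p in self.filepaths[sl]])
        x = x[..., np.newaxis]  # add channel axis -> (N, 128, 128, 128, 1)
        y = np.asarray(self.labels[sl], dtype=np.float32)
        return x, y

A Sequence like this can be passed straight to model.fit; note that the model then needs Conv3D-style layers that accept 5D input.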

Evaluate U-Net by layer

I come from a medical background and am a newbie in machine learning. I am trying to train a U-Net model with Keras and TensorFlow for image segmentation. However, my loss is NaN throughout and the prediction is all black.
I would like to check the U-Net layer by layer, but I don't know how to feed the data or where to start. What I mean by checking each layer is that I want to feed my images to the first layer, see its output, then move on to the second layer, and so on until the last layer, to see how the output is produced at each layer and to find where the NaN values first appear. I would really appreciate your help.
This is my code.
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras

# Constants
SEED = 42
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_TEST = 16
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)

data_dir = 'data'
data_dir_train = os.path.join(data_dir, 'training')
data_dir_train_image = os.path.join(data_dir_train, 'img')
data_dir_train_mask = os.path.join(data_dir_train, 'mask')
data_dir_test = os.path.join(data_dir, 'test')
data_dir_test_image = os.path.join(data_dir_test, 'img')
data_dir_test_mask = os.path.join(data_dir_test, 'mask')

NUM_TRAIN = 1413
NUM_TEST = 210
NUM_OF_EPOCHS = 10
def create_segmentation_generator_train(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(*data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)

def create_segmentation_generator_test(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(*data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)
def display(display_list):
    plt.figure(figsize=(15,15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]), cmap='gray')
    plt.show()

def show_dataset(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        display([image[0], mask[0]])
def unet(n_levels, initial_features=32, n_blocks=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1):
    # n_blocks = how many convs in each level
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs
    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')
    # downstream
    skips = {}
    for level in range(n_levels):
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)
    # upstream
    for level in reversed(range(n_levels-1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
    # output
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)
    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')
EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE_TRAIN
EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE_TEST

train_generator = create_segmentation_generator_train(data_dir_train_image, data_dir_train_mask, BATCH_SIZE_TRAIN)
test_generator = create_segmentation_generator_test(data_dir_test_image, data_dir_test_mask, BATCH_SIZE_TEST)

model = unet(4)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
model.fit_generator(generator=train_generator, steps_per_epoch=EPOCH_STEP_TRAIN, validation_data=test_generator, validation_steps=EPOCH_STEP_TEST, epochs=NUM_OF_EPOCHS)
def show_prediction(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        pred_mask = model.predict(image)[0] > 0.5
        display([image[0], mask[0], pred_mask])

show_prediction(test_generator, 2)
To investigate your model layer by layer, see the example below of how to show a summary of the model and how to save the model:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# create the input
inputs = keras.Input(shape=(1,))
# create a layer
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
# assemble the model
model = keras.Model(inputs=inputs, outputs=outputs, name="Spesiaali")
# inspect it
model.summary()
# save it
model.save("./model_to_be_investigated_by_someone_else_to_help_you")
...this makes it possible to see the whole model structure for "debugging your AI". If you do not find the solution yourself, add the last line of the example to your own code, put the resulting folder e.g. on GitHub, and ask someone else to look at the structure of your model to help you solve the problem.
(The screenshot in the original answer showed the output of model.summary(), with the output shape of the first dense layer highlighted.)
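Since the question is specifically about finding where the NaNs first appear, a functional Keras model also lets you probe every intermediate activation directly. A minimal sketch, assuming model is the compiled U-Net from the question and image is one input batch taken from the generator:

import numpy as np
from tensorflow import keras

# Build a probe model that returns the output of every layer at once.
probe = keras.Model(inputs=model.input,
                    outputs=[layer.output for layer in model.layers])
activations = probe.predict(image)  # image: one batch, e.g. shape (N, 512, 512, 1)

# Report the first layer whose activations contain NaN or Inf.
for layer, act in zip(model.layers, activations):
    bad = np.isnan(act).any() or np.isinf(act).any()
    print(f'{layer.name:30s} {str(act.shape):24s} nan/inf: {bad}')
    if bad:
        break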

TypeError: ('Keyword argument not understood:', 'input') in Keras

I am trying to implement EfficientNetB0 to create an image classifier. For now I am building the model for binary classification only.
Using Keras==2.4.3, tensorflow==2.3.1 and Python 3.6 on Ubuntu 18.04.
Code for EfficientNetB0:
import os
import zipfile
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
import matplotlib.pyplot as plt

local_zip = '/tmp/cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()

base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

# Set up the matplotlib fig, and size it to fit 4x4 pics
import matplotlib.image as mpimg
nrows = 4
ncols = 4
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)
pic_index = 100
train_cat_fnames = os.listdir(train_cats_dir)
train_dog_fnames = os.listdir(train_dogs_dir)
next_cat_pix = [os.path.join(train_cats_dir, fname)
                for fname in train_cat_fnames[pic_index-8:pic_index]]
next_dog_pix = [os.path.join(train_dogs_dir, fname)
                for fname in train_dog_fnames[pic_index-8:pic_index]]
for i, img_path in enumerate(next_cat_pix + next_dog_pix):
    # Set up subplot; subplot indices start at 1
    sp = plt.subplot(nrows, ncols, i + 1)
    sp.axis('Off')  # Don't show axes (or gridlines)
    img = mpimg.imread(img_path)
    #plt.imshow(img)
    #plt.show()

# Add our data-augmentation parameters to ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255., rotation_range = 40, width_shift_range = 0.2, height_shift_range = 0.2, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1.0/255.)
train_generator = train_datagen.flow_from_directory(train_dir, batch_size = 20, class_mode = 'binary', target_size = (224, 224))
validation_generator = test_datagen.flow_from_directory(validation_dir, batch_size = 20, class_mode = 'binary', target_size = (224, 224))

base_model = efn.EfficientNetB0(input_shape = (224, 224, 3), include_top = False, weights = 'imagenet')
for layer in base_model.layers:
    layer.trainable = False

from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D
from keras import backend as K

x = base_model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = layers.Dropout(0.5)(x)
predictions = Dense(1, activation="sigmoid")(x)
model_final = Model(input = base_model.input, output = predictions)
model_final.compile(optimizers.rmsprop(lr=0.0001, decay=1e-6), loss='binary_crossentropy', metrics=['accuracy'])
eff_history = model_final.fit_generator(train_generator, validation_data = validation_generator, steps_per_epoch = 100, epochs = 10)
The error I got:
Traceback (most recent call last):
  File "code_efficientNet.py", line 92, in <module>
    model_final = Model(input = base_model.input, output = predictions)
  File "/home/ubuntu/classification/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py", line 457, in _method_wrapper
    result = method(self, *args, **kwargs)
  File "/home/ubuntu/classification/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 262, in __init__
    'name', 'autocast'})
  File "/home/ubuntu/classification/lib/python3.6/site-packages/tensorflow/python/keras/utils/generic_utils.py", line 778, in validate_kwargs
    raise TypeError(error_message, kwarg)
TypeError: ('Keyword argument not understood:', 'input')
I referred to this link to write the code.
You should use x = model_final.output instead of x = model.output, since you named the variable model_final.
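For completeness, the TypeError itself comes from the Model(...) call: in tf.keras 2.x the functional Model constructor only accepts the plural keyword arguments, so that line should read:

model_final = Model(inputs = base_model.input, outputs = predictions)

(The legacy input=/output= spelling was only accepted by very old standalone Keras versions.)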

Getting 'list index out of range' error when using a flow_from_dataframe method

I am using Python and TensorFlow and have created a flow_from_dataframe method (based on open source code from Kaggle) to generate training and validation data generators. My issue is when I run the code for creating test_X, test_Y sets using the same flow_from_dataframe method.
Originally, this code worked, as the Kaggle code shows, but for some reason it does not seem to work for me.
I've checked many Kaggle kernels, some of which are:
https://www.kaggle.com/digitalchaos666/simple-vgg16/notebook and
https://www.kaggle.com/kmader/attention-on-pretrained-vgg16-for-bone-age
Both have the same code for the problem, but it does not seem to run now. Even if you fork the kernel and just run the code as-is, without changing anything, it seems to fail at this point:
train_datagen = ImageDataGenerator(samplewise_center=False,
                                   samplewise_std_normalization=False,
                                   horizontal_flip = True,
                                   vertical_flip = False,
                                   height_shift_range = 0.2,
                                   width_shift_range = 0.2,
                                   rotation_range = 5,
                                   shear_range = 0.01,
                                   fill_mode = 'nearest',
                                   zoom_range = 0.25,
                                   preprocessing_function = preprocess_input)

train_gen = flow_from_dataframe(train_datagen, df_train,
                                path_col = 'path',
                                y_col = 'bone_age_zscore',
                                target_size = IMG_SIZE,
                                color_mode = 'rgb',
                                batch_size = 32)

valid_gen = flow_from_dataframe(train_datagen, df_valid,
                                path_col = 'path',
                                y_col = 'bone_age_zscore',
                                target_size = IMG_SIZE,
                                color_mode = 'rgb',
                                batch_size = 256)

# used a fixed dataset for evaluating the algorithm; the issue lies here
test_X, test_Y = next(flow_from_dataframe(train_datagen,
                                          df_valid,
                                          path_col = 'path',
                                          y_col = 'bone_age_zscore',
                                          target_size = IMG_SIZE,
                                          color_mode = 'rgb',
                                          batch_size = 512))
The error message is as follows:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-53-10b59388b3f6> in <module>
36 target_size = IMG_SIZE,
37 color_mode = 'rgb',
---> 38 batch_size = 512)) # one big batch
39
40 print('Complete')
/opt/conda/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py in __next__(self, *args, **kwargs)
102
103 def __next__(self, *args, **kwargs):
--> 104 return self.next(*args, **kwargs)
105
106 def next(self):
/opt/conda/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py in next(self)
114 # The transformation of images is not under thread lock
115 # so it can be done in parallel
--> 116 return self._get_batches_of_transformed_samples(index_array)
117
118 def _get_batches_of_transformed_samples(self, index_array):
/opt/conda/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py in _get_batches_of_transformed_samples(self, index_array)
225 filepaths = self.filepaths
226 for i, j in enumerate(index_array):
--> 227 img = load_img(filepaths[j],
228 color_mode=self.color_mode,
229 target_size=self.target_size,
IndexError: list index out of range
I solved my issue by creating separate dataframes for each generator.
The train_flow and valid_flow both use the flow_from_dataframe method, which takes in a seed value. This keeps my training and validation sets the same on every run, which was a preference I needed.
My test flows, on the other hand, did not take in a seed value, so I created a new method for those.
train_idg = ImageDataGenerator(zoom_range=0.2,
                               fill_mode='nearest',
                               rotation_range=25,
                               width_shift_range=0.25,
                               height_shift_range=0.25,
                               vertical_flip=False,
                               horizontal_flip=True,
                               shear_range = 0.2,
                               samplewise_center=False,
                               samplewise_std_normalization=False)

val_idg = ImageDataGenerator(width_shift_range=0.25,
                             height_shift_range=0.25,
                             horizontal_flip=True)

test_idg = ImageDataGenerator()

####

def flow_from_dataframe(imgDatGen, df, batch_size, seed, img_size):
    gc.collect()
    gen_img = imgDatGen.flow_from_dataframe(dataframe=df,
        x_col='path', y_col='boneage_zscore',
        batch_size=batch_size, seed=seed, shuffle=True, class_mode='other',
        target_size=img_size, color_mode='rgb',
        drop_duplicates=False)
    gen_gender = imgDatGen.flow_from_dataframe(dataframe=df,
        x_col='path', y_col='gender',
        batch_size=batch_size, seed=seed, shuffle=True, class_mode='other',
        target_size=img_size, color_mode='rgb',
        drop_duplicates=False)
    while True:
        X1i = gen_img.next()
        X2i = gen_gender.next()
        gc.collect()
        yield [X1i[0], X2i[1]], X1i[1]

####

train_flow = flow_from_dataframe(train_idg, train_df, BATCH_SIZE_TRAIN, SEED, IMG_SIZE)
valid_flow = flow_from_dataframe(val_idg, valid_df, BATCH_SIZE_VAL, SEED, IMG_SIZE)

####

def test_gen_2inputs(imgDatGen, df, batch_size, img_size):
    gc.collect()
    gen_img = imgDatGen.flow_from_dataframe(dataframe=df,
        x_col='path', y_col='boneage_zscore',
        batch_size=batch_size, shuffle=False, class_mode='other',
        target_size=img_size, color_mode='rgb',
        drop_duplicates=False)
    gen_gender = imgDatGen.flow_from_dataframe(dataframe=df,
        x_col='path', y_col='gender',
        batch_size=batch_size, shuffle=False, class_mode='other',
        target_size=img_size, color_mode='rgb',
        drop_duplicates=False)
    while True:
        X1i = gen_img.next()
        X2i = gen_gender.next()
        gc.collect()
        yield [X1i[0], X2i[1]], X1i[1]

test_flow = test_gen_2inputs(test_idg, test_df, 789, IMG_SIZE)
male_test_flow = test_gen_2inputs(test_idg, male_df, 789, IMG_SIZE)
female_test_flow = test_gen_2inputs(test_idg, female_df, 789, IMG_SIZE)
Thereafter, the code below runs successfully:
train_X, train_Y = next(train_flow)
test_X, test_Y = next(test_flow)
male_test_X, male_test_Y = next(male_test_flow)
female_test_X, female_test_Y = next(female_test_flow)

How to import a trained model to predict a single image?

I trained a CNN model with Keras and saved it with model.save('model.h5').
But I want to test my model on a single image, and I don't know how to feed my own image to the model.
# Image generators
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    shuffle=True,
    batch_size=batch_size,
    class_mode='categorical'
)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(image_size, image_size),
    batch_size=batch_size,
    shuffle=True,
    class_mode='categorical'
)

# Fit model
history = model.fit_generator(train_generator,
                              steps_per_epoch=(nb_train_samples // batch_size),
                              epochs=nb_epoch,
                              validation_data=validation_generator,
                              callbacks=[early_stopping],  # save_best_model],
                              validation_steps=(nb_validation_samples // batch_size)
                              )

# Save model
model.save_weights('full_model_weights.h5')
model.save('model.h5')
I am new to Keras. How can I feed an image to my model and classify it into a certain class?
The shape of the input:
if K.image_data_format() == 'channels_first':
    input_shape = (3, image_size, image_size)
else:
    input_shape = (image_size, image_size, 3)
My code for importing an image:
from keras.models import load_model
m = load_model("model.h5")
if K.image_data_format() == 'channels_first':
    input_shape = (3, image_size, image_size)
else:
    input_shape = (image_size, image_size, 3)
cloudy_pic = "./Weather/weather_database/cloudy/4152.jpg"
im = Image.open(cloudy_pic).convert('RGB')
data = np.array(im, dtype=np.float32)
data = np.reshape(500, 500, 3)
pre = m.predict_classes(data)
pre
And the error:
AttributeError: 'int' object has no attribute 'reshape'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-30-ebc72e185819> in <module>()
10 im=Image.open(cloudy_pic).convert('RGB')
11 data=np.array(im,dtype=np.float32)
---> 12 data=np.reshape(500, 500,3)
13 pre=m.predict_classes(data)
14 pre
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in reshape(a, newshape, order)
230 [5, 6]])
231 """
--> 232 return _wrapfunc(a, 'reshape', newshape, order=order)
233
234
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
65 # a downstream library like 'pandas'.
66 except (AttributeError, TypeError):
---> 67 return _wrapit(obj, method, *args, **kwds)
68
69
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in _wrapit(obj, method, *args, **kwds)
45 except AttributeError:
46 wrap = None
---> 47 result = getattr(asarray(obj), method)(*args, **kwds)
48 if wrap:
49 if not isinstance(result, mu.ndarray):
ValueError: cannot reshape array of size 1 into shape (500,)
You can resize your image before converting it to a NumPy array.
img = Image.open(img_path)
img = img.resize((image_size,image_size))
img = np.array(img)
img = img / 255.0
img = img.reshape(1,image_size,image_size,3)
m.predict_classes(img)
The input shape of your model must be [None,image_size,image_size,3], or [None,3,image_size,image_size] if channels_first.
You can do something like this:
model = load_model('model.h5')
img = ...  # YOUR IMAGE (let's say it's 32x32x1)
image_x = 32
image_y = 32
img = cv2.resize(img, (image_x, image_y))
img = np.array(img, dtype=np.float32)
img = np.reshape(img, (-1, image_x, image_y, 1))
pred_probab = model.predict(img)[0]
pred_class = list(pred_probab).index(max(pred_probab))
print(max(pred_probab), pred_class)
# code for predicting an image stored locally against a trained model
# my local image is 28 x 28 already
import numpy as np
from PIL import Image
from keras.preprocessing import image

img = image.load_img('file path include full file name')  # , target_size=(32,32))
img = image.img_to_array(img)
img = img.reshape((1,) + img.shape)
# img = img/255
img = img.reshape(-1, 784)
img_class = model.predict_classes(img)
# this model above was already trained
# code from https://machinelearningmastery.com/handwritten-digit-recognition-using-convolutional-neural-networks-python-keras/
prediction = img_class[0]
classname = img_class[0]
print("Class: ", classname)
