What are dimensions 37 and 50 in TensorFlow? - python

Hello, I am working on semantic segmentation with the DeepLabV3+ architecture and TensorFlow (Keras). It worked well with another dataset, but now I want to train on my own data. In the first step, loading the data, I get a strange error. The failing function is
tf.data.Dataset.from_tensor_slices
and the error is:
ValueError                                Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_20192\306109049.py in <module>
     57
     58 train_dataset = data_generator(train_images, train_masks)
---> 59 val_dataset = data_generator(val_images, val_masks)
     60
     61 print("Train Dataset:", train_dataset)
~\AppData\Local\Temp\ipykernel_20192\306109049.py in data_generator(image_list, mask_list)
     50
     51 def data_generator(image_list, mask_list):
---> 52     dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
     53     dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
     54     dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in from_tensor_slices(tensors, name)
    812       Dataset: A `Dataset`.
    813     """
--> 814     return TensorSliceDataset(tensors, name=name)
    815
    816 class _GeneratorState(object):
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py in __init__(self, element, is_files, name)
   4720       batch_dim.assert_is_compatible_with(
   4721           tensor_shape.Dimension(
-> 4722               tensor_shape.dimension_value(t.get_shape()[0])))
   4723
   4724     variant_tensor = gen_dataset_ops.tensor_slice_dataset(
~\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\framework\tensor_shape.py in assert_is_compatible_with(self, other)
    298     if not self.is_compatible_with(other):
    299       raise ValueError("Dimensions %s and %s are not compatible" %
--> 300                        (self, other))
    301
    302   def merge_with(self, other):
ValueError: Dimensions 37 and 50 are not compatible
The error is "Dimensions 37 and 50 are not compatible". I searched for it but could not find a solution.
Code:
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = r'C:/Users/Joshi/Desktop/CARLA_0.9.13/WindowsNoEditor/PythonAPI/examples/out'
NUM_TRAIN_IMAGES = 250
NUM_VAL_IMAGES = 50
train_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[:NUM_TRAIN_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[:NUM_TRAIN_IMAGES]
val_images = sorted(glob(os.path.join(DATA_DIR, "out/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
val_masks = sorted(glob(os.path.join(DATA_DIR, "Seman/*")))[
    NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
def read_image(image_path, mask=False):
    image = tf.io.read_file(image_path)
    if mask:
        image = tf.image.decode_png(image, channels=1)
        image.set_shape([None, None, 1])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
    else:
        image = tf.image.decode_png(image, channels=3)
        image.set_shape([None, None, 3])
        image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
        image = image / 127.5 - 1
    return image
def load_data(image_list, mask_list):
    image = read_image(image_list)
    mask = read_image(mask_list, mask=True)
    return image, mask
def data_generator(image_list, mask_list):
    dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
    dataset = dataset.map(load_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
    return dataset
train_dataset = data_generator(train_images, train_masks)
val_dataset = data_generator(val_images, val_masks)
print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)

It turned out the two file lists simply had different lengths (37 entries in one, 50 in the other), so from_tensor_slices could not pair them: the "dimensions" in the message are the lengths of the image and mask lists, not the pixel size of any picture.
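If you hit the same error, a minimal sanity check (a sketch reusing the variable names from the question) is to compare the two list lengths before building the dataset:
# from_tensor_slices slices along the first dimension, so the image and
# mask lists must have the same length (37 vs. 50 in the error above)
assert len(val_images) == len(val_masks), (
    f"{len(val_images)} images vs. {len(val_masks)} masks")
val_dataset = data_generator(val_images, val_masks)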

Related

Why am I getting AttributeError: 'EagerTensor' object has no attribute 'ravel' when I try to interpret an image classification model using the shap framework?

I'm trying to interpret the inference result of an image classification model. I tried using Grad-CAM, but it wasn't much help, as the heatmap doesn't give pixel-level information. I found a library called shap; according to its documentation (link), it gives pixel-level interpretations. I followed one of its image classification examples and adapted it to my own problem, as shown below:
model = load_model(r'Model.h5', compile=False)
image = cv2.imread(r'img.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test_img = [image]
test_img = [np.expand_dims(img, axis=0) for img in test_img]
test_img = tf.concat(test_img, axis=0)
test_img = tf.image.resize(test_img, [224, 224])
test_img = tf.cast(test_img, tf.float32) / 255.0
def f(X):
    tmp = X.copy()
    # preprocess_input(tmp)
    return model(tmp)
masker = shap.maskers.Image("inpaint_telea", test_img.shape)
explainer = shap.Explainer(f, masker, output_names=class_label_mapping)
shap_values = explainer(test_img,outputs=shap.Explanation.argsort.flip[:1])
shap.image_plot(shap_values)
When I executed the above code, I got the error below:
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
c:\research\visualize_cnn\visualize_shap.ipynb Cell 8 in <cell line: 1>()
----> 1 shap_values = explainer(test_img, outputs=shap.Explanation.argsort.flip[:1])
      2 shap.image_plot(shap_values)
File ~\AppData\Roaming\Python\Python38\site-packages\shap\explainers\_partition.py:136, in Partition.__call__(self, max_evals, fixed_context, main_effects, error_bounds, batch_size, outputs, silent, *args)
    132 def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size="auto",
    133              outputs=None, silent=False):
    134     """ Explain the output of the model on the given arguments.
    135     """
--> 136     return super().__call__(
    137         *args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds, batch_size=batch_size,
    138         outputs=outputs, silent=silent
    139     )
File ~\AppData\Roaming\Python\Python38\site-packages\shap\explainers\_explainer.py:266, in Explainer.__call__(self, max_evals, main_effects, error_bounds, batch_size, outputs, silent, *args, **kwargs)
    264 feature_names = [[] for _ in range(len(args))]
    265 for row_args in show_progress(zip(*args), num_rows, self.__class__.__name__ + " explainer", silent):
--> 266     row_result = self.explain_row(
    267         *row_args, max_evals=max_evals, main_effects=main_effects, error_bounds=error_bounds,
    268         batch_size=batch_size, outputs=outputs, silent=silent, **kwargs
    269     )
    270     values.append(row_result.get("values", None))
    271     output_indices.append(row_result.get("output_indices", None))
...
---> 91 x = x.ravel()
     93 # if mask is not given then we mask the whole image
     94 if mask is None:
AttributeError: 'tensorflow.python.framework.ops.EagerTensor' object has no attribute 'ravel'
The way I do inference is as below:
image = cv2.imread('img.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test_img = [image]
test_img = [np.expand_dims(img, axis=0) for img in test_img]
test_img = tf.concat(test_img, axis=0)
test_img = tf.image.resize(test_img, [224, 224])
test_img = tf.cast(test_img, tf.float32) / 255.0
y_pred_on_hot = model.predict(test_img) # , batch_size=1)
y_pred = np.argmax(y_pred_on_hot, axis=1)
print(y_pred)
print(class_label_mapping[y_pred[0]])
This prints the class label of the test image.
How can I interpret the test image with the shap library without hitting this error?
Any help or suggestion would be appreciated.
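The traceback shows that shap's image masker calls x.ravel(), which NumPy arrays have but TensorFlow EagerTensors do not. A minimal sketch of one workaround (assuming the rest of the setup above stays the same) is to convert the tensor to NumPy before handing it to shap:
test_img_np = test_img.numpy()  # EagerTensor -> NumPy array, which does have .ravel()
masker = shap.maskers.Image("inpaint_telea", test_img_np[0].shape)  # masker takes a single image's (H, W, C) shape
explainer = shap.Explainer(f, masker, output_names=class_label_mapping)
shap_values = explainer(test_img_np, outputs=shap.Explanation.argsort.flip[:1])
shap.image_plot(shap_values)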

InvalidArgumentError: Cannot add tensor to the batch: number of elements does not match

I have a CSV file which looks like this:
I want to load the image (from df['Image_location']) and text (from df['Content']) together, so I did the following operations:
df = pd.read_csv(csv_data_dir, encoding= 'cp1252')
features = df[['Content', 'Image_location']]
labels = df['Sentiment']
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
def process_path(x):
    content, image_path = x[0], x[1]
    print(image_path)
    img = tf.io.read_file(image_path)
    img = tf.io.decode_jpeg(img, channels=3)
    return content, img
dataset = dataset.map(lambda x, y: (process_path(x), y))
dataset = dataset.batch(32, drop_remainder = True)
Upon running the training loop:
for step, (x, y) in enumerate(dataset):
    print(f"Step:{step}")
InvalidArgumentError                      Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_19112/3450653832.py in <module>
      1 import matplotlib.pyplot as plt
----> 2 for step , (x, y) in enumerate(dataset):
      3     print(f"Step:{step}")
      4     content = x[0]
      5     image = x[1]
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py in __next__(self)
    798   def __next__(self):
    799     try:
--> 800       return self._next_internal()
    801     except errors.OutOfRangeError:
    802       raise StopIteration
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py in _next_internal(self)
    781     # to communicate that there is no more data to iterate over.
    782     with context.execution_mode(context.SYNC):
--> 783       ret = gen_dataset_ops.iterator_get_next(
    784           self._iterator_resource,
    785           output_types=self._flat_output_types,
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\ops\gen_dataset_ops.py in iterator_get_next(iterator, output_types, output_shapes, name)
   2842       return _result
   2843     except _core._NotOkStatusException as e:
-> 2844       _ops.raise_from_not_ok_status(e, name)
   2845     except _core._FallbackException:
   2846       pass
~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\framework\ops.py in raise_from_not_ok_status(e, name)
   7105 def raise_from_not_ok_status(e, name):
   7106   e.message += (" name: " + name if name is not None else "")
-> 7107   raise core._status_to_exception(e) from None  # pylint: disable=protected-access
   7108
   7109
InvalidArgumentError: Cannot add tensor to the batch: number of elements does not match. Shapes are: [tensor]: [344,500,3], [batch]: [500,333,3] [Op:IteratorGetNext]
Any idea where I'm going wrong, or how to batch this dataset properly? Without dataset = dataset.batch(32, drop_remainder=True), the code works fine.
I can imagine that not all images have the same shape and that is why you are getting mismatches when batch_size > 1. I would recommend resizing all images to the same size. Here is an example:
def process_path(x):
    content, image_path = x[0], x[1]
    img = tf.io.read_file(image_path)
    img = tf.io.decode_png(img, channels=3)
    img = tf.image.resize(img, [120, 120], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return content, img
Otherwise, you will have to sort your batches by image size and also take care of the labels.
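For completeness, a minimal sketch of the resulting pipeline (reusing the names from the question; the 120x120 target size is the arbitrary choice from the snippet above):
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.map(lambda x, y: (process_path(x), y))
dataset = dataset.batch(32, drop_remainder=True)  # every image is now [120, 120, 3], so batching works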

Tensorflow error. TypeError: Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn

I am trying to run this on Amazon SageMaker, but I am getting this error; when I run it on my local machine, it works fine.
This is my code:
import tensorflow as tf
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
import numpy as np
import PIL.Image
import time
import functools
def tensor_to_image(tensor):
    tensor = tensor*255
    tensor = np.array(tensor, dtype=np.uint8)
    if np.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)
content_path = tf.keras.utils.get_file('YellowLabradorLooking_nw4.jpg', 'https://example.com/IMG_20200216_163015.jpg')
style_path = tf.keras.utils.get_file('kandinsky3.jpg', 'https://example.com/download+(2).png')
def load_img(path_to_img):
    max_dim = 512
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    long_dim = max(shape)
    scale = max_dim / long_dim
    new_shape = tf.cast(shape * scale, tf.int32)
    img = tf.image.resize(img, new_shape)
    img = img[tf.newaxis, :]
    return img
def imshow(image, title=None):
    if len(image.shape) > 3:
        image = tf.squeeze(image, axis=0)
    plt.imshow(image)
    if title:
        plt.title(title)
content_image = load_img(content_path)
style_image = load_img(style_path)
plt.subplot(1, 2, 1)
imshow(content_image, 'Content Image')
plt.subplot(1, 2, 2)
imshow(style_image, 'Style Image')
import tensorflow_hub as hub
hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/1')
stylized_image = hub_module(tf.constant(content_image), tf.constant(style_image))[0]
tensor_to_image(stylized_image)
file_name = 'stylized-image5.png'
tensor_to_image(stylized_image).save(file_name)
This is the exact error I get:
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-24-c47a4db4880c> in <module>()
     53
     54
---> 55 content_image = load_img(content_path)
     56 style_image = load_img(style_path)
     57
in load_img(path_to_img)
     34
     35     shape = tf.cast(tf.shape(img)[:-1], tf.float32)
---> 36     long_dim = max(shape)
     37     scale = max_dim / long_dim
     38
~/anaconda3/envs/amazonei_tensorflow_p36/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in __iter__(self)
    475     if not context.executing_eagerly():
    476       raise TypeError(
--> 477           "Tensor objects are only iterable when eager execution is "
    478           "enabled. To iterate over this tensor use tf.map_fn.")
    479     shape = self._shape_tuple()
TypeError: Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn.
Your error is being raised in this function load_img:
def load_img(path_to_img):
    max_dim = 512
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    long_dim = max(shape)
    scale = max_dim / long_dim
    new_shape = tf.cast(shape * scale, tf.int32)
    img = tf.image.resize(img, new_shape)
    img = img[tf.newaxis, :]
    return img
Specifically, this line:
long_dim = max(shape)
You are passing a tensor to the built-in Python max function in graph execution mode. You can only iterate through tensors in eager-execution mode. You probably want to use tf.reduce_max instead:
long_dim = tf.reduce_max(shape)
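Putting it together, the corrected load_img looks like this (a minimal sketch; everything except the max line is unchanged from the question):
def load_img(path_to_img):
    max_dim = 512
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    long_dim = tf.reduce_max(shape)  # graph-safe replacement for max(shape)
    scale = max_dim / long_dim
    new_shape = tf.cast(shape * scale, tf.int32)
    img = tf.image.resize(img, new_shape)
    img = img[tf.newaxis, :]
    return img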

How to use my own picture to generate adversarial example using FGSM?

I am trying to generate an adversarial example using FGSM, and the code I am using is from the Google Colab notebook (https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb#scrollTo=wpYrQ4OQSYWk). The kernel of my Jupyter notebook is Python 3.7. However, when I try to use my own picture to generate the adversarial example, it fails every time. The only part I changed is "image_path = 'cat.jpg'". I searched for the errors on Google, but there seem to be no similar cases. Could you please give me a hand? Thanks a lot!
Here are my codes:
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (8, 8)
mpl.rcParams['axes.grid'] = False
pretrained_model = tf.keras.applications.MobileNetV2(include_top=True,
                                                     weights='imagenet')
pretrained_model.trainable = False
# ImageNet labels
decode_predictions = tf.keras.applications.mobilenet_v2.decode_predictions
# Helper function to preprocess the image so that it can be inputted in MobileNetV2
def preprocess(image):
    image = tf.cast(image, tf.float32)
    image = tf.image.resize(image, (224, 224))
    image = tf.keras.applications.mobilenet_v2.preprocess_input(image)
    image = image[None, ...]
    return image
# Helper function to extract labels from probability vector
def get_imagenet_label(probs):
    return decode_predictions(probs, top=1)[0][0]
image_path = 'cat.jpg'
image_raw = tf.io.read_file(image_path)
image = tf.image.decode_image(image_raw)
image = preprocess(image)
image_probs = pretrained_model.predict(image)
plt.figure()
plt.imshow(image[0]*0.5+0.5) # To change [-1, 1] to [0,1]
_, image_class, class_confidence = get_imagenet_label(image_probs)
plt.title('{} : {:.2f}% Confidence'.format(image_class, class_confidence*100))
plt.show()
loss_object = tf.keras.losses.CategoricalCrossentropy()
def create_adversarial_pattern(input_image, input_label):
    with tf.GradientTape() as tape:
        tape.watch(input_image)
        prediction = pretrained_model(input_image)
        loss = loss_object(input_label, prediction)
    # Get the gradients of the loss w.r.t to the input image.
    gradient = tape.gradient(loss, input_image)
    # Get the sign of the gradients to create the perturbation
    signed_grad = tf.sign(gradient)
    return signed_grad
# Get the input label of the image.
labrador_retriever_index = 208
label = tf.one_hot(labrador_retriever_index, image_probs.shape[-1])
label = tf.reshape(label, (1, image_probs.shape[-1]))
perturbations = create_adversarial_pattern(image, label)
plt.imshow(perturbations[0]*0.5+0.5); # To change [-1, 1] to [0,1]
def display_images(image, description):
    _, label, confidence = get_imagenet_label(pretrained_model.predict(image))
    plt.figure()
    plt.imshow(image[0]*0.5+0.5)
    plt.title('{} \n {} : {:.2f}% Confidence'.format(description,
                                                     label, confidence*100))
    plt.show()
epsilons = [0, 0.02, 0.2, 0.4, 0.8, 0.9, 1.0]
descriptions = [('Epsilon = {:0.3f}'.format(eps) if eps else 'Input')
                for eps in epsilons]
for i, eps in enumerate(epsilons):
    adv_x = image + eps*perturbations
    adv_x = tf.clip_by_value(adv_x, -1, 1)
    display_images(adv_x, descriptions[i])
And the errors are:
---------------------------------------------------------------------------
_FallbackException                        Traceback (most recent call last)
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py in read_file(filename, name)
    601         _ctx._context_handle, _ctx._thread_local_data.device_name, "ReadFile",
--> 602         name, _ctx._post_execution_callbacks, filename)
    603       return _result
_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.

During handling of the above exception, another exception occurred:

UnicodeDecodeError                        Traceback (most recent call last)
<ipython-input-18-4edeac21c176> in <module>
     27
     28 image_path = 'cat.jpg'
---> 29 image_raw = tf.io.read_file(image_path)
     30 image = tf.image.decode_image(image_raw)
     31
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py in read_file(filename, name)
    605     try:
    606       return read_file_eager_fallback(
--> 607           filename, name=name, ctx=_ctx)
    608     except _core._SymbolicException:
    609       pass  # Add nodes to the TensorFlow graph.
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py in read_file_eager_fallback(filename, name, ctx)
    654   _attrs = None
    655   _result = _execute.execute(b"ReadFile", 1, inputs=_inputs_flat,
--> 656                              attrs=_attrs, ctx=_ctx, name=name)
    657   _execute.record_gradient(
    658       "ReadFile", _inputs_flat, _attrs, _result, name)
D:\anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     59     tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
     60                                                op_name, inputs, attrs,
---> 61                                                num_outputs)
     62   except core._NotOkStatusException as e:
     63     if name is not None:
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd5 in position 57: invalid continuation byte
I tested your code with some of my images and everything is OK.
It's just about your image's encoding.
Run it again with some other images, or change the image's encoding.
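If you want to rule out a bad encoding, one hedged way to do that (assuming Pillow is available; cat_reencoded.jpg is a name chosen here just for illustration) is to re-save the picture as a clean RGB JPEG and point the script at the new file:
from PIL import Image

img = Image.open('cat.jpg').convert('RGB')   # decode whatever is actually in the file
img.save('cat_reencoded.jpg', format='JPEG') # write it back out as a plain JPEG
# then in the original script:
# image_path = 'cat_reencoded.jpg'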

How to import a trained model to predict a single image?

I trained a CNN model with Keras and saved it with model.save('model.h5').
Now I want to test the model on a single image, but I don't know how to feed my own image to it.
# Image generators
train_datagen = ImageDataGenerator(rescale= 1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    shuffle=True,
    batch_size=batch_size,
    class_mode='categorical'
)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(image_size, image_size),
    batch_size=batch_size,
    shuffle=True,
    class_mode='categorical'
)
# Fit model
history = model.fit_generator(train_generator,
                              steps_per_epoch=(nb_train_samples // batch_size),
                              epochs=nb_epoch,
                              validation_data=validation_generator,
                              callbacks=[early_stopping],  # save_best_model],
                              validation_steps=(nb_validation_samples // batch_size)
                              )
# Save model
model.save_weights('full_model_weights.h5')
model.save('model.h5')
I am new to Keras. How can I feed an image to my model and classify it into a certain class?
The shape of the input:
if K.image_data_format() == 'channels_first':
    input_shape = (3, image_size, image_size)
else:
    input_shape = (image_size, image_size, 3)
My code for importing an image:
from keras.models import load_model
m = load_model("model.h5")
if K.image_data_format() == 'channels_first':
    input_shape = (3, image_size, image_size)
else:
    input_shape = (image_size, image_size, 3)
cloudy_pic = "./Weather/weather_database/cloudy/4152.jpg"
im = Image.open(cloudy_pic).convert('RGB')
data = np.array(im, dtype=np.float32)
data = np.reshape(500, 500, 3)
pre = m.predict_classes(data)
pre
And the error:
AttributeError: 'int' object has no attribute 'reshape'

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
<ipython-input-30-ebc72e185819> in <module>()
     10 im=Image.open(cloudy_pic).convert('RGB')
     11 data=np.array(im,dtype=np.float32)
---> 12 data=np.reshape(500, 500,3)
     13 pre=m.predict_classes(data)
     14 pre
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in reshape(a, newshape, order)
    230            [5, 6]])
    231     """
--> 232     return _wrapfunc(a, 'reshape', newshape, order=order)
    233
    234
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
     65     # a downstream library like 'pandas'.
     66     except (AttributeError, TypeError):
---> 67         return _wrapit(obj, method, *args, **kwds)
     68
     69
~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/fromnumeric.py in _wrapit(obj, method, *args, **kwds)
     45     except AttributeError:
     46         wrap = None
---> 47     result = getattr(asarray(obj), method)(*args, **kwds)
     48     if wrap:
     49         if not isinstance(result, mu.ndarray):
ValueError: cannot reshape array of size 1 into shape (500,)
You can resize your image before converting it to a NumPy array:
img = Image.open(img_path)
img = img.resize((image_size,image_size))
img = np.array(img)
img = img / 255.0
img = img.reshape(1,image_size,image_size,3)
m.predict_classes(img)
The input shape of your model must be [None,image_size,image_size,3], or [None,3,image_size,image_size] if channels_first.
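One caveat, stated as an assumption about your Keras version: predict_classes exists only on Sequential models in older Keras releases and was removed in later ones, where the equivalent (assuming numpy is imported as np) is:
pre = np.argmax(m.predict(img), axis=1)  # replacement for predict_classes on newer Keras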
You can do something like this:
model = load_model('model.h5')
img = ...  # YOUR IMAGE (let's say it's 32 x 32 x 1)
image_x = 32
image_y = 32
img = cv2.resize(img, (image_x, image_y))
img = np.array(img, dtype=np.float32)
img = np.reshape(img, (-1, image_x, image_y, 1))
pred_probab = model.predict(img)[0]
pred_class = list(pred_probab).index(max(pred_probab))
print(max(pred_probab), pred_class)
# code for predicting an image stored locally against a trained model
# my local image is 28 x 28 already
import numpy as np
from PIL import Image
from keras.preprocessing import image
img = image.load_img('file path include full file name')# , target_size=(32,32))
img = image.img_to_array(img)
img = img.reshape((1,) + img.shape)
# img = img/255
img = img.reshape(-1,784)
img_class=model.predict_classes(img)
# this model above was already trained
# code from https://machinelearningmastery.com/handwritten-digit-recognition-using-convolutional-neural-networks-python-keras/
prediction = img_class[0]
classname = img_class[0]
print("Class: ",classname)
