How to check "test data" accuracy and plot the results - Python

I want to test the accuracy of a model on 11,000+ images.
I split the data into two classes, "Yes" and "No", then split them 80/20 into a training set and a test set respectively.
I then split the training data again 80/20 to create a validation set.
I created "Yes" and "No" folders for each split (training, validation, and test) and keep the data in the respective folders.
Now I want to train the model in Google Colab.
After training the model, I want to evaluate the accuracy on the test set and plot specificity, sensitivity, accuracy, loss, and recall.
Detailed help would be much appreciated.
Thanks in advance.
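For reference, the 80/20 split (and the further 80/20 split for validation) described above could be scripted roughly like this. It is only a sketch: the paths and folder names are placeholders, and it assumes the raw images sit in one folder per class.

# A sketch of the 80/20 split, then a further 80/20 split for validation, as described above.
# Paths and folder names are placeholders, not the actual ones used in the question.
import os
import shutil
from sklearn.model_selection import train_test_split

def split_class(class_name, src_root='data', dst_root='data03'):
    files = os.listdir(os.path.join(src_root, class_name))
    train_files, test_files = train_test_split(files, test_size=0.2, random_state=42)
    train_files, val_files = train_test_split(train_files, test_size=0.2, random_state=42)
    for split, names in [('train', train_files), ('validation', val_files), ('test', test_files)]:
        out_dir = os.path.join(dst_root, split, class_name)
        os.makedirs(out_dir, exist_ok=True)
        for name in names:
            shutil.copy(os.path.join(src_root, class_name, name), out_dir)

for cls in ['yes', 'no']:
    split_class(cls)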

#Yefet
Code is here:
import tensorflow as tf
import matplotlib.pyplot as plt
import zipfile
import cv2
import os
import numpy as np
import matplotlib.image as mpimg
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from sklearn.model_selection import KFold, StratifiedKFold

img = image.load_img("drive/MyDrive/data03/train/no/1 no.jpeg", target_size=(200, 200))
plt.imshow(img)

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if (logs.get('accuracy') > .98) and (logs.get('val_accuracy') > .9):
            print("Reached 98% accuracy so cancelling training!")
            self.model.stop_training = True

callbacks = myCallback()

train_datagen = ImageDataGenerator(rescale=1/255)
validation_datagen = ImageDataGenerator(rescale=1/255)

train_generator = train_datagen.flow_from_directory(
    'drive/MyDrive/data03/train',  # source directory for training images
    target_size=(200, 200),        # all images will be resized to 200x200
    batch_size=128,
    # binary_crossentropy loss requires binary labels
    class_mode='binary')

validation_generator = validation_datagen.flow_from_directory(
    'drive/MyDrive/data03/validation',  # source directory for validation images
    target_size=(200, 200),             # all images will be resized to 200x200
    batch_size=128,
    # binary_crossentropy loss requires binary labels
    class_mode='binary')

from tensorflow.keras.optimizers import RMSprop

model = tf.keras.models.Sequential([
    # The input shape is the desired size of the image: 200x200 with 3 colour channels
    # First convolution
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(200, 200, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Second convolution
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Third convolution
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Fourth convolution (disabled)
    # tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    # tf.keras.layers.MaxPooling2D(2, 2),
    # Fifth convolution (disabled)
    # tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    # tf.keras.layers.MaxPooling2D(2, 2),
    # Flatten the results to feed into a dense network
    tf.keras.layers.Flatten(),
    # 512-neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # A single output neuron with a value from 0 to 1: 0 for one class ('no'), 1 for the other ('yes')
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.001),
              metrics=['accuracy'])

history = model.fit(
    train_generator,
    steps_per_epoch=1,
    epochs=29,
    validation_data=validation_generator,
    callbacks=[callbacks])

dir_path = "drive/MyDrive/data03/test"
for i in os.listdir(dir_path):
    img = image.load_img(dir_path + '//' + i, target_size=(200, 200))
    plt.imshow(img)
    plt.show()
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    classes = model.predict(images)
    if classes[0] == 0:
        print("NO TUMOR")
    else:
        print("YES")

Related

Matrix size incompatible: In[0]: [1,43264], In[1]: [16,512]

I want to build a model that classifies and predicts words from a user's lips. For the adverb class I have a total of 142,657 images, preprocessed from a dataset of videos of individual speakers, but I get this error when running the model and it doesn't even get past the first epoch.
Here's my code:
import os
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation, Dropout, Input, Conv2D, \
    MaxPooling2D, Flatten, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.autograph.set_verbosity(0)
tf.get_logger().setLevel('ERROR')

class AdverbNet(object):
    def __init__(self):
        self.Model = Sequential()
        self.build()

    def build(self):
        self.Model.add(Input(name='the_input', shape=(224, 224, 1), batch_size=16, dtype='float32'))
        self.Model.add(Conv2D(32, (3, 3), activation='sigmoid', name='convo2'))
        self.Model.add(MaxPooling2D(pool_size=(2, 2)))
        self.Model.add(Conv2D(32, (3, 3), activation='sigmoid', name='convo3'))
        self.Model.add(MaxPooling2D(pool_size=(2, 2)))
        self.Model.add(Conv2D(64, (3, 3), activation='relu', name='convo4'))
        self.Model.add(MaxPooling2D(pool_size=(2, 2)))
        self.Model.add(Flatten())
        self.Model.add(Dense(512))
        self.Model.add(Dropout(0.5))
        self.Model.add(BatchNormalization(scale=False))
        self.Model.add(Activation('relu'))
        self.Model.add(Dropout(0.5))
        self.Model.add(Dense(4, activation='softmax'))

    def summary(self):
        self.Model.summary()

if __name__ == "__main__":
    common_path = 'C:/Users/Loide/Desktop/Liphy/'
    C = AdverbNet()
    C.Model.compile(optimizer="Adam", loss='categorical_crossentropy', metrics=['accuracy'])
    C.Model.summary()

    with tf.device('/device:GPU:0'):
        batch_size = 16
        epochs = 32
        train_dir = common_path + 'Images/Adverb/'
        test_dir = common_path + 'Images/Adverb/'
        checkpoint_path = common_path + 'SavedModels/Adverb/'

        # Generator for training data
        train_image_generator = ImageDataGenerator(rescale=1. / 255)
        train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                                    directory=train_dir,
                                                                    shuffle=True,
                                                                    target_size=(224, 224),
                                                                    class_mode='categorical',
                                                                    color_mode='grayscale')

        # Generator for test data
        test_image_generator = ImageDataGenerator(rescale=1. / 255)
        test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,
                                                                 directory=test_dir,
                                                                 shuffle=False,
                                                                 target_size=(224, 224),
                                                                 class_mode='categorical',
                                                                 color_mode='grayscale')

        callback = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                                    patience=10,
                                                    restore_best_weights=True,
                                                    baseline=0.45)

        history = C.Model.fit(train_data_gen,
                              steps_per_epoch=8916,  # number of images // batch size
                              epochs=epochs,
                              verbose=1,
                              validation_data=test_data_gen,
                              validation_steps=187,
                              callbacks=[callback])

        C.Model.save(checkpoint_path, save_format='tf')
And I get the following error:
Matrix size-incompatible: In[0]: [1,43264], In[1]: [16,512]
[[{{node gradient_tape/sequential/dense/MatMul/MatMul_1}}]] [Op:__inference_train_function_1179]
It seems like you have two matrices (one 1x43264 and one 16x512). You try to multiply them, but their product is mathematically undefined: one matrix must be (a x b) and the other (b x c). That's why your program can't run. If your images come from a standard test dataset, try to follow its instructions step by step; if not, your preprocessing is probably wrong.
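To make the shape rule concrete, here is a tiny NumPy illustration, with toy sizes standing in for the tensors mentioned in the error rather than the actual model:

# A small illustration of the (a x b) @ (b x c) rule from the answer above, using toy sizes.
import numpy as np

a = np.ones((1, 4))     # stands in for the [1, 43264] activation
b = np.ones((16, 512))  # stands in for the [16, 512] weight matrix
# a @ b                 # raises ValueError: inner dimensions (4 and 16) do not match
w = np.ones((4, 512))   # a weight matrix whose first dimension matches a's second
print((a @ w).shape)    # (1, 512) -- defined because the inner dimensions agree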

Why am I getting a very small number as a CNN prediction?

I created a CNN using TensorFlow to identify pneumonia, and sometimes it returns a very small number as a prediction. Why is this happening?
I have attached the link for the dataset.
Here is how I process and load the data:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1.0/255.)
val_datagen = ImageDataGenerator(rescale=1.0/255.)
test_datagen = ImageDataGenerator(rescale=1.0/255.)

train_generator = train_datagen.flow_from_directory('/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/',
                                                    batch_size=20,
                                                    class_mode='binary',
                                                    target_size=(350, 350))
validation_generator = val_datagen.flow_from_directory('/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/val/',
                                                       batch_size=20,
                                                       class_mode='binary',
                                                       target_size=(350, 350))
test_generator = test_datagen.flow_from_directory('/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/test/',
                                                  batch_size=20,
                                                  class_mode='binary',
                                                  target_size=(350, 350))
And here are the model, compile, and fit functions:
import tensorflow as tf

model = tf.keras.models.Sequential([
    # The input shape is the desired size of the image: 350x350 with 3 colour channels
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(350, 350, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Flatten the results to feed into a dense network
    tf.keras.layers.Flatten(),
    # 1024-neuron hidden layer
    tf.keras.layers.Dense(1024, activation='relu'),
    # A single output neuron with a value from 0 to 1: 0 for one class, 1 for the other
    tf.keras.layers.Dense(1, activation='sigmoid')
])
Compile the model:
from tensorflow.keras.optimizers import RMSprop

model.compile(optimizer=RMSprop(learning_rate=0.001),
              loss='binary_crossentropy',
              metrics=['accuracy'])
Fit the model:
history = model.fit(train_generator,
                    validation_data=validation_generator,
                    steps_per_epoch=200,
                    epochs=2000,
                    validation_steps=200,
                    callbacks=[callbacks],
                    verbose=2)
The evaluation metrics are as follows: loss: 0.2351 - accuracy: 0.9847.
The prediction shows a very small number for negative pneumonia, and for positive it shows more than 0.50.
I have two questions:
Why do I get a very small number such as 2.xxxx * 10e-20?
Why can't I get the following values as null?
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
I see no problem with your code, nor with the results you get.
This is a binary classification problem (2 classes: positive or negative pneumonia), and the output of your model is one neuron giving values between 0 and 1.
So if the output is higher than 0.5, it means positive pneumonia. Otherwise, when you have a very small value like 2 * 10e-20, it means negative pneumonia.
For your second question, you are not supposed to get null accuracy and loss values, simply because the model is well trained and has 98% accuracy on the training data.
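To make that concrete, here is a minimal sketch of thresholding the sigmoid output, assuming the model and test_generator defined above and the conventional 0.5 cut-off:

# A minimal sketch, assuming `model` and `test_generator` from the code above.
import numpy as np

probs = model.predict(test_generator)        # values in [0, 1], one per image
labels = (probs > 0.5).astype(int).ravel()   # 1 = positive pneumonia, 0 = negative (assumed 0.5 threshold)
for p, lab in zip(probs.ravel(), labels):
    print(f"{p:.2e} -> {'POSITIVE' if lab == 1 else 'NEGATIVE'}")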

Predictions using CNN in Tensorflow

I'm trying to create a binary image classifier to detect whether someone is wearing a surgical mask, following the 'Cats vs Dogs' example available on the TensorFlow website (https://www.tensorflow.org/tutorials/images/classification).
I've created a small dataset with some images of people wearing surgical masks and some without, trained my CNN, and got an accuracy around 70%, which is fine for now. But the thing is, how do I make predictions? The 'Cats vs Dogs' example stops at augmentation.
Right now I'm not worried about accuracy, just wondering how I can get predictions from my model.
This is my code:
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import random
import keras
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import adam

IMG_SIZE = 100    # Image dimensions
batch_size = 100  # Amount of data that will be fed to the NN at a time
epochs = 1        # Amount of times the data will be passed to the NN

training_data = []
#img_array = []
new_array = []

####### DATASET LOCATION #######
TRAIN_DIR = 'C:/Users/Alex/Google Drive/Colab Notebooks/MaskDetector/Train/'          # Path of the training images directory
VALIDATION_DIR = 'C:/Users/Alex/Google Drive/Colab Notebooks/MaskDetector/Validate/'  # Path of the validation images directory
TEST_DIR = 'C:/Users/Alex/Google Drive/Colab Notebooks/MaskDetector/Test/'            # Path of the testing images directory
CATEGORIES = ['MaskOn', 'MaskOff']  # Categories 'MaskOn' and 'MaskOff', same names as the folders

TRAIN_DIR_MASKON = os.path.join(TRAIN_DIR, 'MaskOn')             # Directory with pics of ppl with masks for training
TRAIN_DIR_MASKOFF = os.path.join(TRAIN_DIR, 'MaskOff')           # Directory with pics of ppl without masks for training
VALIDATION_DIR_MASKON = os.path.join(VALIDATION_DIR, 'MaskOn')   # Directory with pics of ppl with masks for validation
VALIDATION_DIR_MASKOFF = os.path.join(VALIDATION_DIR, 'MaskOff') # Directory with pics of ppl without masks for validation
###########################################

####### Shows the size of the dataset #######
num_maskon_tr = len(os.listdir(TRAIN_DIR_MASKON))
num_maskoff_tr = len(os.listdir(TRAIN_DIR_MASKOFF))
num_maskon_val = len(os.listdir(VALIDATION_DIR_MASKON))
num_maskoff_val = len(os.listdir(VALIDATION_DIR_MASKOFF))
total_train = num_maskon_tr + num_maskoff_tr
total_val = num_maskon_val + num_maskoff_val

###################### DATA AUGMENTATION ######################
######### FLIP #########
image_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)
train_data_gen = image_gen.flow_from_directory(batch_size=batch_size,
                                               directory=TRAIN_DIR,
                                               shuffle=True,
                                               target_size=(IMG_SIZE, IMG_SIZE))
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
# Re-use the same custom plotting function defined and used above to visualize the training images

######### ROTATE 45° #########
image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45)
train_data_gen = image_gen.flow_from_directory(batch_size=batch_size,
                                               directory=TRAIN_DIR,
                                               shuffle=True,
                                               target_size=(IMG_SIZE, IMG_SIZE))
augmented_images = [train_data_gen[0][0][0] for i in range(5)]

######### ZOOM FROM 0 TO 10% #########
# zoom_range from 0 - 1 where 1 = 100%.
image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5)
train_data_gen = image_gen.flow_from_directory(batch_size=batch_size,
                                               directory=TRAIN_DIR,
                                               shuffle=True,
                                               target_size=(IMG_SIZE, IMG_SIZE))
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
####################################################################################################

### PREPARES THE DATA TO BE FED INTO THE NN ###
train_image_generator = ImageDataGenerator(rescale=1./255)
image_gen_val = ImageDataGenerator(rescale=1./255)
test_data_generator = ImageDataGenerator(rescale=1./255)  # Use this to make predictions? Don't know yet

val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,
                                                 directory=VALIDATION_DIR,
                                                 target_size=(IMG_SIZE, IMG_SIZE),
                                                 class_mode='binary')
####################################################################################
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                            directory=TRAIN_DIR,
                                                            shuffle=True,
                                                            target_size=(IMG_SIZE, IMG_SIZE),
                                                            class_mode='binary')
#####################################################################################
test_generator = test_data_generator.flow_from_directory(TEST_DIR,
                                                          target_size=(IMG_SIZE, IMG_SIZE),
                                                          batch_size=batch_size,
                                                          class_mode="binary",
                                                          shuffle=True)

############ NN Model ############
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu',
           input_shape=(IMG_SIZE, IMG_SIZE, 3)),
    MaxPooling2D(),
    Dropout(0.2),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Dropout(0.2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])
#########################################################

############ COMPILES THE NN ############
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

############ PERFORMS THE TRAINING ############
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size
)
You've clearly found model.compile() and model.fit_generator() - all you need to do is head over to the documentation and find the other methods. Here's a link that'll tell you how to use model.predict(). Use that for your prediction.
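For this particular model the last Dense layer has no activation and the loss uses from_logits=True, so model.predict returns logits rather than probabilities. A minimal sketch, assuming the model and test_generator from the question, an assumed 0.5 threshold, and class_indices for the label names, might look like:

# A minimal sketch, assuming `model` and `test_generator` from the question's code.
import numpy as np
import tensorflow as tf

logits = model.predict(test_generator)      # raw logits, since the last Dense layer has no activation
probs = tf.sigmoid(logits).numpy().ravel()  # convert logits to probabilities in [0, 1]
preds = (probs > 0.5).astype(int)           # assumed 0.5 decision threshold

# flow_from_directory assigns class indices alphabetically; class_indices shows the mapping
idx_to_name = {v: k for k, v in test_generator.class_indices.items()}
for p, pred in zip(probs[:10], preds[:10]):
    print(f"{p:.3f} -> {idx_to_name[pred]}")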

Tensorflow Data being Sorted into 1 class

Hello StackOverflowers!
I'm currently struggling with an error in my code.
When I run the code, for some reason it sorts all the training images into one class, defeating the purpose of the code. I can't seem to find the reason why it does this. Can you please help me?
(Image of error)
This is probably a rookie mistake, and I apologize for that.
Here is my code:
import os
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from keras.models import load_model
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np

line = "__________________________________"

data_x = os.path.join('/tmp/MyData/car-or-no-car/car')
data_y = os.path.join('/tmp/MyData/car-or-no-car/no-car')
print('Total training x images:', len(os.listdir(data_x)))
print('Total training y images:', len(os.listdir(data_y)))

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('acc') > 0.9999:
            print("Accuracy has reached 99.99 percent so cancelling training!")
            self.model.stop_training = True

train_datagen = ImageDataGenerator(rescale=1/255)
train_generator = train_datagen.flow_from_directory(
    '/tmp/MyData/car-or-no-car',  # This is the source directory for training images
    target_size=(300, 300),       # All images will be resized to 300x300
    batch_size=128,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

callbacks = myCallback()

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.001),
              metrics=['acc'])

train_datagen = ImageDataGenerator(rescale=1/255)
train_generator = train_datagen.flow_from_directory(
    '/tmp/MyData/',          # This is the source directory for training images
    target_size=(300, 300),  # All images will be resized to 300x300
    batch_size=128,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

history = model.fit_generator(
    train_generator,
    steps_per_epoch=8,
    epochs=10,
    verbose=1)

print(line)
model.save('testing.h5')
print("The Model has been Saved!")
print(line)

How to predict input image using trained model in Keras?

I trained a model to classify images from 2 classes and saved it using model.save(). Here is the code I used:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K

# dimensions of our images.
img_width, img_height = 320, 240

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 200      # total
nb_validation_samples = 10  # total
epochs = 6
batch_size = 10

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=5)

model.save('model.h5')
It trained successfully with 0.98 accuracy, which is pretty good. To load and test this model on new images, I used the code below:
from keras.models import load_model
import cv2
import numpy as np

model = load_model('model.h5')
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

img = cv2.imread('test.jpg')
img = cv2.resize(img, (320, 240))
img = np.reshape(img, [1, 320, 240, 3])

classes = model.predict_classes(img)
print(classes)
It outputs:
[[0]]
Why doesn't it give the actual name of the class, and why [[0]]?
If someone is still struggling to make predictions on images, here is the optimized code to load the saved model and make predictions:
# Modify 'test1.jpg' and 'test2.jpg' to the images you want to predict on
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
# dimensions of our images
img_width, img_height = 320, 240
# load the model we saved
model = load_model('model.h5')
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# predicting images
img = image.load_img('test1.jpg', target_size=(img_width, img_height))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict_classes(images, batch_size=10)
print(classes)
# predicting multiple images at once
img = image.load_img('test2.jpg', target_size=(img_width, img_height))
y = image.img_to_array(img)
y = np.expand_dims(y, axis=0)
# pass the list of multiple images to np.vstack()
images = np.vstack([x, y])
classes = model.predict_classes(images, batch_size=10)
# print the classes the images belong to
print(classes)
print(classes[0])
print(classes[0][0])
You can use model.predict() to predict the class of a single image as follows [doc]:
# load_model_sample.py
from keras.models import load_model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
import os

def load_image(img_path, show=False):
    img = image.load_img(img_path, target_size=(150, 150))
    img_tensor = image.img_to_array(img)             # (height, width, channels)
    img_tensor = np.expand_dims(img_tensor, axis=0)  # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
    img_tensor /= 255.                               # imshow expects values in the range [0, 1]

    if show:
        plt.imshow(img_tensor[0])
        plt.axis('off')
        plt.show()

    return img_tensor

if __name__ == "__main__":
    # load model
    model = load_model("model_aug.h5")

    # image path
    img_path = '/media/data/dogscats/test1/3867.jpg'  # dog
    # img_path = '/media/data/dogscats/test1/19.jpg'  # cat

    # load a single image
    new_image = load_image(img_path)

    # check prediction
    pred = model.predict(new_image)
In this example, an image is loaded as a numpy array with shape (1, height, width, channels). Then we feed it to the model and predict its class, which is returned as a real value in the range [0, 1] (binary classification in this example).
Keras predict_classes (docs) outputs "a numpy array of class predictions", which in your model's case is the index of the neuron with the highest activation in your last (softmax) layer. [[0]] means that your model predicted that your test data is class 0. (Usually you will be passing multiple images, and the result will look like [[0], [1], [1], [0]].)
You must convert your actual labels (e.g. 'cancer', 'not cancer') into a binary encoding (0 for 'cancer', 1 for 'not cancer') for binary classification. Then you can interpret the output [[0]] as having the class label 'cancer'.
That's because you're getting the numeric value associated with the class. For example, if you have two classes, cats and dogs, Keras will associate the numeric values 0 and 1 with them. To get the mapping between your classes and their associated numeric values, you can use
>>> classes = train_generator.class_indices
>>> print(classes)
{'cats': 0, 'dogs': 1}
Now you know the mapping between your classes and indices. So now what you can do is
if classes[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'
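If you'd rather not hard-code the names, a small sketch (assuming the same train_generator and the classes array from the prediction code above) is to invert class_indices:

# A small sketch, assuming the same `train_generator` and `classes` as above.
labels = {v: k for k, v in train_generator.class_indices.items()}  # e.g. {0: 'cats', 1: 'dogs'}
prediction = labels[int(classes[0][0])]
print(prediction)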
Following on from the example by #ritiek, I'm a beginner in ML too; maybe this kind of formatting will help you see the class name instead of just the class number.
images = np.vstack([x, y])
prediction = model.predict(images)
print(prediction)
i = 1
for things in prediction:
    if things < 0.5:  # model.predict returns a probability here, so compare against 0.5 rather than 0
        print('%d. It is cancer' % (i))
    else:
        print('%d. Not cancer' % (i))
    i = i + 1
