Always getting 0 for prediction from tensorflow lite model - python

I have a model trained and saved using the following code:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop

train_data_gen = ImageDataGenerator(rescale=1 / 255)
validation_data_gen = ImageDataGenerator(rescale=1 / 255)

# Flow training images in batches of 120 using train_data_gen generator
train_generator = train_data_gen.flow_from_directory(
    'datasets/train/',
    classes=['bad', 'good'],
    target_size=(200, 200),
    batch_size=120,
    class_mode='binary')

validation_generator = validation_data_gen.flow_from_directory(
    'datasets/valid/',
    classes=['bad', 'good'],
    target_size=(200, 200),
    batch_size=19,
    class_mode='binary',
    shuffle=False)

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(200, 200, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # Only 1 output neuron. It will contain a value from 0-1
    # where 0 for 1 class ('bad') and 1 for the other ('good')
    tf.keras.layers.Dense(1, activation='sigmoid')])

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.001),
              metrics='accuracy')

model.fit(train_generator,
          steps_per_epoch=10,
          epochs=25,
          verbose=1,
          validation_data=validation_generator,
          validation_steps=8)

print("Evaluating the model :")
model.evaluate(validation_generator)

print("Predicting :")
validation_generator.reset()
predictions = model.predict(validation_generator, verbose=1)
print(predictions)

model.save("models/saved")
Then the model is converted to TFLite using:
import tensorflow as tf

def saved_model_to_tflite(model_path, quantize):
    converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
    model_saving_path = "models/converted/model.tflite"
    if quantize:
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        model_saving_path = "models/converted/model-quantized.tflite"
    tflite_model = converter.convert()
    with open(model_saving_path, 'wb') as f:
        f.write(tflite_model)
Then the model is tested on a single image using:
import tensorflow as tf

def run_tflite_model(tflite_file, test_image):
    interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
    interpreter.allocate_tensors()
    print(interpreter.get_input_details())
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    interpreter.set_tensor(input_details["index"], test_image)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details["index"])[0]
    prediction = output.argmax()
    return prediction
main.py:
if __name__ == '__main__':
    converted_model = "models/converted/model.tflite"
    bad_image_path = "datasets/experiment/bad/b.png"
    good_image_path = "datasets/experiment/good/g.png"

    img = io.imread(bad_image_path)
    resized = resize(img, (200, 200)).astype('float32')
    test_image = np.expand_dims(resized, axis=0)

    prediction = run_tflite_model(converted_model, test_image)
    print(prediction)
Regardless of what image I feed into the model, I always get 0 as the prediction. What is wrong here?

You forgot to normalize the image before passing it to the tflite model.
resized = resize(img, (200, 200)).astype('float32')
resized = resized / 255.
test_image = np.expand_dims(resized, axis=0)
prediction = run_tflite_model(converted_model, test_image)
Edit:
You are performing a binary classification task, not a multi-class classification task, so you do not need to take the max value of the output array; the model produces only a single value in the range 0 to 1. You can interpret the result as a positive example if the value is greater than or equal to 0.5 and a negative example if it is less than 0.5.
import tensorflow as tf

def run_tflite_model(tflite_file, test_image):
    interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.set_tensor(input_details[0]["index"], test_image)
    interpreter.invoke()
    predictions = interpreter.get_tensor(output_details[0]["index"])
    return 1 if predictions >= 0.5 else 0  # 1 = good, 0 = bad
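A minimal way to call the updated helper, assuming the image has already been resized, rescaled, and given a batch dimension as shown above:
label = run_tflite_model("models/converted/model.tflite", test_image)
print("good" if label == 1 else "bad")   # classes=['bad', 'good'] means bad=0, good=1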

Related

how to build cnn model for defect detection with three classes and test one image on it

I built a CNN model to detect two kinds of defects on an image. These classes are 'big' and 'small', and the accuracy is really good. The architecture of my model is below:
inputs = tf.keras.Input(shape=(120, 120, 3))
x = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu')(inputs)
x = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)
print(model.summary())

history = model.fit(
    train_data,
    validation_data=val_data,
    epochs=100,
    callbacks=[
        tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=3,
            restore_best_weights=True
        )
    ]
)
Now I want to use this CNN model for multiple classes: 'big', 'small', and 'other'. I have the dataset, but I don't know how to change the model for three classes. Also, at the end I want to test a single image with my CNN model and get the label indicating whether the inserted image is big, small, or other, but I don't know how.
Try this:
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(input_shape = (120, 120, 3), filters=16, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
To predict, you can use this code:
from PIL import Image
import numpy as np
from skimage import transform

def load(filename):
    np_image = Image.open(filename)
    np_image = np.array(np_image).astype('float32') / 255
    np_image = transform.resize(np_image, (120, 120, 3))
    np_image = np.expand_dims(np_image, axis=0)
    return np_image

folder_path = 'Dataset/test/4.jpg'
image = load(folder_path)
pred = model.predict_classes(image)
pred.tolist()[0]
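Note that predict_classes was deprecated and later removed in newer TensorFlow/Keras releases; if it is missing in your version, an equivalent sketch using predict and argmax is:
import numpy as np

probs = model.predict(image)              # shape (1, 3), softmax probabilities
pred = int(np.argmax(probs, axis=1)[0])   # index of the most likely class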

Tensorflow CNN only 1 output predict value

I've already looked into similar topics, but none of the tips helped me. My model predicts and outputs only one class; even in the console I see only one array value. I have to check whether the font in the account number is fake or real. It reports an accuracy of 0.99 or even 1.00, but after manually checking with model.predict it only outputs 0's. I train it on 1000 pictures of each class. Any solutions? My code:
train = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
validation = train_set = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)

validation_set = validation.flow_from_directory('Samples', class_mode='binary', batch_size=30,
                                                target_size=(500, 50), shuffle=True, seed=42, color_mode='rgb')
train_set = train.flow_from_directory(directory='Train', class_mode='binary', batch_size=30,
                                      target_size=(500, 50), shuffle=True, seed=42, color_mode='rgb')
print(train_set.classes)

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(500, 50, 3)),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
print(train_set.class_indices)

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_set, epochs=2, validation_data=validation_set)

path = 'Samples/'
DIRECTORIES = ['Fake', 'Real']
for dir in DIRECTORIES:
    for file in os.listdir(path + dir):
        img = tf.keras.preprocessing.image.load_img(path + dir + '/' + file)
        img = tf.keras.preprocessing.image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        images = np.vstack([img])
        val = np.argmax(model.predict(images))
        print(val)
Photos to compare (a Real sample, a Fake sample) and the output were attached as images.
You're using your model as if you had two output neurons in your output layer. np.argmax(model.predict(images)) would return the index of the neuron with the maximum value, but since you only have one, it will always return 0. Just check whether the value returned by predict exceeds the threshold, or alternatively use two neurons.
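A minimal sketch of the threshold check, assuming the usual 0.5 cut-off on the sigmoid output (the train_set.class_indices printed earlier shows which folder maps to 1):
prob = model.predict(images)[0][0]   # single sigmoid value in [0, 1]
val = 1 if prob >= 0.5 else 0
print(val, prob)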

ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0

I am new to TF and Keras. I have a model trained and saved using the following code:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop

train_data_gen = ImageDataGenerator(rescale=1 / 255)
validation_data_gen = ImageDataGenerator(rescale=1 / 255)

# Flow training images in batches of 120 using train_data_gen generator
train_generator = train_data_gen.flow_from_directory(
    'datasets/train/',
    classes=['bad', 'good'],
    target_size=(200, 200),
    batch_size=120,
    class_mode='binary')

validation_generator = validation_data_gen.flow_from_directory(
    'datasets/valid/',
    classes=['bad', 'good'],
    target_size=(200, 200),
    batch_size=19,
    class_mode='binary',
    shuffle=False)

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(200, 200, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # Only 1 output neuron. It will contain a value from 0-1
    # where 0 for 1 class ('bad') and 1 for the other ('good')
    tf.keras.layers.Dense(1, activation='sigmoid')])

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.001),
              metrics='accuracy')

model.fit(train_generator,
          steps_per_epoch=10,
          epochs=25,
          verbose=1,
          validation_data=validation_generator,
          validation_steps=8)

print("Evaluating the model :")
model.evaluate(validation_generator)

print("Predicting :")
validation_generator.reset()
predictions = model.predict(validation_generator, verbose=1)
print(predictions)

model.save("models/saved")
Then the model is converted to TFLite using:
import tensorflow as tf

def saved_model_to_tflite(model_path, quantize):
    converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
    model_saving_path = "models/converted/model.tflite"
    if quantize:
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        model_saving_path = "models/converted/model-quantized.tflite"
    tflite_model = converter.convert()
    with open(model_saving_path, 'wb') as f:
        f.write(tflite_model)
Then the model is tested on a single image using:
import tensorflow as tf

def run_tflite_model(tflite_file, test_image):
    interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
    interpreter.allocate_tensors()
    print(interpreter.get_input_details())
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    interpreter.set_tensor(input_details["index"], test_image)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details["index"])[0]
    prediction = output.argmax()
    return prediction
main.py:
if __name__ == '__main__':
    converted_model = "models/converted/model.tflite"
    bad_image_path = "datasets/experiment/bad/b.png"
    good_image_path = "datasets/experiment/good/g.png"

    img = io.imread(bad_image_path)
    resized = resize(img, (200, 200)).astype('float32')

    prediction = run_tflite_model(converted_model, resized)
    print(prediction)
But I am getting the following error, even though I resized the image to 200 by 200:
ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0.
If I do print(interpreter.get_input_details()), I get:
[{'name': 'serving_default_conv2d_input:0', 'index': 0, 'shape': array([ 1, 200, 200, 3], dtype=int32), 'shape_signature': array([ -1, 200, 200, 3], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}]
So it seems the shape of the input is 'shape': array([1, 200, 200, 3]). I understand the 200, 200, 3 part, and according to the docs the leading 1 is the batch size?
How could I remove the batch size from the input shape?
Instead of removing the batch size from the graph, you can add the batch dimension to your test image by using expand_dims:
test_image = np.expand_dims(test_image, axis=0)
For Android, you can easily prepare the float [1][32][32][3] input array from the float [32][32][3] input array by using loops to copy the values.
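In the Python script from the question, that fix would look roughly like this (same paths and run_tflite_model helper as above, assuming scikit-image is used for loading and resizing):
import numpy as np
from skimage import io
from skimage.transform import resize

img = io.imread("datasets/experiment/bad/b.png")
resized = resize(img, (200, 200)).astype('float32')
test_image = np.expand_dims(resized, axis=0)   # shape (1, 200, 200, 3) matches the expected input

prediction = run_tflite_model("models/converted/model.tflite", test_image)
print(prediction)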

Input 0 of layer sequential is incompatible with the layer

I created a model, then loaded it in another script and tried to perform a prediction with it; however, I cannot understand why the shape being passed to the function is incorrect.
This is how the model is created:
batch_size = 1232
epochs = 5
IMG_HEIGHT = 400
IMG_WIDTH = 400

model1 = np.load("training_data.npy", allow_pickle=True)
model2 = np.load("training_data_1.npy", allow_pickle=True)
data = np.asarray(np.concatenate((model1, model2), axis=0))  # 1232

train_data = data[:-100]
X_train = np.asarray(np.array([i[0] for i in train_data]))
Y_train = np.asarray([i[1] for i in train_data])

validation_data = data[-100:]
X_val = np.asarray(np.array([i[0] for i in validation_data]))
Y_val = np.asarray([i[1] for i in validation_data])

model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu',
           input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

history = model.fit(X_train, Y_train, steps_per_epoch=batch_size, epochs=epochs,
                    validation_data=(X_val, Y_val), validation_steps=batch_size)
model.save("test")
And this is how I'm trying to make a prediction:
batch_size = 1232
epochs = 5
IMG_HEIGHT = 400
IMG_WIDTH = 400
model = tf.keras.models.load_model('test')
test_1 = cv2.imread('./Data/Images/test_no.jpg')
test_1 = cv2.resize(test_1, (IMG_HEIGHT, IMG_WIDTH))
prediction = model.predict([test_1])[0]
print(prediction)
When printing the shape of the test image, the output is: (400, 400, 3)
I also tried using the numpy reshape operation when passing the test image to predict. However, the error is always:
ValueError: Input 0 of layer sequential is incompatible with the layer: expected ndim=4, found ndim=3. Full shape received: [None, 400, 3]
Add an extra dimension to your input so it has the shape [n_items, 400, 400, 3]. For the single test image, that means adding a batch dimension at axis 0:
import tensorflow as tf
test_1 = tf.expand_dims(test_1, axis=0)
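Applied to the prediction script above, a minimal sketch would be:
import cv2
import numpy as np

test_1 = cv2.imread('./Data/Images/test_no.jpg')
test_1 = cv2.resize(test_1, (IMG_HEIGHT, IMG_WIDTH))
test_1 = np.expand_dims(test_1, axis=0)   # shape (1, 400, 400, 3)

prediction = model.predict(test_1)[0]
print(prediction)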

Tensorflow Data being Sorted into 1 class

Hello StackOverflowers!
I'm currently struggling with an error in my code.
When I run the code, for some reason it sorts all the training images into one class, defeating the purpose of the code. I can't find the reason why it does this. Can you please help me?
(Image of error)
This is probably a rookie mistake, and I apologize for that.
Here is my code:
import os
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from keras.models import load_model
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np

line = "__________________________________"

data_x = os.path.join('/tmp/MyData/car-or-no-car/car')
data_y = os.path.join('/tmp/MyData/car-or-no-car/no-car')
print('Total training x images:', len(os.listdir(data_x)))
print('Total training y images:', len(os.listdir(data_y)))

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('acc') > 0.9999:
            print("Accuracy has reached 99.99 percent so cancelling training!")
            self.model.stop_training = True
train_datagen = ImageDataGenerator(rescale=1/255)
train_generator = train_datagen.flow_from_directory(
    '/tmp/MyData/car-or-no-car',  # This is the source directory for training images
    target_size=(300, 300),       # All images will be resized to 300x300
    batch_size=128,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')
callbacks = myCallback()
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
from tensorflow.keras.optimizers import RMSprop

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.001),
              metrics=['acc'])

train_datagen = ImageDataGenerator(rescale=1/255)
train_generator = train_datagen.flow_from_directory(
    '/tmp/MyData/',          # This is the source directory for training images
    target_size=(300, 300),  # All images will be resized to 300x300
    batch_size=128,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

history = model.fit_generator(
    train_generator,
    steps_per_epoch=8,
    epochs=10,
    verbose=1)
print(line)
model.save('testing.h5')
print("The Model has been Saved!")
print(line)
