I need to make a confusion matrix for a CNN model - Python

Hi, I'm new to machine learning and I just want to know how to make a confusion matrix from this code. I followed a YouTube tutorial (sentdex's) and I think I'm lost; I just need to plot the confusion matrix. My dataset is all about cancer and has 2 categories, with and without cancer. I followed sentdex's video and changed his datasets.
import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy
import matplotlib.pyplot as plt
import os
import cv2
DATADIR = "C:/Users/Acer/imagerec/MRI"
CATEGORIES = ["yes", "no"]
for category in CATEGORIES:
    path = os.path.join(DATADIR, category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_array, cmap='gray')
        plt.show()
        break
    break
print(img_array)
print(img_array.shape)
IMG_SIZE = 50
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')
plt.show()
training_data = []
def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass
create_training_data()
print(len(training_data))
import random
random.shuffle(training_data)
for sample in training_data[:10]:
    print(sample[1])
X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)
X = numpy.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
import pickle
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)
X = X/255.0
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, batch_size=5, epochs=1, validation_split=0.1)
model.save('64x2-CNN.model')

This will show you where the classifier predicted right/wrong on the training data (because there is no test set in your code).
from sklearn.metrics import confusion_matrix
pred = (model.predict(X) > 0.5).astype(int).ravel()  # threshold the sigmoid outputs into hard 0/1 labels
conf = confusion_matrix(y, pred)
Out[1]:
array([[5, 8],   # rows are actual classes, columns are predicted classes
       [9, 3]], dtype=int64)
To plot it (minimal example):
import seaborn as sns
sns.heatmap(conf, annot=True)
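For a version with readable axis labels, here is a minimal sketch, assuming the binary labels in y and the CATEGORIES list ("yes"/"no", in that index order) from your code above; the 0.5 cutoff on the sigmoid output is also an assumption:
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix

# Hard 0/1 predictions from the sigmoid probabilities (0.5 threshold assumed)
pred = (model.predict(X) > 0.5).astype(int).ravel()
conf = confusion_matrix(y, pred)

# Label the axes with the class names so the plot is self-explanatory
sns.heatmap(conf, annot=True, fmt='d', xticklabels=CATEGORIES, yticklabels=CATEGORIES)
plt.xlabel('Predicted class')
plt.ylabel('Actual class')
plt.show()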

Related

Neural Network classifier with low accuracy does not improve when changing neuron count

I made a neural network to recognize objects and trained it on 7 categories of images. When I train this model, I always get an accuracy of 0.217. Even when I changed the neuron count of each layer, I still get an accuracy of 0.217.
[image: categories of training image data]
(I used open cv to convert images to arrays and used pickle to store datasets)
'create data set'
import numpy as np
import os
import cv2
import pickle
import random
datadir = r"C:\Users\pc\Desktop\Tenserflow\upgrade1\Images"
categories = []
for root, dirs, files in os.walk(datadir, topdown=False):
    for name in dirs:
        categories.append(name)
training_data = []
img_size = 100
def create_training_data():
    for category in categories:
        path = os.path.join(datadir, category)
        class_num = categories.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (img_size, img_size))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass
create_training_data()
random.shuffle(training_data)
x = []
y = []
for features, label in training_data:
    x.append(features)
    y.append(label)
x = np.array(x).reshape(-1, img_size, img_size, 1)
y = np.array(y)
file1 = open('x.pickle', 'wb')
file2 = open('y.pickle', 'wb')
pickle.dump(x, file1)
pickle.dump(y, file2)
file1.close()
file2.close()
'training code'
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D
import pickle
import numpy as np
x =pickle.load(open("x.pickle", "rb"))
y =pickle.load(open("y.pickle", "rb"))
x = x/255.0
model = Sequential()
model.add(Conv2D(3,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(7,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(7,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Dense(7))
model.add(Activation('softmax'))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
model.fit(x, y, epochs = 2, validation_split = 0.1)
I see two issues in your code:
1: You use categorical_crossentropy as your loss. This loss assumes the targets are one-hot vectors like [1,0,0,0,0,0,0], [0,1,0,0,0,0,0], [0,0,1,0,0,0,0], etc., but your dataset creation produces integer targets 0, 1, 2, etc.
Either switch to the sparse_categorical_crossentropy loss, or one-hot encode your targets with the to_categorical function. I would suggest to_categorical so you don't need to change your network.
More info on those:
https://www.tensorflow.org/api_docs/python/tf/keras/losses/SparseCategoricalCrossentropy
https://www.tensorflow.org/api_docs/python/tf/keras/utils/to_categorical
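A minimal sketch of both options, assuming y holds the integer labels 0-6 produced by your dataset creation:
from tensorflow.keras.utils import to_categorical

# Option 1: one-hot encode the targets so categorical_crossentropy works as-is
y_one_hot = to_categorical(y, num_classes=7)
model.fit(x, y_one_hot, epochs=2, validation_split=0.1)

# Option 2: keep the integer targets and switch the loss instead
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
model.fit(x, y, epochs=2, validation_split=0.1)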
2: Your network is far too small to reach good accuracy. You use far too few filters in your Conv2D layers; try 16, 32 and 32 filters, for example. Your Dense layer is also far too small; try 128 units for the first Dense layer and 7 for the output layer.
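Putting both suggestions together, a sketch of the network with those example sizes (the filter and unit counts are just the illustrative values above, not tuned for your data):
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape=x.shape[1:]))  # 100x100x1 greyscale input
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dense(7))
model.add(Activation('softmax'))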

Deep learning model not giving predictions as input layer is incompatible

Below is a simple model for image classification (hand gesture recognition) using a Kaggle dataset.
# -*- coding: utf-8 -*-
"""kaggle_dataset_code.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kfj2kPVrioXlWX_CDDOGEfxlwMUj5vs6
"""
!pip install kaggle
#You can download the kaggle.json file from your kaggle account. We are going to upload the kaggle.json file.
from google.colab import files
files.upload()
#making kaggle directory as kaggle website has guided.
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
#Giving special permissions to the kaggle.json file.
!chmod 600 ~/.kaggle/kaggle.json
#Downloading the kaggle dataset from the website by copying the API token
!kaggle datasets download -d gti-upm/leapgestrecog
#Unzip the dataset
zip_data_path = "/content/leapgestrecog.zip"
from zipfile import ZipFile
file_name = "leapgestrecog.zip"
with ZipFile(file_name, 'r') as zip:
    zip.extractall()
    print("done")
import cv2
image_data = []
CATEGORIES = ["01_palm", '02_l','03_fist','04_fist_moved','05_thumb','06_index','07_ok','08_palm_moved','09_c','10_down']
IMG_SIZE = 50
import os
unzipped_data_path = "/content/leapgestrecog/leapGestRecog/"
print(os.listdir(unzipped_data_path))
for dr in os.listdir(unzipped_data_path):
    for category in CATEGORIES:
        class_index = CATEGORIES.index(category)
        path = os.path.join(unzipped_data_path, dr, category)
        for image in os.listdir(path):
            image_array = cv2.imread(os.path.join(path, image), cv2.IMREAD_GRAYSCALE)
            image_data.append([cv2.resize(image_array, (IMG_SIZE, IMG_SIZE)), class_index])
#image data of the 19000th image
image_data[19000]
import random
random.shuffle(image_data)
input_data = []
label = []
for X, y in image_data:
    input_data.append(X)
    label.append(y)
import matplotlib.pyplot as plt # for plotting
plt.figure(1, figsize=(10,10))
for i in range(1, 10):
    plt.subplot(3, 3, i)
    plt.imshow(image_data[i][0], cmap='hot')
    plt.xticks([])
    plt.yticks([])
    plt.title(CATEGORIES[label[i]][3:])
plt.show()
import numpy as np
input_data = np.array(input_data)
label = np.array(label)
input_data = input_data/255.0
import keras
label = keras.utils.to_categorical(label, num_classes=10,dtype='i1')
label[0]
input_data.shape = (-1, IMG_SIZE, IMG_SIZE, 1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(input_data, label, test_size = 0.3, random_state=0)
from keras.layers import Conv2D, Activation, MaxPool2D, Dense, Flatten, Dropout
model = keras.models.Sequential()
model.add(Conv2D(filters = 32, kernel_size = (3,3), input_shape = (IMG_SIZE, IMG_SIZE, 1)))
model.add(Activation('relu'))
model.add(Conv2D(filters = 32, kernel_size = (3,3)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.3))
model.add(Conv2D(filters = 64, kernel_size = (3,3)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, epochs = 7, batch_size=32, validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, batch_size=128)
print(score)
model.save("kaggle_dataset_model.h5")
But I get the following error, no matter which model I try:
ValueError: Input 0 of layer sequential_2 is incompatible with the layer: expected axis -1 of input shape to have value 1 but received input with shape [None, 50, 50, 3]
The code where I want the model to make predictions is below
"""
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1PWDO7aYA6Lhl9FgdgMHh8fj-vlLF_mTw
"""
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
# dimensions of our images
img_width = 50
img_height = 50
# load the model we saved
model = load_model('KaggleModelLeapGesture.h5')
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
from google.colab import files
from keras.preprocessing import image
uploaded = files.upload()
for fn in uploaded.keys():
    # predicting images
    path = fn
    img = image.load_img(path, target_size=(50, 50))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    print(fn)
    print(classes)
As Dr. Snoopy suggested, the model is trained on greyscale images, but you are trying to predict on an RGB image. Kindly use the greyscale version of the image.
Coming to your next question regarding the predictions: the last layer of your model is model.add(Dense(10, activation='softmax')), which means you have 10 classes to predict, and since you used the softmax function it gives the probability of the image belonging to each of these 10 classes. The sum of all the probabilities will be equal to 1.
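A sketch of the prediction step adjusted for greyscale input; color_mode='grayscale' makes load_img return a single-channel image, and the /255.0 rescaling here is an assumption to mirror the training preprocessing (input_data = input_data/255.0):
import numpy as np
from keras.preprocessing import image

# CATEGORIES is the class-name list from the training script
img = image.load_img(path, target_size=(50, 50), color_mode='grayscale')
x = image.img_to_array(img)             # shape (50, 50, 1), matching the model input
x = np.expand_dims(x, axis=0) / 255.0   # add the batch dimension and rescale
probs = model.predict(x)                # ten softmax probabilities that sum to 1
print(CATEGORIES[np.argmax(probs[0])])  # most likely gesture class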

How to implement CAM without visualize_cam in this code?

I want to make a Class Activation Map (CAM), so I have written this code:
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, GlobalAveragePooling2D
from keras.models import Model, Input
from keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train_resized = x_train.reshape((60000, 28, 28, 1))
x_test_resized = x_test.reshape((10000, 28, 28, 1))
y_train_hot_encoded = to_categorical(y_train)
y_test_hot_encoded = to_categorical(y_test)
inputs = Input(shape=(28,28, 1))
x = Conv2D(64, (3,3), activation='relu')(inputs)
x = Conv2D(64, (3,3), activation='relu')(x)
x = GlobalAveragePooling2D()(x)
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train_resized, y_train_hot_encoded, epochs=30, batch_size=256, shuffle=True, validation_split=0.3)
This works fine, so I imported the visualize_cam module:
from vis.visualization import visualize_cam
import matplotlib.pyplot as plt
import numpy as np
for i in range(10):
    ind = np.where(y_test == i)[0][0]
    plt.subplot(141)
    plt.imshow(x_test_resized[ind].reshape((28, 28)))
    for j, modifier in enumerate([None, 'guided', 'relu']):
        heat_map = visualize_cam(model, 4, y_test[ind], x_test_resized[ind], backprop_modifier=modifier)
        plt.subplot(1, 4, j + 2)
        plt.imshow(heat_map)
    plt.show()
but visualize_cam didn't work well.
I tried many times to fix the module but it doesn't go well (it depends on a scipy version below 1.3), so I have to implement CAM without that module.
Is there any way to replace visualize_cam with another option to implement CAM?
Here is a scipy-independent implementation.
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, GlobalAveragePooling2D
from keras.models import Model, Input
from keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train_resized = x_train.reshape((60000, 28, 28, 1))
x_test_resized = x_test.reshape((10000, 28, 28, 1))
y_train_hot_encoded = to_categorical(y_train)
y_test_hot_encoded = to_categorical(y_test)
inputs = Input(shape=(28,28, 1))
x = Conv2D(64, (3,3), activation='relu')(inputs)
x = Conv2D(64, (3,3), activation='relu', name='final_conv')(x)
x = GlobalAveragePooling2D()(x)
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train_resized, y_train_hot_encoded, epochs=1, batch_size=256, shuffle=True, validation_split=0.3)
import numpy as np
import cv2
import io
import requests
from PIL import Image
import matplotlib.pyplot as plt
# Using Keras implementation from tensorflow
from tensorflow.python.keras import applications
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array
from tensorflow.python.keras import backend as K
# Get the layer of the last conv layer
finalconv = model.get_layer('final_conv')
# Get the weights matrix of the last layer
weight_softmax = model.layers[-1].get_weights()[0]
# Function to generate Class Activation Mapping
HEIGHT = 28
WIDTH = 28
def returnCAM(feature_conv, weight_softmax, class_idx):
    size_upsample = (WIDTH, HEIGHT)
    # Keras default is channels last, hence nc is last
    bz, h, w, nc = feature_conv.shape
    output_cam = []
    for idx in class_idx:
        cam = np.dot(weight_softmax[:, idx], np.transpose(feature_conv.reshape(h * w, nc)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        cam_img = np.uint8(255 * cam_img)
        output_cam.append(cv2.resize(cam_img, size_upsample))
    return output_cam
x = x_test_resized[0,:,:,0]
plt.imshow(x)
plt.show()
classes = {1:'1', 2: '2', 3: '3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8', 9:'9', 0:'0'}
probs_extractor = K.function([model.input], [model.output])
features_conv_extractor = K.function([model.input], [finalconv.output])
probs = probs_extractor([np.expand_dims(x, 0).reshape(1,28,28,1)])[0]
features_blob = features_conv_extractor([np.expand_dims(x, 0).reshape(1,28,28,1)])[0]
features_blobs = []
features_blobs.append(features_blob)
idx = np.argsort(probs)
probs = np.sort(probs)
for i in range(-1, -6, -1):
    print('{:.3f} -> {}'.format(probs[0, i], classes[idx[0, i]]))
CAMs = returnCAM(features_blobs[0], weight_softmax, [idx[0, -1]])
heatmap = cv2.applyColorMap(cv2.resize(CAMs[0], (28, 28)), cv2.COLORMAP_JET)
result = heatmap[:,:,0] * 0.3 + x * 0.5
print(result.shape)
plt.imshow(result)
plt.show()
N.B.: I'm plotting normalized images, so the result isn't great; I also trained for only 1 epoch. For better results, you may try training longer and converting to an appropriate color space.

How to save images properly classified with CNN?

Handwriting recognition problem with a CNN. There is a requirement: from the 10,000 test images, save 1,000 correctly classified images (.png or .jpg), in one folder per digit (0 -> 9) with 100 images each. How do I do this? I need some instruction about the code. Thanks! Code:
import keras
from keras.datasets import mnist
from keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
(train_X,train_Y), (test_X,test_Y) = mnist.load_data()
train_X = train_X.reshape(-1, 28,28, 1)
test_X = test_X.reshape(-1, 28,28, 1)
train_X = train_X.astype('float32')
test_X = test_X.astype('float32')
test_X = test_X / 255
train_Y_one_hot = to_categorical(train_Y)
test_Y_one_hot = to_categorical(test_Y)
model = Sequential()
model.add(Conv2D(64, (3,3), input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
model.fit(train_X, train_Y_one_hot, batch_size=64, epochs=1)
test_loss, test_acc = model.evaluate(test_X, test_Y_one_hot)
print('Test loss', test_loss)
print('Test accuracy', test_acc)
model.save('123.model')
predictions = model.predict(test_X)
print(np.argmax(np.round(predictions[235])))
plt.imshow(test_X[235].reshape(28, 28), cmap = 'Greys_r')
plt.show()
Complete code for saving the correctly classified test images into the respective folders of their labels (0 to 9), 100 images in each folder, is mentioned below:
import keras
from keras.datasets import mnist
from keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
(train_X,train_Y), (test_X,test_Y) = mnist.load_data()
train_X = train_X.reshape(-1, 28,28, 1)
test_X = test_X.reshape(-1, 28,28, 1)
train_X = train_X.astype('float32')
train_X = train_X/255
test_X = test_X.astype('float32')
test_X = test_X / 255
#train_Y_one_hot = to_categorical(train_Y)
#test_Y_one_hot = to_categorical(test_Y)
model = Sequential()
model.add(Conv2D(64, (3,3), input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
#model.fit(train_X, train_Y_one_hot, batch_size=64, epochs=1)
model.fit(train_X, train_Y, batch_size=64, epochs=1)
#test_loss, test_acc = model.evaluate(test_X, test_Y_one_hot)
test_loss, test_acc = model.evaluate(test_X, test_Y)
print('Test loss', test_loss)
print('Test accuracy', test_acc)
predictions = model.predict(test_X)
#****Actual Code which you need is mentioned below
import matplotlib
import matplotlib.pyplot as plt
import os
def save_fig(fig_id, Label):
    path = os.path.join('MNIST_Images', Label, fig_id + "." + "png")
    plt.tight_layout()
    plt.savefig(path, format="png", dpi=300)
    plt.close()
%matplotlib agg
%matplotlib agg
# These 2 lines are required to prohibit the graph being displayed in Jupyter Notebook. You can comment them out if you are using another IDE.
No_Of_Rows = predictions.shape[0]
Count_Dict = {}
for i in range(10):
    key = 'Count_' + str(i)
    Count_Dict[key] = 0
for Each_Row in range(No_Of_Rows):
    if np.argmax(predictions[Each_Row]) == test_Y[Each_Row]:
        Label = str(test_Y[Each_Row])
        Count_Dict['Count_' + Label] = Count_Dict['Count_' + Label] + 1
        Count_Of_Label = Count_Dict['Count_' + Label]
        if Count_Of_Label <= 100:
            plt.imshow(test_X[Each_Row].reshape(28, 28), cmap='Greys_r')
            plt.show()
            save_fig(str(Count_Of_Label), Label)
I've commented out the lines of code below, which are not required, as the labels are already in numeric format:
train_Y_one_hot = to_categorical(train_Y)
test_Y_one_hot = to_categorical(test_Y)
Also, I've replaced categorical_crossentropy with sparse_categorical_crossentropy in model.compile, as we are not one-hot encoding the labels.
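To illustrate the difference (a small sketch, not part of the original code): the two losses expect differently shaped targets but are otherwise equivalent here.
# sparse_categorical_crossentropy consumes the integer labels directly
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
model.fit(train_X, train_Y, batch_size=64, epochs=1)

# categorical_crossentropy would instead need the one-hot encoded version
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
model.fit(train_X, to_categorical(train_Y), batch_size=64, epochs=1)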

So I've made a neural network model for classifying dogs and cats with tensorflow keras, but its accuracy isn't improving. Any suggestions?

So I'm fairly new to this stuff and I have written some code, shown below, where the model I made doesn't seem to be classifying the cats and dogs. Basically, the data is a large collection of cat and dog pictures, starting with 12,500 cats and then 12,500 dogs, in that order. I just can't work out why this isn't working. I have tried adjusting weights and batch size, but nothing works. Any suggestions?
import tensorflow as tf
from tensorflow import keras
import numpy as np
import cv2
import os
import random
import pickle
#cat: 1, dog: 0
training_data_path = '/Path/to/folder/containing/images'
IMG_SIZE = 100
training_data = []
def create_training_data():
    counter = 0
    for file in os.listdir(training_data_path):
        if 'cat' in str(file):
            label = 1
            img_path = os.path.join(training_data_path, file)
            array = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(array, (IMG_SIZE, IMG_SIZE))
            training_data.append([new_array, label])
            print(counter)
            counter += 1
        else:
            label = 0
            img_path = os.path.join(training_data_path, file)
            print(img_path)
            print(label)
            array = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(array, (IMG_SIZE, IMG_SIZE))
            training_data.append([new_array, label])
            print(counter)
            counter += 1

create_training_data()
random.shuffle(training_data)
X = []
y = []
for point in training_data:
    X.append(point[0])
    y.append(point[1])
X = np.array(X)
y = np.array(y)
pickle_out = open('X3.pickle', 'wb')
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open('y3.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
def NN_model():
    pickle_in = open('X3.pickle', 'rb')
    pickle_in_2 = open('y3.pickle', 'rb')
    batch_size = 1000
    X = pickle.load(pickle_in)
    y = pickle.load(pickle_in_2)
    inputs = keras.Input(shape=(X.shape[1:]))
    f = keras.layers.Flatten()(inputs)
    predictions = keras.layers.Dense(2, activation=tf.nn.softmax)(f)
    model = keras.Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.1),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X, y, epochs=5, batch_size=batch_size)

NN_model()
You basically have a single dense layer with two units to classify your images.
This will not work; the network is simply too simple to be able to classify the images.
I suggest you have a look at the following tutorial:
Keras Image Classification
Snippet of their model below:
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(3, 150, 150)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
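Note that the snippet expects 150x150 inputs (channels-first in that tutorial). A sketch of how you might adapt it to the 100x100 greyscale arrays built above; the optimizer, epoch count and validation split are assumptions, not part of the tutorial:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense, Dropout

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(100, 100, 1)))  # greyscale 100x100 input
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
X = X.reshape(-1, 100, 100, 1) / 255.0  # add the channel dimension and rescale (assumed)
model.fit(X, y, epochs=5, batch_size=32, validation_split=0.1)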
