I'm currently working on facial expression recognition using Keras. I have gone through several tutorials and applied their guidelines, but I could not achieve an accuracy above 57%. I want to achieve at least 90% prediction accuracy with my model.
import tensorflow as tf
import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, BatchNormalization, Dropout, AveragePooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
num_classes = 7 #angry, disgust, fear, happy, sad, surprise, neutral
batch_size = 256
epochs = 100
num_features = 32
with open("fer2013.csv") as f:
    content = f.readlines()
lines = np.array(content)
num_of_instances = lines.size
print("number of instances: ",num_of_instances)
print("instance length: ",len(lines[1].split(",")[1].split(" ")))
x_train, y_train, x_test, y_test = [], [], [], []
for i in range(1, num_of_instances):
    try:
        emotion, img, usage = lines[i].split(",")
        val = img.split(" ")
        pixels = np.array(val, 'float32')
        emotion = keras.utils.to_categorical(emotion, num_classes)
        if 'Training' in usage:
            y_train.append(emotion)
            x_train.append(pixels)
        elif 'PublicTest' in usage:
            y_test.append(emotion)
            x_test.append(pixels)
    except:
        print("Error found")
#data transformation for train and test sets
x_train = np.array(x_train, 'float32')
y_train = np.array(y_train, 'float32')
x_test = np.array(x_test, 'float32')
y_test = np.array(y_test, 'float32')
x_train /= 255 #normalize inputs between [0, 1]
x_test /= 255
x_train = x_train.reshape(x_train.shape[0], 48, 48, 1)
x_train = x_train.astype('float32')
x_test = x_test.reshape(x_test.shape[0], 48, 48, 1)
x_test = x_test.astype('float32')
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
model = Sequential()
#1st convolution layer
model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(48,48,1)))
model.add(MaxPooling2D(pool_size=(5,5), strides=(2, 2)))
model.add(Dropout(0.5))
#2nd convolution layer
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))
model.add(Dropout(0.5))
#3rd convolution layer
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
#fully connected neural networks
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
checkpoint = ModelCheckpoint("weights_new/emotionweights.h5",
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')
earlystop = EarlyStopping(monitor='val_loss',
                          min_delta=0,
                          verbose=1,
                          patience=9,
                          restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.2,
                              patience=3,
                              verbose=1,
                              min_delta=0.0001)
callbacks = [earlystop, checkpoint, reduce_lr]
from keras.optimizers import Adam
gen = ImageDataGenerator()
train_generator = gen.flow(x_train, y_train, batch_size=batch_size)
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.fit_generator(train_generator, steps_per_epoch=64, epochs=epochs, callbacks=callbacks, verbose=1, validation_data=(x_test, y_test))
fer_json = model.to_json()
with open("weights_new/json_model.json", "w") as json_file:
    json_file.write(fer_json)
With the code above I get roughly 57% accuracy on the test data.
Are there other architectures or training procedures that would help me reach 90% accuracy?
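One thing worth checking before reaching for a new architecture: the training call uses a plain ImageDataGenerator with steps_per_epoch=64, so each epoch sees only 64 x 256 samples and applies no augmentation. A minimal sketch of what that part could look like instead (the augmentation parameters are illustrative guesses, not values from the post, and they do not by themselves guarantee 90% accuracy):
gen = ImageDataGenerator(rotation_range=10,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         horizontal_flip=True)
train_generator = gen.flow(x_train, y_train, batch_size=batch_size)
# cover the whole training set once per epoch instead of a fixed 64 batches
model.fit_generator(train_generator,
                    steps_per_epoch=len(x_train) // batch_size,
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_data=(x_test, y_test),
                    verbose=1)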
I am using the Keras script at https://modal-python.readthedocs.io/en/latest/content/examples/Keras_integration.html to perform active learning for a binary classification task. After completing the active learning loop, how do we extract the image names and labels in the training set that gives the optimal test performance and write them to a CSV file?
from tensorflow import keras
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from modAL.models import ActiveLearner
from modAL.uncertainty import entropy_sampling
# build function for the Keras' scikit-learn API
def create_keras_model():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    return model
# create the classifier
classifier = KerasClassifier(create_keras_model)
# read training data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(10000, 28, 28, 1).astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
# assemble initial data
n_initial = 1000
initial_idx = np.random.choice(range(len(X_train)), size=n_initial, replace=False)
X_initial = X_train[initial_idx]
y_initial = y_train[initial_idx]
# generate the pool
# remove the initial data from the training dataset
X_pool = np.delete(X_train, initial_idx, axis=0)
y_pool = np.delete(y_train, initial_idx, axis=0)
"""
Training the ActiveLearner
"""
# initialize ActiveLearner
learner = ActiveLearner(
    estimator=classifier,
    X_training=X_initial, y_training=y_initial,
    verbose=1
)
# the active learning loop
n_queries = 100
for idx in range(n_queries):
    query_idx, query_instance = learner.query(X_pool, n_instances=100, verbose=0)
    print(query_idx)
    learner.teach(
        X=X_pool[query_idx], y=y_pool[query_idx], only_new=True,
        verbose=1
    )
    # remove queried instance from pool
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx, axis=0)
# the final accuracy score
print(learner.score(X_test, y_test, verbose=1))
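Regarding the CSV part of the question: MNIST samples have no file names, so the closest equivalent is each sample's original index in X_train. A minimal sketch of the query loop modified to track this, assuming the setup above; the names pool_original_idx and queried_rows are my own, not part of the modAL example:
import csv
# original training-set indices of the samples still in the pool
pool_original_idx = np.delete(np.arange(len(X_train)), initial_idx, axis=0)
# start with the seed set the learner was initialised on
queried_rows = [(int(i), int(np.argmax(y))) for i, y in zip(initial_idx, y_initial)]
for _ in range(n_queries):
    query_idx, _ = learner.query(X_pool, n_instances=100, verbose=0)
    learner.teach(X=X_pool[query_idx], y=y_pool[query_idx], only_new=True, verbose=1)
    # record the original index and integer label of every queried sample
    for i in query_idx:
        queried_rows.append((int(pool_original_idx[i]), int(np.argmax(y_pool[i]))))
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx, axis=0)
    pool_original_idx = np.delete(pool_original_idx, query_idx, axis=0)
with open("active_learning_training_set.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["original_index", "label"])
    writer.writerows(queried_rows)
To capture the training set that gives the best test performance specifically, you could also call learner.score(X_test, y_test) inside the loop and snapshot queried_rows whenever the score improves.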
Hi, I am using this code and getting an error:
ValueError: Data cardinality is ambiguous: x sizes: 150000, y sizes: 50000
Make sure all arrays contain the same number of samples.
I tried changing the reshape call and even numpy.transpose, but nothing worked. Can anyone help?
import numpy as np
import matplotlib.pyplot as plt  # needed for the plt.imshow calls below
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
(x_train, y_train) , (x_test, y_test) = datasets.cifar10.load_data()
#x_train.shape #(50000, 32, 32, 3)
#x_test.shape #(10000, 32, 32, 3)
x_train = x_train.reshape(-1, 32, 32, 1)
x_test = x_test.reshape(-1, 32, 32 ,1)
x_train = x_train.astype('float32') # change integers to 32-bit floating point numbers
x_test = x_test.astype('float32')
x_train /= 255.0
x_test /= 255.0
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.build(input_shape=(512,32,32,1))
model.summary()
model.fit(x_train, y_train, batch_size=1000, epochs=1)
score = model.evaluate(x_test, y_test)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
predictions = model.predict([x_test])
#print(predictions)
print(np.argmax(predictions[0]))
img_path = x_test[0]
print(img_path.shape)
if len(img_path.shape) == 3:
    plt.imshow(np.squeeze(img_path))
elif len(img_path.shape) == 2:
    plt.imshow(img_path)
else:
    print("Higher dimensional data")
There are some changes you would have to make. I will write an example for you:
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
(x_train, y_train) , (x_test, y_test) = datasets.cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255.0
x_test /= 255.0
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(32,32,3)))
model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, batch_size=32, epochs=1)
Changes:
You don't need to reshape x_train and x_test; they are already in the correct shape. Reshaping the 3-channel CIFAR-10 images to a single channel is what turns 50000 samples into 150000 and triggers the cardinality error.
It is always good to use tf.keras.layers.InputLayer instead of building the model later.
I haven't made that change here, but whenever possible you should build models by passing the layers to tf.keras.Sequential as a list (more readable, less error-prone); the Functional API (or your current approach) is for when you need a more complex architecture. A short sketch follows this list.
You can now grow the model (add more layers); I used only a few to keep the example short.
The input_shape passed to the InputLayer is conceptually (batch_size, img_width, img_height, img_channels), but the batch size can vary and defaults to None, so we only pass (img_width, img_height, img_channels). Since your images are 32 wide and 32 high with 3 channels, that is (32, 32, 3).
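For reference, a minimal sketch of the list-style tf.keras.Sequential construction mentioned above, using the same layers as the example:
import tensorflow as tf
model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
    tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])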
If it solved your issue then kindly upvote or give a green tick.
This is my model:
def make_model():
    model = Sequential()
    model.add(Conv2D(kernel_size=(3, 3), filters=16, input_shape=(32, 32, 1), padding='same'))
    model.add(LeakyReLU(0.1))
    model.add(Conv2D(kernel_size=(3, 3), filters=32, padding='same'))
    model.add(LeakyReLU(0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(kernel_size=(3, 3), filters=32, padding='same'))
    model.add(LeakyReLU(0.1))
    model.add(Conv2D(kernel_size=(3, 3), filters=64, padding='same'))
    model.add(LeakyReLU(0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(LeakyReLU(0.1))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    return model
Compile part:
INIT_LR = 5e-3 # initial learning rate
BATCH_SIZE = 32
EPOCHS = 10
from tensorflow.keras import backend as K
K.clear_session()
model = make_model()
model.compile(
    loss='categorical_crossentropy',  # we train 10-way classification
    optimizer=tf.keras.optimizers.Adamax(lr=INIT_LR),  # for SGD
    metrics=['accuracy']  # report accuracy during training
)
def lr_scheduler(epoch):
    return INIT_LR * 0.9 ** epoch

# callback for printing of actual learning rate used by optimizer
class LrHistory(keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs={}):
        print("Learning rate:", K.get_value(model.optimizer.lr))
Fitting:
model.fit(
    X_train.reshape(-1, 32, 32, 1), y_train,  # prepared data
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    callbacks=[keras.callbacks.LearningRateScheduler(lr_scheduler),
               LrHistory(),
               tfa.callbacks.TQDMProgressBar()],
    validation_data=(X_test, y_test),
    shuffle=True,
    verbose=0,
    initial_epoch=None or 0
)
My Data_trainX shape:
My Data_trainy shape:
My input shape is compatible with the model's Conv2D layer input shape.
I've looked at other questions about this and applied those solutions, but it didn't work.
Everything seems correct to me. What am I doing wrong?
While you are reshaping the training data X_train to fit the model specifications, you are not doing this with the validation data X_test.
Reshape X_test as well and it should work fine:
model.fit(
    X_train.reshape(-1, 32, 32, 1), y_train,
    ...
    validation_data=(X_test.reshape(-1, 32, 32, 1), y_test),  # <-- apply changes here
    ...
)
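Alternatively (a stylistic variation, not required by the fix), you can reshape both arrays once, before calling fit, so the shapes stay consistent everywhere:
X_train = X_train.reshape(-1, 32, 32, 1)
X_test = X_test.reshape(-1, 32, 32, 1)
model.fit(
    X_train, y_train,
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    validation_data=(X_test, y_test),
)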
I have altered the code taken from here for binary classification with a single output neuron.
import os
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import optimizers
from skimage import io
from skimage.transform import resize
from keras.utils import to_categorical
import numpy as np
import tensorflow as tf
import random
import glob
n_category_samples = 4000
batch_size = 32
num_classes = 2
epochs = 10
n_image_rows = 106
n_image_cols = 106
n_channels = 3
def train_selfie_model():
    random_seed = 1
    tf.random.set_seed(random_seed)
    np.random.seed(random_seed)
    x_train, y_train = prepare_train_set()
    x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.30, random_state=42)
    mean = np.array([0.5, 0.5, 0.5])
    std = np.array([1, 1, 1])
    x_train = x_train.astype('float')
    x_test = x_test.astype('float')
    for i in range(3):
        x_train[:, :, :, i] = (x_train[:, :, :, i] - mean[i]) / std[i]
        x_test[:, :, :, i] = (x_test[:, :, :, i] - mean[i]) / std[i]
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)
    model = compile_model()
    print(model.summary())
    print(y)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss: ', score[0])
    print('Test accuracy: ', score[1])
    model_path = os.getcwd() + "/models/saved/selfie-model/"
    model.save(model_path)
def prepare_train_set():
    positive_samples = glob.glob('datasets/drunk_resize_frontal_faces/pos/*')[0:n_category_samples]
    negative_samples = glob.glob('datasets/drunk_resize_frontal_faces/neg/*')[0:n_category_samples]
    negative_samples = random.sample(negative_samples, len(positive_samples))
    x_train = []
    y_train = []
    for i in range(len(positive_samples)):
        x_train.append(resize(io.imread(positive_samples[i]), (n_image_rows, n_image_cols)))
        y_train.append(1)
        if i % 1000 == 0:
            print('Reading positive image number ', i)
    for i in range(len(negative_samples)):
        x_train.append(resize(io.imread(negative_samples[i]), (n_image_rows, n_image_cols)))
        y_train.append(0)
        if i % 1000 == 0:
            print('Reading negative image number ', i)
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    return x_train, y_train
def compile_model():
    model_input_shape = (n_image_rows, n_image_cols, n_channels)
    model = Sequential()
    model.add(
        Conv2D(8, kernel_size=(3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=model_input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Dropout(0.25))
    model.add(Conv2D(16, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Dropout(0.25))
    model.add(Conv2D(16, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Conv2D(8, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Flatten())
    model.add(Dense(10, activation='relu'))
    # single output neuron
    model.add(Dense(1, activation='sigmoid'))
    sgd = optimizers.SGD(lr=.001, momentum=0.9, decay=0.000005, nesterov=False)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
But I am getting the following error when running train_selfie_model():
ValueError: logits and labels must have the same shape ((32, 1) vs (32, 2))
at
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))
I am new to TF and Keras. It seems to be an array dimension mismatch, but how can I fix it?
The problem is the following. At
def train_selfie_model():
    ...
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)
    ...
you set y_train and y_test to one-hot encodings (vectors of shape (2,)). But at
def compile_model():
    ...
    model.add(Dense(1, activation='sigmoid'))
    ...
you have only one output neuron (output of shape (1,)).
So commenting out or removing the following lines will fix your problem:
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
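An alternative sketch (my addition, not part of the original answer): keep the one-hot labels and instead give the model two softmax outputs trained with categorical_crossentropy, which also makes the shapes match. The change would go in compile_model():
# alternative: two softmax outputs match the one-hot (batch, 2) labels from to_categorical
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])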
To make a (32, 1) array appear as a (32, 2), you can construct a view:
arr = np.lib.stride_tricks.as_strided(arr, shape=(arr.shape[0], 2), strides=(arr.strides[0], 0))
You could take an even more manual approach:
arr = np.ndarray(shape=(arr.shape[0], 2), strides=(arr.strides[0], 0), dtype=arr.dtype, buffer=arr)
Both will view the same memory twice, so you should normally treat them as read-only. To copy the data instead, use any of the following:
arr = np.concatenate((arr,) * 2, axis=-1)
arr = np.repeat(arr, 2, axis=-1)
arr = np.tile(arr, [1, 2])
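A quick demonstration of what those views do (toy values, just to show that the memory is shared):
import numpy as np
arr = np.arange(4, dtype=np.float32).reshape(4, 1)
view = np.lib.stride_tricks.as_strided(arr, shape=(arr.shape[0], 2), strides=(arr.strides[0], 0))
print(view.shape)   # (4, 2)
print(view[1])      # [1. 1.] -- each row shows the single original element twice
arr[1, 0] = 7.0
print(view[1])      # [7. 7.] -- the view reflects changes to the original buffer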
I used this code to build my dataset in Keras, but when I run it I get this error:
ValueError: Error when checking input: expected conv2d_1_input to have 4 dimensions, but got array with shape (454, 512, 512)
I cannot solve it. Could you please tell me what the problem is? I expand the dimensions before feeding the network, but it does not work. Could you please answer quickly? I have been searching for several days without finding a solution, and I do not have much time:
import os,cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from keras import backend as K
#K.set_image_dim_ordering('th')
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam
#%%
PATH = os.getcwd()
# Define data path
data_path = r"E:\PhD\thesis\deepwatermark\databasetest\train"
data_dir_list = os.listdir(data_path)
img_rows=512
img_cols=512
num_channel=1
num_epoch=20
# Define the number of classes
num_classes = 7
labels_name={'CRP':0,'GF':1,'GN':2,'JPG':3,'MED':4,'ROT':5,'SP':6}
img_data_list=[]
labels_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loading the images of dataset-' + '{}\n'.format(dataset))
    label = labels_name[dataset]
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
        input_img_resize = cv2.resize(input_img, (512, 512))
        img_data_list.append(input_img_resize)
        labels_list.append(label)
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data /= 255
print (img_data.shape)
labels = np.array(labels_list)
# print the count of number of samples for different classes
print(np.unique(labels,return_counts=True))
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
img_data = np.expand_dims(img_data, axis=4)
print (img_data.shape)
# Defining the model
input_shape=img_data[0].shape
model = Sequential()
model.add(Convolution2D(32, 3,3,border_mode='same',input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
#model.add(Convolution2D(64, 3, 3))
#model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=["accuracy"])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=["accuracy"])
# Viewing model_configuration
model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
np.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable
#%%
# Training
hist = model.fit(X_train, y_train, batch_size=16, nb_epoch=num_epoch, verbose=1, validation_data=(X_test, y_test))
My new code with a generator is below. Do you see any problem? My dataset is the same as before.
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from keras import backend as K
#K.set_image_dim_ordering('th')
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
#
valid_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
#
test_datagen = ImageDataGenerator(rescale=1./255)
#
train_generator = train_datagen.flow_from_directory(
    directory=r"E:\databasetest\train",
    target_size=(512, 512),
    color_mode="grayscale",
    batch_size=32,
    class_mode="categorical",
    shuffle=True,
    seed=42
)
#
valid_generator = valid_datagen.flow_from_directory(
    directory=r"E:\databasetest\validation",
    target_size=(512, 512),
    color_mode="grayscale",
    batch_size=32,
    class_mode="categorical",
    shuffle=True,
    seed=42
)
#
test_generator = test_datagen.flow_from_directory(
    directory=r"E:\databasetest\test",
    target_size=(512, 512),
    color_mode="grayscale",
    batch_size=16,
    class_mode=None,
    shuffle=False,
    seed=42
)
#
## neural network model
model = Sequential()
model.add(Conv2D(32, (3,3),border_mode='same', input_shape = (512, 512, 1), activation = 'relu'))
model.add(Activation('relu'))
model.add(Conv2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, 3, 3))
model.add(Activation('relu'))
#model.add(Convolution2D(64, 3, 3))
#model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(7))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
model.fit_generator(generator=train_generator,
                    steps_per_epoch=STEP_SIZE_TRAIN,
                    validation_data=valid_generator,
                    validation_steps=STEP_SIZE_VALID,
                    epochs=10)
But when I run it, I get this error again:
ResourceExhaustedError: OOM when allocating tensor with shape[32,32,512,512] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[Node: conv2d_1/convolution = Conv2D[T=DT_FLOAT, data_format="NCHW", dilations=[1, 1, 1, 1], padding="SAME", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/device:GPU:0"](conv2d_1/convolution-0-TransposeNHWCToNCHW-LayoutOptimizer, conv2d_1/kernel/read)]]
for img in img_list:
    input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
    input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
    input_img_resize = cv2.resize(input_img, (512, 512))
    input_img_resize = np.expand_dims(input_img_resize, axis=-1)  # ---> add this line
    img_data_list.append(input_img_resize)
    labels_list.append(label)
This will make every image 512x512x1, so you end up with an array of shape (454, 512, 512, 1), which should do the trick. Are you sure you want to use grayscale images, though?
Another thing is this snippet of code:
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
img_data = np.expand_dims(img_data, axis=4)
print(img_data.shape)
You add another dimension to img_data only after you have already created X_train and the other splits, and in the end you feed X_train, which was never expanded, hence the error. If you expand the dimensions at the beginning and remove the expansion at the end, your code should work.
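A minimal sketch of that ordering, using the same variables as your script (expand img_data before the shuffle and split so X_train inherits the channel axis):
img_data = np.expand_dims(img_data, axis=-1)   # (454, 512, 512) -> (454, 512, 512, 1)
print(img_data.shape)
# shuffle and split after the channel axis exists, so X_train is 4-D
x, y = shuffle(img_data, Y, random_state=2)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
input_shape = img_data[0].shape                # (512, 512, 1)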
EDIT: OOM
I recommend creating a separate question for the OOM problem so more people see it. Likely culprits are the image size and the batch size. Reduce the image size to 64 x 64 and change the batch size to 5. If that still raises the error, also try removing this dense layer:
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
If these reductions still cause the error, then I have the following questions: are you running on GPU or CPU, and which one?
Just to repeat myself: the code is fine; it probably just needs a few changes.
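A minimal sketch of those reductions applied to the generator and the first layer (the 64 x 64 size and batch size of 5 are the values suggested above; they are starting points, not guaranteed to fit on every GPU):
train_generator = train_datagen.flow_from_directory(
    directory=r"E:\databasetest\train",
    target_size=(64, 64),      # smaller images -> much smaller activation tensors
    color_mode="grayscale",
    batch_size=5,              # reduced from 32
    class_mode="categorical",
    shuffle=True,
    seed=42
)
# the first layer's input_shape has to follow the new target_size
model.add(Conv2D(32, (3, 3), padding='same', input_shape=(64, 64, 1), activation='relu'))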