Keras metric does not provide same result as metric calculated in callback - python

I'm trying to do regression using a pretrained VGG16 network. As both loss and metric I have chosen the mean absolute error. I wanted to check whether this score is actually correct, so I implemented the mean absolute error myself in a callback. However, the results are not the same, as can be seen in the output:
Training MAE:126.649451276
Epoch 1/100
638/638 [==============================] - 406s - loss: 38.9601 - mean_absolute_error: 38.9601
Training MAE:40.7683742351
Epoch 2/100
638/638 [==============================] - 362s - loss: 19.8719 - mean_absolute_error: 19.8719
Training MAE:43.2516028945
The Training MAE should be the same (or at least almost the same) as the loss or the mean_absolute_error of the epoch above. For the first epoch this holds; for the second it does not: there the callback reports an MAE of 43.25, while the loss and the mean_absolute_error reported by Keras are both 19.87.
I've cleaned up my code and tried to find the reason, but I can't. Why is this happening?
My code:
from keras.layers.core import Flatten, Dense, Dropout
import numpy as np
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16
from keras import optimizers
from keras.models import Model
import os
from keras.layers.core import *
from keras.callbacks import Callback, ModelCheckpoint

os.environ["CUDA_VISIBLE_DEVICES"] = "2"

model_checkpoints = "/home/usr/PycharmProjects/RSNA/model_checkpoints/model2.hdf5"
data_dir = "/home/usr/PycharmProjects/RSNA/data/"
data_training = "dataset/training"
training_images = "boneage-training-dataset/"
training_gt = "training_gt/"

n_batch = 16
n_training_samples = 10213
n_validation_samples = 1136
n_testing_samples = 1262

def mae(X, y, mdl):
    pred = mdl.predict(X)
    gt = y
    return str(np.mean(np.abs(np.array(gt) - np.array(pred))))

class LossHistory(Callback):
    def on_epoch_begin(self, epoch, logs={}):
        mae_score = mae(X_train, y_train, self.model)
        print "Training MAE:" + mae_score

def regression_flow_from_directory(flow_from_directory_gen, rev_indices):
    for x, y in flow_from_directory_gen:
        yield x, [float(rev_indices[val]) for val in y]

if __name__ == '__main__':
    width = 224
    height = 224
    X_train = []
    y_train = []
    train_datagen = image.ImageDataGenerator(
        rescale=1./255,
        width_shift_range=0.2,
        height_shift_range=0.2,
    )
    train_generator = train_datagen.flow_from_directory(
        data_dir + data_training,
        target_size=(width, height),
        batch_size=n_batch,
        color_mode='rgb',
        class_mode='sparse',
        seed=42)
    indices = train_generator.class_indices
    rev_indices = dict((v, k) for k, v in indices.iteritems())
    train_generator = regression_flow_from_directory(train_generator, rev_indices)

    i = 0
    print "Epochs: " + str(n_training_samples//n_batch)
    for x, y in train_generator:
        if i <= n_training_samples//n_batch:
            X_train.extend(x)
            y_train.extend(y)
            i += 1
        else:
            break
    print "Maximum: " + str(np.max(y_train))
    X_train = np.array(X_train)
    print X_train.shape

    model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    last = model.output
    x = Flatten(name='flatten')(last)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dropout(0.5, noise_shape=None, seed=None)(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1, activation='relu', name='predictions')(x)
    my_model = Model(input=model.input, output=x)
    my_model.compile(loss="mae", optimizer=optimizers.SGD(lr=0.00001, momentum=0.9),
                     metrics=["mae"])

    history = LossHistory()
    print my_model.summary()
    print n_validation_samples//n_batch
    my_model.fit_generator(
        train_generator,
        steps_per_epoch=n_training_samples//n_batch,
        epochs=100,
        callbacks=[history],
    )
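For reference, here is a minimal sketch (not part of the original script) of how the comparison could be made against the same weights Keras has just reported on, by moving the check to on_epoch_end; mae, X_train and y_train are the names defined above. Note that the loss/metric Keras prints for an epoch is a running average over that epoch's batches, computed while the weights are still changing and with Dropout active, so even this comparison will only be approximate.
# Hypothetical variant of the LossHistory callback above: evaluate on the
# cached X_train / y_train at the *end* of each epoch, so the printed value
# refers to the same weights as the epoch line Keras just logged.
class EpochEndMAE(Callback):
    def on_epoch_end(self, epoch, logs={}):
        mae_score = mae(X_train, y_train, self.model)
        print("Epoch {} end MAE: {}".format(epoch + 1, mae_score))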

Related

Model Accuracy is High but Val_Accuracy is low

I'm trying to improve my validation accuracy, which is very low. I have tried changing the batch_size and the number of images used for validation and training, and added extra dense layers, but none of that has worked. The dataset I'm using had not yet been split into training and validation sets, which I have done using partitioning. The sample counts are given below; I have tried to increase VALIDATION_SAMPLES, but when I do, my cluster keeps crashing.
TRAINING_SAMPLES = 10000
VALIDATION_SAMPLES = 2000
TEST_SAMPLES = 2000
IMG_WIDTH = 178
IMG_HEIGHT = 218
BATCH_SIZE = 32
NUM_EPOCHS = 20
def generate_df(partition, attr, num_samples):
    df_ = df_par_attr[(df_par_attr['partition'] == partition)
                      & (df_par_attr[attr] == 0)].sample(int(num_samples/2))
    df_ = pd.concat([df_,
                     df_par_attr[(df_par_attr['partition'] == partition)
                                 & (df_par_attr[attr] == 1)].sample(int(num_samples/2))])
    # for Training and Validation
    if partition != 2:
        x_ = np.array([load_reshape_img(images_folder + fname) for fname in df_.index])
        x_ = x_.reshape(x_.shape[0], 218, 178, 3)
        y_ = np_utils.to_categorical(df_[attr], 2)
    # for Test
    else:
        x_ = []
        y_ = []
        for index, target in df_.iterrows():
            im = cv2.imread(images_folder + index)
            im = cv2.resize(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), (IMG_WIDTH, IMG_HEIGHT)).astype(np.float32) / 255.0
            im = np.expand_dims(im, axis=0)
            x_.append(im)
            y_.append(target[attr])
    return x_, y_
My training model is built after the partitioning, as you can see below.
# Train data
x_train, y_train = generate_df(0, 'Male', TRAINING_SAMPLES)
# Train - Data Preparation - Data Augmentation with generators
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
train_datagen.fit(x_train)
train_generator = train_datagen.flow(
    x_train, y_train,
    batch_size=BATCH_SIZE,
)
The same also goes for the validation
# Validation Data
x_valid, y_valid = generate_df(1, 'Male', VALIDATION_SAMPLES)
# Validation - Data Preparation - Data Augmentation with generators
valid_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
)
valid_datagen.fit(x_valid)
validation_generator = valid_datagen.flow(
    x_valid, y_valid,
)
I tried playing around with the layers but was told that it wouldn't really affect the val_accuracy.
x = inc_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(256, activation="relu")(x)
predictions = Dense(2, activation="softmax")(x)
I tried using the 'adam' optimizer but it made no difference when compared to sgd
model_.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
               loss='categorical_crossentropy',
               metrics=['accuracy'])
hist = model_.fit_generator(train_generator,
                            validation_data=(x_valid, y_valid),
                            steps_per_epoch=TRAINING_SAMPLES/BATCH_SIZE,
                            epochs=NUM_EPOCHS,
                            callbacks=[checkpointer],
                            verbose=1,
                            )
Whoever told you that modifying the model won't affect validation accuracy is, in most cases, dead wrong. The problem with your model is that it is not deep enough to extract the features of the images. Below is the code I have used on hundreds of models; it has proved very effective at achieving low training and validation loss and avoiding overfitting.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, BatchNormalization, Flatten
from tensorflow.keras.optimizers import Adam, Adamax
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras import regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model, load_model

def make_model(img_size, class_count, lr=.001, trainable=True):
    img_shape = (img_size[0], img_size[1], 3)
    model_name = 'EfficientNetB3'
    base_model = tf.keras.applications.efficientnet.EfficientNetB3(include_top=False, weights="imagenet",
                                                                   input_shape=img_shape, pooling='max')
    base_model.trainable = trainable
    x = base_model.output
    x = keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
    x = Dense(256, kernel_regularizer=regularizers.l2(l=0.016), activity_regularizer=regularizers.l1(0.006),
              bias_regularizer=regularizers.l1(0.006), activation='relu')(x)
    x = Dropout(rate=.45, seed=123)(x)
    output = Dense(class_count, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=output)
    model.compile(Adamax(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    return model, base_model  # return the base_model so the callback can control its training state
TRAINING_SAMPLES = 10000
VALIDATION_SAMPLES = 2000
TEST_SAMPLES = 2000
IMG_WIDTH = 178
IMG_HEIGHT = 218
BATCH_SIZE = 32
NUM_EPOCHS = 20
img_size=(IMG_HEIGHT,IMG_WIDTH)
class_count=2
model, base_model=make_model(img_size, class_count, lr=.001, trainable=True)
I also recommend that you use two Keras callbacks. One, ReduceLROnPlateau, controls the learning rate; the other, EarlyStopping, stops training early and restores the weights from the epoch with the lowest validation loss. See the Keras documentation for both.
My recommended code for these callbacks is shown below
rlronp=tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=2,verbose=1)
estop=tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=4, verbose=1,restore_best_weights=True)
callbacks=[rlronp, estop]
Put the above code before calling model.fit, and in model.fit set the parameter callbacks=callbacks.
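A hedged example of how that call might look with the generators from the question (train_generator, validation_generator and NUM_EPOCHS are taken from the code above; this is a sketch, not the exact original training call):
# Sketch: pass the callbacks list defined above into model.fit.
# model comes from make_model; the generators come from the question.
history = model.fit(train_generator,
                    validation_data=validation_generator,
                    epochs=NUM_EPOCHS,
                    callbacks=callbacks,
                    verbose=1)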

Keras layers and neurons grid search

I made a custom loss function for cost-sensitive learning, and to find the optimal parameters for the neural network I experimented with different settings (layers, batch size, epochs).
The result was best when the network had only an input and an output layer.
I'm curious about this result.
Is it acceptable? If so, could you please tell me the reason?
Does it occur because the dataset is already normalized?
Here's the code.
Thank you in advance.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import math
import numpy as np
import tensorflow as tf
from keras.layers import Dense
from keras.callbacks import History
from keras.models import Sequential
import keras.backend as K
# Define dataset
X, y = make_classification(n_samples=150000, n_features=10, n_informative=4,
                           n_redundant=4, n_repeated=2, n_classes=2, n_clusters_per_class=3,
                           class_sep=0.5, weights=[0.9, 0.1], random_state=27)
X_train, X_true, y_train, y_true = train_test_split(X, y, test_size=0.33, random_state=42)
# Define Function
def custom_loss_wrapper(p):
    def custom_loss(y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)
        neg_y_true = 1 - y_true
        neg_y_pred = 1 - y_pred
        fp = K.sum(y_pred * (1 - y_true))
        fn = K.sum((1 - y_pred) * y_true)
        cost = tf.cast(fn * p + fp, tf.float32)
        return cost
    return custom_loss

def FindLayerNodesLinear(n_layers, first_layer_nodes, last_layer_nodes):
    layers = []
    nodes_increment = (last_layer_nodes - first_layer_nodes) / (n_layers - 1)
    nodes = first_layer_nodes
    for i in range(1, n_layers + 1):
        layers.append(math.ceil(nodes))
        nodes = nodes + nodes_increment
    return layers
def createmodel_for_grid_search(n_layers, first_layer_nodes, last_layer_nodes):
    p = 5
    model = Sequential()
    n_nodes = FindLayerNodesLinear(n_layers, first_layer_nodes, last_layer_nodes)
    for i in range(1, n_layers):
        if i == 1:
            model.add(Dense(first_layer_nodes, input_dim=X_train.shape[1], activation='relu'))
        else:
            model.add(Dense(n_nodes[i-1], activation='sigmoid'))
    # Finally, the output layer should have a single node in binary classification
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss=custom_loss_wrapper(p))
    return model
# Grid search by hand
batch_sizes = [16, 32, 64, 128]
epochs = [10, 20, 30, 50, 100]
b = 0
loss_ = []
for a in range(2, 6):
    for i in range(len(batch_sizes)):
        batch_size_ = batch_sizes[i]
        for j in range(len(epochs)):
            epochs_ = epochs[j]
            loss = np.round(hist_list[b][-1], 3)
            loss_.append(loss)
            print('layers: {}, batch size: {}, epoch: {}, and loss: {}'.format(a, batch_size_, epochs_, loss))
            b += 1
    print('')
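Note that hist_list is not defined in the snippet above; a minimal sketch of how it might be collected (an assumption about the intended workflow, with made-up node counts for the hidden layers) is:
# Hypothetical loop to populate hist_list before the reporting loop above.
# Uses createmodel_for_grid_search and X_train/y_train from the question;
# first_layer_nodes/last_layer_nodes are placeholder values.
hist_list = []
for n_layers in range(2, 6):
    for batch_size_ in batch_sizes:
        for epochs_ in epochs:
            model = createmodel_for_grid_search(n_layers, first_layer_nodes=32, last_layer_nodes=4)
            hist = model.fit(X_train, y_train,
                             batch_size=batch_size_,
                             epochs=epochs_,
                             verbose=0)
            hist_list.append(hist.history['loss'])  # per-epoch training losses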

Problem with custom layer in Tensorflow (not calling)

I'm trying to implement the Large Margin Cosine Loss in Tensorflow. I've found the following class that does it:
import math
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras.initializers import Constant
from tensorflow.python.keras.utils import tf_utils

def _resolve_training(layer, training):
    if training is None:
        training = K.learning_phase()
    if isinstance(training, int):
        training = bool(training)
    if not layer.trainable:
        # When the layer is not trainable, override the value
        training = False
    return tf_utils.constant_value(training)

class CosFace(keras.layers.Layer):
    """
    Implementation of CosFace layer. Reference: https://arxiv.org/abs/1801.09414
    Arguments:
      num_classes: number of classes to classify
      s: scale factor
      m: margin
      regularizer: weights regularizer
    """
    def __init__(self,
                 num_classes,
                 s=30.0,
                 m=0.35,
                 regularizer=None,
                 name='cosface',
                 **kwargs):
        super().__init__(name=name, **kwargs)
        self._n_classes = num_classes
        self._s = float(s)
        self._m = float(m)
        self._regularizer = regularizer

    def build(self, input_shape):
        embedding_shape, label_shape = input_shape
        self._w = self.add_weight(shape=(embedding_shape[-1], self._n_classes),
                                  initializer='glorot_uniform',
                                  trainable=True,
                                  regularizer=self._regularizer)

    def call(self, inputs, training=None):
        """
        During training, requires 2 inputs: embedding (after backbone+pool+dense),
        and ground truth labels. The labels should be sparse (and use
        sparse_categorical_crossentropy as loss).
        """
        print('calling CosFace Layer...')
        embedding, label = inputs
        # Squeezing is necessary for Keras. It expands the dimension to (n, 1)
        label = tf.reshape(int(label), [-1], name='label_shape_correction')
        # Normalize features and weights and compute dot product
        x = tf.nn.l2_normalize(embedding, axis=1, name='normalize_prelogits')
        w = tf.nn.l2_normalize(self._w, axis=0, name='normalize_weights')
        cosine_sim = tf.matmul(x, w, name='cosine_similarity')
        training = _resolve_training(self, training)
        if not training:
            # We don't have labels if we're not in training mode
            return self._s * cosine_sim
        else:
            one_hot_labels = tf.one_hot(label,
                                        depth=self._n_classes,
                                        name='one_hot_labels')
            theta = tf.math.acos(K.clip(
                cosine_sim, -1.0 + K.epsilon(), 1.0 - K.epsilon()))
            final_theta = tf.where(tf.cast(one_hot_labels, dtype=tf.bool),
                                   tf.math.cos(theta) - self._m,
                                   tf.math.cos(theta),
                                   name='final_theta')
            print(final_theta)
            output = tf.math.cos(final_theta, name='cosine_sim_with_margin')
            return self._s * output
I'm testing it on a simple CNN trained on the MNIST dataset. However, the training doesn't work. Here is the network architecture:
label = keras.layers.Input((), name="input/labels")
input = keras.layers.Input(shape=[28,28,1], name="input/image")
margin = CosFace(num_classes=10, dtype='float32')
x = keras.layers.Conv2D(64, (3,3), padding="same")(input)
x = keras.layers.Activation("relu")(x)
x = keras.layers.MaxPooling2D((2,2))(x)
x = keras.layers.Conv2D(32, (3,3), padding="same")(x)
x = keras.layers.Activation("relu")(x)
x = keras.layers.MaxPooling2D(pool_size=(2,2))(x)
x = keras.layers.Conv2D(16, (3,3), padding="same")(x)
x = keras.layers.Activation("relu")(x)
x = keras.layers.MaxPooling2D(pool_size=(2,2))(x)
x = keras.layers.Dropout(0.25)(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(128)(x)
x = keras.layers.Activation("relu", name="dense")(x)
x = keras.layers.Dropout(0.25)(x)
x = margin([x, label])
output = keras.layers.Activation("softmax")(x)
model_cos = keras.Model(inputs=[input, label], outputs=output)
model_cos.compile(loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
H_cos = model_cos.fit((X_train, y_train), y_train, batch_size=64, epochs=3, verbose=1)
And this is the output:
Epoch 1/3
calling CosFace Layer...
Tensor("functional_11/cosface/final_theta:0", shape=(None, 10), dtype=float32)
calling CosFace Layer...
Tensor("functional_11/cosface/final_theta:0", shape=(None, 10), dtype=float32)
860/860 [==============================] - 7s 8ms/step - loss: 0.3194 - accuracy: 0.9751
Epoch 2/3
860/860 [==============================] - 6s 7ms/step - loss: 0.0545 - accuracy: 1.0000
Epoch 3/3
860/860 [==============================] - 6s 7ms/step - loss: 0.0368 - accuracy: 1.0000
I don't understand what's going on: first of all, the real accuracy isn't 1; second, from the second epoch on it looks like the CosFace layer is not called anymore.
Do you have any idea how to fix this?
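One thing worth checking before concluding that the layer stops being called: inside Keras' compiled train step, a plain Python print only runs while the function is being traced (typically during the first epoch), whereas tf.print runs on every batch. A small, self-contained demonstration of that behaviour (not specific to CosFace):
import tensorflow as tf

# Python print fires only during tracing; tf.print fires on every call.
@tf.function
def step(x):
    print("python print: runs only while tracing")
    tf.print("tf.print: runs on every call")
    return x * 2

for i in range(3):
    step(tf.constant(float(i)))
# "python print" appears once; "tf.print" appears three times.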

Keras model with high accuracy but low val_acc

I am using ResNet50 transfer learning on the Oxford-IIIT Pet Dataset to classify 37 breeds of cats and dogs. The idea is to follow the fastai implementation closely using Keras code. However, while I managed to get a training accuracy as high as 90%, I can't seem to raise my val_accuracy above a random guess (1/37, or ~3% val_acc).
Any idea how Keras computes the validation accuracy, and how I can improve it? Or is there something wrong with my preprocessing steps? Thanks a lot.
To get my validation set, I use sklearn's StratifiedShuffleSplit to obtain a balanced split.
# Create dataframe with labels and filenames
annotations = pd.read_csv("annotation/list.txt",header=None,delim_whitespace=True)
annotations.drop([1,2,3],axis=1, inplace=True)
annotations.columns = ["filenames"]
# Create label columns
trans = str.maketrans("_0123456789"," ")
annotations["labels"] = annotations["filenames"].str.translate(trans).str.strip()
annotations["filenames"] = annotations["filenames"] +".jpg"
# Creating a validation set
from sklearn.model_selection import StratifiedShuffleSplit
df_array = annotations.to_numpy(copy=True)
sss = StratifiedShuffleSplit(n_splits = 1, test_size=0.2)
valid_idx = [test for _,test in sss.split(df_array[:,0],df_array[:,1])]
validation = annotations.iloc[valid_idx[0]]
annotations.drop(valid_idx[0], inplace=True)
Then, constructing my generator and training my model.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
bs = 64
def normalize(x):
    imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 3)
    imagenet_sd = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 3)
    return (x - imagenet_mean) / imagenet_sd

train_datagen = ImageDataGenerator(rescale=1/255.,
                                   horizontal_flip=True,
                                   rotation_range=10,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   brightness_range=(0.9, 1.1),
                                   shear_range=0.1,
                                   preprocessing_function=normalize)
train_generator = train_datagen.flow_from_dataframe(dataframe=annotations,
                                                    directory=os.getcwd(),
                                                    x_col="filenames",
                                                    y_col="labels",
                                                    target_size=(224, 224),
                                                    batch_size=bs,
                                                    )
val_datagen = ImageDataGenerator(rescale=1/255.,
                                 preprocessing_function=normalize)
validation_generator = val_datagen.flow_from_dataframe(dataframe=validation,
                                                       directory=os.getcwd(),
                                                       x_col="filenames",
                                                       y_col="labels",
                                                       target_size=(224, 224),
                                                       batch_size=bs,
                                                       )
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras import optimizers
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Flatten, BatchNormalization, Dropout
base_model = ResNet50(include_top=False,weights="imagenet")
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = BatchNormalization(epsilon=1e-05,momentum=0.1)(x)
x = Dropout(0.25)(x)
x = Dense(512,activation="relu")(x)
x = BatchNormalization(epsilon=1e-05,momentum=0.1)(x)
x = Dropout(0.5)(x)
predictions = Dense(37,activation="softmax")(x)
model = Model(inputs=base_model.input,outputs=predictions)
for layer in base_model.layers:
    layer.trainable = False

lr = 0.001
opti = optimizers.Adam(lr=lr, decay=lr/50)
model.compile(optimizer=opti,
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.fit_generator(train_generator,
                    epochs=10,
                    validation_data=validation_generator)

for layer in base_model.layers:
    layer.trainable = True

model.fit_generator(train_generator,
                    epochs=10,
                    validation_data=validation_generator)
After the first 10 epochs, before unfreezing my layers, I get:
loss = 0.2189
acc = 0.9255
val_loss = 5.5082
val_acc = 0.0401
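On the "how does Keras compute the validation accuracy" part: it is the same accuracy metric, computed on the validation data with the model in inference mode (Dropout off, BatchNormalization using its moving statistics). A hedged way to reproduce the number by hand, assuming the model and validation_generator defined above and a TF 2.x Keras where model.evaluate accepts a generator:
# Hypothetical sanity check: evaluate on the same validation generator
# that fit_generator used; this should roughly match the logged val_acc.
val_loss, val_acc = model.evaluate(validation_generator, verbose=0)
print("manual val_acc:", val_acc)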

How can I correct the dimension error I keep getting when I train RNN using keras library?

I want to build a 40-class LSTM classifier to analyze time-series data. I have 13-dimensional real-time data collected from 13 sensors. When I run the code below I keep getting this error message:
ValueError: Error when checking model input: the list of Numpy arrays
that you are passing to your model is not the size the model expected.
Expected to see 1 arrays but instead got the following list of 241458
arrays: [array([[ 0.64817517, 0.12892013, 0.01879949, 0.00946322,
0.00458952,
0.01668651, 0.04776124, 0.03301365, 0.0360659 , 0.15013408,
0.10112171, 0.05494366, 0.02620634],
RNN code
from __future__ import print_function
import keras
from keras import metrics
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
from sklearn.cross_validation import train_test_split
import pandas as pd
from keras.callbacks import CSVLogger
from keras.models import load_model
from keras.layers import LSTM
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
import keras
def top_k_acc(y_true, y_pred):
    return metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)
# train Parameters
sequence_length = 60
data_dim = 13
num_classes = 40
batch_size = 15000
epochs = 10
# tf.set_random_seed(777) # reproducibility
def MinMaxScaler(data):
    ''' Min Max Normalization
    Parameters
    ----------
    data : numpy.ndarray
        input data to be normalized
        shape: [Batch size, dimension]
    Returns
    ----------
    data : numpy.ndarray
        normalized data
        shape: [Batch size, dimension]
    References
    ----------
    .. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
    '''
    numerator = data - np.min(data, 0)
    denominator = np.max(data, 0) - np.min(data, 0)
    # noise term prevents the zero division
    return numerator / (denominator + 1e-7)
# Load data
xy = np.loadtxt('sc_total_for 60s v4.0 test.csv', delimiter=',', skiprows=1)
x = xy[:, 1:14]
x = MinMaxScaler(x)
y = xy[:,0]
# Build a dataset
x_data = []
y_data = []
for i in range(0, len(y) - sequence_length):
    _x = x[i:i + sequence_length]
    _y = y[i + sequence_length]
    # print(_x, "->", _y)
    x_data.append(_x)
    y_data.append(_y)
# One-hot encoding
encoder = LabelEncoder()
encoder.fit(y_data)
encoded_Y = encoder.transform(y_data)
dummy_y = np_utils.to_categorical(encoded_Y)
#train/test split
x_train,x_test,y_train,y_test=train_test_split(x_data,dummy_y,random_state=4,test_size=0.3);
# print(x_train[0],"->",y_train[0])
# Network
model = Sequential()
model.add(LSTM(40, batch_input_shape=(batch_size, sequence_length, data_dim),return_sequences=True))
model.add(LSTM(40, return_sequences=False))
model.add(Dense(40))
model.add(Activation("linear"))
# model.add(Dense(40))
# model.add(Dense(25, init='uniform', activation='relu'))
# model.add(BatchNormalization())
# model.add(Dense(30, init='uniform', activation='relu'))
# model.add(BatchNormalization())
# model.add(Dense(40, init='uniform', activation='softmax'))
model.summary()
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])
csv_logger = CSVLogger('LSTM 1111.log')
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test),
                    callbacks=[csv_logger])
score = model.evaluate(x_test, y_test, verbose=0)
predictions=model.predict(x_test)
# model.save('New Model6 save.h5')
#plot_model(model, to_file='model1.png')
# print('Test loss:', score[0])
# print('Test accuracy:', score[1])
The issue is:
# Build a dataset
x_data = []
y_data = []
for i in range(0, len(y) - sequence_length):
    _x = x[i:i + sequence_length]
    _y = y[i + sequence_length]
    # print(_x, "->", _y)
    x_data.append(_x)
    y_data.append(_y)
You're building a list of 2d numpy arrays for x_data when Keras expects a single, three-dimensional array for LSTM. Do this instead:
num_samples = len(y) - sequence_length
x_data = np.zeros((num_samples, sequence_length, data_dim))
y_data = np.zeros((num_samples))
for i in range(num_samples):
    x_data[i] = x[i:i + sequence_length]
    y_data[i] = y[i + sequence_length]
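Equivalently (an alternative to the answer's preallocation, under the same assumptions about the data), the original list-building loop can be kept and the lists stacked afterwards:
import numpy as np

# x_data / y_data built as lists of windows, as in the question; stacking
# them gives the (num_samples, sequence_length, data_dim) array LSTM expects.
x_data = np.array(x_data)   # shape: (num_samples, sequence_length, data_dim)
y_data = np.array(y_data)   # shape: (num_samples,)
print(x_data.shape, y_data.shape)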
