import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras import backend as K

def create_model():
    model = Sequential()
    model.add(LSTM(50, return_sequences=True, input_shape=(40002, 12)))
    model.add(LSTM(50, return_sequences=True))
    model.add(LSTM(50, return_sequences=True))
    model.add(tf.keras.layers.LSTM(30))
    model.add(Dense(2, activation='linear'))

    def rmse(Y_test, prediction):
        return K.sqrt(K.mean(K.square(Y_test - prediction)))

    # compile
    model.compile(optimizer='adam', loss=rmse, metrics=['mean_squared_error', rmse])
    return model

# fit the model
model = create_model()
model.fit(x_train, Y_train, shuffle=False, verbose=1, epochs=10)

# predict
prediction = model.predict(x_test, verbose=0)
print(prediction)
How do I calculate the mean relative error for tensor inputs, i.e. when my Y_test and prediction are tensors?
Y_test and prediction each hold 2 values per sample.
Example:
Y_test = [[0.2, 0.003],
          [0.3, 0.008]]
prediction = [[0.4, 0.005],
              [0.5, 0.007]]
mean_relative_error = [mean(absolute(0.2-0.4)/0.2 + absolute(0.003-0.005)/0.003),
                       mean(absolute(0.3-0.5)/0.3 + absolute(0.008-0.007)/0.008)]
mean_relative_error = [0.533, 0.3925]
Please note that I don't want to use it for backpropagation to improve the network.
I would have done it like this:
import tensorflow as tf

# per-sample relative error: sum of |error| / prediction over the two outputs
relative_error = tf.reduce_sum(tf.abs(prediction - Y_test) / prediction, axis=1)
# [0.9, 0.54285717]
mean_relative_error = tf.reduce_mean(relative_error)
# 0.7214286
I couldn't use tf.keras.losses.MeanAbsoluteError(reduction=tf.keras.losses.Reduction.NONE) because of a bug: MeanAbsoluteError still reduces to the mean despite being told not to. The bug is reported HERE.
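For completeness, here is a minimal sketch (not from the original post) of the same computation wrapped in a standalone function and evaluated on the example values; the function name is illustrative:

import tensorflow as tf

def mean_relative_error(y_true, y_pred):
    # per-sample sum of |error| / prediction, then averaged over the batch
    per_sample = tf.reduce_sum(tf.abs(y_pred - y_true) / y_pred, axis=1)
    return tf.reduce_mean(per_sample)

y_true = tf.constant([[0.2, 0.003], [0.3, 0.008]])
y_pred = tf.constant([[0.4, 0.005], [0.5, 0.007]])
print(mean_relative_error(y_true, y_pred).numpy())  # ~0.7214286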
I am doing classification machine learning with input of shape (batch, step, features).
In order to improve the accuracy of this model, I intended to apply a self-attention layer to it.
I am unfamiliar with how to use it for my case, since most examples online are concerned with NLP embedding models.
def opt_select(optimizer):
    if optimizer == 'Adam':
        adamopt = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        return adamopt
    elif optimizer == 'RMS':
        RMSopt = tf.keras.optimizers.RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-6)
        return RMSopt
    else:
        print('undefined optimizer')
def LSTM_attention_model(X_train, y_train, X_test, y_test, num_classes, loss, batch_size=68, units=128,
                         learning_rate=0.005, epochs=20, dropout=0.2, recurrent_dropout=0.2, optimizer='Adam'):
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if logs.get('acc') > 0.90:
                print("\nReached 90% accuracy so cancelling training!")
                self.model.stop_training = True

    callbacks = myCallback()

    model = tf.keras.models.Sequential()
    model.add(Masking(mask_value=0.0, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout)))
    model.add(SeqSelfAttention(attention_activation='sigmoid'))
    model.add(Dense(num_classes, activation='softmax'))

    opt = opt_select(optimizer)
    model.compile(loss=loss,
                  optimizer=opt,
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=1,
                        callbacks=[callbacks])

    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    yhat = model.predict(X_test)

    return history, yhat
This led to IndexError: list index out of range
What is the correct way to apply this layer to my model?
As requested, the following code can be used to simulate a dataset.
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Bidirectional, Masking, LSTM
from keras_self_attention import SeqSelfAttention
import numpy as np

X_train = np.random.rand(700, 50, 34)
y_train = np.random.choice([0, 1], 700)
X_test = np.random.rand(100, 50, 34)
y_test = np.random.choice([0, 1], 100)

batch_size = 217
epochs = 600
dropout = 0.6
Rdropout = 0.7
learning_rate = 0.00001
optimizer = 'RMS'
loss = 'categorical_crossentropy'
num_classes = y_train.shape[1]

LSTM_attention_his, yhat = LSTM_attention_model(
    X_train, y_train, X_test, y_test,
    loss=loss, num_classes=num_classes, batch_size=batch_size, units=32,
    learning_rate=learning_rate, epochs=epochs, dropout=0.5, recurrent_dropout=Rdropout, optimizer=optimizer
)
Here is how I would rewrite the code -
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Bidirectional, Masking, LSTM, Reshape
from keras_self_attention import SeqSelfAttention
import numpy as np
def opt_select(optimizer):
    if optimizer == 'Adam':
        adamopt = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        return adamopt
    elif optimizer == 'RMS':
        RMSopt = tf.keras.optimizers.RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-6)
        return RMSopt
    else:
        print('undefined optimizer')

def LSTM_attention_model(X_train, y_train, X_test, y_test, num_classes, loss, batch_size=68, units=128,
                         learning_rate=0.005, epochs=20, dropout=0.2, recurrent_dropout=0.2, optimizer='Adam'):
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if logs.get('accuracy') > 0.90:
                print("\nReached 90% accuracy so cancelling training!")
                self.model.stop_training = True

    callbacks = myCallback()

    model = tf.keras.models.Sequential()
    model.add(Masking(mask_value=0.0, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
    model.add(SeqSelfAttention(attention_activation='sigmoid'))
    model.add(Reshape((-1, model.output.shape[1] * model.output.shape[2])))
    model.add(Dense(num_classes, activation='softmax'))

    opt = opt_select(optimizer)
    model.compile(loss=loss,
                  optimizer=opt,
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=1,
                        callbacks=[callbacks])

    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    yhat = model.predict(X_test)

    return history, yhat
X_train = np.random.rand(700, 50, 34)
y_train = np.random.choice([0, 1], (700, 1))
X_test = np.random.rand(100, 50, 34)
y_test = np.random.choice([0, 1], (100, 1))

batch_size = 217
epochs = 600
dropout = 0.6
Rdropout = 0.7
learning_rate = 0.00001
optimizer = 'RMS'
loss = 'categorical_crossentropy'
num_classes = y_train.shape[1]

LSTM_attention_his, yhat = LSTM_attention_model(
    X_train, y_train, X_test, y_test,
    loss=loss, num_classes=num_classes, batch_size=batch_size, units=32,
    learning_rate=learning_rate, epochs=epochs, dropout=0.5, recurrent_dropout=Rdropout, optimizer=optimizer
)
These are the changes I had to make to get this to start training -
The original issue was caused by the LSTM layer outputting the wrong dimensions. The SeqSelfAttention layer needs a 3D input (one dimension corresponding to the sequence of the data), which was missing from the output of the LSTM layer. As mentioned by @today in the comments, this can be solved by adding return_sequences=True to the LSTM layer.
But even with that modification, the code still gives an error when trying to compute the cost function. The issue is that the output of the self-attention layer is (None, 50, 64); when this is passed directly into the Dense layer, the final output of the network becomes (None, 50, 1). This doesn't make sense for what we are trying to do, because the final output should contain a single label for each data point (it should have the shape (None, 1)). The problem is that the output of the self-attention layer is 3-dimensional (each data point has a (50, 64) feature matrix), and it needs to be reshaped into a single-dimensional feature vector for the computation to make sense. So I added a reshape layer, model.add(Reshape((-1, ))), between the attention layer and the Dense layer.
In addition, the myCallback class tests whether logs.get('acc') is > 0.9, but I think the key should be 'accuracy', i.e. logs.get('accuracy').
To address the OP's follow-up question in the comments about what kind of column should be added: in this case, it was just a matter of extracting the full sequential data from the LSTM layer. Without the return_sequences flag, the output of the LSTM layer is (None, 64), which is simply the final feature vector of the LSTM without the intermediate sequential data.
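To illustrate that shape difference, here is a small sketch (not part of the original answer) using the 50-step, 34-feature, 32-unit setup from the simulated data:

import tensorflow as tf
from tensorflow.keras.layers import LSTM, Bidirectional

inp = tf.keras.Input(shape=(50, 34))
last_step = Bidirectional(LSTM(32))(inp)                        # only the final hidden state
full_seq = Bidirectional(LSTM(32, return_sequences=True))(inp)  # one vector per timestep
print(last_step.shape)  # (None, 64)
print(full_seq.shape)   # (None, 50, 64)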
I am trying to optimize the hyperparameters of my Keras CNN for image classification, using grid search from sklearn. I got past the initial difficulty of producing x and y from Keras's flow_from_directory, but it still doesn't work.
The last line raises this error:
ValueError: dropout is not a legal parameter
def grid_model(optimizer='adam',
               kernel_initializer='random_uniform',
               dropout=0.2,
               loss='categorical_crossentropy'):
    model = Sequential()
    model.add(Conv2D(6, (5, 5), activation="relu", padding="same",
                     input_shape=(img_width, img_height, 3)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(dropout))
    model.add(Conv2D(16, (5, 5), activation="relu"))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(Dense(120, activation='relu', kernel_initializer=kernel_initializer))
    model.add(Dropout(dropout))
    model.add(Dense(84, activation='relu', kernel_initializer=kernel_initializer))
    model.add(Dropout(dropout))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
train_generator = ImageDataGenerator(rescale=1/255)
validation_generator = ImageDataGenerator(rescale=1/255)
# Retrieve images and their classes for train and validation sets
train_flow = train_generator.flow_from_directory(directory=train_data_dir,
batch_size=batch_size,
target_size=(img_height,img_width))
validation_flow = validation_generator.flow_from_directory(directory=validation_data_dir,
batch_size=batch_size,
target_size=(img_height,img_width),
shuffle = False)
clf = KerasClassifier(build_fn=grid_model(), epochs=epochs, verbose=0)
param_grid = {
    'clf__optimizer': ['adam', 'Nadam'],
    'clf__epochs': [100, 200],
    'clf__dropout': [0.1, 0.2, 0.5],
    'clf__kernel_initializer': ['normal', 'uniform'],
    'clf__loss': ['categorical_crossentropy',
                  'sparse_categorical_crossentropy',
                  'kullback_leibler_divergence']
}
pipeline = Pipeline([('clf', clf)])
(X_train, Y_train) = train_flow.next()
grid = GridSearchCV(pipeline, cv=2, param_grid=param_grid)
grid.fit(X_train, Y_train)
The problem is in this line:
clf = KerasClassifier(build_fn=grid_model(), epochs=epochs, verbose=0)
change it to
clf = KerasClassifier(build_fn=grid_model, epochs=epochs, verbose=0)
The grid_model method should not be invoked; a reference to it should be passed instead.
Also, 'sparse_categorical_crossentropy' cannot be included in the list of losses: it expects integer labels, whereas the model's output is shaped for the one-hot targets required by 'categorical_crossentropy'.
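Putting both fixes together, the setup might look like this (a sketch that keeps the OP's names and simply drops the incompatible loss):

clf = KerasClassifier(build_fn=grid_model, epochs=epochs, verbose=0)
pipeline = Pipeline([('clf', clf)])
param_grid = {
    'clf__optimizer': ['adam', 'Nadam'],
    'clf__epochs': [100, 200],
    'clf__dropout': [0.1, 0.2, 0.5],
    'clf__kernel_initializer': ['normal', 'uniform'],
    # 'sparse_categorical_crossentropy' removed: it expects integer labels,
    # while flow_from_directory yields one-hot targets here
    'clf__loss': ['categorical_crossentropy', 'kullback_leibler_divergence']
}
grid = GridSearchCV(pipeline, cv=2, param_grid=param_grid)
grid.fit(X_train, Y_train)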
I have time series data from 08/02/2014 to the current date (08/02/2019). In this code the RNN can predict and compare its results with the test set. I want to predict beyond the test set, e.g. up to 15/02/2019. How can I make Keras predict beyond the dataset?
df = pdr.get_data_yahoo('ibm',
                        start=datetime.datetime(2014, 2, 8),
                        end=datetime.datetime.now().date())
train = df.loc[:datetime.datetime(2019, 1,14), ['Close']]
test = df.loc[datetime.datetime(2019, 1,15):, ['Close']]
sc = MinMaxScaler()
train_sc = sc.fit_transform(train)
test_sc = sc.transform(test)
X_train = train_sc[:-1]
y_train = train_sc[1:]
X_test = test_sc[:-1]
y_test = test_sc[1:]
K.clear_session()
model = Sequential()
model.add(Dense(12, input_dim=1, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()
model.fit(X_train, y_train, epochs=200, batch_size=2)
y_pred = model.predict(X_test)
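One possible approach (a sketch, not from the original post) is to feed each prediction back in as the next input for as many future steps as needed; n_future is an illustrative horizon:

import numpy as np

# recursively predict n_future steps beyond the last observed (scaled) value
n_future = 5
last_value = test_sc[-1].reshape(1, 1)   # most recent scaled close price
future_scaled = []
for _ in range(n_future):
    next_value = model.predict(last_value, verbose=0)   # shape (1, 1)
    future_scaled.append(next_value.ravel()[0])
    last_value = next_value
# map the predictions back to the original price scale
future_prices = sc.inverse_transform(np.array(future_scaled).reshape(-1, 1))
print(future_prices)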
Can someone please explain why the following code achieves only about 50% classification accuracy?
I am trying to classify lists of 20 items into 0 or 1. The lists are all 5s or all 6s.
import numpy as np
import keras
from sklearn.model_selection import train_test_split
positive_samples = [[5]*20]*100
negative_samples = [[6]*20]*100
x_list = np.array(positive_samples+negative_samples, dtype=np.float32)
y_list = np.array([1]*len(positive_samples)+[0]*len(negative_samples), dtype=np.float32)
x_train, x_test, y_train, y_test = train_test_split(x_list, y_list, test_size=0.20, random_state=42)
y_train = keras.utils.to_categorical(y_train, 2)
y_test = keras.utils.to_categorical(y_test, 2)
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_dim=x_train.shape[1], kernel_initializer='normal', activation='relu'))
model.add(keras.layers.Dense(5, kernel_initializer='normal', activation='relu'))
model.add(keras.layers.Dense(2, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=10, epochs=20, verbose=2, validation_data=(x_test, y_test))
print (model.evaluate(x_test, y_test, verbose=0))
Since the last output layer has 2 values per sample, you need to use a softmax activation instead of sigmoid.
Also, that means binary_crossentropy cannot be used, and you have to use categorical_crossentropy.
I have also normalized the dataset x_list by dividing by the maximum (6).
x_list /= x_list.max()
Also, you need to shuffle the dataset, by passing shuffle=True in train_test_split.
import numpy as np
import keras
from sklearn.model_selection import train_test_split
positive_samples = [[5]*20]*100
negative_samples = [[6]*20]*100
x_list = np.array(positive_samples+negative_samples, dtype=np.float32)
y_list = np.array([1]*len(positive_samples)+[0]*len(negative_samples), dtype=np.float32)
x_list /= x_list.max()
x_train, x_test, y_train, y_test = train_test_split(x_list, y_list, test_size=0.20, shuffle=True, random_state=42)
y_train = keras.utils.to_categorical(y_train, 2)
y_test = keras.utils.to_categorical(y_test, 2)
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_dim=x_train.shape[1], kernel_initializer='normal', activation='relu'))
model.add(keras.layers.Dense(5, kernel_initializer='normal', activation='relu'))
model.add(keras.layers.Dense(2, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=10, epochs=100, verbose=2, validation_data=(x_test, y_test))
print (model.evaluate(x_test, y_test, verbose=0))
A sigmoid activation in the output makes sense only when there is a single output, in which case the value lies in the range [0, 1] and signifies the probability of the instance belonging to class 1.
With 2 (or more) output neurons, the probabilities need to be normalized to sum up to 1, so a softmax layer is used instead.
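A quick numerical illustration of that difference (a sketch, not part of the original answer):

import numpy as np

logits = np.array([2.0, 1.0])
sigmoid = 1.0 / (1.0 + np.exp(-logits))          # [0.881, 0.731] -> does not sum to 1
softmax = np.exp(logits) / np.exp(logits).sum()  # [0.731, 0.269] -> sums to 1
print(sigmoid, sigmoid.sum())
print(softmax, softmax.sum())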
Data should be normalized before being fed to the network; this is normally done by scaling the values to lie between 0 and 1 or between -1 and 1. Setting the input to:
positive_samples = [[1]*20]*100
negative_samples = [[-1]*20]*100
works, or the model could be changed to:
model = keras.models.Sequential()
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(10, kernel_initializer='normal', activation='relu'))
model.add(keras.layers.Dense(5, kernel_initializer='normal', activation='relu'))
model.add(keras.layers.Dense(2, kernel_initializer='normal', activation='sigmoid'))
I understand that using
dataframe = pandas.read_csv("IrisDataset.csv", header=None)
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
estimator = KerasClassifier(build_fn=baseline_model, epochs=50, batch_size=5, verbose=0)
estimator.fit(X, dummy_y)
predictions=estimator.predict(X)
to create the predictions, metrics can be calculated by
print "PRECISION\t", precision_score(Y,encoder.inverse_transform(predictions), average=None)
where Y is the labels of the training set. But if instead of the estimator, I use this:
model = Sequential()
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(3, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, dummy_y,epochs=50,batch_size=5, shuffle=True, verbose=1)
predictions=model.predict(x=tst_X,batch_size=50,verbose=1)
then predictions has a different form and I can't use it as a parameter in the calculations.
Is there another way to calculate precision and other metrics?
Do I need to transform predictions?
The output of your Sequential model has shape (3,) per sample, containing the estimated class probabilities. Next, you have to get the predicted (most likely) class for each prediction, i.e. you have to take the argmax:
predictions = model.predict(x=tst_X, batch_size=50, verbose=1)
predictions = np.argmax(predictions, 1)
Then, you can use the rest of the code, just as it is.
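For example, evaluated on the same X/Y pair used with the estimator above (a sketch, assuming encoder is the fitted LabelEncoder from the first snippet):

import numpy as np
from sklearn.metrics import precision_score

predictions = model.predict(x=X, batch_size=50, verbose=1)
predicted_labels = encoder.inverse_transform(np.argmax(predictions, axis=1))
print("PRECISION\t", precision_score(Y, predicted_labels, average=None))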
Otherwise, you could also use the predict_classes function of the Sequential model directly, which is basically doing the same thing:
predictions = model.predict_classes(x=tst_X, batch_size=50, verbose=1)