I have an Encoder-Decoder LSTM model that learns to predict 12 months of data in advance while looking back 12 months. If it helps at all, my dataset has around 10 years in total (120 months). I keep 8 years for training/validation and 2 years for testing. My understanding is that my model does not have access to the testing data at training time.
The puzzling thing is that my model's predictions are simply a shift of the previous points. But how did my model know the actual previous points at prediction time? I never gave it the monthly values of the testing set! If the explanation is that it simply copies the previous points it receives as input, then note that the 12 months I feed it have completely different values from the ones it predicts (so it does not copy its input), yet the forecasted values are shifts of the actual ones, which it has never seen.
Below is an example:
My code source is from here:
Below is my code:
# Imports assumed by this excerpt (the preprocessing that defines `scaled_data`,
# `scaler` and `smoothed` is not shown here)
import math
import numpy
from numpy import array, split
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, LSTM, RepeatVector, TimeDistributed

# train/test splitting
split_position = int(len(scaled_data) * 0.8)  # 8 years for training
train = scaled_data[0:split_position]
test = scaled_data[split_position:]
#print(train)
print('length of train=', len(train))
#print(test)
print('length of test=', len(test))

# split train and test data into yearly train/test sets (3d) [observation, year, month]
def split_data_yearly(train, test):
    # restructure into windows of yearly data
    train = array(split(train, len(train)/12))
    test = array(split(test, len(test)/12))
    return train, test
# evaluate one or more yearly forecasts against expected values
def evaluate_forecasts(actual, predicted):
    scores = list()
    # calculate an RMSE score for each month
    for i in range(actual.shape[1]):
        # calculate mse
        mse = mean_squared_error(actual[:, i], predicted[:, i])
        # calculate rmse
        rmse = math.sqrt(mse)
        # store
        scores.append(rmse)
    # calculate overall RMSE
    s = 0
    for row in range(actual.shape[0]):
        for col in range(actual.shape[1]):
            s += (actual[row, col] - predicted[row, col])**2
    score = math.sqrt(s / (actual.shape[0] * actual.shape[1]))

    ################ plot prediction vs actual ###############################
    predicted = predicted.reshape(predicted.shape[0], predicted.shape[1])
    jump = 12
    inv_scores = list()
    for i in range(len(predicted)):
        sample_predicted = predicted[i, :]
        sample_actual = actual[i, :]
        # inverse normalization
        sample_predicted_inv = scaler.inverse_transform(sample_predicted.reshape(-1, 1))
        sample_actual_inv = scaler.inverse_transform(sample_actual.reshape(-1, 1))
        #print(sample_actual_inv)
        #print(data_sd[(split_position+(i*jump)-1):(split_position+(i*jump-1))+len(sample_actual_inv)])
        # inverse differencing
        s = numpy.array(smoothed).reshape(-1, 1)
        sample_actual_inv = sample_actual_inv + s[(split_position+(i*jump)):(split_position+(i*jump))+len(sample_actual_inv)]
        sample_predicted_inv = sample_predicted_inv + s[(split_position+(i*jump)):(split_position+(i*jump))+len(sample_actual_inv)]
        months = ['August-'+str(19+i), 'September-'+str(19+i), 'October-'+str(19+i), 'November-'+str(19+i),
                  'December-'+str(19+i), 'January-'+str(20+i), 'February-'+str(20+i), 'March-'+str(20+i),
                  'April-'+str(20+i), 'May-'+str(20+i), 'June-'+str(20+i), 'July-'+str(20+i)]
        pyplot.plot(months, sample_actual_inv, 'b-', label='Actual')
        pyplot.plot(months, sample_predicted_inv, '--', color="orange", label='Predicted')
        pyplot.legend()
        pyplot.xticks(rotation=25)
        pyplot.title('Encoder Decoder LSTM Prediction', y=1.08)
        pyplot.show()
        ################### determine RMSE after inversion ################################
        mse = mean_squared_error(sample_actual_inv, sample_predicted_inv)
        rmse = math.sqrt(mse)
        inv_scores.append(rmse)
    return score, scores, inv_scores
# summarize scores
def summarize_scores(name, score, scores):
    s_scores = ', '.join(['%.1f' % s for s in scores])
    print('%s: [%.3f] %s' % (name, score, s_scores))

# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=12):
    # flatten data
    data = train.reshape((train.shape[0]*train.shape[1], train.shape[2]))
    X, y = list(), list()
    in_start = 0
    # step over the entire history one time step at a time
    for _ in range(len(data)):
        # define the end of the input sequence
        in_end = in_start + n_input
        out_end = in_end + n_out
        # ensure we have enough data for this instance
        if out_end <= len(data):
            X.append(data[in_start:in_end, :])
            y.append(data[in_end:out_end, 0])
        # move along one time step
        in_start += 1
    return array(X), array(y)
# train the model
def build_model(train, n_input):
    # prepare data
    train_x, train_y = to_supervised(train, n_input)
    # take a portion of the training windows for validation
    val_size = 12
    test_x, test_y = train_x[-val_size:], train_y[-val_size:]
    train_x, train_y = train_x[0:-val_size], train_y[0:-val_size]
    # define parameters
    verbose, epochs, batch_size = 1, 25, 8
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
    # reshape output into [samples, timesteps, features]
    train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    # define model
    model = Sequential()
    model.add(LSTM(64, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(RepeatVector(n_outputs))
    model.add(LSTM(64, activation='relu', return_sequences=True))
    model.add(TimeDistributed(Dense(100, activation='relu')))
    model.add(TimeDistributed(Dense(1)))
    #sgd = optimizers.SGD(lr=0.004, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mse', optimizer='adam')
    # fit network
    train_history = model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size,
                              validation_data=(test_x, test_y), verbose=verbose)
    loss = train_history.history['loss']
    val_loss = train_history.history['val_loss']
    pyplot.plot(loss)
    pyplot.plot(val_loss)
    pyplot.legend(['loss', 'val_loss'])
    pyplot.show()
    return model
# make a forecast
def forecast(model, history, n_input):
    # flatten data
    data = array(history)
    data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))
    # retrieve the last observations for the input data
    input_x = data[-n_input:, :]
    # reshape into [1, n_input, n]
    input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))
    # forecast the next year
    yhat = model.predict(input_x, verbose=0)
    # we only want the vector forecast
    yhat = yhat[0]
    return yhat

# evaluate a single model
def evaluate_model(train, test, n_input):
    # fit model
    model = build_model(train, n_input)
    # history is a list of yearly data
    history = [x for x in train]
    # walk-forward validation over each year
    predictions = list()
    for i in range(len(test)):
        # predict the year
        yhat_sequence = forecast(model, history, n_input)
        # store the predictions
        predictions.append(yhat_sequence)
        # get the real observation and add it to the history for predicting the next year
        history.append(test[i, :])
    # evaluate the predictions for each year
    predictions = array(predictions)
    score, scores, inv_scores = evaluate_forecasts(test[:, :, 0], predictions)
    return score, scores, inv_scores
# split into train and test
train, test = split_data_yearly(train, test)
# evaluate model and get scores
n_input = 12
score, scores, inv_scores = evaluate_model(train, test, n_input)
# summarize scores
summarize_scores('lstm', score, scores)
print('RMSE score after inversion:', inv_scores)
# plot scores
months = ['July', 'August', 'September', 'October', 'November', 'December', 'January', 'February', 'March', 'April', 'May', 'June']
#pyplot.plot(months, scores, marker='o', label='lstm')
#pyplot.show()
Differencing is the key here!
After further investigation, I found out that my model produces values that are almost zero before the differencing is inverted (i.e. it is not really learning). When I invert the differencing, I am adding (almost) zero to the actual value of the previous time step, which results in the shifted pattern above.
Therefore, I need to tune my LSTM model to make it actually learn, or maybe remove the zero-heavy parts of the data itself, since I have many of those.
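To see why this produces a shift, here is a minimal sketch with made-up numbers: if the model's outputs in the differenced space are roughly zero, then inverting the differencing just adds zero to the previous actual observation, so the "forecast" for each month is the actual value of the month before.
import numpy as np

actual = np.array([10.0, 12.0, 15.0, 11.0, 9.0])   # original, undifferenced series
pred_diff = np.zeros(len(actual) - 1)              # model output ~ 0 in the differenced space
# inverse differencing: prediction(t) = predicted_difference(t) + actual(t-1)
inverted = pred_diff + actual[:-1]
print(inverted)                                    # [10. 12. 15. 11.] -> the actual series shifted by one step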
Is it possible to apply k-fold cross-validation when fitting the model and then later use it for prediction?
We have a setup like the following: the model is trained on time-series data between 2020-02-01 and 2021-02-01 and then tested on time-series data between 2021-04-01 and 2021-04-08, but unfortunately the prediction results are very bad, and when we calculate the evaluation metrics it is obvious something is wrong. Since we only have one year of data to train on, we suspect we are underfitting, and therefore we believe k-fold would help us.
What we want, exactly, is to run k-fold on the period 2020-02-01 to 2021-02-01 and afterwards predict on the period 2021-04-01 to 2021-04-08. By doing this, we hope to make efficient use of the one year of data we have and end up with a model fit that produces a proper prediction. Is this possible, and, perhaps a better question, is this the correct way of predicting data?
Our data looks like the following, where datetime is in %y%m%d format
datetime temperature solar humidity rain temp_60
2020-02-01 00:00:00, 4.32, 22.84, 82.12, 16.36, 3.12
2020-02-01 00:10:00, 4.38, 21.99, 82.11, 16.25, 3.11
...
2021-11-01 00:00:00, 9.94, 15.43, 82.29, 14.83, 3.11
Code:
TargetVariable = ['temp_60']
predictors = ['temperature', 'humidity', 'rain', 'solar']
columnAll = ['datetime', 'temperature', 'humidity', 'rain', 'solar', 'temp_60', 'Predicted_temp_60', 'APE']
feature_selection_list = []
if __name__ == '__main__':
    # Setting random seed for the initial starting weights
    rndseed = 7
    seed(rndseed)
    np.random.seed(rndseed)
    random.seed(rndseed)
    # Retrieve data from csv file
    dataframe = pd.read_csv("../data.csv")
    Algorithm(dataframe=dataframe)

def Algorithm(dataframe):
    # Split data
    dateToTrain = dataframe.loc[dataframe['datetime'].between('2020-02-01', '2021-02-01', inclusive=True)]
    dateToPredict = dataframe.loc[dataframe['datetime'].between('2021-04-01', '2021-04-08', inclusive=True)]
    # Initialize the pipeline
    estimator = pipelining()
    # Get values of the predictors and target variable for the training data
    # (datetime between 2020-02-01 and 2021-02-01)
    X_train_year, y_train_year = dateToTrain[predictors].values, dateToTrain[TargetVariable].values
    # Get values of the predictors and target variable for the testing data
    # (datetime between 2021-04-01 and 2021-04-08)
    X_test_all, y_test_all = dateToPredict[predictors].values, dateToPredict[TargetVariable].values
    # ...
    ann = ArtificialNeuralNetwork(predictors=predictors, pipeline=estimator, X_train=X_train_year, y_train=y_train_year)
    annPredictionDataframe = ann.modelFitAndPredict(X_test=X_test_all, y_test=y_test_all)
    # ...
    annPredictionDataframe['datetime'] = dateToPredict['datetime'].values
    kfold_score = ann.evaluateScoreKFold()
    annPredictionDataframe = annPredictionDataframe[columnAll]
    # Evaluation of the performance of the Artificial Neural Network (ANN)
    evaluation = performanceEvaluation(y_test_orig=annPredictionDataframe['temp_60'], y_test_pred=annPredictionDataframe['Predicted_temp_60'])
    evaluation["Generalization Error"] = kfold_score.mean()
def pipelining():
    # Standardizing the features
    estimators = [('standardize', StandardScaler())]
    estimators.append(('mlp', KerasRegressor(build_fn=make_regression_ann, batch_size=10, epochs=100)))
    # The pipeline can be used as any other estimator
    # and avoids leaking the test set into the train set
    pipeline = Pipeline(estimators)
    return pipeline

def make_regression_ann(initializer='uniform', activation='relu', optimizer='rmsprop', loss='mae', neurons=12):
    # NOTE: 'selection' and 'tmp' are not defined in this excerpt
    inputDim = len(predictors) if selection == 1 or selection == 3 or selection == 4 else len(tmp)
    # create ANN model
    model = Sequential()
    # Defining the input layer and FIRST hidden layer, both are the same!
    # model.add(Dense(units=neurons, input_dim=len(feature_selection_list), kernel_initializer=initializer, activation=activation, activity_regularizer=l1(0.0001)))
    model.add(Dense(units=neurons, input_dim=inputDim, kernel_initializer=initializer, activation=activation, activity_regularizer=l1(0.0001)))
    # Defining the second layer of the model;
    # after the first layer we don't have to specify input_dim, as Keras configures it automatically
    model.add(Dense(units=neurons, kernel_initializer=initializer, activation=activation))
    # The output neuron is a single fully connected node
    # since we will be predicting a single number
    model.add(Dense(1, kernel_initializer=initializer))
    # Compiling the model
    model.compile(loss=loss, optimizer=optimizer)
    return model

def performanceEvaluation(y_test_orig, y_test_pred):
    evaluationColumns = ['Coefficient of Determination (R2)', 'Root Mean Square Error (RMSE)', 'Mean Squared Error (MSE)', 'Mean Absolute Percent Error (MAPE)', 'Mean Absolute Error (MAE)', 'Mean Bias Error (MBE)']
    # Computing the Mean Absolute Percent Error
    MAPE = mean_absolute_percentage_error(y_test_orig, y_test_pred)
    # Computing R2 Score
    r2 = r2_score(y_test_orig, y_test_pred)
    # Computing Mean Square Error (MSE)
    MSE = mean_squared_error(y_test_orig, y_test_pred)
    # Computing Root Mean Square Error (RMSE)
    RMSE = mean_squared_error(y_test_orig, y_test_pred, squared=False)
    # Computing Mean Absolute Error (MAE)
    MAE = mean_absolute_error(y_test_orig, y_test_pred)
    # Computing Mean Bias Error (MBE)
    MBE = np.mean(y_test_pred - y_test_orig)
    eval_list = [r2, RMSE, MSE, MAPE, MAE, MBE]
    dataframe = pd.DataFrame([eval_list], columns=evaluationColumns)
    return dataframe
In a separate file, this is where we do the fit and prediction:
class ArtificialNeuralNetwork:
    def __init__(self, predictors, pipeline, X_train, y_train):
        self.predictors = predictors
        self.pipeline = pipeline
        self.X_train = X_train
        self.y_train = y_train

    def evaluateScoreKFold(self):
        cv = KFold(n_splits=10)
        results = cross_val_score(self.pipeline, X=self.X_train, y=self.y_train, cv=cv, scoring="neg_mean_absolute_error")
        print(f'Cross Validation Results: {results}')
        print("Standardized: %.2f (%.2f) MAE" % (results.mean(), results.std()))
        return results

    def modelFitAndPredict(self, X_test, y_test):
        # THIS IS WHERE WE SHOULD PROBABLY DO KFOLD WITH THE ONE YEAR OF DATA TO TRAIN THIS PIPELINE. IS THIS CORRECTLY ASSUMED???
        self.pipeline.fit(self.X_train, self.y_train)
        # Generating predictions on the testing data
        Predictions = self.pipeline.predict(X_test)
        TestingData = pd.DataFrame(data=X_test, columns=self.predictors)
        TestingData['temp_60'] = y_test
        TestingData['Predicted_temp_60'] = Predictions
        TestingData.head()
        # Computing the absolute percent error
        APE = 100 * (abs(TestingData['temp_60'] - TestingData['Predicted_temp_60']) / TestingData['temp_60'])
        TestingData['APE'] = APE
        # Rounding all floats to 2 decimals
        TestingData = TestingData.round(2)
        TestingData.to_csv("TestingData.csv")
        return TestingData
The Evaluation Metrics:
Generalization Error R2 RMSE MSE MAPE MAE MBE
0.921 -327.534 6.945 48.229 0.811 5.212 1.835
The purpose of cross-validation (k-fold) is model checking, not model building.
Once you have checked with cross-validation that you obtain similar metrics for every split, you should train your model on all of your training data.
Maybe what you are looking for is ensemble methods, but I'm not sure whether they can be applied to NNs. A clear example is the random forest: many decision trees are trained and evaluated.
Finally, note that with NNs, in general, the more data you have, the more the model can learn, so splitting the data to obtain several different models is probably not the best option.
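If you still want a k-fold-style check on the one year of training data, a split that respects temporal order (for example scikit-learn's TimeSeriesSplit) is usually preferred over a plain KFold for time series. Below is a minimal sketch under that assumption, reusing the estimator pipeline and the X_train_year / y_train_year / X_test_all arrays from the question: first check the model with expanding-window splits, then fit the final model on the whole year and predict the test period.
from sklearn.model_selection import TimeSeriesSplit, cross_val_score

# Model checking: each fold trains on an earlier block and validates on the following one.
tscv = TimeSeriesSplit(n_splits=5)
cv_scores = cross_val_score(estimator, X_train_year, y_train_year,
                            cv=tscv, scoring="neg_mean_absolute_error")
print("CV MAE per fold:", -cv_scores)

# Model building: after the check, train on the full year and predict the unseen period.
estimator.fit(X_train_year, y_train_year)
y_pred = estimator.predict(X_test_all)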
I currently have an RNN model for time series predictions. It uses the 3 input features "value", "temperature" and "hour of the day" of the last 96 time steps to predict the next 96 time steps of the feature "value".
Here you can see a schema of it:
and here you have the current code:
#Import modules
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from tensorflow import keras
# Define the parameters of the RNN and the training
epochs = 1
batch_size = 50
steps_backwards = 96
steps_forward = 96
split_fraction_trainingData = 0.70
split_fraction_validatinData = 0.90
randomSeedNumber = 50
#Read dataset
df = pd.read_csv('C:/Users/Desktop/TestData.csv', sep=';', header=0, low_memory=False, infer_datetime_format=True, parse_dates={'datetime':[0]}, index_col=['datetime'])
# standardize data
data = df.values
indexWithYLabelsInData = 0
data_X = data[:, 0:3]
data_Y = data[:, indexWithYLabelsInData].reshape(-1, 1)
scaler_standardized_X = StandardScaler()
data_X = scaler_standardized_X.fit_transform(data_X)
data_X = pd.DataFrame(data_X)
scaler_standardized_Y = StandardScaler()
data_Y = scaler_standardized_Y.fit_transform(data_Y)
data_Y = pd.DataFrame(data_Y)
# Prepare the input data for the RNN
series_reshaped_X = np.array([data_X[i:i + (steps_backwards+steps_forward)].copy() for i in range(len(data) - (steps_backwards+steps_forward))])
series_reshaped_Y = np.array([data_Y[i:i + (steps_backwards+steps_forward)].copy() for i in range(len(data) - (steps_backwards+steps_forward))])
timeslot_x_train_end = int(len(series_reshaped_X)* split_fraction_trainingData)
timeslot_x_valid_end = int(len(series_reshaped_X)* split_fraction_validatinData)
X_train = series_reshaped_X[:timeslot_x_train_end, :steps_backwards]
X_valid = series_reshaped_X[timeslot_x_train_end:timeslot_x_valid_end, :steps_backwards]
X_test = series_reshaped_X[timeslot_x_valid_end:, :steps_backwards]
Y_train = series_reshaped_Y[:timeslot_x_train_end, steps_backwards:]
Y_valid = series_reshaped_Y[timeslot_x_train_end:timeslot_x_valid_end, steps_backwards:]
Y_test = series_reshaped_Y[timeslot_x_valid_end:, steps_backwards:]
# Build the model and train it
np.random.seed(randomSeedNumber)
tf.random.set_seed(randomSeedNumber)
model = keras.models.Sequential([
    keras.layers.SimpleRNN(10, return_sequences=True, input_shape=[None, 3]),
    keras.layers.SimpleRNN(10, return_sequences=True),
    keras.layers.TimeDistributed(keras.layers.Dense(1))
])
model.compile(loss="mean_squared_error", optimizer="adam", metrics=['mean_absolute_percentage_error'])
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_valid, Y_valid))
#Predict the test data
Y_pred = model.predict(X_test)
# Inverse the scaling (traInv: transformation inversed)
data_X_traInv = scaler_standardized_X.inverse_transform(data_X)
data_Y_traInv = scaler_standardized_Y.inverse_transform(data_Y)
series_reshaped_X_notTransformed = np.array([data_X_traInv[i:i + (steps_backwards+steps_forward)].copy() for i in range(len(data) - (steps_backwards+steps_forward))])
X_test_notTranformed = series_reshaped_X_notTransformed[timeslot_x_valid_end:, :steps_backwards]
Y_pred_traInv = scaler_standardized_Y.inverse_transform(Y_pred)
Y_test_traInv = scaler_standardized_Y.inverse_transform(Y_test)

# Calculate errors for every time slot of the multiple predictions
abs_diff = np.abs(Y_pred_traInv - Y_test_traInv)
abs_diff_perPredictedSequence = np.zeros((len(Y_test_traInv)))
average_LoadValue_testData_perPredictedSequence = np.zeros((len(Y_test_traInv)))
abs_diff_perPredictedTimeslot_ForEachSequence = np.zeros((len(Y_test_traInv)))
absoluteError_Load_Ratio_allPredictedSequence = np.zeros((len(Y_test_traInv)))
absoluteError_Load_Ratio_allPredictedTimeslots = np.zeros((len(Y_test_traInv)))
mse_perPredictedSequence = np.zeros((len(Y_test_traInv)))
rmse_perPredictedSequence = np.zeros((len(Y_test_traInv)))

for i in range(0, len(Y_test_traInv)):
    for j in range(0, len(Y_test_traInv[0])):
        abs_diff_perPredictedSequence[i] = abs_diff_perPredictedSequence[i] + abs_diff[i][j]
    mse_perPredictedSequence[i] = mean_squared_error(Y_pred_traInv[i], Y_test_traInv[i])
    rmse_perPredictedSequence[i] = np.sqrt(mse_perPredictedSequence[i])
    abs_diff_perPredictedTimeslot_ForEachSequence[i] = abs_diff_perPredictedSequence[i] / len(Y_test_traInv[0])
    average_LoadValue_testData_perPredictedSequence[i] = np.mean(Y_test_traInv[i])
    absoluteError_Load_Ratio_allPredictedSequence[i] = abs_diff_perPredictedSequence[i] / average_LoadValue_testData_perPredictedSequence[i]
    absoluteError_Load_Ratio_allPredictedTimeslots[i] = abs_diff_perPredictedTimeslot_ForEachSequence[i] / average_LoadValue_testData_perPredictedSequence[i]

rmse_average_allPredictictedSequences = np.mean(rmse_perPredictedSequence)
absoluteAverageError_Load_Ratio_allPredictedSequence = np.mean(absoluteError_Load_Ratio_allPredictedSequence)
absoluteAverageError_Load_Ratio_allPredictedTimeslots = np.mean(absoluteError_Load_Ratio_allPredictedTimeslots)
absoluteAverageError_allPredictedSequences = np.mean(abs_diff_perPredictedSequence)
absoluteAverageError_allPredictedTimeslots = np.mean(abs_diff_perPredictedTimeslot_ForEachSequence)
Here you have some test data: Download Test Data
So now I would actually like to include not only past values of the features in the prediction, but also future values of the features "temperature" and "hour of the day". The future values of the feature "temperature" can, for example, be taken from an external weather forecasting service, and for the feature "hour of the day" the future values are known in advance (in the test data I have included a "forecast" of the temperature that is not a real forecast; I just randomly changed the values).
This way, I would assume that, for several applications and datasets, the forecast could be improved.
In a schema it would look like this:
Can anyone tell me how I can do that in Keras with an RNN (or LSTM)? One way could be to include the future values as independent input features, but I would like the model to know that the future values of a feature are connected to its past values.
Reminder: does anybody have an idea how to do this? I'd highly appreciate every comment.
The standard approach is to use an encoder-decoder architecture (see 1 and 2 for instance):
The encoder takes as input the past values of the features and of the target and returns an output representation.
The decoder takes as input the encoder output and the future values of the features and returns the predicted values of the target.
You can use any architecture for the encoder and the decoder, and you can also consider different approaches for passing the encoder output to the decoder (e.g. adding or concatenating it to the decoder input features, adding or concatenating it to the output of some intermediate decoder layer, or adding it to the final decoder output); the code below is just an example.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Input, Dense, LSTM, TimeDistributed, Concatenate, Add
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# define the inputs
target = ['value']
features = ['temperatures', 'hour of the day']
sequence_length = 96
# import the data
df = pd.read_csv('TestData.csv', sep=';', header=0, low_memory=False, infer_datetime_format=True, parse_dates={'datetime': [0]}, index_col=['datetime'])
# scale the data
target_scaler = StandardScaler().fit(df[target])
features_scaler = StandardScaler().fit(df[features])
df[target] = target_scaler.transform(df[target])
df[features] = features_scaler.transform(df[features])
# extract the input and output sequences
X_encoder = [] # past features and target values
X_decoder = [] # future features values
y = [] # future target values
for i in range(sequence_length, df.shape[0] - sequence_length):
    X_encoder.append(df[features + target].iloc[i - sequence_length: i])
    X_decoder.append(df[features].iloc[i: i + sequence_length])
    y.append(df[target].iloc[i: i + sequence_length])
X_encoder = np.array(X_encoder)
X_decoder = np.array(X_decoder)
y = np.array(y)
# define the encoder and decoder
def encoder(encoder_features):
    y = LSTM(units=100, return_sequences=True)(encoder_features)
    y = TimeDistributed(Dense(units=1))(y)
    return y

def decoder(decoder_features, encoder_outputs):
    x = Concatenate(axis=-1)([decoder_features, encoder_outputs])
    # x = Add()([decoder_features, encoder_outputs])
    y = TimeDistributed(Dense(units=100, activation='relu'))(x)
    y = TimeDistributed(Dense(units=1))(y)
    return y
# build the model
encoder_features = Input(shape=X_encoder.shape[1:])
decoder_features = Input(shape=X_decoder.shape[1:])
encoder_outputs = encoder(encoder_features)
decoder_outputs = decoder(decoder_features, encoder_outputs)
model = Model([encoder_features, decoder_features], decoder_outputs)
# train the model
model.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
model.fit([X_encoder, X_decoder], y, epochs=100, batch_size=128)
# extract the last predicted sequence
y_true = target_scaler.inverse_transform(y[-1, :])
y_pred = target_scaler.inverse_transform(model.predict([X_encoder, X_decoder])[-1, :])
# plot the last predicted sequence
plt.plot(y_true.flatten(), label='actual')
plt.plot(y_pred.flatten(), label='predicted')
plt.show()
In the example above the model takes two inputs, X_encoder and X_decoder, so in your case when generating the forecasts you can use the past observed temperatures in X_encoder and the future temperature forecasts in X_decoder.
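For instance, a minimal sketch of generating one out-of-sample forecast with the model above; future_df is a hypothetical frame holding the next 96 rows of forecasted feature values (temperature from the weather service plus the known hour of the day), already scaled with features_scaler:
import numpy as np

# Encoder input: the last observed window of (scaled) features and target.
x_enc = df[features + target].iloc[-sequence_length:].to_numpy()[np.newaxis, ...]
# Decoder input: the (scaled) future feature values for the next window.
x_dec = future_df[features].iloc[:sequence_length].to_numpy()[np.newaxis, ...]
# Predict and map the target back to its original scale.
y_next = target_scaler.inverse_transform(model.predict([x_enc, x_dec])[0])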
Here is PyTorch code for time series prediction with a known external/exogenous regressor over the forecasted period. Hope it helps! Have a marvellous day!
The input format is a 3D tensor and the output a 1D array (MISO: Multiple Inputs, Single Output).
def CNN_Attention_Bidirectional_LSTM_Encoder_Decoder_predictions(model, data, regressors, extrapolations_leght):
    n_input = extrapolations_leght
    pred_list = []
    batch = data[-n_input:]
    model = model.train()
    pred_list.append(torch.cat((model(batch)[-1], torch.FloatTensor(regressors.iloc[1, [1]]).to(device).unsqueeze(0)), 1))
    batch = torch.cat((batch[n_input-1].unsqueeze(0), pred_list[-1].unsqueeze(0)), 1)
    batch = batch[:, 1:, :]
    for i in range(n_input-1):
        model = model.eval()
        pred_list.append(torch.cat((model(batch).squeeze(0), torch.FloatTensor(regressors.iloc[i+1, [1]]).to(device).unsqueeze(0)), 1))
        batch = torch.cat((batch, pred_list[-1].unsqueeze(0)), 1)
        batch = batch[:, 1:, :]
    model = model.train()
    return np.array([pred_list[j].cpu().detach().numpy() for j in range(n_input)])[:, :, 0]
I've stored the coefficients (intercept, AR, MA) of an ARIMA model from the statsmodels package:
x = df_sku
x_train = x['Weekly_Volume_Sales']
x_train_log = np.log(x_train)
x_train_log[x_train_log == -np.inf] = 0
x_train_mat = x_train_log.as_matrix()
model = ARIMA(x_train_mat, order=(1,1,1))
model_fit = model.fit(disp=0)
res = model_fit.predict(start=1, end=137, exog=None, dynamic=False)
print(res)
params = model_fit.params
But I'm unable to find any documentation in statsmodels that lets me refit the model parameters onto a set of new data and predict N steps.
Has anyone been able to accomplish refitting the model and predicting out-of-time samples?
I'm trying to accomplish something similar to R:
# Refit the old model with testData
new_model <- Arima(as.ts(testData.zoo), model = old_model)
Here is some code you can use:
def ARIMAForecasting(data, best_pdq, start_params, step):
    model = ARIMA(data, order=best_pdq)
    model_fit = model.fit(start_params=start_params)
    prediction = model_fit.forecast(steps=step)[0]
    # This returns only the last step
    return prediction[-1], model_fit.params

# Get the starting parameters on the train data
best_pdq = (3, 1, 3)  # fixed here, but you can search for the best parameters
model = ARIMA(train_data, best_pdq)
model_fit = model.fit()
start_params = model_fit.params

step = 1  # forecast one step ahead at a time
data = train_data
predictions = list()
for t in range(len(test_data)):
    prediction, start_params = ARIMAForecasting(data, best_pdq, start_params, step)
    predictions.append(prediction)
    # append the real observation before forecasting the next step
    real_value = test_data[t]
    data.append(real_value)
# Afterwards you can compare test_data with predictions
Details you can check here:
https://www.statsmodels.org/dev/generated/statsmodels.tsa.arima_model.ARIMA.fit.html#statsmodels.tsa.arima_model.ARIMA.fit
Great question. I have found such an example: https://alkaline-ml.com/pmdarima/develop/auto_examples/arima/example_add_new_samples.html
briefly:
import pmdarima as pmd
...
### split data as train/test:
train, test = ...
### fit initial model on `train` data:
arima = pmd.auto_arima(train)
...
### update initial fit with `test` data:
arima.update(test)
...
### create forecast using updated fit for N steps:
new_preds = arima.predict(n_periods=10)
After getting help on so many things, I'm here one last time for a final problem I can't find a solution to.
Following my previous question, a user pointed out that my bad results on my time series prediction may be because my architecture is not converging.
After looking at it and trying some fixes I found in other questions (setting initial weights, lowering the learning rate, changing the optimizer/activation), I can't seem to get better results; the accuracy is always 0 (or 0.0003, which isn't good enough).
My code:
import numpy
import numpy as np
import tflearn
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from math import sqrt
import datetime
from tflearn import Accuracy, Momentum

# Preprocessing function
def preprocess(data):
    return np.array(data, dtype=np.int32)

def parser(x):
    return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')

# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    df = DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag + 1)]
    columns.append(df)
    df = concat(columns, axis=1)
    df.fillna(0, inplace=True)
    return df

# create a differenced series
def difference(dataset, interval=1):
    diff = list()
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return Series(diff)

# invert differenced value
def inverse_difference(history, yhat, interval=1):
    return yhat + history[-interval]
# scale train and test data to [-1, 1]
def scale(train, test):
    # fit scaler
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # transform train
    train = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train)
    # transform test
    test = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled

# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
    new_row = [x for x in X] + [value]
    array = numpy.array(new_row)
    array = array.reshape(1, len(array))
    inverted = scaler.inverse_transform(array)
    return inverted[0, -1]
# fit the network to the training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
    # split into input (all columns but the last) and output (last column)
    X, y = train[:, 0:-1], train[:, -1]
    X = X[:, 0].reshape(len(X), 1, 1)
    y = y.reshape(len(y), 1)
    print(X.shape)
    print(y.shape)
    # Build neural network
    net = tflearn.input_data(shape=[None, 1, 1])
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 1, activation='linear', weights_init=tnorm)
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='mean_square')
    # Define model
    model = tflearn.DNN(net, tensorboard_verbose=3, best_val_accuracy=0.6)
    model.fit(X, y, n_epoch=nb_epoch, batch_size=batch_size, shuffle=False, show_metric=True)
    score = model.evaluate(X, y, batch_size=128)
    print(score)
    return model

# make a one-step forecast
def forecast_lstm(model, X):
    X = X.reshape(len(X), 1, 1)
    yhat = model.predict(X)
    return yhat[0, 0]
# Load CSV file, indicate that the first column represents labels
data = read_csv('nowcastScaled.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# transform data to be stationary
raw_values = data.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:10000], supervised_values[10000:10100]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
repeats = 1
for r in range(repeats):
    # fit the model
    lstm_model = fit_lstm(train_scaled, 128, 6, 1)
    # forecast the entire training dataset to build up state for forecasting
    train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
    print(lstm_model.predict(train_reshaped))
    # walk-forward validation on the test data
    predictions = list()
    error_scores = list()
    for i in range(len(test_scaled)):
        # make one-step forecast
        X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
        yhat = forecast_lstm(lstm_model, X)
        # invert scaling
        yhat = invert_scale(scaler, X, yhat)
        # invert differencing
        yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
        # store forecast
        predictions.append(yhat)
    rmse = sqrt(mean_squared_error(raw_values[10000:10100], predictions))
    print('%d) Test RMSE: %.3f' % (1, rmse))
    error_scores.append(rmse)

print(predictions)
print(raw_values[10000:10100])
Here is the result I get from running it (raising epoch doesn't seem to make it better):
Training Step: 472 | total loss: 0.00486 | time: 0.421s
| Adam | epoch: 006 | loss: 0.00486 - binary_acc: 0.0000 -- iter: 9856/9999
Training Step: 473 | total loss: 0.00453 | time: 0.427s
| Adam | epoch: 006 | loss: 0.00453 - binary_acc: 0.0000 -- iter: 9984/9999
Training Step: 474 | total loss: 0.00423 | time: 0.430s
| Adam | epoch: 006 | loss: 0.00423 - binary_acc: 0.0000 -- iter: 9999/9999
I've tried to lower/raise most of the settings, but nothing helped.
Here is an extract of the data I'm using (a univariate time series); using more or less data for training didn't do anything either.
(PS: My code is mostly from this tutorial; I had to change it a bit since I wanted to try using TFLearn.)
You can't define accuracy for a regression problem; you just track the MSE between the predicted and the actual values. Your training loss seems to be low, so if the predictions are not close, then either your inverse scaling is not right or you are overfitting.
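A minimal sketch of that point, with made-up numbers: an accuracy metric checks exact equality, which almost never happens for continuous targets, so it stays near 0 even when the fit is good; MSE/RMSE is the quantity worth tracking.
import numpy as np
from math import sqrt
from sklearn.metrics import mean_squared_error

actual = np.array([1.02, 0.97, 1.31, 0.88])
predicted = np.array([1.00, 0.95, 1.30, 0.90])      # close, but never exactly equal

accuracy = np.mean(predicted == actual)             # 0.0 -> meaningless for regression
rmse = sqrt(mean_squared_error(actual, predicted))  # ~0.02 -> the metric to track
print(accuracy, rmse)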