LSTM many to one prediction using keras input shape - python

Initially, I have a CSV file with 6 columns: date, electricity consumption, and 4 other climate features that have an impact on the consumption (such as temperature, humidity, etc.).
So far, I can run my LSTM on the consumption column only, and it has given me very accurate results, but I need to feed my LSTM the other features as well. I tried to modify the Python code according to previous comments here, but I am still getting a reshape error.
Here is my code after some modifications:
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), :]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 2])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset
dataframe = pandas.read_csv('out_meteo.csv', engine='python')
dataset = dataframe.values
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# reshape into X=t and Y=t+1
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], look_back, 3))
testX = numpy.reshape(testX, (testX.shape[0],look_back, 3))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(look_back,3)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history = model.fit(trainX, trainY, validation_split=0.33, nb_epoch=5, batch_size=32)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# Get something which has as many features as dataset
trainPredict_extended = numpy.zeros((len(trainPredict),3))
# Put the predictions there
trainPredict_extended[:,2] = trainPredict[:,0]
# Inverse transform it and select the 3rd column.
trainPredict = scaler.inverse_transform(trainPredict_extended)[:,2]
print(trainPredict)
# Get something which has as many features as dataset
testPredict_extended = numpy.zeros((len(testPredict),3))
# Put the predictions there
testPredict_extended[:,2] = testPredict[:,0]
# Inverse transform it and select the 3rd column.
testPredict = scaler.inverse_transform(testPredict_extended)[:,2]
trainY_extended = numpy.zeros((len(trainY),3))
trainY_extended[:,2]=trainY
trainY=scaler.inverse_transform(trainY_extended)[:,2]
testY_extended = numpy.zeros((len(testY),3))
testY_extended[:,2]=testY
testY=scaler.inverse_transform(testY_extended)[:,2]
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY, testPredict))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, 2] = trainPredict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, 2] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
The error I am getting is the following:
Traceback (most recent call last):
  File "desp.py", line 48, in <module>
    trainX = numpy.reshape(trainX, (trainX.shape[0], look_back, 3))
  File "/usr/local/lib/python2.7/dist-packages/numpy/core/fromnumeric.py", line 232, in reshape
    return _wrapfunc(a, 'reshape', newshape, order=order)
  File "/usr/local/lib/python2.7/dist-packages/numpy/core/fromnumeric.py", line 57, in _wrapfunc
    return getattr(obj, method)(*args, **kwds)
ValueError: cannot reshape array of size 35226 into shape (1957,3,3)
Please note that I am still a newbie and that the reshape concept is still a little ambiguous to me.

As an answer to your question, I would suggest reading up on multidimensional lists/arrays in Python/NumPy.
Also, here is a link to an explanation of tensor shapes in Keras:
https://github.com/fchollet/keras/issues/2045
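The key point is that a Keras LSTM expects input of shape [samples, time steps, features], and create_dataset above already produces that when you slice all columns, so no extra reshape is needed. A minimal sketch with made-up numbers (the random array just stands in for the 6-column CSV):

import numpy

dataset = numpy.random.rand(100, 6)   # 100 rows, 6 columns, as in the CSV
look_back = 3

# Each window is a (look_back, n_features) slice of the data.
dataX = [dataset[i:i + look_back, :] for i in range(len(dataset) - look_back - 1)]
X = numpy.array(dataX)
print(X.shape)  # (96, 3, 6) -> already [samples, time steps, features]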

Here is my final code that takes all of the columns:
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), :]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 2])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
numpy.random.seed(7)
#load the dataset
dataframe = pandas.read_csv('out_meteo.csv', engine='python')
dataset = dataframe.values
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.7)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# reshape into X=t and Y=t+1
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(20, input_shape=(look_back,6)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history = model.fit(trainX, trainY, validation_split=0.33, nb_epoch=15, batch_size=15)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
print(trainPredict)
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY, testPredict))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(dataset)
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
So far it works well on all of my CSV columns. I have also removed several lines (the reshape and the MinMaxScaler inverse transformation), but I still cannot visualize my final data correctly (with real values): it shows really small values or a straight line.
The returned train and test scores for this dataset are 0.03 and 0.05, respectively.

Before plotting, try to do
testPredict = scaler.inverse_transform(testPredict)
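Note that since the scaler was fit on all 6 columns, inverse-transforming the single prediction column directly may fail or give wrong values. A sketch of the padding trick already used in the first version of the question, assuming the consumption target is column index 2:

import numpy

# Pad the predictions into an array with as many columns as the scaled dataset.
testPredict_extended = numpy.zeros((len(testPredict), 6))
testPredict_extended[:, 2] = testPredict[:, 0]   # predictions into the target column
testPredict = scaler.inverse_transform(testPredict_extended)[:, 2]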

Related

How to loop through various train and test splits

I have various train and test splits that I create using TimeSeriesSplit(). My dataframe has 377 observations with 6 input variables and 1 target variable.
I split my dataframe into train and test using the following code:
#train set
i = 0
for X_train, X_test in tscv.split(data):
    i = i + 1
    print("No of observations under train%s=%s" % (i, len(X_train)))
    print("No of observations under test%s=%s" % (i, len(X_test)))
X_train1, X_test1 = data[:67, :-1], data[67:129, :-1]
X_train2, X_test2 = data[:129, :-1], data[129:191, :-1]
X_train3, X_test3 = data[:191, :-1], data[191:253, :-1]
X_train4, X_test4 = data[:253, :-1], data[253:315, :-1]
X_train5, X_test5 = data[:315, :-1], data[315:377, :-1]
#test set
i = 0
for y_train, y_test in tscv.split(data):
    i = i + 1
    print("No of observations under train%s=%s" % (i, len(y_train)))
    print("No of observations under test%s=%s" % (i, len(y_test)))
y_train1, y_test1 = data[:67, -1], data[67:129, -1]
y_train2, y_test2 = data[:129, -1], data[129:191, -1]
y_train3, y_test3 = data[:191, -1], data[191:253, -1]
y_train4, y_test4 = data[:253, -1], data[253:315, -1]
y_train5, y_test5 = data[:315, -1], data[315:377, -1]
So I have 5 splits in total. I want to train my LSTM model looping through these splits, but I am not sure how best to do that. Here's the code for my LSTM:
# split into input and outputs
train_X, train_y = X_train, y_train
test_X, test_y = X_test, y_test
#reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,LSTM, Flatten
import matplotlib.pyplot as pyplot
# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
history = model.fit(train_X, train_y, epochs=700, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
#predictions
y_lstm = model.predict(test_X)
#metrics for test set
mse_lstm = mean_squared_error(y_test, y_lstm)
rmse_lstm = np.sqrt(mse_lstm)
r2_lstm = r2_score(y_test, y_lstm)
mae_lstm = mean_absolute_error(y_test, y_lstm)
#train metrics
train = model.predict(X_t_reshaped)
msetrain = mean_squared_error(y_train, train)
rmsetrain = np.sqrt(msetrain)
r2train = r2_score(y_train, train)
What can I do to use the above code to loop through all my different splits and store the results in a list or dataframe?
I also want to plot the predicted results, as shown below.
This is the graph I am getting based on @Ashraful's answer:
Replace your last code block with this:
from sklearn.metrics import mean_squared_error
from sklearn.metrics import *
import numpy as np
import pandas as pd
import csv
Round = 3 # define the number of digits after the decimal point you want
fields = ['Fold_No', 'mse_lstm', 'rmse_lstm', 'r2_lstm', 'mae_lstm']
csvfile = open('Summary.csv', 'w')
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
for fold in range(1, 6):
    print(f'Running fold {fold}')
    # split into input and outputs
    train_X, train_y = eval(f'X_train{fold}'), eval(f'y_train{fold}')
    test_X, test_y = eval(f'X_test{fold}'), eval(f'y_test{fold}')
    print(train_X.shape)
    # reshape input to be 3D [samples, timesteps, features]
    train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
    test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, LSTM, Flatten
    import matplotlib.pyplot as pyplot
    # design network
    model = Sequential()
    model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    history = model.fit(train_X, train_y, epochs=2, batch_size=72,
                        validation_data=(test_X, test_y), verbose=2, shuffle=False)
    # plot history
    pyplot.plot(history.history['loss'], label='train')
    pyplot.plot(history.history['val_loss'], label='test')
    pyplot.legend()
    pyplot.show()
    # predictions
    train_output = model.predict(train_X)
    y_lstm = model.predict(test_X)
    pyplot.plot(train_output, label='Training output')
    pyplot.plot(train_y, label='Observed Training Target')
    pyplot.plot(test_y, label='Observed Predic. Target')
    pyplot.plot(y_lstm, label='Predicted Output')
    pyplot.legend(loc='upper right')
    pyplot.show()
    # metrics for the test set (use the current fold's targets, not y_test1)
    mse_lstm = mean_squared_error(test_y, y_lstm)
    rmse_lstm = np.sqrt(mse_lstm)
    r2_lstm = r2_score(test_y, y_lstm)
    mae_lstm = mean_absolute_error(test_y, y_lstm)
    csvwriter.writerow([f'Fold_{fold}', round(mse_lstm, Round), round(rmse_lstm, Round),
                        round(r2_lstm, Round), round(mae_lstm, Round)])
csvfile.close()
# read the stored CSV file
summary = pd.read_csv('Summary.csv')
print(summary)
Also, you can find my implementation in a Colab notebook here.
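As a side note, the same loop can avoid eval entirely by collecting the folds in a list as they come out of TimeSeriesSplit. A minimal sketch (the data array here is a random placeholder for the real dataframe values):

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

data = np.random.rand(377, 7)       # placeholder: 6 input variables + 1 target
tscv = TimeSeriesSplit(n_splits=5)

# One (train_X, test_X, train_y, test_y) tuple per fold, instead of X_train1 ... y_test5.
splits = [(data[tr, :-1], data[te, :-1], data[tr, -1], data[te, -1])
          for tr, te in tscv.split(data)]

for fold, (train_X, test_X, train_y, test_y) in enumerate(splits, start=1):
    print(f'Fold {fold}: train={train_X.shape}, test={test_X.shape}')
    # reshape to 3D and fit the LSTM exactly as in the loop above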

Analysis, forecasting the next element of the sequence

I am communicating through a translator, so please excuse any mistakes.
I have a question: how can you implement a neural network to analyze and predict the next step of a sequence?
A simple example. Sequence:
112233112233112233 ......
or
111211312111211312 ......
You can take any sequence. But how do you teach a neural network to predict the next step in a sequence, even in sequences as simple as these?
I have the following code, but this neural network doesn't predict the next step: it repeats the previous ones. How can this be fixed?
import numpy
import pandas as pd
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        xset = []
        for j in range(dataset.shape[1]):
            a = dataset[i:(i + look_back), j]
            xset.append(a)
        dataX.append(xset)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
numpy.random.seed(1)
# load the dataset
file = 'test123456.xlsx'
xl = pd.ExcelFile(file)
dataframe = xl.parse('Sheet1')
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0,1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.75)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0],1,trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0],1,testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(8, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='Adam')
model.fit(trainX, trainY, epochs=10000, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
#
print("X=%s, Predicted=%s" % (testPredict[-1],testX[-1]))
print("X=%s, Predicted=%s" % (testPredict[0],testX[0]))
Changing the settings of this neural network does not improve the results.
I will be grateful for any help.
P.S. I'm still learning the language.
That is the problem: on any data (sequence), this neural network cannot predict the next step. How do I set it up so that it predicts the next step? What is the error in this code?

Why can model not even predict sine

I am trying to generate a learned time series with an LSTM RNN using Keras, so I want to predict a datapoint, feed it back in as input to predict the next one, and so on, so that I can actually generate the time series (for example, given 2000 datapoints, predict the next 2000).
I am trying it like this, but the test score RMSE is 1.28 and the prediction is basically a straight line.
# LSTM for international airline passengers problem with regression framing
import numpy
import matplotlib.pyplot as plt
from pandas import read_csv
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset
dataset = numpy.sin(numpy.linspace(0, 35, 10000)).reshape(-1, 1)
print(type(dataset))
print(dataset.shape)
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.5)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(16, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=10, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = list()
prediction = model.predict(testX[0].reshape(1, 1, 1))
for i in range(trainX.shape[0]):
    prediction = model.predict(prediction.reshape(1, 1, 1))
    testPredict.append(prediction)
testPredict = numpy.array(testPredict).reshape(-1, 1)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
What am I doing wrong?
I see multiple issues with your code. Your value for look_back is 1, which means the LSTM sees only one sample at a time, which is obviously not sufficient to learn anything about the sequence.
You probably did this so that you can make the final prediction at the end by feeding the prediction from the previous step back in as the new input. The correct way to make this work is to train with more timesteps and then change the network to a stateful LSTM with a single timestep.
Also, when you do the final prediction you have to show the network more than one ground-truth sample. Otherwise the position on the sine is ambiguous. (Is it going up or down in the next step?)
I slapped together a quick example. Here is how I generated the data:
import numpy as np
numSamples = 1000
numTimesteps = 50
width = np.pi/2.0
def getRandomSine(numSamples=100, width=np.pi):
    return np.sin(np.linspace(0, width, numSamples) + (np.random.rand() * np.pi * 2))
trainX = np.stack([getRandomSine(numSamples = numTimesteps+1) for _ in range(numSamples)])
valX = np.stack([getRandomSine(numSamples = numTimesteps+1) for _ in range(numSamples)])
trainX = trainX.reshape((numSamples,numTimesteps+1,1))
valX = valX.reshape((numSamples,numTimesteps+1,1))
trainY = trainX[:,1:,:]
trainX = trainX[:,:-1,:]
valY = valX[:,1:,:]
valX = valX[:,:-1,:]
Here I trained the model:
import keras
from keras.models import Sequential
from keras import layers
model = Sequential()
model.add(layers.recurrent.LSTM(32,return_sequences=True,input_shape=(numTimesteps, 1)))
model.add(layers.recurrent.LSTM(32,return_sequences=True))
model.add(layers.wrappers.TimeDistributed(layers.Dense(1,input_shape=(1,10))))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()
model.fit(trainX, trainY, nb_epoch=50, validation_data=(valX, valY), batch_size=32)
And here I changed the trained model to allow continuous prediction:
# serialize the model and get its weights, for quick re-building
config = model.get_config()
weights = model.get_weights()
config[0]['config']['batch_input_shape'] = (1, 1, 1)
config[0]['config']['stateful'] = True
config[1]['config']['stateful'] = True
from keras.models import model_from_config
new_model = Sequential().from_config(config)
new_model.set_weights(weights)
#create test sine
testX = getRandomSine(numSamples = numTimesteps*10, width = width*10)
new_model.reset_states()
testPredictions = []
# burn in
for i in range(numTimesteps):
    prediction = new_model.predict(np.array([[[testX[i]]]]))
    testPredictions.append(prediction[0, 0, 0])
# prediction
for i in range(numTimesteps, len(testX)):
    prediction = new_model.predict(prediction)
    testPredictions.append(prediction[0, 0, 0])
# plot result
import matplotlib.pyplot as plt
plt.plot(np.stack([testPredictions,testX]).T)
plt.show()
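(As a side note on the config manipulation above: in more recent Keras versions the same stateful re-build can be sketched more directly, by constructing a fresh model and copying the weights over. This is only a sketch, assuming the two-LSTM architecture trained above:

from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed

# Same layers as the trained model, but stateful and fed one timestep at a time.
new_model = Sequential()
new_model.add(LSTM(32, return_sequences=True, stateful=True,
                   batch_input_shape=(1, 1, 1)))
new_model.add(LSTM(32, return_sequences=True, stateful=True))
new_model.add(TimeDistributed(Dense(1)))
new_model.set_weights(model.get_weights())  # weights are shape-compatible
)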
Here is what the result looks like. The prediction errors add up and very quickly it diverges from the input sine. But it clearly learned the general shape of sines. You can now try to improve on this by trying different layers, activation functions etc.
I was working a bit on a different architecture and uploaded it to GitHub.
So for all people who are looking into predicting a time series point by point, I hope this helps.
The results look like this:

Keras doesn't make good predictions

Two months ago I started working with Keras in order to obtain a pump pattern for use in other software.
I don't know the reason(s) why the patterns I obtain have nothing to do with the real ones. I have tried establishing few features (inputs) in the dataset, and also more inputs, but there is no way it works.
The results look like this:
Where:
Blue: dataset (real data I'm trying to "approximate")
Orange: prediction
The dataset is a time series.
Here is the CSV file with the dataset.
Here is the code:
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.regularizers import l2, activity_l2
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0:4]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 4])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
seed=7
numpy.random.seed(seed)
# load dataset
dataframe = pandas.read_csv('datos_horarios.csv', engine='python')
dataset = dataframe.values
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
#split data into train data and test data
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# reshape to X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape inputs to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], look_back, 4))
testX = numpy.reshape(testX, (testX.shape[0], look_back, 4))
# create and adjust LSTM network
model = Sequential()
model.add(Dropout(0.3, input_shape=(look_back,4)))
model.add(LSTM(6, input_shape=(look_back,4), W_regularizer=l2(0.001)))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', momentum=0.99)
history = model.fit(trainX, trainY, validation_split=0.33, nb_epoch=250, batch_size=32)
# Plot
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend(['training', 'validation'], loc='upper right')
plt.show()
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
print(trainPredict)
numero_inputs=4
inp=numero_inputs-1
# Get something which has as many features as dataset
trainPredict_extended = numpy.zeros((len(trainPredict),numero_inputs+1))
# Put the predictions there
trainPredict_extended[:,inp+1] = trainPredict[:,0]
# Inverse transform it and select the target column.
trainPredict = scaler.inverse_transform(trainPredict_extended)[:,inp+1]
# Get something which has as many features as dataset
testPredict_extended = numpy.zeros((len(testPredict),numero_inputs+1))
# Put the predictions there
testPredict_extended[:,inp+1] = testPredict[:,0]
# Inverse transform it and select the target column.
testPredict = scaler.inverse_transform(testPredict_extended)[:,inp+1]
trainY_extended = numpy.zeros((len(trainY),numero_inputs+1))
trainY_extended[:,inp+1]=trainY
trainY=scaler.inverse_transform(trainY_extended)[:,inp+1]
testY_extended = numpy.zeros((len(testY),numero_inputs+1))
testY_extended[:,inp+1]=testY
testY=scaler.inverse_transform(testY_extended)[:,inp+1]
# Calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY, testPredict))
print('Test Score: %.2f RMSE' % (testScore))
# add train predictions to the plot
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, 0] = trainPredict
# add test predictions to the plot
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, 0] = testPredict
# Plot real data and training and test predictions
serie, = plt.plot(scaler.inverse_transform(dataset)[:, numero_inputs])  # invert samples from (0,1) back to real values and plot them
entrenamiento, = plt.plot(trainPredictPlot[:, 0], linestyle='--')  # plot the training predictions
prediccion_test, = plt.plot(testPredictPlot[:, 0], linestyle='--')
plt.ylabel(' (m3)')
plt.xlabel('h')
plt.legend([serie,entrenamiento,prediccion_test],['Time series','Training','Prediction'], loc='upper right')
plt.show()
Any ideas about how I can fix this problem? Or, at least, what the problem is?
INPUTS BY COLUMN:
Time of the day (each half an hour), converted to decimal.
Day of the week (1-Monday...7-sunday)
Month of the year (1-12)
Day of the month (1-31)
OUTPUT:
Pumped water (m3)
EDIT
Using @a_guest's code and changing some parameters, such as the number of epochs or the history value, the results are really nice:
Not an answer, but I share here the code with which I obtained the following results:
Note that the network parameters are chosen arbitrarily, i.e. not optimized. That is, you can most likely get better results by varying those parameters. Also, varying the value of history (or look_back in your case) probably has a significant effect on the quality of the predictions.
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import numpy
numpy.random.seed(12)
history = 96
def generate_data():
    data = numpy.loadtxt('datos_horarios.csv', delimiter=',', dtype=float)
    # Normalize data.
    data[:, -1] /= numpy.max(data[:, -1])
    train_test_data = []
    for i in xrange(data.shape[0] - history - 1):
        # Include the reference value here, will be extracted later.
        train_test_data.append(data[i:i+history+1, -1].flatten())
    return numpy.array(train_test_data)
train_test_data = generate_data()
# Shuffle data set in order to randomly select training and test data.
numpy.random.shuffle(train_test_data)
n_samples = train_test_data.shape[0]
n_train_samples = int(0.8 * n_samples)
train_data = train_test_data[:n_train_samples, :-1]
train_data_reference = train_test_data[:n_train_samples, -1][:, None]
test_data = train_test_data[n_train_samples:, :-1]
test_data_reference = train_test_data[n_train_samples:, -1]
print 'Training data: ', train_data
print 'Reference values: ', train_data_reference
model = Sequential()
model.add(Dense(history, input_dim=history, activation='sigmoid'))
model.add(Dense(history/2, activation='sigmoid'))
model.add(Dense(history/4, activation='sigmoid'))
model.add(Dense(history/8, activation='sigmoid'))
model.add(Dense(history/16, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='mean_squared_error', metrics=['accuracy'])
model.summary()
model.fit(train_data, train_data_reference, shuffle=True, nb_epoch=200, batch_size=10)
# Use the complete data set to see the network performance.
# Regenerate data set because it was shuffled before.
train_test_data = generate_data()
test_data_predicted = model.predict(train_test_data[:, :-1]).flatten()
test_data_reference = train_test_data[:, -1]
relative_deviation = test_data_predicted/test_data_reference - 1.0
print 'Relative deviation: ', relative_deviation
plt.figure()
plt.plot(range(len(test_data_reference)), test_data_reference, 'b-', label='reference')
plt.plot(range(len(test_data_predicted)), test_data_predicted, 'r--', label='predicted')
plt.xlabel('test case #')
plt.ylabel('predictions')
plt.title('Reference values vs predicted values')
plt.legend()
plt.figure()
plt.plot(range(len(test_data_predicted)), relative_deviation, 'bx', label='relative deviation')
plt.xlabel('test case #')
plt.ylabel('relative deviation')
plt.title('Relative deviation of predicted values (predicted / reference - 1)')
plt.legend()
plt.show()

Error in LSTM during testing

My data is 68871 x 43, where the features are in columns 1-42 and the label is in column 43.
My Keras LSTM code for classification of the data is:
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        #if i == 0:
        #    print len(a)
        dataX.append(a)
        dataY.append(dataset[i + look_back, 43])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
numpy.random.seed(7)
# load the dataset
#dataframe = pandas.read_csv('international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = numpy.loadtxt("Source.txt", delimiter=" ")
#dataset = dataframe.values
#dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(3, input_dim=look_back))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, nb_epoch=1, batch_size=1)
score, acc = model.evaluate(testX, testY)
print('Test score:', score)
print('Test accuracy:', acc)
I am getting this error at testing time.
Please help me resolve this; many thanks in advance.
I think your problem is that model.evaluate(testX, testY) only returns one value.
Your error message tells you that numpy.float64 isn't iterable. What it means is that model.evaluate(testX, testY) returns a single float64, and thus you cannot unpack its return value into the two variables score, acc.
It would be like doing:
def single_return():
    return np.float64(10)

a, b = single_return()
(Note that this code will raise the exact same error).
I would then suggest, both to fix it now and as good practice for the future, to always assign the result to a single variable first and then split it. That makes error messages clearer, as the only line that can fail is the assignment, not the evaluation.
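For instance, a minimal sketch of both options, reusing the model, testX and testY from your question:

# Option 1: keep the single return value (the model was compiled with only a loss).
score = model.evaluate(testX, testY)
print('Test loss:', score)

# Option 2: request accuracy as a metric, so evaluate returns [loss, accuracy].
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
results = model.evaluate(testX, testY)
print('Test loss:', results[0])
print('Test accuracy:', results[1])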
Hope it helps.
pltrdy
