How to predict actual future values using CNN and LSTM - python

I am using a CNN + LSTM model to predict the closing price of SPY. I have validated the predictions by comparing them against held-out data, and the model works. But how do I predict actual future values?
What I want is to predict the next day's closing price.
What would I change in the code below?
df = web.DataReader("SPY", data_source="yahoo", start="2000-01-01", end="2022-10-19")
X = []
Y = []
window_size = 100
for i in range(1, len(df) - window_size - 1, 1):
    first = df.iloc[i, 2]
    temp = []
    temp2 = []
    for j in range(window_size):
        temp.append((df.iloc[i + j, 2] - first) / first)
    temp2.append((df.iloc[i + window_size, 2] - first) / first)
    X.append(np.array(temp).reshape(100, 1))
    Y.append(np.array(temp2).reshape(1, 1))
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, shuffle=True)
train_X = np.array(x_train)
test_X = np.array(x_test)
train_Y = np.array(y_train)
test_Y = np.array(y_test)
train_X = train_X.reshape(train_X.shape[0], 1, 100, 1)
test_X = test_X.reshape(test_X.shape[0], 1, 100, 1)
model = tf.keras.Sequential()
model.add(TimeDistributed(Conv1D(64, kernel_size=3, activation='relu', input_shape=(None, 100, 1))))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(128, kernel_size=3, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(64, kernel_size=3, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Flatten()))
# model.add(Dense(5, kernel_regularizer=L2(0.01)))
# LSTM layers
model.add(Bidirectional(LSTM(100, return_sequences=True)))
model.add(Dropout(0.5))
model.add(Bidirectional(LSTM(100, return_sequences=False)))
model.add(Dropout(0.5))
#Final layers
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mse', metrics=['mse', 'mae'])
history = model.fit(train_X, train_Y, validation_data=(test_X, test_Y), epochs=75, batch_size=40, verbose=1, shuffle=True)
# predict on the test set (regression outputs, not probabilities)
yhat_probs = model.predict(test_X, verbose=0)
# reduce to 1d array
yhat_probs = yhat_probs[:, 0]
predicted = model.predict(test_X)
test_label = test_Y.reshape(-1, 1)
predicted = np.array(predicted[:, 0]).reshape(-1, 1)
len_t = len(train_X)
for j in range(len_t, len_t + len(test_X)):
    temp = df.iloc[j, 3]
    test_label[j - len_t] = test_label[j - len_t] * temp + temp
    predicted[j - len_t] = predicted[j - len_t] * temp + temp
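To get an actual next-day forecast, build a single input window from the most recent window_size closes, normalize it the same way as the training windows, predict, and invert the normalization. A minimal sketch, assuming the trained model, df, and window_size=100 from the code above (the training loop normalizes each window by its first close, so the inversion here uses that same reference price):

last_window = df.iloc[-window_size:, 2].values        # most recent 100 closes
first = last_window[0]
x_next = (last_window - first) / first                # same relative-change normalization as training
x_next = x_next.reshape(1, 1, window_size, 1)         # match the model's (samples, 1, 100, 1) input
pred = model.predict(x_next)[0, 0]
next_close = pred * first + first                     # invert the normalization back to a price level
print(f"Predicted next-day close: {next_close:.2f}")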

Related

My TensorFlow prediction model shows a simple line

I am trying to predict the LTC coin price 100 steps into the future. The code runs: I create the model architecture, save the model, load the model, and load 1000 records for training.
Everything works normally at first, but after several cycles the model outputs just a simple line.
Here is the code:
df = df[['y', 'h', 'o', 'l']]  # ,'t'
df2 = df.values
print(len(df))
training = int(np.ceil(len(df) * .95))
print(training)
# prepare data for TensorFlow
# MinMaxScaler expects a 2D array
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(df)
print(f"scaled_data {len(scaled_data)}")
# how many past days of data to use to predict the next day's price
prediction_days = 500
train_data = scaled_data[0:int(training), :]
print(f"train_data {len(train_data)}")
# preparing the training data
X_train = []
y_train = []
X_test = []
y_test = []
for x in range(prediction_days, len(train_data)):
    X_train.append(scaled_data[x - prediction_days:x, 0])
    y_train.append(scaled_data[x, 0])
    X_test.append(scaled_data[x - prediction_days:x, 0])
    y_test.append(scaled_data[x, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_test, y_test = np.array(X_test), np.array(y_test)
# reshaping so that it will work in the neural net
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print(X_train.shape)
print(y_train.shape)
print("Files")
print(os.path.isfile('model.h5'))
if os.path.isfile('model.h5') is False:
    model = Sequential()
    model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
    model.add(Dropout(0.2))
    model.add(LSTM(units=50, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=50))
    model.add(Dropout(0.2))
    model.add(Dense(units=100))
    # define the optimization algorithm
    opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=opt, loss='mean_squared_error')
    model.fit(X_train, y_train, epochs=5, validation_data=(X_test, y_test))  # , callbacks=[keras.callbacks.LearningRateScheduler(lambda epoch: 1e-8 * 10 ** (epoch / 30))]
    model.save('model.h5')
    model = load_model('model.h5')
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights('model_weight.h5')
    model.load_weights('model_weight.h5')
else:
    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into the new model
    model.load_weights("model.h5")
    print("Loaded model from disk")
    opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=opt, loss='mean_squared_error')
    # train the model, iterating on the data in batches
    model.fit(X_train, y_train, epochs=5, validation_data=(X_test, y_test))  # callbacks=[keras.callbacks.LearningRateScheduler(lambda epoch: 1e-8 * 10 ** (epoch / 30))]
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights('model_weight.h5')
    model.load_weights('model_weight.h5')
test_data = 60
total_dataset = df.values
print("Counter")
model_inputs = total_dataset[len(total_dataset) - test_data - prediction_days:]
model_inputs = model_inputs.reshape(-1, 1)
model_inputs = scaler.fit_transform(model_inputs)  # ValueError: X has 1 features, but MinMaxScaler is expecting 4 features as input.
real_data = [model_inputs[len(model_inputs) - prediction_days: len(model_inputs) + 1, 0]]
# len of real_data = 101
real_data = np.array(real_data)
real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1], 1))
prediction = model.predict(real_data)
prediction = scaler.inverse_transform(prediction)
prediction = prediction.reshape(-1, 1)
# plot
plt.plot(prediction, color='green')
plt.legend()
plt.show()
Here is the first cycle of the model: (plot omitted)
Here are cycles 7-10 of the model: (plot omitted; the prediction flattens into a straight line)
Maybe some problem with the learning process?
You need to use a different optimizer; for example, SGD:
from keras.optimizers import SGD
....
sgd = SGD(learning_rate=0.01)
model.compile(loss='mse', optimizer=sgd)
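Separately, the comment in the question already flags a scaler mismatch: the scaler is fit on 4 feature columns and then re-fit on a single reshaped column, so the final inverse_transform no longer matches the scaling the model was trained on. A minimal sketch of one way to avoid this, assuming the target is the 'y' column of df as in the question, is to keep a dedicated scaler for the target:

from sklearn.preprocessing import MinMaxScaler

# a sketch, assuming df holds the four columns from the question with the target in 'y'
feature_scaler = MinMaxScaler(feature_range=(0, 1))
target_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_features = feature_scaler.fit_transform(df)       # all four columns
scaled_target = target_scaler.fit_transform(df[['y']])   # target column only
# ... build the windows and train on scaled_features / scaled_target as before ...
# at prediction time, invert with the target scaler, so both the shape (1 column)
# and the scale match what the model was trained against:
# prediction = target_scaler.inverse_transform(model.predict(real_data))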

Convolutional Neural Network - 1D - Feature Classification Error

I am trying to adapt the following example to run a 1D CNN on my own dataset and am running into some errors:
https://machinelearningmastery.com/cnn-models-for-human-activity-recognition-time-series-classification/
X = D.replace(['Resting', 'Swimming', 'Feeding', 'Non directed motion'], [0, 1, 2, 3])
X_Label = X['Label'].to_numpy()
X_Data = X[['X_static','Y_static','Z_static','X_dynamic','Y_dynamic','Z_dynamic']].to_numpy()
X_names = ['X_static','Y_static','Z_static','X_dynamic','Y_dynamic','Z_dynamic']
X_Label_Names = np.array(['Resting', 'Swimming', 'Feeding', 'Non directed motion'])
X_Data is a 5600-by-6 numpy matrix. Each column represents one type of measurement over time.
X_Label is a 5600-by-1 column of values 0 through 3 representing the class labels: 0 is resting, 1 is swimming, and so on.
X = X_Data
y = X_Label

def load_dataset_f(X, y):
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, stratify=y, random_state=random_state
    )
    trainX = X_train
    trainy = y_train
    testX = X_test
    testy = y_test
    print(trainX)
    print(trainX.shape)
    print(trainy.shape)
    return trainX, trainy, testX, testy

# fit and evaluate a model
def evaluate_model_f(trainX, trainy, testX, testy):
    verbose, epochs, batch_size = 2, 10, 20
    n_timesteps, n_features, n_outputs = 6, 1, 1
    print('n timesteps --------------------------------------------------------------------')
    print(n_timesteps)
    model = Sequential()
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit network
    print(to_categorical(trainy))
    model.fit(trainX.reshape(len(trainX), 6, 1), to_categorical(trainy))
    # evaluate model
    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy

def run_experiment_f(repeats=1):
    # load data
    trainX, trainy, testX, testy = load_dataset_f(X, y)
    print(trainX)
    # repeat experiment
    scores = list()
    for r in range(repeats):
        score = evaluate_model_f(trainX, trainy, testX, testy)
        score = score * 100.0
        print('>#%d: %.3f' % (r + 1, score))
        scores.append(score)
    # summarize results
    summarize_results(scores)

load_dataset_f(X, y)
run_experiment_f()
I am unfamiliar with the TensorFlow library and get errors at model.fit(); I am not sure how to approach this. The matrix in the example was 3D, whereas my data is 2D; I am not sure if that matters. How do I get this code to work?
You need to make sure that the input to your Conv1D layer has the shape (timesteps, features) and that the number of units in your last output layer equals the number of unique labels in your dataset. Here is a working example:
import tensorflow as tf
trainX = tf.random.normal((32, 6))
trainy = tf.random.uniform((32, 1), maxval=4)
verbose, epochs, batch_size = 2, 10, 20
n_timesteps, n_features, n_outputs = 6, 1, 4
print('n timesteps --------------------------------------------------------------------')
print(n_timesteps)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features)))
model.add(tf.keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(100, activation='relu'))
model.add(tf.keras.layers.Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit network
print(tf.keras.utils.to_categorical(trainy))
trainX = tf.expand_dims(trainX, axis=2)
model.fit(trainX, tf.keras.utils.to_categorical(trainy))
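For completeness, the held-out data needs the same treatment before evaluation. A minimal sketch, assuming testX and testy are shaped like the raw training arrays above:

testX = tf.expand_dims(testX, axis=2)                # (samples, 6, 1), same as trainX
testy_onehot = tf.keras.utils.to_categorical(testy)  # one-hot with 4 classes
loss, accuracy = model.evaluate(testX, testy_onehot, verbose=0)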

How to predict the class label (0 or 1) for new cases with new feature values after building the DNN model? [duplicate]

This question already has answers here:
Keras, how do I predict after I trained a model?
(6 answers)
Closed 2 years ago.
I built a DNN model for predicting the survivability of breast cancer patients, and I want to predict the class label (died or survived) for new cases. How do I implement that? In summary: I would like to predict a new case without re-running the training process, and simply show the resulting class label, either died from breast cancer (1) or survived (0).
dataset = pd.read_csv("C:/Users/User/Desktop/mixed 7 12-2-2020.csv", encoding='cp1252')
array = dataset.values
X = array[:, 0:33]
y = array[:, 33]
n_features, n_outputs = X.shape[1], 1

def create_model():
    model = Sequential()
    model.add(Input(n_features))
    model.add(BatchNormalization())
    model.add(Dense(51, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(68, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(85, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(85, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(68, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(51, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_outputs, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])
    # model.summary()
    return model

checkpoint = tf.keras.callbacks.ModelCheckpoint(
    "model.{epoch:02d}-{val_loss:.2f}.hdf5",
    monitor='val_accuracy',
    verbose=1,
    save_best_only=True)
EPOCHS = 50
BATCH_SIZE = 128
N_FOLDS = 10
ACC_array = np.array([])
sensitivity_array = np.array([])
specificity_array = np.array([])
kf = KFold(n_splits=N_FOLDS, shuffle=True)
kf = kf.split(X, y)
for train_index, valid_index in kf:
    X_train, X_valid = X[train_index], X[valid_index]
    y_train, y_valid = y[train_index], y[valid_index]
    model = create_model()
    # fit network
    model.fit(X_train, y_train, validation_data=(X_valid, y_valid),
              epochs=EPOCHS, batch_size=BATCH_SIZE,
              # callbacks=[checkpoint]
              )
    # model.load_weights("model.best.hdf5")
    X_valid = X_valid.astype(np.float32)
    predictions = model.predict(X_valid, batch_size=BATCH_SIZE)
    y_pred = np.round(predictions[:, 0])
    confusion = skl.metrics.confusion_matrix(y_valid, y_pred)
    TP = confusion[1, 1]
    TN = confusion[0, 0]
    FP = confusion[0, 1]
    FN = confusion[1, 0]
    # accuracy
    accuracy = (TP + TN) / float(TP + TN + FP + FN)
    # sensitivity
    sensitivity = TP / float(FN + TP)
    # specificity
    specificity = TN / float(TN + FP)
    ACC_array = np.append(ACC_array, accuracy)
    sensitivity_array = np.append(sensitivity_array, sensitivity)
    specificity_array = np.append(specificity_array, specificity)

ACC_mean = np.mean(ACC_array, axis=0)
print('mean Accuracy', ACC_mean * 100)
sensitivity_mean = np.mean(sensitivity_array, axis=0)
print('mean sensitivity', sensitivity_mean * 100)
specificity_mean = np.mean(specificity_array, axis=0)
print('mean specificity', specificity_mean * 100)
This answer: Get class labels from Keras functional model may be what you are looking for.
I'm assuming you want to convert the raw probabilities output by your model into a discrete class (1 or 0).
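Along those lines, a minimal sketch, assuming the trained model from the code above and a new record with the same 33 features, in the same order and on the same scale as the training data:

import numpy as np

# placeholder record: replace the zeros with the real feature values (shape (1, 33))
new_case = np.zeros((1, 33), dtype=np.float32)
prob = model.predict(new_case)[0, 0]   # sigmoid output: estimated probability of class 1
label = int(prob > 0.5)                # threshold at 0.5 for a discrete class
print("Died (1)" if label == 1 else "Survived (0)", f"p={prob:.3f}")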

How to apply cross-validation across all 10 folds (training and testing)?

I have this code and I want to apply 10-fold cross-validation across all folds (testing and training); as far as I can tell, the code below only runs one fold.
How do I write a loop over all the folds (9 folds for training, 1 fold for testing, rotating through every fold)?
dataset = pd.read_csv("C:/Users/User/Desktop/minority class equal.csv", encoding='cp1252')
dataset.shape
# output: (53480, 37)
array = dataset.values
X = np.expand_dims(array[:, 0:36], axis=-1)
Y = np.expand_dims(array[:, 36], axis=-1)
kf = KFold(n_splits=10)
kf.get_n_splits(X)
ACC_array = np.array([])
sensitivity_array = np.array([])
specificity_array = np.array([])
for trainindex, testindex in kf.split(X):
    Xtrain, Xtest = X[trainindex], X[testindex]
    Ytrain, Ytest = Y[trainindex], Y[testindex]
    Ytrain = np.expand_dims(Ytrain, axis=-1)
    Ytest = np.expand_dims(Ytest, axis=-1)
    print(Xtrain.shape, Ytrain.shape)
    n_timesteps, n_features, n_outputs = Xtrain.shape[0], Xtrain.shape[1], Ytrain.shape[1]
    model = Sequential()
    model.add(Conv1D(filters=64, kernel_size=1, activation='relu', input_shape=(n_features, 1)))
    model.add(Conv1D(filters=64, kernel_size=1, activation='relu'))
    model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(Xtrain, Ytrain, epochs=20, batch_size=100)
    Xtest = Xtest.astype(np.float32)
    Predictions = model.predict(Xtest, batch_size=1024)
    rounded = [round(x[0]) for x in Predictions]
    Y_predection = pd.DataFrame(rounded)
    Y_predection = Y_predection.iloc[:, 0]
    confusion = metrics.confusion_matrix(Ytest, Y_predection)
    TP = confusion[1, 1]
    TN = confusion[0, 0]
    FP = confusion[0, 1]
    FN = confusion[1, 0]
    Accuracy = (TP + TN) / float(TP + TN + FP + FN)
    sensitivity = TP / float(FN + TP)
    specificity = TN / float(TN + FP)
    ACC_array = np.append(ACC_array, Accuracy)
    sensitivity_array = np.append(sensitivity_array, sensitivity)
    specificity_array = np.append(specificity_array, specificity)

ACC_mean = np.mean(ACC_array, axis=0)
print('mean Accuracy', ACC_mean * 100)
sensitivity_mean = np.mean(sensitivity_array, axis=0)
print('mean sensitivity', sensitivity_mean * 100)
specificity_mean = np.mean(specificity_array, axis=0)
print('mean specificity', specificity_mean * 100)
This code does not produce any errors, but it seems to apply the cross-validation to just one split (one fold for testing and 9 for training).
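For reference, here is a minimal sketch of the standard scikit-learn pattern on toy data; kf.split yields one (train, test) index pair per fold, so a loop like the one above already rotates every fold through the test role exactly once:

from sklearn.model_selection import KFold
import numpy as np

X_demo = np.arange(20).reshape(10, 2)  # toy data: 10 samples, 2 features
kf = KFold(n_splits=10)
for fold, (train_idx, test_idx) in enumerate(kf.split(X_demo)):
    # 9 folds for training, 1 for testing on every iteration
    print(f"fold {fold}: train={train_idx.tolist()} test={test_idx.tolist()}")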

Testing accuracy higher than training accuracy

Why is the testing accuracy higher than my training accuracy? This is not the case for the validation accuracy. Is it because of the way I am splitting my dataset?
Modifying the network did not help, so I am guessing I am doing something wrong in the dataset-preparation step.
The dataset is composed of packet captures of malware and normal activity. The dataset.txt file contains a total of 777 rows and 28 columns.
# converting dataset and labels to numpy arrays
x = np.genfromtxt("dataset.txt", delimiter=",")
y = np.genfromtxt("label.txt", delimiter=",")
# handling missing values
x[np.isnan(x)] = 0
# shuffling the data
indices = np.arange(x.shape[0])
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
# dividing the dataset into train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

# building the model
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(32, activation='relu', input_shape=(28,)))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(32, activation='relu'))
    model.add(Dropout(0.2))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

'''cross validation
k = 5
num_val_samples = len(x_train) // k
all_scores = []
for i in range(k):
    print('fold #', i)
    x_val = x_train[i * num_val_samples: (i + 1) * num_val_samples]
    y_val = y_train[i * num_val_samples: (i + 1) * num_val_samples]
    partial_x_train = np.concatenate([x_train[:i * num_val_samples],
                                      x_train[(i + 1) * num_val_samples:]], axis=0)
    partial_y_train = np.concatenate([y_train[:i * num_val_samples],
                                      y_train[(i + 1) * num_val_samples:]], axis=0)
    model = build_model()
    model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=16,
              verbose=0)
    val_loss, val_acc = model.evaluate(x_val, y_val, verbose=0)
    all_scores.append(val_acc)
print(all_scores)
val_acc = np.mean(all_scores)
print(val_loss, val_acc)
'''

# training the model with the entire training dataset
model = build_model()
model.fit(x_train, y_train, epochs=20, batch_size=16)
# confusion matrix
y_pred = model.predict(x_test)
y_pred = (y_pred > 0.5)
result = confusion_matrix(y_test, y_pred)
print('Confusion Matrix:')
print(result)
# calculating the test accuracy
model_acc = accuracy_score(y_test, y_pred)
print('Test Accuracy:')
print(model_acc)
This is because Keras reports the running-average accuracy over each epoch. With a small number of epochs, this means that by the end of an epoch your model is better than it was on average during that epoch, so the final test accuracy can come out higher than the reported training accuracy.
It could also be that the test set happens to contain 'easier' samples, but that would not recur on every run if you split randomly in the same portion of the code.
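One way to confirm this is to re-evaluate on the training set after training finishes, which measures the final weights rather than the epoch's running average. A minimal sketch, assuming the trained model, x_train, and y_train from the question:

# accuracy of the *final* weights on the training data; typically higher than
# the running-average training accuracy Keras printed during fit
train_loss, train_acc = model.evaluate(x_train, y_train, verbose=0)
print('Final-weights training accuracy:', train_acc)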
