I am trying to predict the LTC coin price 100 steps into the future. The code runs without errors: I create the model architecture, save the model, load it back, and load 1000 records for training.
Everything works normally at first, but after several cycles the model outputs just a flat line.
Here is the code:
df = df[['y', 'h', 'o', 'l']]  # ,'t'
df2 = df.values
print(len(df))
training = int(np.ceil(len(df) * .95))
print(training)
# quit()
# prepare the data for TensorFlow
# NOTE: the scaler is fitted on all 4 feature columns here
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(df)
print(f"scaled_data {len(scaled_data)}")
# How many past days of data we use to predict the next day's price
prediction_days = 500
train_data = scaled_data[0:int(training), :]
print(f"train_data {len(train_data)}")
# Preparing the training data
X_train = []
y_train = []
X_test = []
y_test = []
# NOTE: the "test" windows below are identical to the training windows,
# so the validation loss simply mirrors the training loss
for x in range(prediction_days, len(train_data)):
    X_train.append(scaled_data[x - prediction_days:x, 0])
    y_train.append(scaled_data[x, 0])
    X_test.append(scaled_data[x - prediction_days:x, 0])
    y_test.append(scaled_data[x, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_test, y_test = np.array(X_test), np.array(y_test)
# Reshape so the data fits the network input: (samples, timesteps, features)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print(X_train.shape)
print(y_train.shape)
print("Files")
print(os.path.isfile('model.h5'))
if os.path.isfile('model.h5') is False:
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=100))
# define the optimization algorithm
# best learning rate for
opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(optimizer=opt, loss='mean_squared_error')
model.fit(X_train, y_train, epochs=5, validation_data=(X_test, y_test)) #, callbacks=[keras.callbacks.LearningRateScheduler(lambda epoch: 1e-8 * 10 ** (epoch / 30))]
# evaluate the model
model.save('model.h5')
#del model
model = load_model('model.h5')
# loss, accuracy = my_model.evaluate(X_test, y_test)
# print(f"accuracy: {accuracy * 100:.2f}%")
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights('model_weight.h5')
model.load_weights('model_weight.h5')
else:
    # load the architecture from JSON and create the model
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # load weights into the new model
    model.load_weights('model_weight.h5')  # was 'model.h5'; the weights file is the one saved above
    print("Loaded model from disk")
    opt = Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=opt, loss='mean_squared_error')
    # train the model, iterating on the data in batches
    model.fit(X_train, y_train, epochs=5, validation_data=(X_test, y_test))  # callbacks=[keras.callbacks.LearningRateScheduler(lambda epoch: 1e-8 * 10 ** (epoch / 30))]
    # save the updated architecture and weights
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights('model_weight.h5')
    model.load_weights('model_weight.h5')
test_data = 60
# actual_prices = df.values
total_dataset = df.values
print("Counter")
model_inputs = total_dataset[len(total_dataset) - test_data - prediction_days:]
model_inputs = model_inputs.reshape(-1, 1)
# Flattening all four columns into one and refitting the scaler avoids the
# "ValueError: X has 1 features, but MinMaxScaler is expecting 4 features as input."
# raised by transform(), but it mixes features and discards the training-time scale.
model_inputs = scaler.fit_transform(model_inputs)
real_data = [model_inputs[len(model_inputs) - prediction_days: len(model_inputs) + 1, 0]]
real_data = np.array(real_data)
real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1], 1))
prediction = model.predict(real_data)
# map the scaled prediction back to the price scale
prediction = scaler.inverse_transform(prediction)
prediction = prediction.reshape(-1, 1)
# plot
# plt.plot(df, color='green')
plt.plot(prediction, color='green', label='prediction')
plt.legend()
plt.show()
Here is the first cycle of the model:
Here are cycles 7-10 of the model:
Maybe there is some problem with the learning process?
You need to use a different optimizer, for example plain SGD:
from keras.optimizers import SGD
....
sgd = SGD(learning_rate=0.01)
model.compile(loss='mse', optimizer=sgd)
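Separately, the inline comment in the prediction section ("ValueError: X has 1 features, but MinMaxScaler is expecting 4 features as input") points at a scaler inconsistency: the scaler is fitted on all four columns for training, then refitted on a single flattened column at prediction time, so inverse_transform no longer maps back to the training scale. A minimal sketch of one consistent approach, reusing the variables from the question and assuming the close price is the 'y' column:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Fit one scaler on the target column only, so the same object can scale
# inputs and invert predictions without a feature-count mismatch.
close_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_close = close_scaler.fit_transform(df[['y']].values)  # shape (n, 1)

# ... build the (X_train, y_train) windows from scaled_close and fit as above ...

# At inference time reuse transform(), never fit_transform(), on new data:
model_inputs = df[['y']].values[-(test_data + prediction_days):]
model_inputs = close_scaler.transform(model_inputs)

# Predictions come back in [0, 1] and can be inverted with the same scaler:
prediction = close_scaler.inverse_transform(model.predict(real_data))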
Related
I am using a CNN + LSTM to predict the closing price of SPY. I have also tested the predictions by comparing the validation data with the predicted data, and the model works. But how do I predict actual future values?
What I want to do is just predict the next day's closing price.
What would I change in the code below?
df = web.DataReader("SPY", data_source="yahoo", start="2000-01-01", end="2022-10-19")
X = []
Y = []
window_size = 100
for i in range(1, len(df) - window_size - 1, 1):
    first = df.iloc[i, 2]
    temp = []
    temp2 = []
    for j in range(window_size):
        temp.append((df.iloc[i + j, 2] - first) / first)
    temp2.append((df.iloc[i + window_size, 2] - first) / first)
    X.append(np.array(temp).reshape(100, 1))
    Y.append(np.array(temp2).reshape(1, 1))
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, shuffle=True)
train_X = np.array(x_train)
test_X = np.array(x_test)
train_Y = np.array(y_train)
test_Y = np.array(y_test)
train_X = train_X.reshape(train_X.shape[0], 1, 100, 1)
test_X = test_X.reshape(test_X.shape[0], 1, 100, 1)
model = tf.keras.Sequential()
model.add(TimeDistributed(Conv1D(64, kernel_size=3, activation='relu', input_shape=(None, 100, 1))))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(128, kernel_size=3, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(64, kernel_size=3, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Flatten()))
# model.add(Dense(5, kernel_regularizer=L2(0.01)))
# LSTM layers
model.add(Bidirectional(LSTM(100, return_sequences=True)))
model.add(Dropout(0.5))
model.add(Bidirectional(LSTM(100, return_sequences=False)))
model.add(Dropout(0.5))
#Final layers
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mse', metrics=['mse', 'mae'])
history = model.fit(train_X, train_Y, validation_data=(test_X,test_Y), epochs=75,batch_size=40, verbose=1, shuffle =True)
# predict on the test set (regression outputs, not probabilities)
yhat_probs = model.predict(test_X, verbose=0)
# reduce to a 1d array
yhat_probs = yhat_probs[:, 0]
predicted = model.predict(test_X)
test_label = test_Y.reshape(-1, 1)
predicted = np.array(predicted[:, 0]).reshape(-1, 1)
# map the relative changes back to the price scale
len_t = len(train_X)
for j in range(len_t, len_t + len(test_X)):
    temp = df.iloc[j, 3]
    test_label[j - len_t] = test_label[j - len_t] * temp + temp
    predicted[j - len_t] = predicted[j - len_t] * temp + temp
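To get an actual next-day forecast, one option (a sketch, reusing window_size, the same price column df.iloc[:, 2], and the (x - first) / first normalization from the code above) is to build a single input window from the most recent window_size closes and invert the predicted relative change:

# Sketch: forecast the next day's close from the most recent window.
first = df.iloc[-window_size, 2]
window = [(df.iloc[-window_size + j, 2] - first) / first
          for j in range(window_size)]
x_next = np.array(window).reshape(1, 1, window_size, 1)  # match train_X's shape
rel_change = model.predict(x_next)[0, 0]
next_close = rel_change * first + first  # invert the relative normalization
print(f"predicted next close: {next_close:.2f}")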
Forecast future runoff based on meteorological data and historical runoff.
Streamflow = pd.read_csv('###.csv', delimiter=',')
x = Streamflow.drop('Q', axis=1)
Y = Streamflow['Q']
X = np.array(x)
y = np.array(Y)
test_size = int(len(X) * 0.15)
valid_size = int(len(X) * 0.15)
train_size = len(X) - (valid_size + test_size)
y_train, y_valid, y_test = y[0:train_size], y[train_size:train_size + valid_size], y[-test_size:]
X_train, X_valid, X_test = X[0:train_size], X[train_size:train_size + valid_size], X[train_size + valid_size:]
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_valid = np.reshape(X_valid, (X_valid.shape[0], 1, X_valid.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
input_shape = X_train.shape[1:]
print(y_train.shape, y_valid.shape, y_test.shape)
model = Sequential()
model.add(LSTM(150, input_shape=X_train.shape[1:], activation='relu',return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(300, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(1, activation='relu'))
optimizer = tf.keras.optimizers.SGD(learning_rate=e_LR)
model.compile(optimizer=optimizer,loss='MeanAbsoluteError')
history = model.fit(X_train, y_train, epochs=e_epoch, batch_size=e_batch_size, verbose=0, validation_data=(X_valid, y_valid), shuffle=True)
[image: model loss and prediction results]
This is the model loss and the result. The results for the training and validation periods are acceptable, but the results for the testing period are far too poor. How should I modify the model?
(The data is not normalized, because with normalization the prediction is a straight line.)
Sorry, my reputation is not enough for me to comment directly. You can try the following three solutions:
1. Reduce the learning rate as much as possible.
2. Reduce the model complexity, e.g. the hidden size of the LSTM.
3. Increase the number of training rounds.
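For example, suggestions 1-3 could look like this (a sketch with illustrative, untuned values; e_batch_size is the asker's own hyperparameter):

# Smaller network (suggestion 2), much lower learning rate (suggestion 1),
# and more training rounds (suggestion 3); the sizes and rate are illustrative.
model = Sequential()
model.add(LSTM(64, input_shape=X_train.shape[1:], activation='relu',
               return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='relu'))
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-4)
model.compile(optimizer=optimizer, loss='MeanAbsoluteError')
history = model.fit(X_train, y_train, epochs=500, batch_size=e_batch_size,
                    verbose=0, validation_data=(X_valid, y_valid), shuffle=True)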
I am not a data scientist and not very experienced in machine learning. I am trying to improve the results of this model for predicting the trend of a stock movement (-1: down, 0: no change, +1: up). Here are the code (Python) and the plots for the model:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=False)  # shuffle set to False
# normalizing the data
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# setting up the TensorFlow model
input_layer = Input(shape=(X_train.shape[1], 1))
x = input_layer
for _ in range(2):  # two stacked blocks
    x = Dropout(0.5)(x)  # dropout to reduce overfitting
    x = CuDNNLSTM(X_train.shape[1], return_sequences=True)(x)  # LSTM over the time sequence
x = GlobalAveragePooling1D()(x)  # global average pooling down to one vector for dense categorical classification
output = Dense(y.shape[1], activation='softmax')(x)
model = Model(inputs=input_layer, outputs=output)
opt = Adam(learning_rate=0.01)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])
# early stopping that minimizes val_loss
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200, restore_best_weights=True)
# fit the model
r = model.fit(X_train, y_train, epochs=200000, batch_size=16400,
              validation_data=(X_test, y_test), callbacks=[early_stop], shuffle=False)
#plot the results.
pd.DataFrame(r.history).plot()
[image: model learning plot]
Hello, I'm getting an error from my prediction model. I'm reading data from an Excel file with 4 inputs and 4 outputs. I'm new to deep learning. I can see the error is about y_test, but I don't know what exactly I should write. Here's my code. Thanks in advance for any help.
df = pd.read_excel("C:/Users/hayri/Desktop/aa.xlsx")
xdata = df
print(xdata)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(xdata)
df = pd.read_excel("C:/Users/hayri/Desktop/bb.xlsx")
ydata = df
min_max_scaler = preprocessing.MinMaxScaler()
y_scaled = min_max_scaler.fit_transform(ydata)
(x_train, x_test, y_train, y_test) = train_test_split(x_scaled, y_scaled, test_size=0.2, random_state=0)
conv = Sequential()
conv.add(Conv1D(filters=32, kernel_size=4, activation='relu', input_shape=(4, 4)))
conv.add(Dropout(0.5))
conv.add(MaxPooling1D(3))
conv.add(Flatten())
conv.add(Dense(4, activation='sigmoid'))
sgd = optimizers.SGD(learning_rate=0.3, momentum=0.6, decay=0, nesterov=False)
# pass the configured optimizer object, not the string 'sgd'
conv.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
history = conv.fit(x_train, y_train, batch_size=10, epochs=200, validation_data=(x_test, y_test), verbose=1)
score = conv.evaluate(x_test, y_test, batch_size=10)
y_test = np.argmax(y_test, axis=1)  # NOTE: this overwrites the one-hot y_test
y_pred = conv.predict(x_test, batch_size=64)
y_pred = np.argmax(y_pred, axis=1)
results = confusion_matrix(y_test, y_pred)
sns.heatmap(results, cmap="Blues")
accuracy = conv.evaluate(x_test, y_test)  # fails: y_test no longer matches the model's output shape
print('Accuracy: %.2f' % (accuracy * 100))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=10, epochs=200, validation_data=(x_test, y_test), verbose=1)
score = model.evaluate(x_test, y_test, batch_size=10)
y_pred = model.predict(x_test, batch_size=64)
y_pred = np.argmax(y_pred, axis=1)
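The shape error most likely comes from overwriting y_test with its argmax before evaluate() is called a second time: evaluate() still expects the one-hot, 4-column targets. A sketch that keeps y_test intact and uses argmax copies only for the confusion matrix:

# Evaluate with the original (4-column) targets...
loss, acc = conv.evaluate(x_test, y_test, batch_size=10)
print('Accuracy: %.2f' % (acc * 100))

# ...and build argmax copies only for the confusion matrix.
y_true_labels = np.argmax(y_test, axis=1)
y_pred_labels = np.argmax(conv.predict(x_test, batch_size=64), axis=1)
results = confusion_matrix(y_true_labels, y_pred_labels)
sns.heatmap(results, cmap="Blues")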
Why is the testing accuracy higher than my training accuracy? This is not the case for the validation accuracy. Is it because of the way I am splitting my dataset?
Modifying the network did not work, so I am guessing I am doing something wrong in the dataset preparation part.
The dataset is composed of packet captures of malware and normal activity. The dataset.txt file contains a total of 777 rows and 28 columns.
# converting the dataset and labels to numpy arrays
x = np.genfromtxt("dataset.txt", delimiter=",")
y = np.genfromtxt("label.txt", delimiter=",")
# handling missing values
x[np.isnan(x)] = 0
# shuffling the data
indices = np.arange(x.shape[0])
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
# dividing the dataset into train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# building the model
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(32, activation='relu', input_shape=(28,)))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
'''cross validation
k = 5
num_val_samples = len(x_train) // k
all_scores = []
for i in range(k):
    print('fold #', i)
    x_val = x_train[i * num_val_samples: (i + 1) * num_val_samples]
    y_val = y_train[i * num_val_samples: (i + 1) * num_val_samples]
    partial_x_train = np.concatenate([x_train[:i * num_val_samples],
                                      x_train[(i + 1) * num_val_samples:]], axis=0)
    partial_y_train = np.concatenate([y_train[:i * num_val_samples],
                                      y_train[(i + 1) * num_val_samples:]], axis=0)
    model = build_model()
    model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=16,
              verbose=0)
    val_loss, val_acc = model.evaluate(x_val, y_val, verbose=0)
    all_scores.append(val_acc)
print(all_scores)
val_acc = np.mean(all_scores)
print(val_loss, val_acc)
'''
#training the model with the entire training dataset
model = build_model()
model.fit(x_train, y_train, epochs=20, batch_size=16)
#confusion matrix
y_pred = model.predict(x_test)
y_pred = (y_pred > 0.5)
result = confusion_matrix(y_test, y_pred)
print ('Confusion Matrix:')
print(result)
#calculating the test accuracy
model_acc = accuracy_score(y_test, y_pred)
print('Test Accuracy:')
print(model_acc)
This is because Keras reports the running average accuracy over each epoch. For a small number of epochs, this means that by the end of an epoch your model is better than it was on average during that epoch.
This could also be due to randomly having "easier" samples in the test set, but that would not happen on every run if you split randomly in the same portion of the code.
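A quick way to check this, reusing the variables from the question: re-evaluate the trained model on the training set after fit() finishes, so both numbers are computed with the same final weights:

# Both evaluations use the final weights, unlike the running average
# that Keras prints during training.
train_loss, train_acc = model.evaluate(x_train, y_train, verbose=0)
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('train acc: %.3f, test acc: %.3f' % (train_acc, test_acc))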