# Forecast future runoff based on meteorological data and historical runoff
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense

Streamflow = pd.read_csv('###.csv', delimiter=',')
x = Streamflow.drop('Q', axis=1)
Y = Streamflow['Q']
X = np.array(x)
y = np.array(Y)

# Chronological split: 70% train, 15% validation, 15% test
test_size = int(len(X) * 0.15)
valid_size = int(len(X) * 0.15)
train_size = len(X) - (valid_size + test_size)
y_train, y_valid, y_test = y[0:train_size], y[train_size:train_size+valid_size], y[-test_size:]
X_train, X_valid, X_test = X[0:train_size], X[train_size:train_size+valid_size], X[train_size+valid_size:]

# Reshape to (samples, timesteps=1, features) for the LSTM
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_valid = np.reshape(X_valid, (X_valid.shape[0], 1, X_valid.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
input_shape = X_train.shape[1:]
print(y_train.shape, y_valid.shape, y_test.shape)
model = Sequential()
model.add(LSTM(150, input_shape=X_train.shape[1:], activation='relu',return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(300, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(1, activation='relu'))
# e_LR, e_epoch and e_batch_size are hyperparameter settings defined elsewhere
optimizer = tf.keras.optimizers.SGD(learning_rate=e_LR)
model.compile(optimizer=optimizer, loss='MeanAbsoluteError')
history = model.fit(X_train, y_train, epochs=e_epoch, batch_size=e_batch_size,
                    verbose=0, validation_data=(X_valid, y_valid), shuffle=True)
[plot: training/validation loss curves and predicted vs. observed runoff]
This is the model loss and the result. The results for the training and validation periods are acceptable, but the results for the testing period are far too poor. How should I modify the model?
(The data is not normalized, because with normalization the prediction comes out as a straight line.)
Sorry, my reputation is not enough for me to comment directly. You can try the following three solutions, sketched in code below:
1. Reduce the learning rate as much as possible.
2. Reduce the model complexity, for example by shrinking the hidden size of the LSTM layers.
3. Increase the number of training epochs.
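A minimal sketch of all three suggestions applied to the model from the question (the specific values are illustrative, not tuned):

# 1. Smaller learning rate
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-4)

# 2. Lower complexity: smaller LSTM hidden sizes
model = Sequential()
model.add(LSTM(64, input_shape=X_train.shape[1:], activation='relu', return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(64, activation='relu'))
model.add(Dense(1))
model.compile(optimizer=optimizer, loss='MeanAbsoluteError')

# 3. More training epochs
history = model.fit(X_train, y_train, epochs=500, batch_size=e_batch_size,
                    verbose=0, validation_data=(X_valid, y_valid), shuffle=True)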
I am new to ML and I am creating a CNN model for sentiment analysis using word2vec. My word2vec vectors also contain negative values. While fitting the model I got this error:
InvalidArgumentError in model.fit(X_train, Y_train, epochs=3, batch_size=64)
InvalidArgumentError: Graph execution error: Detected at node 'sequential_30/embedding_29/embedding_lookup'
This is the code that creates the model:
def get_vec(x):
    doc = nlp(x)
    vec = doc.vector
    return vec
df['vec'] = df['text'].apply(lambda x: get_vec(x))
XTrain = df['vec'].to_numpy()
XTrain = XTrain.reshape(-1, 1)
XTrain = np.concatenate(np.concatenate(XTrain, axis = 0), axis = 0).reshape(-1, 300)
YTrain = df['target']
X_train, X_test, Y_train, Y_test = train_test_split(XTrain, YTrain, test_size = .3, random_state = 45, stratify = YTrain)
# Pad the sequence to the same length
max_review_length = 1600
X_train = pad_sequences(X_train, maxlen=max_review_length)
top_words = (len(nlp.vocab)) + 1
# Using embedding from Keras
embedding_vecor_length = 300
model = Sequential()
model.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length))
# Convolutional model (3x conv, flatten, 2x dense)
model.add(Convolution1D(64, 3, padding='same'))
model.add(Convolution1D(32, 3, padding='same'))
model.add(Convolution1D(16, 3, padding='same'))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(180,activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=3, batch_size=64)
When I replace all the negative values in df['vec'], the code runs without error but with 0 accuracy. What is wrong here? Please help. Thanks in advance.
# Input: 13 features
# Output: binary
# 297 data points
x = x.iloc[:, 0:13].values
y1 = y['Target']
# Stratified K fold cross Validation
kf = StratifiedKFold(n_splits=10,random_state=None)
num_features=13
num_predictions=2
# Splitting the data
for train_index, test_index in kf.split(x, y1):
    x_train, x_test = x[train_index], x[test_index]
    y_train, y_test = y1[train_index], y1[test_index]

# Standardization of the data
sc = StandardScaler()
X_train = sc.fit_transform(x_train)
X_test = sc.transform(x_test)
print(X_train.shape)  # o/p: (267, 13)
print(y_train.shape)  # o/p: (267,)
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], -1))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], -1))
# Convert class vectors to binary class matrices -- this line raises the error
y_train = np.reshape(y_train, (y_train.shape[0], num_predictions))
y_test = np.reshape(y_test, (y_test.shape[0], num_predictions))
verbose, epochs, batch_size = 1, 10, 32
n_timesteps, n_features, n_outputs = X_train.shape[1],X_train.shape[2],y_train.shape[1]
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(297, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit network
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=verbose)
# evaluate model
accuracy = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
print(accuracy)
How can I shape my data into the 3 dimensions that the CNN requires, and how do I resolve this error?
ValueError: cannot reshape array of size 267 into shape (267,2)
Imagine you have a line of 100 squares, and you want to make it a rectangle. Could you turn it into a rectangle by making it 2x100? No, but you could make it 50x2.
In short, you can't reshape an array into a shape that holds more values than the original.
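If the goal (per the "convert class vectors to binary class matrices" comment in the question) is one-hot encoding, to_categorical creates the second dimension with new values instead of trying to stretch 267 values into 534:

from tensorflow.keras.utils import to_categorical

# 267 integer labels -> a (267, 2) one-hot matrix; this generates the
# extra values rather than rearranging the existing 267.
y_train = to_categorical(y_train, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)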
Here is my code:
...
look_back = 20
train_size = int(len(data) * 0.80)
test_size = len(data) - train_size
train = data[0:train_size]
test = data[train_size:len(data)]
x_train, y_train = create_dataset(train, look_back)
x_test, y_test = create_dataset(test, look_back)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
y_train=np.repeat(y_train.reshape(-1,1), 20, axis=1).reshape(-1,20,1)
y_test=np.repeat(y_test.reshape(-1,1), 20, axis=1).reshape(-1,20,1)
...
model = Sequential()
model.add(LSTM(512, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(512, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(1, return_sequences=True))
model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, epochs=10, batch_size=64)
p = model.predict(x_test)
I want to predict the next value, so:
predictions = model.predict(x_train)  # shape (62796, 20, 1)
Then I followed this post: How to use the Keras model to forecast for future dates or events?
future = []
currentStep = predictions[-20:, :, :]  # -20 is the last look_back number
for i in range(10):
    currentStep = model.predict(currentStep)
    future.append(currentStep)
The plot of future looks completely different from the plot of p = model.predict(x_test)[:4000]; the difference between the two results is very large. Is this the right way to predict the next value? I don't know where the code went wrong. I would appreciate your opinion.
The full source is at https://gist.github.com/Lay4U/654f70bd1fb9c4f7d5bdb21ddcb588ab
According to your code, you are trying to predict the next value with an LSTM, so you have to reshape your input data correctly to reflect its time steps and features. Instead of
model.add(LSTM(512, return_sequences=True))
you have to write:
model.add(LSTM(512, input_shape=(look_back, x)))
where x is the number of input features in your training data.
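In context that would look something like this (a sketch assuming one feature per time step, since your windows are reshaped to (samples, look_back, 1)):

model = Sequential()
model.add(LSTM(512, input_shape=(look_back, 1), return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(512, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(1, return_sequences=True))
model.compile(loss='mean_squared_error', optimizer='rmsprop')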
Thank you
There are multiple methods you can try; there is no single right way at the moment. You can train a separate model for each step you want to predict: one LSTM model predicts t+1 while another predicts t+n. That is called a DIRMO strategy.
Your strategy (the recursive strategy) is particularly risky because the model can propagate its error through multiple time horizons.
You can find a good comparison of the alternative strategies in this paper:
https://www.sciencedirect.com/science/article/pii/S0957417412000528?via%3Dihub
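As a rough illustration of the direct flavour of this idea, here is a sketch only: it assumes x has shape (samples, look_back, 1) and y_next[i] holds the value one step after window i, neither of which comes from your gist.

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

def make_model(look_back):
    m = Sequential()
    m.add(LSTM(64, input_shape=(look_back, 1)))
    m.add(Dense(1))
    m.compile(loss='mean_squared_error', optimizer='rmsprop')
    return m

models = []
for h in range(1, 11):              # horizons t+1 ... t+10
    X_h = x[: len(x) - (h - 1)]     # windows that still have a target h steps ahead
    y_h = y_next[h - 1 :]           # the value h steps after each window
    m = make_model(look_back)
    m.fit(X_h, y_h, epochs=10, batch_size=64, verbose=0)
    models.append(m)

# Forecast the next 10 values from the last window without feeding
# predictions back in, so errors cannot compound recursively.
last_window = x[-1:].reshape(1, look_back, 1)
future = [m.predict(last_window)[0, 0] for m in models]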
Why is the testing accuracy higher than my training accuracy? This is not the case for the validation accuracy. Is it because of the way I am splitting my dataset?
Modifying the network did not work, so I am guessing I am doing something wrong in the dataset preparation part.
The dataset is composed of packet captures of malware and normal activities. The dataset.txt file contains a total of 777 rows and 28 columns.
#converting dataset and labels to numpy arrays
x = np.genfromtxt("dataset.txt", delimiter=",")
y = np.genfromtxt("label.txt", delimiter=",")
#handling missing values
x[np.isnan(x)] = 0
#shuffling the data
indices = np.arange(x.shape[0])
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
#dividing the dataset into train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
#building the model
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(32, activation='relu', input_shape=(28,)))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
'''cross validation
k = 5
num_val_samples = len(x_train) // k
all_scores = []
for i in range(k):
    print('fold #', i)
    x_val = x_train[i * num_val_samples: (i + 1) * num_val_samples]
    y_val = y_train[i * num_val_samples: (i + 1) * num_val_samples]
    partial_x_train = np.concatenate([x_train[:i * num_val_samples],
                                      x_train[(i + 1) * num_val_samples:]], axis=0)
    partial_y_train = np.concatenate([y_train[:i * num_val_samples],
                                      y_train[(i + 1) * num_val_samples:]], axis=0)
    model = build_model()
    model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=16,
              verbose=0)
    val_loss, val_acc = model.evaluate(x_val, y_val, verbose=0)
    all_scores.append(val_acc)
print(all_scores)
val_acc = np.mean(all_scores)
print(val_loss, val_acc)
'''
#training the model with the entire training dataset
model = build_model()
model.fit(x_train, y_train, epochs=20, batch_size=16)
#confusion matrix
y_pred = model.predict(x_test)
y_pred = (y_pred > 0.5)
result = confusion_matrix(y_test, y_pred)
print ('Confusion Matrix:')
print(result)
#calculating the test accuracy
model_acc = accuracy_score(y_test, y_pred)
print('Test Accuracy:')
print(model_acc)
This is because Keras reports the running average of the accuracy over each epoch. For a small number of epochs, this means that by the end of an epoch your model is better than it was on average during that epoch, so the final test accuracy can exceed the reported training accuracy.
It could also be that the test set randomly contains 'easier' samples, but that would not happen on every run if you split randomly in the same part of the code.
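A quick way to check this, re-using the variables from the question: re-evaluate the training set after fit finishes, so both numbers reflect the final weights instead of a running average.

# Both evaluations use the final weights, so they are directly comparable
train_loss, train_acc = model.evaluate(x_train, y_train, verbose=0)
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('end-of-training accuracy:', train_acc)
print('test accuracy:', test_acc)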
I am trying to predict the labels of texture images. An image can carry two labels, like ['banded', 'striped'], though most images have only one.
The reported accuracy is extremely high: the first epoch can already reach 0.96. But the values in the prediction array are all close to 0, which is wrong, because at least one of them should be close to 1.
Can someone help me? Thank you!!
Here is the code.
Input image = (read by OpenCV) / 255
Multi-labels = first LabelEncoder converts them to numbers, then keras.to_categorical
Then I built a CNN model as follows:
X_train, X_test, y_train, y_test = train_test_split(img_array, test_value, test_size=0.1)
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='Same', data_format='channels_last',
                 activation='relu', input_shape=(300, 300, 3)))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(300, init='uniform', activation='relu'))
model.add(Dense(285, init='uniform', activation='sigmoid'))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size= 24, epochs=10, validation_split=0.15)
If your model has only 2 labels, the last layer should be
model.add(Dense(2, init='uniform', activation='sigmoid'))
However, class imbalance can also affect the accuracy. If the imbalance is too strong, your model will show 95%+ training, validation, and testing accuracy while the individual per-class accuracies stay low, and the model will not work on real-world data.
The detailed, per-class accuracy can be inspected with:
from sklearn.metrics import classification_report

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.30)
X_test1, X_valid, y_test1, y_valid = train_test_split(X_test, y_test, test_size=0.30)
# 'metrics' here is a custom callback defined elsewhere
model.fit(X_train, y_train, batch_size=64, epochs=8, shuffle=True, validation_data=(X_test1, y_test1), callbacks=[metrics])
Y_TEST = np.argmax(y_valid, axis=1)
y_pred = model.predict_classes(X_valid)
print("#" * 50, "\n", classification_report(Y_TEST, y_pred))
Please share your class distribution for further understanding.
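One quick way to get that distribution from the one-hot/multi-hot matrix that keras.to_categorical produced (a small sketch, assuming test_value from the question is that matrix):

import numpy as np

# Column sums of a one-hot / multi-hot label matrix give per-class counts
counts = np.asarray(test_value).sum(axis=0)
for cls, n in enumerate(counts):
    print('class', cls, ':', int(n), 'samples')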
I am not sure why the Dense layer has 285 neurons. If there are 47 categories, the output Dense layer should have 47 neurons. Also, use a kernel initializer such as he_normal instead of uniform. https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py
model.add(Dense(47, activation='sigmoid'))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
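(Note that in Keras 2 the argument is spelled kernel_initializer; init is the old Keras 1 name. So the suggestion would look something like:)

model.add(Dense(47, activation='sigmoid', kernel_initializer='he_normal'))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])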
This is a multi-label classification example with 5 classes.
https://github.com/suraj-deshmukh/Keras-Multi-Label-Image-Classification