Convolutional LSTM Model Dimension Incompatibility when making predictions & prediction dimension issues - python
I built a convolutional LSTM model to predict upcoming Bitcoin prices from past Bitcoin close-price data and other features.
Let me jump straight to the code:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

import tensorflow as tf
import tensorflow.keras as keras
import keras_tuner as kt
from keras_tuner import HyperParameters as hp
from keras.models import Sequential
from keras.layers import InputLayer, ConvLSTM1D, LSTM, Flatten, RepeatVector, Dense, TimeDistributed
from keras.callbacks import EarlyStopping
from tensorflow.keras.metrics import RootMeanSquaredError
from tensorflow.keras.optimizers import Adam
import keras.backend as K
from keras.losses import Huber
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()

DIR = '../input/btc-features-targets'
SEG_DIR = '../input/segmented'

segmentized_features = os.listdir(SEG_DIR)
btc_train_features = []
for seg in segmentized_features:
    train_features = pd.read_csv(f'{SEG_DIR}/{seg}')
    train_features.set_index('date', inplace=True)
    btc_train_features.append(scaler.fit_transform(train_features.values))

btc_train_targets = pd.read_csv(f'{DIR}/btc_train_targets.csv')
btc_train_targets.set_index('date', inplace=True)

btc_test_features = pd.read_csv(f'{DIR}/btc_test_features.csv')
btc_tef1 = btc_test_features.iloc[:111]
btc_tef2 = btc_test_features.iloc[25:]
btc_tef1.set_index('date', inplace=True)
btc_tef2.set_index('date', inplace=True)

btc_test_targets = pd.read_csv(f'{DIR}/btc_test_targets.csv')
btc_test_targets.set_index('date', inplace=True)

btc_trt_log = np.log(btc_train_targets)
btc_tefs1 = scaler.fit_transform(btc_tef1.values)
btc_tefs2 = scaler.fit_transform(btc_tef2.values)
btc_tet_log = np.log(btc_test_targets)

scaled_train_features = []
for features in btc_train_features:
    shape = features.shape
    scaled_train_features.append(np.expand_dims(features, [0, 3]))

shape_2 = btc_tefs1.shape
btc_tefs1 = np.expand_dims(btc_tefs1, [0, 3])

shape_3 = btc_tefs2.shape
btc_tefs2 = np.expand_dims(btc_tefs2, [0, 3])

btc_trt_log = btc_trt_log.values[0]
btc_tet_log = btc_tet_log.values[0]


def build(hp):
    model = keras.Sequential()

    # Input Layer
    model.add(InputLayer(input_shape=(111, 32, 1)))

    # ConvLSTM1D
    convLSTM_hp_filters = hp.Int(name='convLSTM_filters', min_value=32, max_value=512, step=32)
    convLSTM_hp_kernel_size = hp.Choice(name='convLSTM_kernel_size', values=[3, 5, 7])
    convLSTM_activation = hp.Choice(name='convLSTM_activation', values=['selu', 'relu'])
    model.add(ConvLSTM1D(filters=convLSTM_hp_filters,
                         kernel_size=convLSTM_hp_kernel_size,
                         padding='same',
                         activation=convLSTM_activation,
                         use_bias=True,
                         bias_initializer='zeros'))

    # Flatten
    model.add(Flatten())

    # RepeatVector
    model.add(RepeatVector(5))

    # LSTM
    LSTM_hp_units = hp.Int(name='LSTM_units', min_value=32, max_value=512, step=32)
    LSTM_activation = hp.Choice(name='LSTM_activation', values=['selu', 'relu'])
    model.add(LSTM(units=LSTM_hp_units, activation=LSTM_activation, return_sequences=True))

    # TimeDistributed Dense
    dense_units = hp.Int(name='dense_units', min_value=32, max_value=512, step=32)
    dense_activation = hp.Choice(name='dense_activation', values=['selu', 'relu'])
    model.add(TimeDistributed(Dense(units=dense_units, activation=dense_activation)))

    # TimeDistributed Dense_Output
    model.add(Dense(1))

    # Set Learning Rate
    hp_learning_rate = hp.Choice(name='learning_rate', values=[1e-2, 1e-3, 1e-4])

    # Compile Model
    model.compile(optimizer=Adam(learning_rate=hp_learning_rate),
                  loss=Huber(),
                  metrics=[RootMeanSquaredError()])

    return model


tuner = kt.Hyperband(build,
                     objective=kt.Objective('root_mean_squared_error', direction='min'),
                     max_epochs=10,
                     factor=3)

early_stop = EarlyStopping(monitor='root_mean_squared_error', patience=5)

opt_hps = []
for train_features in scaled_train_features:
    tuner.search(train_features, btc_trt_log, epochs=50, callbacks=[early_stop])
    opt_hps.append(tuner.get_best_hyperparameters(num_trials=1)[0])

models, epochs = ([] for _ in range(2))
for hps in opt_hps:
    model = tuner.hypermodel.build(hps)
    models.append(model)
    history = model.fit(train_features, btc_trt_log, epochs=70, verbose=0)
    rmse = history.history['root_mean_squared_error']
    best_epoch = rmse.index(min(rmse)) + 1
    epochs.append(best_epoch)

hypermodel = tuner.hypermodel.build(opt_hps[0])
for train_features, epoch in zip(scaled_train_features, epochs):
    hypermodel.fit(train_features, btc_trt_log, epochs=epoch)

tp1 = hypermodel.predict(btc_tefs1).flatten()
tp2 = hypermodel.predict(btc_tefs2).flatten()
test_predictions = np.concatenate((tp1, tp2[86:]), axis=None)
The model's hyperparameters are tuned with keras_tuner. Because the notebook raised ResourceExhaustedError when training on the full feature dataset, sequentially segmented datasets are used instead (and, according to a study that used a similar model architecture, training can be done efficiently with this approach).
The input shape of each segmented dataset is (111, 32, 1).
No issues are reported until the last code block; the models train fine. But when .predict() is executed, the notebook prints an error stating that the dimension of the input features used for prediction is incompatible with the dimension of the input features used during training. I don't understand why this happens, since as far as I know the input dimensions of a test dataset do not have to be identical to those of the training dataset.
All of the price data from 2018 to early 2021 is used for training, but predictions are only needed for the mid-2021 timeframe.
The dataset used for prediction has a shape of (136, 32, 1).
I tried matching this dataset's shape to (111, 32, 1) through index slicing.
That only moved the problem to the output dimension: predictions should be made for 136 data points, but the result contains only 10 values.
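For reference, here is a stripped-down toy version of the architecture (arbitrary small filter and unit counts, TensorFlow >= 2.6 assumed) that seems to reproduce both symptoms: the fixed input_shape makes the model reject a (1, 136, 32, 1) batch, and RepeatVector(5) pins the output to 5 steps per sample, which is presumably why flattening the predictions yields only a handful of values instead of 136:

import numpy as np
from tensorflow.keras import Sequential, layers

toy = Sequential([
    layers.InputLayer(input_shape=(111, 32, 1)),
    layers.ConvLSTM1D(filters=8, kernel_size=3, padding='same'),
    layers.Flatten(),
    layers.RepeatVector(5),
    layers.LSTM(16, return_sequences=True),
    layers.Dense(1),
])

print(toy.output_shape)                                # (None, 5, 1) -> 5 values per sample
print(toy.predict(np.zeros((1, 111, 32, 1))).shape)    # (1, 5, 1)
# toy.predict(np.zeros((1, 136, 32, 1)))               # fails the input-shape check, like the error described above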
Is there something wrong with the model configuration? I can't make sense of the current situation.
Related
What causes tensorflow keras Conv1D to only run the 1st epoch?
Currently I am using TensorFlow to create a neural network with a 1D convolutional layer and a Dense layer to predict a single output value. The input array for the neural network is an array of 1500 samples; each sample is an array of 27x13 values. I started training in the same manner as I did without the 1D conv layer, but the training stopped during the first epoch without warning.

I found that multiprocessing might be the cause, and that I should turn multiprocessing off as discussed here: https://github.com/stellargraph/stellargraph/issues/1006, basically adding this to my Keras model: use_multiprocessing=False. That did not change anything, after which I found that I should probably use a Dataset to bypass multiprocessing issues, according to https://github.com/stellargraph/stellargraph/issues/1206 (Replace tf.keras.Sequence objects with tf.data.Dataset #1206). After struggling with the difference between tf.data.Dataset.from_tensors and tf.data.Dataset.from_tensor_slices, I found the following code got the model.fit block executing again. As you might have guessed, it still stops running after the first epoch:

main loop started
Epoch 1/5
Press any key to continue . . .

Can someone pinpoint the source of the halting of the program? This is my code:

import random
import numpy as np
from keras import backend as K
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import load_model
from keras.callbacks import CSVLogger

EPOCHS = 5
BATCH_SIZE = 16

def tfdata_generator(x, y, is_training, batch_size=BATCH_SIZE):
    '''Construct a data generator using `tf.Dataset`.'''
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    if is_training:
        dataset = dataset.shuffle(1500)  # depends on sample size
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.repeat()
    dataset = dataset.prefetch(1)
    return dataset

def main():
    print("main loop started")
    X_train = np.random.randn(1500, 27, 13)
    Y_train = np.random.randn(1500, 1)
    training_set = tfdata_generator(X_train, Y_train, is_training=True)
    data = np.random.randn(1500, 27, 13), Y_train
    training_set = tf.data.Dataset.from_tensors((X_train, Y_train))

    logstring = "C:\Documents\Conv1D"
    csv_logger = CSVLogger((logstring + ".csv"), append=True, separator=';')
    early_stopper = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, min_delta=0.00001)

    model = keras.Sequential()
    model.add(layers.Conv1D(filters=10, kernel_size=9, strides=3, padding="valid"))
    model.add(layers.Flatten())
    model.add(layers.Dense(70, activation='relu', name="layer2"))
    model.add(layers.Dense(1))

    optimizer = keras.optimizers.Adam(learning_rate=0.0001)
    model.compile(optimizer=optimizer, loss="mean_squared_error")
    # WARNING:tensorflow:multiprocessing can interact badly with TensorFlow,
    # causing nondeterministic deadlocks. For high performance data pipelines tf.data is recommended.
    model.fit(training_set,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              verbose=2,
              # validation_split=0.2,
              use_multiprocessing=False)
    model.summary()

    modelstring = "C:\Documents\Conv1D_finishedmodel"
    model.save(modelstring, overwrite=True)
    model = load_model(modelstring)

main()
How to solve the TypeError about Deep Neural Network using my CSV file?
I have a CSV file to train my model. Here is my dataset: Time,Emoji_NUM?,Website_NUM?,Y)1,Y)2,Y)3,Y)4,Y)5,Y)6,Y)7,Y)8,Y)9,Y)10,Y)11,Y)12,Y)13,Y)14,Y)15,Y)16,Y)17,Y)18,Y)19,Y)20,Y)21,Y)22,Y)23,Y)24,Y)25,Y)26,Y)27,Y)28,Y)29,Y)30,Y)31,Y)32,Y)33,Y)34,Y)35,Y)36,Y)37,Y)38,Y)39,Y)40,Y)41,Y)42,Y)43,Y)44,Y)45,Y)46,Y)47,Y)48,Y)49,B)1,B)2,B)3,B)4,B)5,B)6,B)7,B)8,B)9,B)10,B)11,B)12,B)13,B)14,B)15,B)16,B)17,B)18,B)19,B)20,B)21,B)22,B)23,B)24,B)25,B)26,B)27,Target 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 3,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0 23,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0 9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 8,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 8,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 9,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 
16,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 12,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 10,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 17,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 1,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 12,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 13,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0 1,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 16,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 13,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 13,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 16,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 The Target column is the output, and the rest of the columns are the features. 
The following is my code:

import pandas as pd
import numpy as np

input_file = 'data.csv'
df = pd.read_csv(input_file, encoding='utf-8')

X_Data = df[['Time','Emoji_NUM?','Website_NUM?','Y)1','Y)2','Y)3','Y)4','Y)5','Y)6','Y)7','Y)8','Y)9','Y)10','Y)11','Y)12','Y)13','Y)14','Y)15','Y)16','Y)17','Y)18','Y)19','Y)20','Y)21','Y)22','Y)23','Y)24','Y)25','Y)26','Y)27','Y)28','Y)29','Y)30','Y)31','Y)32','Y)33','Y)34','Y)35','Y)36','Y)37','Y)38','Y)39','Y)40','Y)41','Y)42','Y)43','Y)44','Y)45','Y)46','Y)47','Y)48','Y)49','B)1','B)2','B)3','B)4','B)5','B)6','B)7','B)8','B)9','B)10','B)11','B)12','B)13','B)14','B)15','B)16','B)17','B)18','B)19','B)20','B)21','B)22','B)23','B)24','B)25','B)26','B)27']].values
y_Data = df['Target'].values

X_Data.shape
y_Data.shape

#assert len(set(train_X).intersection(valid_X).intersection(test_X)) == 0
print(f"Train has {len(train_y)} data")
print(f"Valid has {len(valid_y)} data")
print(f"Test has {len(test_y)} data")

from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD, Adam

model = Sequential()
model.add(Dense(64, input_dim=3, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer=SGD(lr=0.1), metrics=['mse','mape'])

import math
print("Starting training ")
batch_size = math.floor(len(train_y)/5000)
dnn = model.fit(train_X, train_y, epochs=20, batch_size=batch_size)

When running dnn = model.fit(train_X, train_y, epochs=20, batch_size=batch_size), I got the following error:

ValueError: Input 0 of layer sequential_1 is incompatible with the layer: expected axis -1 of input shape to have value 3 but received input with shape (None, 79)

How can I resolve this?
Ignoring that train_X, train_y, etc. aren't defined in your code: right now you're instructing the first layer to take only three input values, but you want to feed it 79. This is exactly what the error message states. If you change input_dim=3 to input_dim=79, it will work.
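A minimal sketch of the corrected model definition, keeping the rest of the question's pipeline unchanged (79 is the number of feature columns selected above):

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(64, input_dim=79, activation='relu'))   # input_dim must match the 79 feature columns
model.add(Dense(1))
model.compile(loss='mse', optimizer=SGD(lr=0.1), metrics=['mse', 'mape'])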
Python keras sequential model predicts the same value (y_train average) for all inputs
I'm trying to build a sequential neural network with Keras. I generate a dataset by plugging random inputs into a known function and train my model on it long enough to reach a steady loss. Then I ask the model to predict the x_train values, but instead of predicting something close to y_train, it returns the same value regardless of the input x. That value also happens to be the average of the y_train values. I don't understand what I'm doing wrong or why this is happening.

I'm using the following function for training the model:

def train_model(x_train, y_train, batch_size, input_size, layer_sizes, activations, optimizer, epochs, loss='MeanSquaredError'):
    assert len(layer_sizes) == len(activations)
    n_layers = len(layer_sizes)
    model = Sequential()
    model.add(LayerNormalization(input_dim=input_size))
    model.add(Dense(layer_sizes[0], kernel_regularizer='l2', kernel_initializer='ones',
                    activation=activations[0], input_dim=input_size, name='layer1'))
    for i in range(1, n_layers):
        model.add(Dense(layer_sizes[i], kernel_initializer='ones', activation=activations[i], name=f'layer{i+1}'))
    model.compile(
        optimizer=optimizer,
        loss=loss,  # MeanSquaredLogarithmicError
    )
    print(model.summary())
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
    loss_history = history.history['loss']
    plt.scatter(x=np.arange(1, epochs+1), y=loss_history)
    plt.show()
    return model

I then created an arbitrary function (just for test purposes):

def func(x1, x2, x3, x4):
    y = (x1**3 + (x2*x3 + 2)) / (x4 + x2*x1)
    return y

and made a random dataset with this function:

def random_points_in_range(n, ranges):
    points = np.empty((n, len(ranges)))
    for i, element in enumerate(ranges):
        start = min(element[1], element[0])
        interval = abs(element[1] - element[0])
        rand_check = np.random.rand(n)
        randoms = (rand_check * interval) + start
        points[:, i] = randoms.T
    return points

def generate_random_dataset(n=200, ranges=[(0,10),(0,10),(0,10),(0,10)]):
    x_dataset = random_points_in_range(n, ranges)
    y_dataset = np.empty(n)
    for i in range(n):
        x1, x2, x3, x4 = x_dataset[i]
        y_dataset[i] = func(x1, x2, x3, x4)
    return x_dataset, y_dataset

I then train a model with these functions:

x_train, y_train = generate_random_dataset()
layer_sizes = [6, 8, 10, 10, 1]
activations = [LeakyReLU(), 'relu', 'swish', 'relu', 'linear']
opt = Adam(learning_rate=0.001)
epochs = 3000
model = train_model(x_train, y_train, 5, 4, layer_sizes, activations, opt, epochs, loss='MeanSquaredError')

If you want to run the code, these are the things you need to import:

import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LayerNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
LSTM model has constant accuracy and doesn't variate
I'm stuck with my LSTM model, as you can see. I'm trying to predict the amount of tons to produce per month. When I train the model, the accuracy is almost constant, with only minimal variation, like:

0.34406
0.34407
0.34408

I tried different combinations of activations, initializers, and parameters, and the accuracy doesn't increase. I don't know whether the problem is my data, my model, or whether this value is simply the maximum accuracy the model can reach. Here is the code (if you notice some unused libraries, it's because I made some changes since the first version):

import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn import preprocessing
import keras
%tensorflow_version 2.x
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
from keras.optimizers import Adam
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
from plotly.offline import iplot
import matplotlib.pyplot as plt
import chart_studio.plotly as py
import plotly.offline as pyoff
import plotly.graph_objs as go

df_ventas = pd.read_csv('/content/drive/My Drive/proyectoPanimex/DEOPE.csv', parse_dates=['Data Emissão'], index_col=0, squeeze=True)
#df_ventas = df_ventas.resample('M').sum().reset_index()
df_ventas = df_ventas.drop(columns=['weekday', 'month'], axis=1)
df_ventas = df_ventas.reset_index()
df_ventas = df_ventas.rename(columns={'Data Emissão': 'Fecha', 'Un': 'Cantidad'})
df_ventas['dia'] = [x.day for x in df_ventas.Fecha]
df_ventas['mes'] = [x.month for x in df_ventas.Fecha]
df_ventas['anio'] = [x.year for x in df_ventas.Fecha]
df_ventas = df_ventas[:-48]
df_ventas = df_ventas.drop(columns='Fecha')

df_diff = df_ventas.copy()
df_diff['cantidad_anterior'] = df_diff['Cantidad'].shift(1)
df_diff = df_diff.dropna()
df_diff['diferencia'] = (df_diff['Cantidad'] - df_diff['cantidad_anterior'])
df_supervised = df_diff.drop(['cantidad_anterior'], axis=1)

# adding lags
for inc in range(1, 31):
    nombre_columna = 'retraso_' + str(inc)
    df_supervised[nombre_columna] = df_supervised['diferencia'].shift(inc)
df_supervised = df_supervised.dropna()
df_supervisedNumpy = df_supervised.to_numpy()
train = df_supervisedNumpy

scaler = MinMaxScaler(feature_range=(0, 1))
X_train = scaler.fit(train)
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
X_train, y_train = train_scaled[:, 1:], train_scaled[:, 0:1]
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])

# LSTM MODEL
model = Sequential()
act = 'tanh'
actF = 'relu'
model.add(LSTM(200, activation=act, input_dim=34, return_sequences=True))
model.add(Dropout(0.15))
#model.add(Flatten())
model.add(LSTM(200, activation=act))
model.add(Dropout(0.2))
#model.add(Flatten())
model.add(Dense(200, activation=act))
model.add(Dropout(0.3))
model.add(Dense(1, activation=actF))

optimizer = keras.optimizers.Adam(lr=0.00001)
model.compile(optimizer=optimizer, loss=keras.losses.binary_crossentropy, metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=100, epochs=50, verbose=1)
hist = pd.DataFrame(history.history)
hist['Epoch'] = history.epoch
hist

History output:

       loss       acc  Epoch
0  0.847146  0.344070      0
1  0.769400  0.344070      1
2  0.703548  0.344070      2
3  0.698137  0.344070      3
4  0.653952  0.344070      4

As you can see, the only value that changes is the loss, but what is going on with the accuracy? I'm starting out with machine learning, and I don't have enough knowledge yet to spot my errors. Thanks!
A Dense(1, activation='softmax') will always freeze and not learn anything.
A Dense(1, activation='relu') will very probably freeze and not learn anything.
A Dense(1, activation='sigmoid') is ideal for (binary) classification problems and somewhat good for regression with values between 0 and 1.
A Dense(1, activation='tanh') is somewhat good for regression with values between -1 and 1.
A Dense(1, activation='softplus') is somewhat good for regression with values between 0 and +infinity.
A Dense(1, activation='linear') is good for regression in general, with no limits (but it's highly recommended that the data be normalized first).
For regression, you can't use accuracy. The metrics 'mae' and 'mse' don't provide a "relative" difference; they provide the "absolute" mean difference, one linear, the other squared.
Your output activation should be linear for continuous prediction or softmax for classification. Also, multiply your learning rate by 100. Your loss should be mean_absolute_error. You could also easily divide your LSTM neurons by a factor of 10. The tanh should be replaced by relu or the like. As for your accuracy problem: it makes no sense to use accuracy, since you're not trying to classify. For metrics, you can use mae. You're trying to know how far the prediction is from the actual target, on a continuous scale. Accuracy is for categories, not continuous data.
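Taken together, a rough sketch of how the two answers' suggestions might look applied to the model in the question (the layer sizes and learning rate below are illustrative assumptions, and input_shape=(1, 34) matches X_train reshaped to (samples, 1, 34)):

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout

model = Sequential()
# far fewer recurrent units, relu instead of tanh
model.add(LSTM(20, activation='relu', input_shape=(1, 34), return_sequences=True))
model.add(Dropout(0.15))
model.add(LSTM(20, activation='relu'))
model.add(Dense(1, activation='linear'))    # linear head for a continuous target

model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),   # ~100x the original 1e-5
              loss='mean_absolute_error',
              metrics=['mae'])               # mae instead of accuracy for regression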
Bad accuracy when prediction happens
After I trained my model for the toxic comment challenge in Keras, the prediction accuracy is bad. I'm not sure if I'm doing something wrong, but the accuracy during the training period was pretty good (~0.98).

How I trained:

import sys, os, re, csv, codecs, numpy as np, pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers

train = pd.read_csv('train.csv')
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
y = train[list_classes].values
list_sentences_train = train["comment_text"]

max_features = 20000
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
maxlen = 200
X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)

inp = Input(shape=(maxlen, ))
embed_size = 128
x = Embedding(max_features, embed_size)(inp)
x = LSTM(60, return_sequences=True, name='lstm_layer')(x)
x = GlobalMaxPool1D()(x)
x = Dropout(0.1)(x)
x = Dense(50, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(6, activation="sigmoid")(x)

model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

batch_size = 32
epochs = 2
print(X_t[0])
model.fit(X_t, y, batch_size=batch_size, epochs=epochs, validation_split=0.1)
model.save("m.hdf5")

This is how I predict:

model = load_model('m.hdf5')
list_sentences_train = np.array(["I love you Stackoverflow"])
max_features = 20000
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
maxlen = 200
X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
print(X_t)
print(model.predict(X_t))

Output:

[[ 1.97086316e-02  9.36032447e-05  3.93966911e-03  5.16672269e-04
   3.67353857e-03  1.28102733e-03]]
In the inference (i.e. prediction) phase, you should use the same pre-processing steps you used during training of the model. Therefore, you should not create a new Tokenizer instance and fit it on your test data. Rather, if you want to be able to do prediction later with the same model, besides the model you must also save all the statistics you obtained from the training data, such as the vocabulary in the Tokenizer instance. So it would look like this:

import pickle

# building and training of the model as you have done
...

# store all the data we need later: model and tokenizer
model.save("m.hdf5")
with open('tokenizer.pkl', 'wb') as handler:
    pickle.dump(tokenizer, handler)

And now in the prediction phase:

import pickle

model = load_model('m.hdf5')
with open('tokenizer.pkl', 'rb') as handler:
    tokenizer = pickle.load(handler)

list_sentences_train = ["I love you Stackoverflow"]

# use the same tokenizer instance you used in the training phase
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
maxlen = 200
X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
print(model.predict(X_t))