I'm training a bidirectional LSTM on data from a CSV, subsetting it into x and y; x has a shape of (29903, 10) and y has a shape of (29903, 10). Even after adding a third dimension to x by reshaping it to (-1, 10, 1), I get a ValueError about a mismatch between input sizes 10 and 2, whether return_sequences is set to True or not.
ValueError encountered: Dimensions must be equal, but are 10 and 2 for '{{node mean_absolute_error/sub}} = Sub[T=DT_FLOAT](sequential_9/bidirectional_9/concat, IteratorGetNext:1)' with input shapes: [?,10,10], [?,2,1].
Here's the code:
import pandas
import keras
from keras import layers
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

lyst = pandas.read_csv('legasee.csv', index_col=0)
x = pandas.DataFrame(lyst.iloc[:, 0:10])   #.values
y = pandas.DataFrame(lyst.iloc[:, 10:13])  #.values
x.shape, y.shape  # quick shape check
trinx, tex, triy, tey = train_test_split(x, y, test_size=0.2, random_state=0)
scaX = StandardScaler()
scaY = StandardScaler()
trinx = scaX.fit_transform(trinx)
triy = scaY.fit_transform(triy)
tex = scaX.transform(tex)  # transform only: the scalers are already fit on the training data
tey = scaY.transform(tey)
trinx = trinx.reshape(-1, 10, 1)
triy = triy.reshape(-1, 2, 1)
moe = keras.Sequential(
    [
        keras.layers.Bidirectional(
            layers.LSTM(5, return_sequences=True, activation='tanh'),
        ),
        # keras.layers.Flatten(),
        # keras.layers.Dense(10, activation='tanh'),
    ]
)
moe.compile(
    loss='mae',
    optimizer=keras.optimizers.Adam(learning_rate=0.01),
    metrics=['accuracy'],
)
moe.fit(trinx, triy, batch_size=64, epochs=10, verbose=2)
Any help would genuinely be appreciated.
Could someone look at this LSTM code? I want to train on data with the shapes below, but I get an error that I think is related to the batch size. I don't know what batch size to choose; currently it is 64. Should I use a different batch size, or is the error unrelated to it?
The shapes I chose for this code: X is (7311, 17, 124) and y is (7311, 1).
The error:
InvalidArgumentError: Incompatible shapes: [16] vs. [64]
[[node gradient_tape/binary_crossentropy/weighted_loss/Mul (defined at <ipython-input-74-f95f7e276c58>:1) ]] [Op:__inference_train_function_138498]
df = pd.read_csv("train_data.csv")
timestep = 17 #from 1 to 23 (17 with the current NaN strategy)
threshold_for_classification = -8
X_scaler = MinMaxScaler()
y_scaler = MinMaxScaler()
fill_X = -0.01
seed = 11
# RNN hyperparameters
epochs = 75
batch = 64
val_split = 0.25
test_split = 0.25
lr = 0.0001
adam = optimizers.Nadam() #(lr)
class_weight = {True: 5.,
                False: 1.}
verbose = 1
#Dropping first the empty column and then rows with NaNs
df = df.drop("c_rcs_estimate", axis=1)
df = df.dropna(how='any')
#Filtering events with len=1 or min_tca > 2 or max_tca < 2
def conditions(event):
    x = event["time_to_tca"].values
    return ((x.min() < 2.0) & (x.max() > 2.0) & (x.shape[0] > 1))
df = df.groupby('event_id').filter(conditions)
#OHE for c_object_type (5 categories) -> 5 new features
df["mission_id"] = df["mission_id"].astype('category')
df["c_object_type"] = df["c_object_type"].astype('category')
df = pd.get_dummies(df)
#Getting y as 1D-array
y = df.groupby(["event_id"])["risk"].apply(lambda x: x.iloc[-1]).values.reshape(-1, 1)
#Scaling y
_ = y_scaler.fit(df["risk"].values.reshape(-1, 1)) #using the whole risk feature to scale the target 'y'
y = y_scaler.transform(y)
#Getting X as df (dropping rows with tca < 2)
df = df.loc[df["time_to_tca"]>2]
#Adding feature 'event_length' for counting how many instances each event has
df["event_length"] = df.groupby('event_id')['event_id'].transform(lambda x: x.value_counts().idxmax())
#Scaling X
df = pd.DataFrame(X_scaler.fit_transform(df), columns=df.columns)
#Transforming X into a 3D-array
events = df["event_id"].nunique() #rows
features = len(df.columns) #columns
X = np.zeros((events,timestep,features))
X.fill(fill_X)
i = 0
def df_to_3darray(event):
    global X, i
    # Transforming an event to a time series of shape (1, timesteps, columns)
    row = event.values.reshape(1, event.shape[0], event.shape[1])
    # Condition is needed to slice arrays correctly:
    # is timestep greater than the event's time-series length?
    if timestep >= row.shape[1]:
        X[i:i+1, -row.shape[1]:, :] = row
    else:
        X[i:i+1, :, :] = row[:, -timestep:, :]
    # index to iterate over the X array
    i = i + 1
    # the dataframe remains intact, while the X array has been filled
    return event
df.groupby("event_id").apply(df_to_3darray)
#Dropping event_id to remove noise
X = X[:,:,1:]
#TODO: Padding with specific values column-wise instead of zeros.
#TODO: Separating time dependent and independent feature in 2 X arrays
print(X.shape, y.shape)
#computing scaled threshold
th = np.array([threshold_for_classification]).reshape(-1,1)
th = y_scaler.transform(th)
threshold_scaled = th[0,0]
#Splitting arrays
y_boolean = (y > threshold_scaled).reshape(-1,1)
X_train, X_test, y_train_numeric, y_test_numeric = train_test_split(X, y,
                                                                    stratify=y_boolean,
                                                                    shuffle=True,
                                                                    random_state=seed,
                                                                    test_size=test_split)
y_train_boolean = (y_train_numeric > threshold_scaled).reshape(-1,1)
X_train, X_val, y_train_numeric, y_val_numeric = train_test_split(X_train, y_train_numeric,
                                                                  stratify=y_train_boolean,
                                                                  shuffle=True,
                                                                  random_state=seed,
                                                                  test_size=val_split)
#transforming it into a classification task -> y_train, y_test boolean
y_train = (y_train_numeric > threshold_scaled).reshape(-1,1)
y_val = (y_val_numeric > threshold_scaled).reshape(-1,1)
y_test = (y_test_numeric > threshold_scaled).reshape(-1,1)
X_train = tf.convert_to_tensor(X_train,dtype=tf.int64)
X_test = tf.convert_to_tensor( X_test,dtype=tf.int64)
y_train_numeric = tf.convert_to_tensor(y_train_numeric,dtype=tf.int64)
y_test_numeric = tf.convert_to_tensor(y_test_numeric,dtype=tf.int64)
y_train_boolean = tf.convert_to_tensor(y_train_boolean,dtype=tf.int64)
X_val = tf.convert_to_tensor(X_val,dtype=tf.int64)
y_val_numeric = tf.convert_to_tensor(y_val_numeric,dtype=tf.int64)
y_train = tf.convert_to_tensor(y_train,dtype=tf.int64)
y_val = tf.convert_to_tensor(y_val,dtype=tf.int64)
y_test = tf.convert_to_tensor(y_test,dtype=tf.int64)
y_boolean = tf.convert_to_tensor(y_boolean,dtype=tf.int64)
#Percentage of high risks in train
print("TRAIN {:0.1f}, {:0.1f}, {:0.3f}".format(np.sum(y_train), y_train.shape[0], np.sum(y_train)/y_train.shape[0]))
#Percentage of high risks in val
print("VAL {:0.1f}, {:0.1f}, {:0.3f}".format(np.sum(y_val), y_val.shape[0], np.sum(y_val)/y_val.shape[0]))
#Percentage of high risks in test
print("TEST {:0.1f}, {:0.1f}, {:0.3f}".format(np.sum(y_test), y_test.shape[0], np.sum(y_test)/y_test.shape[0]))
# Model activation selu
input_tensor = Input(batch_shape=(batch, timestep, X_train.shape[2]))
rnn_1 = LSTM(32, stateful=False, dropout=0.15, recurrent_dropout=0.3, return_sequences=True, kernel_regularizer=L1L2(l1=0.0, l2=0.01))(input_tensor)
batch_1 = BatchNormalization()(rnn_1)
rnn_2 = LSTM(16, stateful=False, dropout=0.15, recurrent_dropout=0.3, return_sequences=True, kernel_regularizer=L1L2(l1=0.0, l2=0.01))(batch_1)
batch_2 = BatchNormalization()(rnn_2)
rnn_3 = LSTM(8, stateful=False, dropout=0.15, recurrent_dropout=0.3, return_sequences=False, kernel_regularizer=L1L2(l1=0.0, l2=0.01))(batch_2)
batch_3 = BatchNormalization()(rnn_3)
output_tensor = Dense(units = 1, activation='sigmoid')(batch_3)
model = Model(inputs=input_tensor, outputs=output_tensor)
model.compile(loss='binary_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
model.summary()
model_history = model.fit(X_train, y_train,
                          epochs=epochs,
                          batch_size=batch,
                          #shuffle=True,  # NOTE
                          validation_data=(X_val, y_val),
                          verbose=verbose,
                          class_weight=class_weight
                          ).history
I would suggest changing this line
input_tensor = Input(batch_shape=(batch, timestep, X_train.shape[2]))
to
input_tensor = tf.keras.layers.Input(shape=(timestep, X_train.shape[2]))
and then defining your batch_size in model.fit, making sure that X_train and y_train have the same number of samples. A minimal sketch of the idea follows.
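Putting that suggestion together, here is a minimal sketch (assuming tf.keras; the single-LSTM stack is simplified for illustration, not the asker's full model):
import tensorflow as tf

# batch dimension left flexible: only (timesteps, features) is fixed
input_tensor = tf.keras.layers.Input(shape=(timestep, X_train.shape[2]))
x = tf.keras.layers.LSTM(32, return_sequences=False)(input_tensor)
output_tensor = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=input_tensor, outputs=output_tensor)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# batch_size is given only to fit(); a smaller final batch (e.g. 16 of 64)
# no longer conflicts with a hard-coded batch dimension in the Input layer
model.fit(X_train, y_train, epochs=epochs, batch_size=batch,
          validation_data=(X_val, y_val), verbose=verbose)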
Here is my block of code:
x_train = []
def preprocess_dataset(batch_size, normalize=True):
    # first accessing RSSI columns to train x-axis
    col_list = [0, 1]
    trainX_data_frame = pd.read_csv('/home/Documents/generated_rssi_dataset.csv', usecols=col_list)
    trainX_rows = pd.DataFrame(trainX_data_frame)
    for trainX_row in trainX_rows:
        train_x1 = trainX_row.loc[0]
        #train_x1 = trainX_row[0].loc[trainX_row]
        train_x2 = trainX_row.loc[1]
        training_x = ((train_x1 + train_x2)/2)
        x_train = x_train.append(training_x)
    return np.array(x_train), np.array(y_train)

for i in range(training_cycles):
    x_train = preprocess_dataset(x_train)
    y_train = preprocess_dataset(np.array(y_train))
    x_train = x_train.reshape(x_train, time_steps, n_features)
    history = model.fit(x_train, y_train, epochs=30, batch_size=10, validation_split=0.2)
I am getting an AttributeError caused by a NameError. I have checked the other parts of the code; nothing is missing a declaration or definition anywhere. Only the function preprocess_dataset has the problem. I understand it is due to the for loop: if the loop isn't executed, x_train won't have the attribute (a short illustration of the trap follows below). But I don't know how to resolve this issue. Any help will be greatly appreciated.
Note: y_train is a similar block so I have not added it here in the code section.
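For reference, a two-line illustration of the trap in x_train = x_train.append(training_x): list.append mutates the list in place and returns None, so reassigning its result replaces the list with None.
values = []
values = values.append(1.0)   # append returns None, so values is now None
# values.append(2.0)          # AttributeError: 'NoneType' object has no attribute 'append'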
%%%%%% Update 4th July 2021 %%%%%%
Following the comment by Nicolas Gervais, I changed the code as below.
x_train = []
y_train = []

def preprocess_dataset_x(batch_size, normalize=True):
    # first accessing RSSI columns to train x-axis
    col_list_x = [0, 1]
    trainX_data_frame = pd.read_csv('/home/kobuki/Documents/generated_rssi_dataset.csv', usecols=col_list_x)
    trainX_rows = pd.DataFrame(trainX_data_frame)
    for index, row in trainX_rows.iterrows():
        train_x1 = trainX_rows.loc[0]
        train_x2 = trainX_rows.loc[1]
        training_x = ((train_x1 + train_x2)/2)
        x_train.append(training_x)
    print("x_train calculated and stored in array")
    return np.array(x_train)

def preprocess_dataset_y(batch_size, normalize=True):
    # accessing loc coordinates to train y-axis
    col_list_y = [2, 3]
    trainY_data_frame = pd.read_csv('/home/kobuki/Documents/generated_rssi_dataset.csv', usecols=col_list_y)
    trainY_rows = pd.DataFrame(trainY_data_frame)
    for index, row in trainY_rows.iterrows():
        train_y1 = trainY_rows.loc[2]
        train_y2 = trainY_rows.loc[3]
        training_y = (train_y1, train_y2)
        y_train.append(training_y)
    print("y_train calculated and stored in array")
    return np.array(y_train)

for i in range(training_cycles):
    x_train = preprocess_dataset_x(np.array(x_train))
    y_train = preprocess_dataset_y(np.array(y_train))
    ### x_train = tf.data.Dataset.from_tensor_slices(x_train)
    ### y_train = tf.data.Dataset.from_tensor_slices(y_train)
    ### x_train = x_train.reshape(x_train, time_steps, n_features)
    ### y_train = y_train.reshape(y_train, time_steps, n_features)
    history = model.fit(x_train, y_train, epochs=30, batch_size=10)
After this point, while fitting the model to a Conv1D, I now get the error below. I am also getting errors with reshaping, and the arrays created for x_train and y_train are 460, 460, 2. I don't know why it says min_ndim expected is 3. Please advise.
%%% Model %%%
def createCnnLstmModel(time_steps, n_features):
    ## CorNet architecture
    model = Sequential()
    model.add(Conv1D(filters=32, kernel_size=5, activation='relu', input_shape=(time_steps, n_features)))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=4))
    model.add(Dropout(0.1))
    model.add(Conv1D(filters=32, kernel_size=5, activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=4))
    model.add(Dropout(0.1))
    model.add(LSTM(128, activation='tanh', return_sequences=True))
    model.add(LSTM(128, activation='tanh'))
    model.add(Dense(1))
    # soft_acc is a custom metric defined elsewhere in my code
    model.compile(optimizer='RMSProp', loss='MAE', metrics=['mae', 'mape', soft_acc])
    model.summary()
    return model
time_steps is 460 and n_features is 2.
I'm trying to create a stock-prediction model in both PyTorch and Keras. I have already followed some tutorials online, modified the code to fit my data, and it works fine.
Now I'm translating that code into an equivalent Keras model. I've already created the model and run the predictions, but the problem is that regression.predict() in Keras returns a (1006, 19) NumPy array, whereas predictions = model(x_test) in PyTorch returns (1006, 1), which is what I need for my subsequent work so I can plot the results.
Here's my Keras code so far:
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
lookback = 20
x_train_keras, y_train_keras, x_test_keras, y_test_keras = split_data(price, lookback)
print('x_train.shape = ',x_train_keras.shape) # x_train.shape = (1006, 19, 1)
print('y_train.shape = ',y_train_keras.shape) # y_train.shape = (1006, 1)
print('x_test.shape = ',x_test_keras.shape) # x_test.shape = (252, 19, 1)
print('y_test.shape = ',y_test_keras.shape) # y_test.shape = (252, 1)
regression = Sequential()
regression.add(LSTM(units=50, return_sequences=True, kernel_initializer='glorot_uniform', input_shape=(x_train_keras.shape[1],1)))
regression.add(Dropout(0.2))
regression.add(LSTM(units=50,kernel_initializer='glorot_uniform',return_sequences=True))
regression.add(Dropout(0.2))
regression.add(LSTM(units=50,kernel_initializer='glorot_uniform',return_sequences=True))
regression.add(Dropout(0.2))
regression.add(LSTM(units=50,kernel_initializer='glorot_uniform',return_sequences=True))
regression.add(Dropout(0.2))
regression.add(Dense(units=1))
regression.compile(optimizer='adam', loss='mean_squared_error')
from keras.callbacks import History
history = History()
history = regression.fit(x_train_keras, y_train_keras, batch_size=30, epochs=100, callbacks=[history])
train_predict_keras = regression.predict(x_train_keras)
train_predict_keras = train_predict_keras.reshape((train_predict_keras.shape[0], train_predict_keras.shape[1]))
predict = pd.DataFrame(scaler.inverse_transform(train_predict_keras))
original = pd.DataFrame(scaler.inverse_transform(y_train_keras))
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, wspace=0.2)
plt.subplot(1,2,1)
ax = sns.lineplot(x=original.index, y=original[0], label='Data', color='royalblue')
ax = sns.lineplot(x=predict.index, y=predict[0], label='Training Prediction', color='tomato')
ax.set_title('Stock Price', size=14, fontweight='bold')
ax.set_xlabel("Days", size = 14)
ax.set_ylabel("Cost (USD)", size = 14)
ax.set_xticklabels('', size=10)
plt.subplot(1,2,2)
ax = sns.lineplot(data=history.history.get('loss'), color='royalblue')
ax.set_xlabel("Epoch", size = 14)
ax.set_ylabel("Loss", size = 14)
ax.set_title("Training Loss", size = 14, fontweight='bold')
fig.set_figheight(6)
fig.set_figwidth(16)
# Make predictions
test_predict_keras = regression.predict(x_test_keras)
# Invert predictions
train_predict_keras = scaler.inverse_transform(train_predict_keras)
y_train_keras = scaler.inverse_transform(y_train_keras)
test_predict_keras = scaler.inverse_transform(test_predict_keras.reshape((test_predict_keras.shape[0], test_predict_keras.shape[1])))
y_test = scaler.inverse_transform(y_test_keras)
# Calculate root MSE
trainScore = math.sqrt(mean_squared_error(y_train_keras[:,0], train_predict_keras[:,0]))
print(f'Train score {trainScore:.2f} RMSE')
testScore = math.sqrt(mean_squared_error(y_test[:,0], test_predict_keras[:,0]))
print(f'Test score {testScore:.2f} RMSE')
# shift train predictions for plotting
trainPredictPlot_keras = np.empty_like(price)
trainPredictPlot_keras[:, :] = np.nan
trainPredictPlot_keras[lookback:len(train_predict_keras)+lookback, :] = train_predict_keras
# shift test predictions for plotting
testPredictPlot_keras = np.empty_like(price)
testPredictPlot_keras[:, :] = np.nan
testPredictPlot_keras[len(train_predict_keras)+lookback-1:len(price)-1, :] = test_predict_keras
original = scaler.inverse_transform(price['Close'].values.reshape(-1,1))
predictions_keras = np.append(trainPredictPlot_keras, testPredictPlot_keras, axis=1)
predictions_keras = np.append(predictions_keras, original, axis=1)
result_keras = pd.DataFrame(predictions_keras)
The error occurs on the trainPredictPlot_keras[lookback:len(train_predict_keras)+lookback, :] = train_predict_keras line, saying: could not broadcast input array from shape (1006,19) into shape (1006,1).
Set return_sequences=False for the last LSTM layer. You need to do as follows:
....
....
regression.add(LSTM(units=50, kernel_initializer='glorot_uniform', return_sequences=False))
regression.add(Dropout(0.2))
regression.add(Dense(units=1))
regression.compile(optimizer='adam', loss='mean_squared_error')
Check the doc:
return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: False.
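For a concrete view of what the flag changes, here is a small standalone sketch (toy shapes, not the asker's data):
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM

seq_model = Sequential([LSTM(50, return_sequences=True, input_shape=(19, 1))])
last_model = Sequential([LSTM(50, return_sequences=False, input_shape=(19, 1))])

x = np.zeros((2, 19, 1), dtype=np.float32)
print(seq_model.predict(x).shape)   # (2, 19, 50) -> one 50-unit output per timestep
print(last_model.predict(x).shape)  # (2, 50)     -> only the last timestep's output
With return_sequences=True on every layer, the final Dense(units=1) is applied per timestep, which is why predict() was returning (1006, 19) instead of (1006, 1).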
Despite going through multiple examples, I still don't understand how to classify variable-length sequences using Keras, similar to this question. I can train a network that detects the frequency of sinusoids of varying length by using masking:
from keras import models
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Masking
from keras.optimizers import RMSprop
from keras.losses import categorical_crossentropy
from keras.preprocessing.sequence import pad_sequences
import numpy as np

def gen_noise(noise_len, mag):
    return np.random.uniform(size=noise_len) * mag

def gen_sin(t_val, freq):
    return 2 * np.sin(2 * np.pi * t_val * freq)

def train_rnn(x_train, y_train, max_len, mask, number_of_categories):
    epochs = 3
    batch_size = 500
    # three hidden layers of 256 each
    vec_dims = 1
    hidden_units = 256
    in_shape = (max_len, vec_dims)
    model = models.Sequential()
    model.add(Masking(mask, name="in_layer", input_shape=in_shape))
    model.add(LSTM(hidden_units, return_sequences=False))
    model.add(Dense(number_of_categories, activation='softmax', name='output'))
    model.compile(loss=categorical_crossentropy, optimizer=RMSprop())
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_split=0.05)
    return model

def gen_sig_cls_pair(freqs, t_stops, num_examples, noise_magnitude):
    x = []
    y = []
    num_cat = len(freqs)
    dt = 0.01
    max_t = int(np.max(t_stops) / dt)
    for f_i, f in enumerate(freqs):
        for t_stop in t_stops:
            t_range = np.arange(0, t_stop, dt)
            t_len = t_range.size
            for _ in range(num_examples):
                sig = gen_sin(f, t_range) + gen_noise(t_len, noise_magnitude)
                x.append(sig)
                one_hot = np.zeros(num_cat, dtype=bool)
                one_hot[f_i] = 1
                y.append(one_hot)
    pad_kwargs = dict(padding='post', maxlen=max_t, value=np.nan, dtype=np.float32)
    return pad_sequences(x, **pad_kwargs), np.array(y)

if __name__ == '__main__':
    noise_mag = 0.01
    mask_val = -10
    frequencies = (5, 7, 10)
    signal_lengths = (0.8, 0.9, 1)
    x_in, y_in = gen_sig_cls_pair(frequencies, signal_lengths, 50, noise_mag)
    mod = train_rnn(x_in[:, :, None], y_in, 100, mask_val, len(frequencies))
However, I don't understand how I'm supposed to tell Keras about the other sequences. I thought I could mask them too, but when I try, they just output NaN.
testing_dat, expected = gen_sig_cls_pair(frequencies, signal_lengths, 1, 0)
res = mod.predict(testing_dat[:, :, None])

import matplotlib.pyplot as plt
fig, axes = plt.subplots(3)
axes[0].plot(np.concatenate(testing_dat), label="input")
axes[1].plot(np.argmax(res, axis=1), "ro", label="result", alpha=0.2)
axes[1].plot(np.argmax(expected, axis=1), "bo", label="expected", alpha=0.2)
axes[1].legend(bbox_to_anchor=(1.1, 1))
axes[2].plot(res)
plt.show()
How do I make a network that can evaluate inputs of varying lengths?
You can pad the input sequences (usually with zeros) or you can use batches of size 1 with varying input size, as outlined in fchollet's answer on the Keras github:
for seq, label in zip(sequences, y):
    model.train(np.array([seq]), [label])
Alternatively, if your type of problem allows it, you can extract subsequences of the original time series with length less than that of the shortest sequence (sketched below). This third option also lets you add redundancy to the dataset if you have few samples, and it reduces the chance of overfitting.
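A minimal sketch of that subsequence idea, assuming a fixed window no longer than the shortest series (the function name and window size are illustrative, not from the original code):
import numpy as np

def extract_subsequences(sequences, labels, window):
    xs, ys = [], []
    for seq, label in zip(sequences, labels):
        # every contiguous window of length `window` becomes one training sample
        for start in range(len(seq) - window + 1):
            xs.append(seq[start:start + window])
            ys.append(label)
    # add a trailing feature axis so the result is (samples, window, 1)
    return np.array(xs)[..., None], np.array(ys)

# e.g. X_sub, y_sub = extract_subsequences(x, y, window=80)
# (80 samples = the 0.8 s signals above at dt = 0.01)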
EDIT:
Seanny123 (OP) pointed out that fchollet's lines above contain model.train, which is not valid code.
He solved the problem using batches of size 1 and the following code:
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np

def gen_sig(num_samples, seq_len):
    one_indices = np.random.choice(a=num_samples, size=num_samples // 2, replace=False)
    x_val = np.zeros((num_samples, seq_len), dtype=bool)
    x_val[one_indices, 0] = 1
    y_val = np.zeros(num_samples, dtype=bool)
    y_val[one_indices] = 1
    return x_val, y_val

N_train = 100
N_test = 10
recall_len = 20

X_train, y_train = gen_sig(N_train, recall_len)
X_test, y_test = gen_sig(N_test, recall_len)

print('Build STATEFUL model...')
model = Sequential()
model.add(LSTM(10, batch_input_shape=(1, 1, 1), return_sequences=False, stateful=True))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('Train...')
for epoch in range(15):
    mean_tr_acc = []
    mean_tr_loss = []
    for seq_idx in range(X_train.shape[0]):
        start_val = X_train[seq_idx, 0]
        assert y_train[seq_idx] == start_val
        assert tuple(np.nonzero(X_train[seq_idx, :]))[0].shape[0] == start_val
        y_in = np.array([y_train[seq_idx]], dtype=bool)
        for j in range(np.random.choice(a=np.arange(5, recall_len+1))):
            x_in = np.array([[[X_train[seq_idx][j]]]])
            tr_loss, tr_acc = model.train_on_batch(x_in, y_in)
            mean_tr_acc.append(tr_acc)
            mean_tr_loss.append(tr_loss)
        model.reset_states()

    print('accuracy training = {}'.format(np.mean(mean_tr_acc)))
    print('loss training = {}'.format(np.mean(mean_tr_loss)))
    print('___________________________________')

    mean_te_acc = []
    mean_te_loss = []
    for seq_idx in range(X_test.shape[0]):
        start_val = X_test[seq_idx, 0]
        assert y_test[seq_idx] == start_val
        assert tuple(np.nonzero(X_test[seq_idx, :]))[0].shape[0] == start_val
        y_in = np.array([y_test[seq_idx]], dtype=bool)
        for j in range(np.random.choice(a=np.arange(5, recall_len+1))):
            te_loss, te_acc = model.test_on_batch(np.array([[[X_test[seq_idx][j]]]], dtype=bool), y_in)
            mean_te_acc.append(te_acc)
            mean_te_loss.append(te_loss)
        model.reset_states()

    print('accuracy testing = {}'.format(np.mean(mean_te_acc)))
    print('loss testing = {}'.format(np.mean(mean_te_loss)))
    print('___________________________________')