This is a recurrent neural network (LSTM) model meant to predict future values of forex market movement.
The data set shape is (1713, 50); the first column is the datetime index and the others are numeric values.
Right after printing the training data and validation data shapes, the error starts.
Here is the code I tried to implement:
from sklearn.preprocessing import MinMaxScaler
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv(r"E:\Business\Stocks\StocksDF.csv", parse_dates=[0], index_col=[0], low_memory=False, dtype='object')
features = len(df.columns)
val_ratio = 0.2
epochs = 500
batch_size = len(df)
sequence_length = 822
data = df.as_matrix()
data_processed = []
for index in range(len(data) - sequence_length):
    data_processed.append(data[index: index + sequence_length])
data_processed = np.array(data_processed)
val_split = round((1 - val_ratio) * data_processed.shape[0])
train = data_processed[:, int(val_split), :]
val = data_processed[int(val_split):, :]
print('Training data: {}'.format(train.shape))
print('Validation data: {}'.format(val.shape))
train_samples, train_nx, train_ny = train.shape
val_samples, val_nx, val_ny = val.shape
train = train.reshape((train_samples, train_nx * train_ny))
val = val.reshape((val_samples, val_nx * val_ny))
preprocessor = MinMaxScaler().fit(train)
train = preprocessor.transform(train)
val = preprocessor.transform(val)
train = train.reshape((train_samples, train_nx, train_ny))
val = val.reshape((val_samples, val_nx, val_ny))
X_train = train[:, : -1]
y_train = train[:, -1][:, -1]
X_val = val[:, : -1]
y_val = val[:, -1][:, -1]
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], features))
X_val = np.reshape(X_val, (X_val.shape[0], X_val.shape[1], features))
model = Sequential()
model.add(LSTM(input_shape=(X_train.shape[1:]), units=100, return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(2, return_sequences=False))
model.add(Dropout(0.25))
model.add(Dense(units=1))
model.add(Activation("relu"))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae', 'mse', 'accuracy'])
history = model.fit(
    X_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=2)
preds_val = model.predict(X_val)
diff = []
for i in range(len(y_val)):
    pred = preds_val[i][0]
    diff.append(y_val[i] - pred)
real_min = preprocessor.data_min_[104]
real_max = preprocessor.data_max_[104]
print(preprocessor.data_min_[:1])
print(preprocessor.data_max_[:1])
preds_real = preds_val * (real_max - real_min) + real_min
y_val_real = y_val * (real_max - real_min) + real_min
plt.plot(preds_real, label='Predictions')
plt.plot(y_val_real, label='Actual values')
plt.xlabel('test')
plt.legend(loc=0)
plt.show()
print(model.summary())
I got this error:
Training data: (891, 50)
Validation data: (178, 822, 50)
Using TensorFlow backend.
Traceback (most recent call last):
  File "E:/Tutorial/new.py", line 31, in <module>
    train_samples, train_nx, train_ny = train.shape
ValueError: not enough values to unpack (expected 3, got 2)
There's an error in this line:
train = data_processed[:, int(val_split), :]
It selects a single time step (index val_split along axis 1) from every sample, which collapses the array to 2-D; that is why the training data prints as (891, 50) and the 3-way unpack fails. Split along the sample axis (axis 0) instead:
train = data_processed[:int(val_split), :, :]
val = data_processed[int(val_split):, :, :]
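A minimal sketch on a dummy array with the same shapes as in the question, showing the difference between the two slices:
import numpy as np

data_processed = np.zeros((891, 822, 50))          # (samples, time steps, features)
val_split = round(0.8 * data_processed.shape[0])   # 713

wrong = data_processed[:, val_split, :]            # one time step from every sample -> 2-D
print(wrong.shape)                                 # (891, 50)

train = data_processed[:val_split, :, :]           # first 713 samples -> still 3-D
val = data_processed[val_split:, :, :]             # remaining 178 samples
print(train.shape, val.shape)                      # (713, 822, 50) (178, 822, 50)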
I am trying to predict the crime rate using an LSTM model, but I am getting an error that says "ValueError: X has 1 features, but MinMaxScaler is expecting 5 features as input". How do I correct it?
The code is below. I have an array with 5 elements, which is why it is expecting 5 inputs. I do not know what to change on the MinMaxScaler.
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
dataset_train = pd.read_excel('hotspot-NN.xls')
training_set = dataset_train.iloc[:, 1:].values
dataset_train.head()
print (dataset_train)
print (training_set)
training_set = np.transpose(training_set)
print (training_set)
print (tf.__version__)
sc = MinMaxScaler(feature_range=(0,1))
training_set_scaled = sc.fit_transform(training_set)
x_train = []
y_train = []
for i in range(3, 12):
    x_train.append(training_set_scaled[i-3:i, 0])
    y_train.append(training_set_scaled[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Dense
model = Sequential()
model.add(LSTM(units = 50, return_sequences = True, input_shape = (x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
model.add(LSTM(units = 50, return_sequences = False))
model.add(Dropout(0.2))
model.add(Dense(units = 5))
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
model.fit(x_train, y_train, epochs = 100, batch_size = 4)
dataset_test = pd.read_excel('hotspot-NN.xls')
real_data = dataset_test.iloc[:, 1:].values
dataset_total = pd.concat((dataset_train['Day 1'], dataset_test['Day 1']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 3:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
x_test = []
for i in range(3, 12):
    x_test.append(inputs[i-3:i, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
predicted_data = model.predict(x_test)
predicted_data = sc.inverse_transform(predicted_data)
plt.plot(real_data, color = 'black', label = 'crime rate prediction')
plt.plot(predicted_data, color = 'green', label ='crime rate' )
plt.title('crime rate prediction')
plt.xlabel('days')
plt.ylabel('crimes')
plt.legend()
plt.show()
I have no idea how to fix that.
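The error itself is easy to reproduce: MinMaxScaler remembers how many columns it was fit on, so transforming a single-column array after fitting on five columns raises exactly this ValueError. A minimal sketch (dummy data, not the hotspot-NN.xls values):
import numpy as np
from sklearn.preprocessing import MinMaxScaler

sc = MinMaxScaler(feature_range=(0, 1))
sc.fit(np.random.rand(12, 5))               # fit on 5 columns -> 5 features

sc.transform(np.random.rand(3, 5))          # works: same 5 columns
try:
    sc.transform(np.random.rand(9, 1))      # fails: only 1 column
except ValueError as e:
    print(e)  # X has 1 features, but MinMaxScaler is expecting 5 features as input.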
I want to do a grid search for my model, and my model is shown below.
def model_lstm(time_steps=24, n_features=40,
               optimizer=tf.keras.optimizers.Adam,
               learning_rate=0.001,
               dropout=0.5,
               n_units_LSTM=256,
               n_units_1=200):
    activation2 = 'relu'
    model = Sequential()
    model.add(LSTM(units=n_units_LSTM, input_shape=(time_steps, n_features)))
    model.add(Dropout(dropout))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(1, activation='sigmoid'))
    optimizer = optimizer(learning_rate=learning_rate)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    print(model.summary())
    return model
There are 4 parameters that I want to grid search: learning_rate, n_units_LSTM, n_units_1, and dropout. I want every Dense layer to get the same number of units, so I added a variable named n_units_1.
def grid_search(data, metode):
    keras.backend.clear_session()
    x_train, y_train = sequence_data(data)
    params = {
        'learning_rate': [0.01, 0.001, 0.0001],
        'n_units_LSTM': [64, 128, 256],
        'n_units_1': [50, 100, 150, 200, 250, 300],
        'dropout': [0.1, 0.2, 0.25, 0.5]
    }
    scorers = {
        'accuracy_score': make_scorer(accuracy_score)
    }
    model = KerasClassifier(build_fn=model_lstm, verbose=0)
    cv = cross_validate(5)
    start = time.time()
    grid = GridSearchCV(estimator=model,
                        param_grid=params,
                        n_jobs=-1,
                        verbose=1,
                        cv=cv,
                        scoring=scorers,
                        refit='accuracy_score')
    tf.random.set_seed(123)
    grid.fit(x_train, y_train)
    end = time.time()
    runtime = end - start
    result = grid.best_params_
    results = grid.cv_results_
    print('-----------------------------------------')
    print(f'Best Parameter : {result}')
    print(f'Runtime : {runtime}')
    print('-----------------------------------------')
    return grid
And when I run my grid, I get this error:
A task has failed to un-serialize. Please ensure that the arguments of the function are all picklable.
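This error usually means joblib's worker processes (spawned because of n_jobs=-1) failed to pickle something handed to them, and Keras models and sessions are generally not picklable. A minimal sketch of a common workaround, replacing the corresponding lines inside grid_search and assuming the rest of the setup stays the same: keep the search in a single process so nothing crosses a pickle boundary, and pass a plain fold count instead of cross_validate (which is a function that runs cross-validation, not a CV splitter):
model = KerasClassifier(build_fn=model_lstm, verbose=0)
grid = GridSearchCV(estimator=model,
                    param_grid=params,
                    n_jobs=1,               # single process: avoids the un-serialize error
                    verbose=1,
                    cv=5,                   # plain 5-fold CV instead of cross_validate(5)
                    scoring=scorers,
                    refit='accuracy_score')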
Step 1: Let's get the model to predict an age based on 9 abalone features. I changed the loss to mean_squared_error for the age prediction.
Step 2: Add a grid search (pending).
import pandas as pd
from pandas import Series
from pandas import concat
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, LSTM, Embedding,Flatten,Dropout, Dense, Concatenate, TimeDistributed, Bidirectional,Attention
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
def create_dataset(dataset, look_back=8):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)
def difference(dataset, interval=1):
    diff = list()
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return Series(diff)
def timeseries_to_supervised(data, lag=1):
    df = pd.DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag+1)]
    columns.append(df)
    df = concat(columns, axis=1)
    return df
def scale(train, test):
    # fit scaler
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # transform train
    train = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train)
    # transform test
    test = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled
df = pd.read_csv('abalone.data', names=['Sex', 'Length', 'Diameter', 'Height',
                                        'Whole weight', 'Shucked weight',
                                        'Viscera weight', 'Shell weight', 'Rings'])
df.reset_index(inplace=True)
df['Age']=df['Rings'].apply(lambda x: x/1.5)
encoder=LabelEncoder()
df['Sex']=encoder.fit_transform(df['Sex'])
raw_values = df.values
#diff_values = difference(raw_values, 1)
features=10
#supervised = timeseries_to_supervised(diff_values, features)
supervised = timeseries_to_supervised(raw_values, features)
supervised_values = supervised.values[features:,:]
train_size = int(len(df) * 0.70)
test_size = len(df) - train_size
# split data into train and test-sets
train, test = supervised_values[0:train_size, :], supervised_values[train_size:, :]
def model_lstm(X, time_steps=24, n_features=9,
               optimizer=tf.keras.optimizers.Adam,
               learning_rate=0.001,
               dropout=0.5,
               n_units_LSTM=256,
               n_units_1=200,
               batch_size=1):
    activation2 = 'relu'
    model = Sequential()
    model.add(LSTM(units=n_units_LSTM, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dropout(dropout))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(units=n_units_1, activation=activation2))
    model.add(Dense(1))
    optimizer = optimizer(learning_rate=learning_rate)
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer)
    print(model.summary())
    return model
scaler, train_scaled, test_scaled = scale(train, test)
X_train, y_train = train_scaled[:, 0:-1], train_scaled[:, -1]
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
X_test, y_test = test_scaled[:, 0:-1], test_scaled[:, -1]
X = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
#print(y)
model=model_lstm(X_train)
history = model.fit(X_train, y_train, epochs=100, batch_size=1)
model.summary()
plt.plot(history.history['loss'])
plt.title('loss accuracy')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
I have this program that is used to predict the outflow of a reservoir, and I cannot seem to get it to predict the next day's worth of data. The data is in 30-minute increments, so I would expect 48 points of data to represent the next 24 hours. Is there something I am doing incorrectly or have missed?
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
# Prepare data
data = pd.read_csv('data.csv')
data["Date"] = pd.to_datetime(data["Date"])
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data["Outlet"].values.reshape(-1, 1))
prediction_days = 30
x_train, y_train = [], []
for x in range(prediction_days, len(scaled_data)):
    x_train.append(scaled_data[x-prediction_days:x, 0])
    y_train.append(scaled_data[x, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
# Create neural network
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=25)
# Test the model
test_start = dt.datetime(2022, 1, 1)
test_end = dt.datetime.now()
actual_outlet = data[(data['Date'] >= test_start) & (data['Date'] <= test_end)]['Outlet']
total_dataset = pd.concat((data['Outlet'], actual_outlet), axis=0)
model_inputs = total_dataset[len(total_dataset) - len(actual_outlet) - prediction_days:].values
model_inputs = model_inputs.reshape(-1, 1)
model_inputs = scaler.transform(model_inputs)
x_test = []
for x in range(prediction_days, len(model_inputs)):
    x_test.append(model_inputs[x-prediction_days:x, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
prediction_outlet = model.predict(x_test)
prediction_outlet = scaler.inverse_transform(prediction_outlet)
print(len(prediction_outlet))
The length of my prediction_outlet comes to 1056. Have I missed some steps?
Your test data has 22 days of data, since you are taking the slice from test_start to today. Since you have 48 data points for each day, that adds up to 1056 data points to predict, so you are getting the right number of predictions. On the other hand, since the data is not ordered, some of the dates are mixed up: in one instance half of the data points in a window come from Feb 02 and the rest from Jan 13, which will be very confusing for the network. You might want to order them and make sure you don't include missing dates in your slices.
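A sketch of both missing steps, reusing the variable names from the question: sort by date before building the windows, then roll the model forward 48 half-hour steps, feeding each prediction back in, to get the next 24 hours.
# sort first so every window is a contiguous 30-minute sequence
data = data.sort_values('Date').reset_index(drop=True)

# iteratively predict the next 48 half-hour steps (24 hours)
window = scaled_data[-prediction_days:, 0].tolist()   # last known window
next_day = []
for _ in range(48):
    x = np.array(window[-prediction_days:]).reshape(1, prediction_days, 1)
    yhat = model.predict(x, verbose=0)[0, 0]
    next_day.append(yhat)
    window.append(yhat)                               # feed the prediction back in

next_day = scaler.inverse_transform(np.array(next_day).reshape(-1, 1))
print(len(next_day))  # 48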
import math
import numpy as np
import pandas as pd
import pandas_datareader as pdd
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Dense, Dropout, Activation, LSTM, Convolution1D, MaxPooling1D, Flatten
from keras.models import Sequential
import matplotlib.pyplot as plt
df = pdd.DataReader('AAPL', data_source='yahoo', start='2012-01-01', end='2020-12-31')
data = df.filter(['Close'])
dataset = data.values
len(dataset)
# 2265
training_data_size = math.ceil(len(dataset)*0.7)
training_data_size
# 1586
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
# array([[0.04288701],
# [0.03870297],
# [0.03786614],
# ...,
# [0.96610873],
# [0.98608785],
# [1. ]])
train_data = scaled_data[0:training_data_size,:]
x_train = []
y_train = []
for i in range(60, len(train_data)):
    x_train.append(train_data[i-60:i, 0])
    y_train.append(train_data[i,0])
    if i<=60:
        print(x_train)
        print(y_train)
'''
[array([0.04288701, 0.03870297, 0.03786614, 0.0319038 , 0.0329498 ,
0.03577404, 0.03504182, 0.03608791, 0.03640171, 0.03493728,
0.03661088, 0.03566949, 0.03650625, 0.03368202, 0.03368202,
0.03598329, 0.04100416, 0.03953973, 0.04110879, 0.04320089,
0.04089962, 0.03985353, 0.04037657, 0.03566949, 0.03640171,
0.03619246, 0.03253139, 0.0294979 , 0.03033474, 0.02960253,
0.03002095, 0.03284518, 0.03357739, 0.03410044, 0.03368202,
0.03472803, 0.02803347, 0.02792885, 0.03556487, 0.03451886,
0.0319038 , 0.03127613, 0.03274063, 0.02688284, 0.02635988,
0.03211297, 0.03096233, 0.03472803, 0.03713392, 0.03451886,
0.03441423, 0.03493728, 0.03587866, 0.0332636 , 0.03117158,
0.02803347, 0.02897494, 0.03546024, 0.03786614, 0.0401674 ])]
[0.03933056376752886]
'''
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
# (1526, 60, 1)
model = Sequential()
model.add(Convolution1D(64, 3, input_shape= (100,4), padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Convolution1D(32, 3, padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('linear'))
model.summary()
model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=50, epochs=50, validation_data = (X_test, y_test), verbose=2)
test_data = scaled_data[training_data_size-60: , :]
x_test = []
y_test = dataset[training_data_size: , :]
for i in range(60, len(test_data)):
    x_test.append(test_data[i-60:i, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)
rmse = np.sqrt(np.mean((predictions - y_test)**2))
rmse
train = data[:training_data_size]
valid = data[training_data_size:]
valid['predictions'] = predictions
plt.figure(figsize=(16,8))
plt.title('PFE')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price in $', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close', 'predictions']])
plt.legend(['Train', 'Val', 'predictions'], loc='lower right')
plt.show()
import numpy as np
y_test, predictions = np.array(y_test), np.array(predictions)
mape = (np.mean(np.abs((predictions - y_test) / y_test))) * 100
accuracy = 100 - mape
print(accuracy)
The above is my code. I tried to edit it, but it does not seem to be working. I suspect that I did not format my dataset well, but I am new to this field, so I do not know what I should do to my code to make it fit. I hope you guys can enlighten me on this, thank you!
I encountered errors like "IndexError: index 2264 is out of bounds for axis 0 with size 2264" and
"ValueError: Input 0 of layer dense is incompatible with the layer: expected axis -1 of input shape to have value 800 but received input with shape [None, 480]".
Your model doesn't tie to your data: x_train has shape (1526, 60, 1), but the first layer declares input_shape=(100, 4). Change this line:
model.add(Convolution1D(64, 3, input_shape=(100, 4), padding='same'))
to:
model.add(Convolution1D(64, 3, input_shape=(60, 1), padding='same'))
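For completeness, a sketch of the corrected stack: with (60, 1) inputs the Flatten layer yields 15 * 32 = 480 units, exactly the shape the ValueError said it received, while the old (100, 4) build expected 25 * 32 = 800.
from keras.models import Sequential
from keras.layers import Convolution1D, MaxPooling1D, Flatten, Dense, Activation

model = Sequential()
model.add(Convolution1D(64, 3, input_shape=(60, 1), padding='same'))
model.add(MaxPooling1D(pool_size=2))   # 60 -> 30
model.add(Convolution1D(32, 3, padding='same'))
model.add(MaxPooling1D(pool_size=2))   # 30 -> 15
model.add(Flatten())                   # 15 * 32 = 480, matching the data
model.add(Dense(1))
model.add(Activation('linear'))
model.summary()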
I am trying to run code that tells me the number of falls and non-falls in fall detection of a human, and I am getting an error:
input_1:0 is both fed and fetched.
I tried running it alone, but it never works.
from keras.models import Model
from keras.utils import np_utils
import numpy as np
import pandas as pd
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import scipy.io as sio
import matplotlib.pyplot as plt
import keras
from keras.callbacks import ReduceLROnPlateau
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras import backend as K
import cv2
def global_average_pooling(x):
    return K.mean(x, axis=(2))

def global_average_pooling_shape(input_shape):
    return input_shape[0:2]

p = Lambda(global_average_pooling,
           output_shape=global_average_pooling_shape)
X = sio.loadmat('/Users/fateh/Documents/Hamidreza Work/ConvFall/ts.mat')
X=X['Data']
import csv
with open('/Users/fateh/Documents/Hamidreza Work/ConvFall/lab.csv', 'r') as mf:
    re = csv.reader(mf, delimiter=',', quotechar='|')
    re = np.array(list(re))
label = re.astype(np.float64)
Y_t = np.squeeze(label)
nb_epochs = 3
y_train =Y_t[:158]
y_test =Y_t[158:]
x_train=X[:158]
x_test=X[158:]
nb_classes = len(np.unique(y_test))
batch_size = min(x_train.shape[0]/8, 16)
y_train = (y_train - y_train.min())/(y_train.max()-y_train.min())*(nb_classes-1)
y_test = (y_test - y_test.min())/(y_test.max()-y_test.min())*(nb_classes-1)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
x_train_mean = x_train.mean()
x_train_std = x_train.std()
x_train = (x_train - x_train_mean)/(x_train_std)
x_test = (x_test - x_train_mean)/(x_train_std)
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
#x_train = np.transpose(x_train, (0, 2, 1))
#x_test = np.transpose(x_test, (0, 2, 1))
input_shape=x_train.shape[1:]
x = keras.layers.Input(x_train.shape[1:])
# drop_out = Dropout(0.2)(x)
conv1 = keras.layers.Convolution1D(300, 9, padding='same')(x)
conv1 = keras.layers.normalization.BatchNormalization()(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
conv2 = keras.layers.Convolution1D(200, 5, padding='same')(conv1)
conv2 = keras.layers.normalization.BatchNormalization()(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv3 = keras.layers.Convolution1D(100, 3, padding='same')(conv2)
conv3 = keras.layers.normalization.BatchNormalization()(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
full = p(conv3)
out = keras.layers.Dense(nb_classes, activation='softmax')(full)
model = Model(input=x, output=out)
optimizer = keras.optimizers.Adam() #'sgd'
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5,
                              patience=500, min_lr=0.001)
hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
                 verbose=1, validation_data=(x_test, Y_test), callbacks=[reduce_lr])
predict = model.predict(x_test)
preds = np.argmax(predict, axis=1)
log = pd.DataFrame(hist.history)
print(log.loc[log['loss'].idxmin]['loss'], log.loc[log['loss'].idxmin]['val_acc'])
labels = {1:'Non-Fall', 2:'Fall'}
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(preds, y_test,
                            target_names=[l for l in labels.values()]))
conf_mat = confusion_matrix(preds, y_test)
fig = plt.figure(figsize=(2,2))
res = plt.imshow(np.array(conf_mat), cmap=plt.cm.summer, interpolation='nearest')
for i, row in enumerate(conf_mat):
    for j, c in enumerate(row):
        if c > 0:
            plt.text(j-.2, i+.1, c, fontsize=16)
#cb = fig.colorbar(res)
plt.title('Confusion Matrix')
_ = plt.xticks(range(2), [l for l in labels.values()], rotation=90)
_ = plt.yticks(range(2), [l for l in labels.values()])
inp = model.input # input placeholder
outputs = [layer.output for layer in model.layers] # all layer outputs
functor = K.function([inp]+ [K.learning_phase()], outputs ) # evaluation function
# Testing
test=x_test[2:3,:,:]
test = np.random.random(input_shape)[np.newaxis,:]
layer_outs = functor([test, 1.])
I am trying to run this code, which is used for fall detection through a deep convolutional neural network, where radar data is used to collect the raw data.
Skip the input layer:
outputs = [layer.output for layer in model.layers[1:]]
If you still need it, you can add an identity function:
outputs.insert(0, K.identity(inp))
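Put together, a sketch of the fixed evaluation function using the same variables as in the question:
# skip the input layer so input_1:0 is only fed, never fetched
outputs = [layer.output for layer in model.layers[1:]]
outputs.insert(0, K.identity(inp))           # optional: re-expose the input via identity
functor = K.function([inp] + [K.learning_phase()], outputs)
layer_outs = functor([test, 1.])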