Predicting the next value of game results - Python

I am working on predicting the output of a game. The game has three seats: seat A, seat B, and seat C. In each round exactly one seat wins, at random.
I have written LSTM code to predict the next value. I built a dataset of around 8000 games and recorded the results, but the LSTM is not learning: it predicts either all "0"s or all "1"s, and I'm curious why.
You can find the dataset here: https://drive.google.com/open?id=1WZBMYO-Oi3uErPlBXphXFAACmn9VK_yZ (also linked in the code below).
Here is my Google Colab code:
# univariate data preparation
from sklearn.preprocessing import OneHotEncoder
from numpy import array
import pandas as pd
# split a univariate sequence into samples
df = pd.read_csv('DatasetLarge.csv') # here you can find dataset https://drive.google.com/open?id=1WZBMYO-Oi3uErPlBXphXFAACmn9VK_yZ
def split_sequence(sequence, n_steps):
    X, y = list(), list()
    for i in range(len(sequence)):
        # find the end of this pattern
        end_ix = i + n_steps
        # check if we are beyond the sequence
        if end_ix > len(sequence)-1:
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return array(X), array(y)
# Removing some errors of dataset
df = df.replace(to_replace="q", value="a")
df = df.replace(to_replace="aa", value="a")
df = df.replace(to_replace="cc", value="c")
#label encoding
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(df.values)
df=le.transform(df)
# define input sequence
raw_seq=df
# choose a number of time steps
n_steps = 5
# split into samples
X, y = split_sequence(raw_seq, n_steps)
# summarize the data
for i in range(len(X)):
    print(X[i], y[i])
# Splitting dataset into train and test
X_train=X[0:7500]
y_train=y[0:7500]
X_test=X[7500:]
y_test=y[7500:]
import tensorflow.keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, Flatten,Input,LSTM
n_features = 1
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], n_features))
# define model
model = Sequential()
model.add(LSTM(200, activation='softmax', input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='binary_crossentropy')
# fit model
model.fit(X_train, y_train, epochs=100, verbose=0)
# demonstrate prediction
X_test=X_test.reshape((X_test.shape[0], X_test.shape[1], n_features))
yhat_classes =(model.predict(X_test) > 0.5).astype("int32")
print(yhat_classes)
Please help me understand why it is not predicting correctly.

Related

Getting a probability distribution curve from a TensorFlow model

I am trying to learn to work with TensorFlow, so I was building a probabilistic ML model to get the probability distribution of the next day's stock price based on the last n days' price sequence. In doing so I managed to predict the next day's price, but not to get a probability distribution out of the model. How do I get the curve that the model's predictions are based on from the TensorFlow model?
This is the code I have so far that predicts the actual price for the next day (following this video: https://www.youtube.com/watch?v=PuZY9q-aKLw):
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfpl = tfp.layers
tfk = tf.keras
tfkl = tf.keras.layers
# preparing data
min_max_scaler = MinMaxScaler(feature_range=(0,1)) # a scaler object which normalizes the number between 0 and 1 in relation to the rest of the dataset
close_prices = price_data.loc[:,'Close'].values # all the close prices of the price_data df
scaled_close_prices = min_max_scaler.fit_transform(close_prices.reshape(-1,1)) # all the close prices, normalized between 1 and 0
n = 60 # the number of days that are used to determine the next value
# x contains the last n days before the predicted day which is y
X_train = [] # 70% of the dataset
y_train = []
X_test = [] # 30% of the dataset
y_test = []
for x in range(n, int(len(scaled_close_prices)*0.7)):
    X_train.append(scaled_close_prices[x-n:x,0])
    y_train.append(scaled_close_prices[x,0])
for x in range(int(len(scaled_close_prices)*0.7), len(scaled_close_prices)):
    X_test.append(scaled_close_prices[x-n:x,0])
    y_test.append(scaled_close_prices[x,0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_test, y_test = np.array(X_test), np.array(y_test)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# building the model
model = tfk.Sequential()
model.add(tfkl.LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(tfkl.Dropout(0.2))
model.add(tfkl.LSTM(units=50, return_sequences=True))
model.add(tfkl.Dropout(0.2))
model.add(tfkl.LSTM(units=50))
model.add(tfkl.Dropout(0.2))
model.add(tfkl.Dense(units=1))
# compiling model
model.compile(optimizer=tfk.optimizers.Adam(learning_rate=0.05),
              loss='mean_squared_error',
              metrics=[])
# fitting model
model.fit(X_train, y_train, epochs=25, batch_size=32)
# testing model
y_predicted = model.predict(X_test)
y_predicted = min_max_scaler.inverse_transform(y_predicted).reshape(-1)
Probabilistic modelling is one of the things that tensorflow_probability (tfp) can do.
It can do probabilistic time series forecasting - e.g. see https://www.tensorflow.org/probability/examples/Structural_Time_Series_Modeling_Case_Studies_Atmospheric_CO2_and_Electricity_Demand
... but I don't believe it includes a variational LSTM in its toolbox, and those may be close to the current research frontier - e.g. see https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7842932/
As written, this model is not especially probabilistic per se, but it can be viewed as outputting an estimate of the mean of a Gaussian distribution with unit variance; this is implicit in using a squared-error loss. To make this more concrete, you could instantiate a tfd.Normal distribution with the output of your model as the loc parameter and scale=1., then call prob(...) to evaluate the pdf of that distribution. I'm not sure if this is what you're looking for, though.
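A minimal sketch of that idea, reusing the model and X_test from the question (so the distribution lives in the scaled price space):
mu = model(X_test)                    # predicted means, shape (n_samples, 1)
dist = tfd.Normal(loc=mu, scale=1.)   # the implicit unit-variance Gaussian
print(dist.prob(0.5))                 # pdf evaluated at 0.5 for each prediction
print(dist.sample(10))                # 10 samples drawn around each prediction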

Classification ANN stuck at 60%

I am trying to create a binary classifier on a dataset of 10,000 records. I have tried multiple activation functions and optimizers, but the results are always between 56.8% and 58.9%. Given the fairly steady results over many dozens of iterations, I assume the problem is one of the following:
My dataset is not classifiable
My model is broken
This is the data set: training-set.csv
I may be able to get 2000 more records but that would be it.
My question is: is there something in the way my model is constructed that is preventing it from learning to a higher degree?
Note that I am happy to have as many layers and nodes as needed, and time is not a factor in generating the model.
import numpy as np
import pandas
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasClassifier

dataframe = pandas.read_csv(r"training-set.csv", index_col=None)
dataset = dataframe.values
X = dataset[:,0:48].astype(float)
Y = dataset[:,48]
#count the input variables
col_count = X.shape[1]
#normalize X
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_scale = sc_X.fit_transform(X)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_scale, Y, test_size = 0.2)
# define baseline model
activator = 'linear' #'relu' 'sigmoid' 'softmax' 'exponential' 'linear' 'tanh'
#opt = 'Adadelta' #adam SGD nadam RMSprop Adadelta
nodes = 1000
max_layers = 2
max_epochs = 100
max_batch = 32
loss_funct = 'binary_crossentropy' #for binary
last_act = 'sigmoid' # 'softmax' 'sigmoid' 'relu'
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(nodes, input_dim=col_count, activation=activator))
    for x in range(0, max_layers):
        model.add(Dropout(0.2))
        model.add(Dense(nodes, input_dim=nodes, activation=activator))
        #model.add(BatchNormalization())
    model.add(Dense(1, activation=last_act))
    # Compile model
    adam = Adam(lr=0.001)
    model.compile(loss=loss_funct, optimizer=adam, metrics=['accuracy'])
    return model
estimator = KerasClassifier(build_fn=baseline_model, epochs=max_epochs, batch_size=max_batch)
estimator.fit(X_train, y_train)
y_pred = estimator.predict(X_test)
#confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
score = np.sum(cm.diagonal())/float(np.sum(cm))
Two points:
There is absolutely no point in stacking dense layers with linear activations - they collapse into a single linear unit; change to activator = 'relu' (and just don't bother with the other candidate activation functions in your commented-out list).
Do not use dropout by default, especially when your model has difficulty learning (as here); remove the dropout layer(s), and be ready to put (some of) them back only if you see overfitting (you are currently still very far from that point, so this is not something to worry about now). Both changes are sketched below.
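Putting both points together, a minimal sketch of a revised baseline_model (same variable names as in the question; relu activations, dropout removed):
def baseline_model():
    model = Sequential()
    # relu instead of linear; no dropout until overfitting actually shows up
    model.add(Dense(nodes, input_dim=col_count, activation='relu'))
    for _ in range(max_layers):
        model.add(Dense(nodes, activation='relu'))
    model.add(Dense(1, activation=last_act))
    model.compile(loss=loss_funct, optimizer=Adam(lr=0.001), metrics=['accuracy'])
    return model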

Spiral problem, why does my loss increase in this neural network using Keras?

I'm trying to solve the spiral problem using Keras with 3 spirals instead of 2, using a strategy similar to the one I used for 2. The problem is that my loss now grows exponentially instead of decreasing, with the same parameters I used for 2 spirals (the network structure has 3 outputs instead of being binary). I'm not quite sure what could be causing this; could anyone help? I have tried various epochs, learning rates, and batch sizes.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.optimizers import RMSprop
from Question1.utils import create_neural_network, create_test_data
EPOCHS = 250
BATCH_SIZE = 20
def main():
    df = three_spirals(1000)
    # Set-up data
    x_train = df[['x-coord', 'y-coord']].values
    y_train = df['class'].values
    # Don't need y_test, can inspect visually if it worked or not
    x_test = create_test_data()
    # Scale data
    scaler = MinMaxScaler()
    scaler.fit(x_train)
    x_train = scaler.transform(x_train)
    x_test = scaler.transform(x_test)
    relu_model = create_neural_network(layers=3,
                                       neurons=[40, 40, 40],
                                       activation='relu',
                                       optimizer=RMSprop(learning_rate=0.001),
                                       loss='categorical_crossentropy',
                                       outputs=3)
    # Train networks
    relu_model.fit(x=x_train, y=y_train, epochs=EPOCHS, verbose=1, batch_size=BATCH_SIZE)
    # Predictions on test data
    relu_predictions = relu_model.predict_classes(x_test)
    models = [relu_model]
    test_predictions = [relu_predictions]
    # Plot
    plot_data(models, test_predictions)
And here is the create_neural_network function:
def create_neural_network(layers, neurons, activation, optimizer, loss, outputs=1):
    if layers != len(neurons):
        raise ValueError("Number of layers doesn't match the number of neuron layers.")
    model = Sequential()
    for i in range(layers):
        model.add(Dense(neurons[i], activation=activation))
    # Output
    if outputs == 1:
        model.add(Dense(outputs))
    else:
        model.add(Dense(outputs, activation='softmax'))
    model.compile(optimizer=optimizer,
                  loss=loss)
    return model
I have worked it out: the output data isn't like binary classification, where you only need one column. For multi-class classification you need a column for each class you want to classify, so having y take the values 0, 1, 2 directly was incorrect. The correct way is to have columns y0, y1, y2, each 1 if the sample belongs to that class and 0 if it doesn't.
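For reference, a minimal sketch of that fix with to_categorical (assuming y_train holds the integer labels 0, 1, 2 as in the question):
from tensorflow.keras.utils import to_categorical

# 0 -> [1, 0, 0], 1 -> [0, 1, 0], 2 -> [0, 0, 1]
y_train_onehot = to_categorical(y_train, num_classes=3)
relu_model.fit(x=x_train, y=y_train_onehot, epochs=EPOCHS, verbose=1,
               batch_size=BATCH_SIZE)
Alternatively, you can keep the integer labels and switch the loss to 'sparse_categorical_crossentropy', which expects exactly that format.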

Keras Prediction after test values

I am currently trying to build a neural network to predict time series, and the question is: is it possible to predict further than just the test dataset? In my example, I have a dataset of about 3000 values, of which I keep 90% for training and 10% for testing. When I compare the predictions with the actual test values, they correspond, but is it possible, for instance, to ask the program to predict the next 500 values (i.e. from 3001 to 3500)?
Here is a snippet of the code I use.
import csv
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM, GRU
from keras.models import Sequential
from keras import optimizers
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import time
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (-1, 1))
def load_data(datasetname, column, seq_len, normalise_window):
    # A support function to help prepare datasets for an RNN/LSTM/GRU
    data = datasetname.loc[:,column]
    sequence_length = seq_len + 1
    result = []
    for index in range(len(data) - sequence_length):
        result.append(data[index: index + sequence_length])
    result = np.array(result)
    result.reshape(-1,1)
    training_set_scaled = sc.fit_transform(result)
    print (result)
    #Last 10% is used for validation test, first 90% for training
    row = round(0.9 * training_set_scaled.shape[0])
    train = training_set_scaled[:int(row), :]
    #np.random.shuffle(train)
    x_train = train[:, :-1]
    y_train = train[:, -1]
    X_test = training_set_scaled[int(row):, :-1]
    y_test = training_set_scaled[int(row):, -1]
    print ("shape train", x_train)
    print ("shape train", X_test)
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
    return [x_train, X_test, y_train, y_test]
def build_model():
    model = Sequential()
    layers = {'input': 100, 'hidden1': 150, 'hidden2': 256, 'hidden3': 100, 'output': 10}
    model.add(LSTM(
        50,
        return_sequences=True,
        input_shape=(200,1)
    ))
    model.add(Dropout(0.2))
    model.add(LSTM(
        layers['hidden2'],
        return_sequences=True,
    ))
    model.add(Dropout(0.2))
    model.add(LSTM(
        layers['hidden3'],
        return_sequences=False,
    ))
    model.add(Dropout(0.2))
    model.add(Activation("linear"))
    model.add(Dense(
        output_dim=layers['output']))
    start = time.time()
    model.compile(loss="mean_squared_error", optimizer="adam")
    print ("Compilation Time : ", time.time() - start)
    return model
dataset = pd.read_csv('data.csv')
X_train, X_test, y_train, y_test = load_data(dataset, 'mean anomaly', 200, False)
model = build_model()
print ("train",X_train)
print ("test",X_test)
model.fit(X_train, y_train, batch_size=256, epochs=1, validation_split=0.05)
predictions = model.predict(X_test)
predictions = np.reshape(predictions, (predictions.size,))
plt.figure(1)
plt.subplot(311)
plt.title("Actual Test Signal w/Anomalies & noise")
plt.plot(y_test)
plt.subplot(312)
plt.title("predicted signal")
plt.plot(predictions, 'g')
plt.subplot(313)
plt.title("training signal")
plt.plot(y_train, 'b')
plt.plot(y_test, 'y')
plt.legend(['train', 'test'])
plt.show()
I have read that I should increase the output dimension of the dense layer to get more than one predicted value, or increase the size of the window in the load_data function?
Here is the result: the yellow plot is supposed to come after the blue one; it represents my input test data. The first subplot is a zoom on this data and the second one is the prediction.
If you want to predict the output value of your series at t+x based on data at time t, the data you feed to the network should already have this format.
Time series data formatting:
If you have 3000 data points and want to predict the output value for the next "virtual" 500 points, you should offset the output value by this amount. For example:
In your dataset, the 500th data point corresponds to the 500th output value. If you want to predict "future" values, then the 500th data point should instead have the 1000th output value. You can do this in pandas with the shift function, as sketched below. Be aware that you will lose the last 500 data points by doing so, as they will no longer have an output value.
Then when you predict on data point x_i you'll get the output value y_(i+500). You should find some basic guides for time series forecasting on sites like machinelearningmastery.
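A minimal sketch of that offset with pandas' shift, reusing the dataset DataFrame and the 'mean anomaly' column from the question:
step = 500
# move each row's target 500 steps into the future;
# the last 500 rows end up with NaN targets and are dropped
dataset['target'] = dataset['mean anomaly'].shift(-step)
dataset = dataset.dropna(subset=['target'])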
Good practice for model evaluation:
If you want to better evaluate the quality of your model, first find some metrics that suit your problem, and try increasing the test set percentage. While graphics are a good way to visualise results, they can be deceiving; try combining them with some metrics! (Be careful with Mean Squared Error: it can give you a biased score for values in the range [-1;1], as the square of an error in this range is always smaller than the actual error; try Mean Absolute Error instead, as illustrated below.)
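A toy illustration of that bias (made-up numbers, not the question's data):
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_true = [0.0, 0.0, 0.0]
y_pred = [0.5, 0.5, 0.5]                    # every error is 0.5, inside [-1;1]
print(mean_squared_error(y_true, y_pred))   # 0.25 - squaring shrinks the error
print(mean_absolute_error(y_true, y_pred))  # 0.5  - the actual error size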
Data leakage when scaling data:
While scaling data is usually a good thing, you need to be careful doing so. You committed something called a data leak: you applied scaling to the whole dataset before splitting it into training and test sets. Further reading about this data leak. A leak-free version is sketched below.
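A minimal sketch of the leak-free version, reusing the variables from the question's load_data (fit the scaler on the training rows only, then apply the same transform to the test rows):
row = round(0.9 * result.shape[0])
train_raw = result[:int(row), :]
test_raw = result[int(row):, :]
sc.fit(train_raw)               # the scaler only ever sees training data
train = sc.transform(train_raw)
test = sc.transform(test_raw)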
Update
I think I misunderstood your problem.
If you want to "predict further than just the test dataset" you will need some unseen/new data to make more predictions. The test set is only there to evaluate the performance of the learning phase.
Now, if you want to predict more than just the next step (this still won't let you "predict further than just the test dataset", because of the way you change your dataset; see below):
Your model, as it's built, will only ever predict the next step.
In your example you feed the algorithm series of length 'seq_len' and give them as output the value right after the end of those series. If you want your algorithm to learn to predict more than one step into the future, your y_train must hold the value at the corresponding time in the future. Example:
x = [0,1,2,3,4,5,6,7,8,9,10,...]
seq_len = 5
step_to_predict = 5
So to predict not one step into the future but five, your series will have to look like this :
x_serie_1 = [0,1,2,3,4]
y_serie_1 = [9]
x_serie_2 = [1,2,3,4,5]
y_serie_2 = [10]
This is a way to get your model to learn how to make predictions further into the future than just the next step.
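A minimal sketch of a helper that builds such pairs (step_to_predict is counted from the end of each input window):
import numpy as np

def split_sequence_ahead(sequence, seq_len, step_to_predict):
    X, y = [], []
    for i in range(len(sequence) - seq_len - step_to_predict + 1):
        X.append(sequence[i:i + seq_len])
        # the target sits step_to_predict steps after the end of the window
        y.append(sequence[i + seq_len + step_to_predict - 1])
    return np.array(X), np.array(y)

X, y = split_sequence_ahead(list(range(20)), seq_len=5, step_to_predict=5)
print(X[0], y[0])   # [0 1 2 3 4] 9
print(X[1], y[1])   # [1 2 3 4 5] 10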

How to initialise weights of an MLP using an autoencoder #2nd part - Deep autoencoder #3rd part - Stacked autoencoder

I have built an autoencoder (1 encoder 8:5, 1 decoder 5:8) which takes the Pima-Indians-Diabetes dataset (https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv) and reduces its dimension (from 8 to 5). I would now like to use these reduced features to classify the data using an MLP. Here I have some problems with the basic understanding of the architecture: how do I use the weights of the autoencoder and feed them into the MLP? I have checked these threads - https://github.com/keras-team/keras/issues/91 and https://www.codementor.io/nitinsurya/how-to-re-initialize-keras-model-weights-et41zre2g. Which weight matrix should I consider - the one for the encoder part or the decoder part? When I add the layers for the MLP, how do I initialise the weights with these saved weights? I'm not getting the exact syntax. Also, should my MLP start with 5 neurons, since my reduced dimension is 5? What are the possible dimensions of the MLP for this binary classification problem? If anyone could elaborate, please?
The deep autoencoder code is as follows:
# from keras.models import Sequential
from keras.layers import Input, Dense
from keras.models import Model
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import numpy
# Data pre-processing...
# load pima indians dataset
dataset = numpy.loadtxt("C:/Users/dibsa/Python Codes/pima.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:, 0:8]
Y = dataset[:, 8]
# Split data into training and testing datasets
x_train, x_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42)
# scale the data within [0-1] range
scalar = MinMaxScaler()
x_train = scalar.fit_transform(x_train)
x_test = scalar.fit_transform(x_test)
# Autoencoder code begins here...
encoding_dim1 = 5 # size of encoded representations
encoding_dim2 = 3 # size of encoded representations in the bottleneck layer
# this is our input placeholder
input_data = Input(shape=(8,))
# "encoded" is the first encoded representation of the input
encoded = Dense(encoding_dim1, activation='relu', name='encoder1')(input_data)
# "enc" is the second encoded representation of the input
enc = Dense(encoding_dim2, activation='relu', name='encoder2')(encoded)
# "dec" is the lossy reconstruction of the input
dec = Dense(encoding_dim1, activation='sigmoid', name='decoder1')(enc)
# "decoded" is the final lossy reconstruction of the input
decoded = Dense(8, activation='sigmoid', name='decoder2')(dec)
# this model maps an input to its reconstruction
autoencoder = Model(inputs=input_data, outputs=decoded)
autoencoder.compile(optimizer='sgd', loss='mse')
# training
autoencoder.fit(x_train, x_train,
                epochs=300,
                batch_size=10,
                shuffle=True,
                validation_data=(x_test, x_test)) # need more tuning
# test the autoencoder by encoding and decoding the test dataset
reconstructions = autoencoder.predict(x_test)
print('Original test data')
print(x_test)
print('Reconstructed test data')
print(reconstructions)
The stacked autoencoder code is as follows:
# from keras.models import Sequential
from keras.layers import Input, Dense
from keras.models import Model
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import numpy
# Data pre-processing...
# load pima indians dataset
dataset = numpy.loadtxt("C:/Users/dibsa/Python Codes/pima.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:, 0:8]
Y = dataset[:, 8]
# Split data into training and testing datasets
x_train, x_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42)
# scale the data within [0-1] range
scalar = MinMaxScaler()
x_train = scalar.fit_transform(x_train)
x_test = scalar.fit_transform(x_test)
# Autoencoder code goes here...
encoding_dim1 = 5 # size of encoded representations
encoding_dim2 = 3 # size of encoded representations in the bottleneck layer
# this is our input placeholder
input_data1 = Input(shape=(8,))
# the first encoded representation of the input
encoded1 = Dense(encoding_dim1, activation='relu',
                 name='encoder1')(input_data1)
# the first lossy reconstruction of the input
decoded1 = Dense(8, activation='sigmoid', name='decoder1')(encoded1)
# this model maps an input to its first layer of reconstructions
autoencoder1 = Model(inputs=input_data1, outputs=decoded1)
# this is the first encoder model
enc1 = Model(inputs=input_data1, outputs=encoded1)
autoencoder1.compile(optimizer='sgd', loss='mse')
# training
autoencoder1.fit(x_train, x_train, epochs=300,
                 batch_size=10, shuffle=True,
                 validation_data=(x_test, x_test))
FirstAEoutput = autoencoder1.predict(x_train)
input_data2 = Input(shape=(encoding_dim1,))
# the second encoded representations of the input
encoded2 = Dense(encoding_dim2, activation='relu',
                 name='encoder2')(input_data2)
# the final lossy reconstruction of the input
decoded2 = Dense(encoding_dim1, activation='sigmoid',
                 name='decoder2')(encoded2)
# this model maps an input to its second layer of reconstructions
autoencoder2 = Model(inputs=input_data2, outputs=decoded2)
# this is the second encoder
enc2 = Model(inputs=input_data2, outputs=encoded2)
autoencoder2.compile(optimizer='sgd', loss='mse')
# training
autoencoder2.fit(FirstAEoutput, FirstAEoutput, epochs=300,
                 batch_size=10, shuffle=True)
# this is the overall autoencoder mapping an input to its final reconstructions
autoencoder = Model(inputs=input_data1, outputs=encoded2)
# test the autoencoder by encoding and decoding the test dataset
reconstructions = autoencoder.predict(x_test)
print('Original test data')
print(x_test)
print('Reconstructed test data')
print(reconstructions)
If your decoder is trying to reconstruct the input, then it doesn't really make sense to me to attach your classifier to its output. I mean, why not just attach the classifier to the input in the first place? So if you are set on using an autoencoder, I'd say it's pretty clear that you should attach your classifier to the output of the encoder pipe.
I'm not quite sure what you mean by "use the weights of the autoencoder and feed them into the mlp". You don't feed a layer with another layer's weights, but with its output signal. This is pretty easy to do in Keras. Let's say you defined your autoencoder and trained it as such:
from keras import Input, Model
from keras.layers import Dense
from keras.models import save_model, load_model

x = Input(shape=[8])
y = Dense(5, activation='sigmoid', name='encoder')(x)
y = Dense(8, name='decoder')(y)
ae = Model(inputs=x, outputs=y)
ae.compile(loss='mse', ...)
ae.fit(x_train, x_train, ...)
save_model(ae, './autoencoder.h5')
Then you can attach a classifying layer at the encoder and create a classifier model with the following code:
# load the model from the disk if you
# are in a different execution.
ae = load_model('./autoencoder.h5')
y = ae.get_layer('encoder').output
y = Dense(1, activation='sigmoid', name='predictions')(y)
classifier = Model(inputs=ae.inputs, outputs=y)
classifier.compile(loss='binary_crossentropy', ...)
classifier.fit(x_train, y_train, ...)
That's it, really. The classifier model will now have the encoder layer of the ae model as its first hidden layer, followed by a sigmoid decision layer named predictions.
If what you are really trying to do is use the weights learned by the autoencoder to initialize the weights of the classifier (I'm not positive I recommend this approach):
You can take the weight matrices with layer#get_weights, prune them (because the encoder has 5 units and the classifier only has 1) and finally set the classifier weights. Something along the following lines:
w, b = ae.get_layer('encoder').get_weights()
# remove all units except one.
neuron_to_keep = 2
w = w[:, neuron_to_keep:neuron_to_keep + 1]
b = b[neuron_to_keep:neuron_to_keep + 1]
classifier.get_layer('predictions').set_weights([w, b])
Idavid, this is for your reference - MLP using autoencoder-reduced features. I need to understand which figure is the correct one? Sorry, I had to upload the picture as an answer, as there was no option to upload an image via a comment. I think you are saying figure B is the correct one. Here is the code snippet for the same. Please let me know if I'm going about it right.
# This is a mlp classification code with features reduced by an Autoencoder
# from keras.models import Sequential
from keras.layers import Input, Dense
from keras.models import Model
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import numpy
# Data pre-processing...
# load pima indians dataset
dataset = numpy.loadtxt("C:/Users/dibsa/Python Codes/pima.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:, 0:8]
Y = dataset[:, 8]
# Split data into training and testing datasets
x_train, x_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42)
# scale the data within [0-1] range
scalar = MinMaxScaler()
x_train = scalar.fit_transform(x_train)
x_test = scalar.fit_transform(x_test)
# Autoencoder code goes here...
encoding_dim = 5 # size of our encoded representations
# this is our input placeholder
input_data = Input(shape=(8,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu', name='encoder')(input_data)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(8, activation='sigmoid', name='decoder')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(inputs=input_data, outputs=decoded)
autoencoder.compile(optimizer='sgd', loss='mse')
# training
autoencoder.fit(x_train, x_train,
                epochs=300,
                batch_size=10,
                shuffle=True,
                validation_data=(x_test, x_test)) # need more tuning
# test the autoencoder by encoding and decoding the test dataset
reconstructions = autoencoder.predict(x_test)
print('Original test data')
print(x_test)
print('Reconstructed test data')
print(reconstructions)
# MLP code goes here...
# create model
x = autoencoder.get_layer('encoder').output
# h = Dense(3, activation='relu', name='hidden')(x)
y = Dense(1, activation='sigmoid', name='predictions')(x)
classifier = Model(inputs=autoencoder.inputs, outputs=y)
# Compile model
classifier.compile(loss='binary_crossentropy', optimizer='adam',
                   metrics=['accuracy'])
# Fit the model
classifier.fit(x_train, y_train, epochs=250, batch_size=10)
print('Now making predictions')
predictions = classifier.predict(x_test)
# round predictions
rounded_predicted_classes = [round(x[0]) for x in predictions]
temp = sum(y_test == rounded_predicted_classes)
acc = temp/len(y_test)
print(acc)
