VAE: mat1 and mat2 shapes cannot be multiplied (90x12800 and 90x360) - python

I have CSI data of shape (5520, 90, 200), the dimensions being samples, stacked antennas and subcarriers, and time frames. I am trying to build a variational autoencoder, but I keep getting stuck at the training step because it says the matrices I am feeding in cannot be multiplied.
class Encoder(nn.Module):
    def __init__(self, z_dim):
        super(Encoder, self).__init__()
        x_dim = 90
        # Layer 1
        self.encoder = nn.Sequential()
        self.encoder.add_module('fc1', nn.Linear(x_dim, 360))
        self.encoder.add_module('Sigmoid1', nn.Sigmoid())
        self.encoder.add_module('BN1', nn.BatchNorm1d(360))
        self.encoder.add_module('DO1', nn.Dropout())
        # Layer 2
        self.encoder.add_module('fc2', nn.Linear(360, 50))
        self.encoder.add_module('Sigmoid2', nn.Sigmoid())
        self.encoder.add_module('BN2', nn.BatchNorm1d(50))
        self.encoder.add_module('DO2', nn.Dropout())
        # Latent
        self.k_mu = nn.Linear(50, z_dim)
        self.k_sigma = nn.Linear(50, z_dim)

    def reparameterize(self, mean, logvar):
        eps = np.random.normal(shape=mean.shape)
        return eps * tf.exp(logvar * .5) + mean

    def forward(self, x):
        h = self.encoder(x)
        k_mu = self.k_mu(h)
        k_sigma = self.k_sigma(h)
        z_dim = reparameterize(k_mu, log(k_sigma))
        return z_dim
class Decoder(nn.Module):
    def __init__(self, z_dim):
        super(Decoder, self).__init__()
        x_dim = 90
        #1
        self.decoder = nn.Sequential()
        self.decoder.add_module('fc3', nn.Linear(z_dim, 50))
        self.decoder.add_module('Sigmoid3', nn.Sigmoid())
        self.decoder.add_module('BN3', nn.BatchNorm1d(50))
        self.decoder.add_module('DO3', nn.Dropout())
        #2
        self.decoder.add_module('fc4', nn.Linear(50, 360))
        self.decoder.add_module('Sigmoid4', nn.Sigmoid())
        self.decoder.add_module('BN4', nn.BatchNorm1d(360))
        self.decoder.add_module('DO4', nn.Dropout())
        self.decoder.add_module('x_recon', nn.Linear(360, x_dim))

    def forward(self, z):
        x_ = self.decoder(z)
        return x_
class VAE(nn.Module):
    def __init__(self, z_dim):
        super(VAE, self).__init__()
        self.encoder = Encoder(z_dim)
        self.decoder = Decoder(z_dim)

    def sample(self, z_mu, z_log_var):
        z_std = torch.exp(z_log_var)
        eps = torch.randn_like(z_std)
        z = z_mu + z_std*eps
        return z

    def forward(self, x):
        # Encode
        z_mu, z_log_var = self.encoder(x)
        # Sample
        if self.training:
            # Sample if we are training
            z = self.sample(z_mu, z_log_var)
        else:
            z = z_mu
        # Decode
        x_ = self.decoder(z)
        return x_, z_mu, z_log_var
vae = VAE(z_dim=10).cuda()
optimizer = optim.Adam(vae.parameters())
epochs = 20  # Number of epochs

# Loop over epochs
for epoch in range(1, epochs + 1):
    vae.train()
    train_loss = 0
    # Loop over batches. Note that the labels are ignored in this case
    for batch_idx, (data) in enumerate(train_loader):
        data = data.cuda()
        optimizer.zero_grad()
        data = data.view(data.size(1), -1)
        x_recon, z_mu, z_log_var = vae(data)
        # Calculate the loss
        KL_loss, E_loss = loss_function(x_recon, data, z_mu, z_log_var)
        loss = KL_loss + E_loss
        loss.backward()
        # Accumulate the loss to calculate the average
        train_loss += loss.item()
        # Step the optimizer
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Epoch: {} [{}/{} ({:.0f}%)]\tKL Loss: {:.6f}\tBC Loss: {:.6f}\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), KL_loss.item() / len(data), E_loss.item() / len(data), loss.item() / len(data)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset)))
And I get the following error: mat1 and mat2 shapes cannot be multiplied (90x12800 and 90x360)
I cannot figure out why I am getting it.
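For reference, the shapes in the error point at the flattening step: with a batch size of 64 (12800 = 64 * 200), data.view(data.size(1), -1) turns a (64, 90, 200) batch into a (90, 12800) tensor, while fc1 = nn.Linear(90, 360) multiplies its input by a (90, 360) matrix and therefore expects the last dimension to be 90. A minimal sketch of two possible reshapes, depending on what one input vector is meant to represent (an assumption about the intent, not a confirmed fix):

# Option 1: one 90-dim vector per (sample, time step) pair
data = data.permute(0, 2, 1).reshape(-1, 90)   # shape (batch * 200, 90)

# Option 2: one flattened vector per sample; x_dim must then be 90 * 200
data = data.view(data.size(0), -1)             # shape (batch, 18000)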

Related

RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 100 but got size 1 for tensor number 1 in the list

I'm new to PyTorch and not able to figure out what I'm doing wrong; below is the code:
x_np, y_np = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=0)
x = torch.from_numpy(x_np.astype(np.float32))
y = torch.from_numpy(y_np.astype(np.float32))
y = y.view(y.shape[0], 1)
n_samples, n_features = x.shape

class Regression(nn.Module):
    def __init__(self, inputsize, outputsize, hiddensize):
        super(Regression, self).__init__()
        self.hidden_size = hiddensize
        self.input_size = inputsize
        self.output_size = outputsize
        self.i2h = nn.Linear(self.input_size+self.hidden_size, self.hidden_size)
        self.h2o = nn.Linear(self.input_size+self.hidden_size, self.output_size)

    def forward(self, x):
        hidden = torch.zeros(1, self.hidden_size)
        print(x.shape)
        print(hidden.shape)
        combined = torch.cat((x, hidden), 1)
        hidden = self.i2h(combined)
        output = self.h2o(combined)
        return output

model = Regression(n_features, n_features, 16)
lr = 0.01
loss = nn.MSELoss()
opt = torch.optim.SGD(model.parameters(), lr=lr)

for epoch in range(1000):
    ypred = model(x)
    l = loss(y, ypred)
    l.backward()
    opt.step()
    opt.zero_grad()
    if epoch % 100 == 0:
        [w, b] = model.parameters()
        print(f'epoch {epoch+1}: w = {w[0][0].item():.3f}, loss = {l:.8f}')
While training, I am getting this error
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 100 but got size 1 for tensor number 1 in the list
i2h maps the self.input_size + self.hidden_size dimensional input to self.hidden_size, so h2o has to map from self.hidden_size to self.output_size, and you have to update forward accordingly. The error itself comes from torch.cat: hidden is created with batch dimension 1 while x has batch dimension 100, so the hidden state has to be created with the same batch dimension as x. Here is the complete code:
class Regression(nn.Module):
    def __init__(self, inputsize, outputsize, hiddensize):
        super(Regression, self).__init__()
        self.hidden_size = hiddensize
        self.input_size = inputsize
        self.output_size = outputsize
        self.i2h = nn.Linear(self.input_size+self.hidden_size, self.hidden_size)
        self.h2o = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, x):
        # hidden must have the same batch dimension as x for torch.cat to work
        hidden = torch.zeros(x.size(0), self.hidden_size)
        print(x.shape)
        print(hidden.shape)
        combined = torch.cat((x, hidden), 1)
        hidden = self.i2h(combined)
        output = self.h2o(hidden)
        return output

Pytorch Deep Learning model for noisy function approximation

I have a dataset as shown in this picture: Dataset
I want to approximate a function that fits the data and make predictions on my test dataset. I have tried to use a neural network as shown below, but I get a huge training loss. What should I change? Is it just a problem of layers and neurons?
class NeuralNet(nn.Module):
    def __init__(self,
                 input_dimension,
                 output_dimension,
                 n_hidden_layers,
                 neurons,
                 regularization_param,
                 regularization_exp,
                 retrain_seed,
                 activation_name):
        super(NeuralNet, self).__init__()
        # Number of input dimensions n
        self.input_dimension = input_dimension
        # Number of output dimensions m
        self.output_dimension = output_dimension
        # Number of neurons per layer
        self.neurons = neurons
        # Number of hidden layers
        self.n_hidden_layers = n_hidden_layers
        # Activation function
        self.activation_name = activation_name
        self.activation = self.get_activation(activation_name)
        # Regularization parameter
        self.regularization_param = regularization_param
        # Regularization exponent
        self.regularization_exp = regularization_exp
        # Random seed for weight initialization
        self.retrain_seed = retrain_seed

        if self.n_hidden_layers != 0:
            self.input_layer = nn.Linear(self.input_dimension, self.neurons)
            self.hidden_layers = nn.ModuleList([nn.Linear(self.neurons, self.neurons) for _ in range(n_hidden_layers - 1)])
            self.output_layer = nn.Linear(self.neurons, self.output_dimension)
        else:
            print("Simple linear regression")
            self.linear_regression_layer = nn.Linear(self.input_dimension, self.output_dimension)

        self.init_xavier()

    def init_xavier(self):
        torch.manual_seed(self.retrain_seed)

        def init_weights(m):
            if type(m) == nn.Linear and m.weight.requires_grad and m.bias.requires_grad:
                if self.activation_name in ['tanh', 'relu']:
                    gain = nn.init.calculate_gain(self.activation_name)
                else:
                    gain = 1
                torch.nn.init.xavier_uniform_(m.weight, gain=gain)
                m.bias.data.fill_(0)

        self.apply(init_weights)

    def regularization(self):
        reg_loss = 0
        for name, param in self.named_parameters():
            if 'weight' in name:
                reg_loss = reg_loss + torch.norm(param, self.regularization_exp)
        return reg_loss

    def get_activation(self, activation_name):
        if activation_name in ['tanh']:
            return nn.Tanh()
        elif activation_name in ['relu']:
            return nn.ReLU(inplace=True)
        elif activation_name in ['lrelu']:
            return nn.LeakyReLU(inplace=True)
        elif activation_name in ['sigmoid']:
            return nn.Sigmoid()
        elif activation_name in ['softplus']:
            return nn.Softplus(beta=4)
        elif activation_name in ['celu']:
            return nn.CELU()
        else:
            raise ValueError('Unknown activation function')

    def forward(self, x):
        # The forward function performs the set of affine and non-linear transformations defining the network
        # (see equation above)
        if self.n_hidden_layers != 0:
            x = self.activation(self.input_layer(x))
            for k, l in enumerate(self.hidden_layers):
                x = self.activation(l(x))
            return self.output_layer(x)
        else:
            return self.linear_regression_layer(x)
my_network = NeuralNet(input_dimension=train1.shape[1], output_dimension=test1.shape[1],
                       n_hidden_layers=8, neurons=500,
                       regularization_param=1e-4,
                       regularization_exp=2,
                       retrain_seed=134,
                       activation_name="tanh")
my_network.double()

optimizer_ = optim.Adam(my_network.parameters(), lr=0.0001, weight_decay=0)
def fit(model, training_set, x_validation_, y_validation_, num_epochs, optimizer, p, verbose=True):
    history = [[], []]
    regularization_param = model.regularization_param

    # Loop over epochs
    for epoch in range(num_epochs):
        if verbose: print("################################ ", epoch, " ################################")

        running_loss = list([0])

        # Loop over batches
        for j, (x_train_, u_train_) in enumerate(training_set):
            def closure():
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                u_pred_ = model(x_train_)
                loss_u = torch.mean((u_pred_ - u_train_) ** p)
                loss_reg = model.regularization()
                loss = loss_u + regularization_param * loss_reg
                loss.backward()
                # Compute average training loss over batches for the current epoch
                running_loss[0] += loss.item() / len(training_set)
                return loss

            optimizer.step(closure=closure)

        y_validation_pred_ = model(x_validation_)
        validation_loss = torch.mean((y_validation_pred_.reshape(-1, ) - y_validation_.reshape(-1, )) ** p).item()
        history[0].append(running_loss[0])
        history[1].append(validation_loss)

        if verbose:
            print('Training Loss: ', np.round(running_loss[0], 8))
            print('Validation Loss: ', np.round(validation_loss, 8))

    print('Final Training Loss: ', np.round(history[0][-1], 8))
    print('Final Validation Loss: ', np.round(history[1][-1], 8))
    return history

n_epochs = 2000
history = fit(my_network, training_set1, x_val1, y_val1, n_epochs, optimizer_, p=2, verbose=False)
Sorry for the long code; I thought it was important to include all of it to give you better information on how to help me.

Pytorch lightning print accuracy and loss at the end of each epoch

In TensorFlow Keras, when I'm training a model, it prints the accuracy and the loss at each epoch. I want to do the same thing using PyTorch Lightning.
I have already created my module, but I don't know how to do it.
import torch
import torch.nn as nn
from residual_block import ResidualBlock
import pytorch_lightning as pl
from torchmetrics import Accuracy

class ResNet(pl.LightningModule):
    def __init__(self, block, layers, image_channels, num_classes, learning_rate):
        super(ResNet, self).__init__()
        self.in_channels = 64
        self.conv1 = nn.Conv2d(
            image_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(
            block, layers[0], intermediate_channels=64, stride=1)
        self.layer2 = self._make_layer(
            block, layers[1], intermediate_channels=128, stride=2)
        self.layer3 = self._make_layer(
            block, layers[2], intermediate_channels=256, stride=2)
        self.layer4 = self._make_layer(
            block, layers[3], intermediate_channels=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * 4, num_classes)
        self.learning_rate = learning_rate
        self.train_accuracy = Accuracy()
        self.val_accuracy = Accuracy()
        self.test_accuracy = Accuracy()

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc(x)
        return x

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
        return optimizer

    def training_step(self, train_batch, batch_idx):
        images, labels = train_batch
        outputs = self(images)
        criterion = nn.CrossEntropyLoss()
        loss = criterion(outputs, labels)
        self.train_accuracy(outputs, labels)
        self.log('train_loss', loss)
        self.log('train_accuracy', self.train_accuracy)
        return loss

    def validation_step(self, val_batch, batch_idx):
        images, labels = val_batch
        outputs = self(images)
        criterion = nn.CrossEntropyLoss()
        loss = criterion(outputs, labels)
        self.val_accuracy(outputs, labels)
        self.log('val_loss', loss)
        self.log('val_accuracy', self.val_accuracy)

    def test_step(self, test_batch, batch_idx):
        images, labels = test_batch
        outputs = self(images)
        criterion = nn.CrossEntropyLoss()
        loss = criterion(outputs, labels)
        self.test_accuracy(outputs, labels)
        self.log('test_loss', loss)
        self.log('test_accuracy', self.test_accuracy)

    def _make_layer(self, block, num_residual_blocks, intermediate_channels, stride):
        identity_downsample = None
        layers = []

        if stride != 1 or self.in_channels != intermediate_channels * 4:
            identity_downsample = nn.Sequential(nn.Conv2d(self.in_channels, intermediate_channels * 4,
                                                          kernel_size=1, stride=stride, bias=False),
                                                nn.BatchNorm2d(intermediate_channels * 4),)

        layers.append(
            block(self.in_channels, intermediate_channels, identity_downsample, stride))
        self.in_channels = intermediate_channels * 4

        for i in range(num_residual_blocks - 1):
            layers.append(block(self.in_channels, intermediate_channels))

        return nn.Sequential(*layers)
    @classmethod
    def ResNet50(cls, img_channels, num_classes, learning_rate):
        return ResNet(ResidualBlock, [3, 4, 6, 3], img_channels, num_classes, learning_rate)

    @classmethod
    def ResNet101(cls, img_channels, num_classes, learning_rate):
        return ResNet(ResidualBlock, [3, 4, 23, 3], img_channels, num_classes, learning_rate)

    @classmethod
    def ResNet152(cls, img_channels, num_classes, learning_rate):
        return ResNet(ResidualBlock, [3, 8, 36, 3], img_channels, num_classes, learning_rate)
I just want to print the training and validation accuracy and loss at the end of each epoch.
self.log("train_loss", loss, prog_bar=True, on_step=False, on_epoch=True)
The above code logs train_loss to the progress bar.
https://pytorch-lightning.readthedocs.io/en/stable/extensions/logging.html#automatic-logging
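The same pattern can be applied to the torchmetrics Accuracy objects already defined in your module; a minimal sketch (on_epoch=True aggregates the value and reports it once per epoch):

def training_step(self, train_batch, batch_idx):
    ...
    self.log('train_loss', loss, prog_bar=True, on_step=False, on_epoch=True)
    self.log('train_accuracy', self.train_accuracy, prog_bar=True, on_step=False, on_epoch=True)
    return loss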
Or you can use this if on one device:
def training_step(self, batch, batch_idx):
    ...
    loss = nn.functional.mse_loss(x_hat, x)
    return loss

def training_epoch_end(self, outputs) -> None:
    loss = sum(output['loss'] for output in outputs) / len(outputs)
    print(loss)
Multiple GPUs:
def training_epoch_end(self, outputs) -> None:
    gathered = self.all_gather(outputs)
    if self.global_rank == 0:
        # print(gathered)
        loss = sum(output['loss'].mean() for output in gathered) / len(outputs)
        print(loss.item())

Tensorflow: gradients suddenly became NaNs during training

I am training a simple variational autoencoder with a negative binomial likelihood for the decoder. I am using Python 3.7.1 and TensorFlow 2.0.0.
The model trained without any problems for tens of epochs, but then all weights, the loss, and the gradients suddenly became NaN during training. I modified the code to find which variables become NaN first (among weights, loss, and gradients) and found that the gradients become NaN first, which then affects the other variables.
I have googled similar issues, but in most cases the NaN appeared in the loss, which is different from this case. I tried a smaller learning rate, clipping the loss, and so on, but nothing resolved the problem.
Here are the model classes for the autoencoder:
class Encoder(tf.keras.layers.Layer):
    def __init__(self, hidden_dim, latent_dim):
        super(Encoder, self).__init__()
        self.encoder_fc_1 = tf.keras.layers.Dense(hidden_dim, activation=tf.nn.leaky_relu)
        self.encoder_fc_2 = tf.keras.layers.Dense(hidden_dim, activation=tf.nn.leaky_relu)
        self.encoder_latent = tf.keras.layers.Dense(latent_dim + latent_dim)

    def call(self, input):
        h = tf.math.l2_normalize(input, 1)
        h = self.encoder_fc_1(h)
        h = self.encoder_fc_2(h)
        return tf.split(self.encoder_latent(h), num_or_size_splits=2, axis=1)

class Decoder(tf.keras.layers.Layer):
    def __init__(self, hidden_dim, vocab_size):
        super(Decoder, self).__init__()
        self.decoder_fc_1 = tf.keras.layers.Dense(hidden_dim, activation=tf.nn.leaky_relu)
        self.decoder_fc_2 = tf.keras.layers.Dense(hidden_dim, activation=tf.nn.leaky_relu)
        self.decoder_fc_3 = tf.keras.layers.Dense(vocab_size + vocab_size)

    def call(self, z):
        h = self.decoder_fc_1(z)
        h = self.decoder_fc_2(h)
        return tf.split(self.decoder_fc_3(h), num_or_size_splits=2, axis=1)

class NBVAE(tf.keras.Model):
    def __init__(self, config):
        super(NBVAE, self).__init__()
        self.optimizer = tf.keras.optimizers.Adam(config["learning_rate"])
        self.encoder = Encoder(config["hidden_dim"], config["latent_dim"])
        self.decoder = Decoder(config["hidden_dim"], config["vocab_size"])

    def call(self, input):
        mean, logvar = self.encoder(input)
        z = reparameterize(mean, logvar)
        h_r, h_p = self.decoder(z)
        return mean, logvar, z, h_r, h_p

def reparameterize(mean, logvar):
    eps = tf.random.normal(shape=mean.shape)
    return tf.add(tf.multiply(eps, tf.math.exp(tf.math.divide(logvar, 2))), mean)

def log_normal_pdf(sample, mean, logvar, raxis=1):
    log2pi = tf.math.log(2. * np.pi)
    return tf.reduce_sum(-.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi), axis=raxis)

def compute_logpx_z(input, h_r, h_p):
    temp = tf.exp(-tf.multiply(tf.exp(h_r), tf.math.log(tf.exp(h_p) + 1)))
    temp_cliped = tf.clip_by_value(temp, 1e-5, 1 - 1e-5)
    ll = tf.multiply(input, tf.math.log(1 - temp_cliped)) + tf.multiply(1 - input, tf.math.log(temp_cliped))
    #print("logpx_z: {}".format(tf.reduce_sum(ll, axis=-1)))
    return tf.reduce_sum(ll, axis=-1), temp

def compute_loss(model, input):
    mean, logvar, z, h_r, h_p = model(input)
    logpx_z, temp = compute_logpx_z(input, h_r, h_p)
    logpz = log_normal_pdf(z, 0., 0.)
    logqz_x = log_normal_pdf(z, mean, logvar)
    return tf.negative(tf.reduce_mean(logpx_z + logpz - logqz_x)), temp
And here is the code snippet for training the model. I put some if statements in the middle of the code to check which variable becomes NaN first.
print("start training...")
num_batches = int(np.ceil(len(training_data) / batch_size))
epoch_loss = []
for epoch in range(epochs):
print("epoch: {}".format(epoch+1))
progbar = tf.keras.utils.Progbar(num_batches)
loss_record = []
for i in range(num_batches):
x_batch = training_data[i*batch_size:(i+1)*batch_size]
x_batch = one_hot(x_batch, depth=len(concept2id))
with tf.GradientTape() as tape:
loss, temp = compute_loss(nbvae, x_batch)
print("step{s} loss: {l}".format(s=i, l=loss.numpy()))
# checking the loss
if np.isnan(loss.numpy()):
print("nan loss is detected")
detect_nan = True
break
loss_record.append(loss.numpy())
gradients = tape.gradient(loss, nbvae.trainable_variables)
#gradients, global_norm = tf.clip_by_global_norm(tape.gradient(loss, nbvae.trainable_variables), 10)
print("checking gradients...")
gradient_nancount = 0
for _, grad in enumerate(gradients):
gradient_nancount += np.sum(np.isnan(grad))
if gradient_nancount != 0:
print("nan is detected in gradients")
print("saving the current gradients and weights...")
save_data(os.path.join(output_path, "error_gradients.pkl"), gradients)
save_data(os.path.join(output_path, "error_tvariables.pkl"), nbvae.trainable_variables)
detect_nan = True
break
nbvae.optimizer.apply_gradients(zip(gradients, nbvae.trainable_variables))
print("checking the updated weights...")
weight_nancount = 0
for _, weight in enumerate(nbvae.weights):
weight_nancount += np.sum(np.isnan(weight))
if weight_nancount != 0:
print("nan is detected in weights")
print("saving the current gradients and weights...")
save_data(os.path.join(output_path, "error_gradients.pkl"), gradients)
save_data(os.path.join(output_path, "error_tvariables.pkl"), nbvae.trainable_variables)
detect_nan = True
break
progbar.add(1)
if detect_nan:
epoch_loss.append(np.nan)
nbvae.save_weights(os.path.join(output_path, "nbvae_error{}".format(epoch+1)))
break
print("average epoch loss: {}".format(np.mean(loss_record)))
epoch_loss.append(np.mean(loss_record))
Does anyone know how to resolve this problem, or possible reasons for it? Thank you for your time in advance.
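One candidate that matches the symptom (gradients turning NaN while the loss is still finite) is the tf.math.log(tf.exp(h_p) + 1) term in compute_logpx_z: once exp(h_p) overflows to inf for a large logit, the clipped forward value stays finite but the gradient of that term becomes NaN. This is only a guess based on the code shown; a minimal sketch that swaps in the numerically stable tf.math.softplus, which computes the same quantity without overflowing:

def compute_logpx_z(input, h_r, h_p):
    # softplus(h_p) == log(exp(h_p) + 1), but stays finite for large h_p
    temp = tf.exp(-tf.multiply(tf.exp(h_r), tf.math.softplus(h_p)))
    temp_cliped = tf.clip_by_value(temp, 1e-5, 1 - 1e-5)
    ll = tf.multiply(input, tf.math.log(1 - temp_cliped)) + tf.multiply(1 - input, tf.math.log(temp_cliped))
    return tf.reduce_sum(ll, axis=-1), temp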

Why does gradient check give a high difference (almost 1)?

I'm trying to implement a neural net in Python without using libraries like Keras or Tensorflow. I still have to test the net; right now I have just tried to train it on the Iris dataset and then check the correctness of the backpropagation algorithm.
To do so, I wrote a gradient checking procedure, calculating numerical approximations of the gradients and comparing them with the gradients from backpropagation.
The point is that, even though the backpropagation algorithm seems correct to me, the difference between the gradients is always high (around 0.8, instead of the classic 1e-7).
Layer class
class Dense(Layer):
    def __init__(self, input_shape, name=None, activation='relu', regularization='l2'):
        self.name = name
        self.is_output = False
        self.weights = np.random.uniform(low=0.01, high=0.10, size=input_shape)
        self.biases = np.ones((1, input_shape[1]))
        if activation == 'sigmoid':
            self.activation = Activation_Sigmoid()
        else: #activation == 'relu':
            self.activation = Activation_ReLU()
        self.cost = Categorical_CrossEntropyLoss()

    def set_as_output(self, is_output=True):
        self.is_output = is_output

    def forward(self, inputs, debug=False, epsilon=None):
        self.net_input = inputs
        if debug:
            augmented_parameters = np.zeros(epsilon.shape)
            weights_column_vector = np.reshape(self.weights, (-1,1))
            biases_column_vector = np.reshape(self.biases, (-1,1))
            concatenated_parameters = np.concatenate((weights_column_vector, biases_column_vector))
            for i in range(concatenated_parameters.shape[0]):
                augmented_parameters[i] = concatenated_parameters[i]
            # make the augmented parameter long as theta in order to sum them
            # this because epsilon is a standard basis vector
            augmented_parameters += epsilon
            # rebuild the weights matrix and biases vector to apply forward propagation
            weights_end = self.weights.shape[0] * self.weights.shape[1]
            biases_end = self.biases.shape[0] * self.biases.shape[1] + weights_end
            weights = np.reshape(augmented_parameters[0:weights_end], self.weights.shape)
            biases = np.reshape(augmented_parameters[weights_end:biases_end], self.biases.shape)
            output = np.dot(inputs, weights) + biases
            activated_output = self.activation.forward(output)
            return activated_output

        self.output = np.dot(inputs, self.weights) + self.biases
        self.activated_output = self.activation.forward(self.output)
        return self.activated_output

    def backward(self, X, y, output, step, l2=0.5): #backpropagation
        m = X.shape[0] # number of examples
        if self.is_output:
            error = self.cost.backward(output, y) #(a_k - y_hat_k)
            delta_k = self.activation.backward(self.output) * error
            # net input for neuron k is a_j^(l-1)
            grad = np.dot(self.net_input.T, delta_k)
            #update weights with l2 regularization
            self.grad_w = grad + (l2 / m)*self.weights
            self.grad_b = np.sum(delta_k * 1, axis=0)
            self.weights -= step * self.grad_w
            self.biases -= step * self.grad_b
            return np.dot(delta_k, self.weights.T)
        else:
            delta_j = self.activation.backward(self.output) * output
            grad = np.dot(self.net_input.T, delta_j)
            self.grad_w = grad + (l2 / m) * self.weights
            self.grad_b = np.sum(delta_j * 1, axis=0)
            self.weights -= step * self.grad_w
            self.biases -= step * self.grad_b
            return np.dot(delta_j, self.weights.T)

    def get_parameters(self):
        return self.weights, self.biases

    def get_gradients(self):
        return self.grad_w, self.grad_b
Neural Net class
class NeuralNet():
    def __init__(self):
        self.layers = []
        self.layers_output = []
        self.cost = None
        self.regularization = L2_Regularization()

    def add(self, layer):
        self.layers.append(layer)

    def forward(self, inputs, debug=False, epsilon=None):
        input = np.copy(inputs)
        for layer in self.layers:
            output = layer.forward(input, debug=debug, epsilon=epsilon)
            input = output
        return input

    def backward(self, X, y, output, step):
        prev_delta = None
        out = output
        for layer in self.layers[::-1]:
            prev_delta = layer.backward(X, y, out, step)
            out = prev_delta

    def fit(self, X, y, batch_size=1, epochs=10, step=0.05, shuffle=True):
        self.layers[-1].set_as_output()
        self.error = []
        i = 0.005 * epochs
        for epoch in range(epochs):
            if shuffle:
                X = np.random.permutation(X)
            batches = int(np.ceil(X.shape[0]/batch_size))
            batches_error = []
            for t in range(batches):
                batch_X = X[t*batch_size:np.min([X.shape[0],(t+1)*batch_size]),:]
                batch_y = y[t*batch_size:np.min([y.shape[0],(t+1)*batch_size]),:]
                output = self.forward(batch_X)
                cost = self.cost.forward(output, batch_y)
                cost += self.regularization.forward(X, self.layers)
                batches_error.append(cost)
                self.backward(batch_X, batch_y, output, step)
            self.error.append(np.mean(batches_error))
            if epoch % i == 0:
                print('epoch:', epoch, 'error:', np.mean(self.error))
        return self

    def parameters_to_theta(self):
        theta = []
        for layer in self.layers:
            w, b = layer.get_parameters()
            # flatten parameter w
            new_vector = np.reshape(w, (-1,1))
            theta.append(new_vector)
            # flatten parameter b
            new_vector = np.reshape(b, (-1,1))
            theta.append(new_vector)
        return np.vstack(theta)

    def gradients_to_theta(self):
        theta = []
        for layer in self.layers:
            grad_w, grad_b = layer.get_gradients()
            new_vector = np.reshape(grad_w, (-1,1))
            theta.append(new_vector)
            new_vector = np.reshape(grad_b, (-1,1))
            theta.append(new_vector)
        return np.vstack(theta)

    def gradient_check(self, X, y, epsilon=1e-7):
        theta = self.parameters_to_theta()
        dtheta = self.gradients_to_theta()
        num_parameters = theta.shape[0]
        J_plus = np.zeros((num_parameters, 1))
        J_minus = np.zeros((num_parameters, 1))
        dtheta_approx = np.zeros((num_parameters, 1))
        for i in range(num_parameters):
            theta_plus = np.zeros((num_parameters,1))
            theta_plus[i] = epsilon
            J_plus[i] = self.cost.forward(self.forward(X, debug=True, epsilon=theta_plus), y)
            theta_minus = np.zeros((num_parameters,1))
            theta_minus[i] = - epsilon
            J_minus[i] = self.cost.forward(self.forward(X, debug=True, epsilon=theta_minus), y)
            dtheta_approx[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
        numerator = np.linalg.norm(dtheta - dtheta_approx)
        denominator = np.linalg.norm(dtheta_approx) + np.linalg.norm(dtheta)
        difference = numerator / denominator
        return difference
I'm using ReLU and Sigmoid as activation functions, and categorical cross entropy for the cost:
import numpy as np
from scipy.special import expit as sigmoid

class Activation_ReLU:
    def forward(self, inputs):
        return np.maximum(0, inputs)

    def backward(self, inputs):
        return np.greater(inputs, 0).astype(int)

class Activation_Sigmoid:
    def forward(self, inputs):
        return sigmoid(inputs)

    def backward(self, inputs):
        return sigmoid(inputs) * (1 - sigmoid(inputs))

class Categorical_CrossEntropyLoss():
    def forward(self, y_pred, y_real):
        predictions = np.copy(y_pred)
        predictions = np.clip(predictions, 1e-12, 1 - 1e-12)  # avoid zero values for log
        n = y_real.shape[0]
        return - (1 / n) * np.sum(y_real * np.log(y_pred))

    def backward(self, y_pred, y_real):
        return y_real - y_pred
These are the main classes that define the net. The model I create to train on the Iris dataset is a NN with one hidden layer.
# random seed is 1
X, y = load_iris(return_X_y=True)
X = (X - np.mean(X)) / np.std(X)  # standardize data to improve network convergence
y = y.reshape((-1,1))
encoder = OneHotEncoder(sparse=False)
y = encoder.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)

model = NeuralNet()
model.add(Dense((4,10), name='input_layer', activation='relu'))
model.add(Dense((10,10), name='hidden_layer', activation='relu'))
model.add(Dense((10,3), name='output_layer', activation='sigmoid'))
model.fit(X_train, y_train, batch_size=5, epochs=200, step=1e-3)

difference = model.gradient_check(X_train, y_train)
And then, the result of print(difference) is
0.7992920544491866
So there is something wrong with my implementation. What should I check to find the causes of this large difference between the gradients?
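One thing worth checking, judging only from the code above: the two gradients being compared are not taken at the same point. Layer.backward both stores grad_w/grad_b and immediately applies the weight update, and fit runs many such updates before gradient_check is called, so gradients_to_theta returns gradients computed at older weights while the numerical differences are evaluated at the final weights. The stored gradients also contain the (l2 / m) * weights term, while the cost evaluated inside gradient_check does not add the regularization, so the two quantities differ even at the same point. A minimal sketch that at least removes the first mismatch by recomputing the analytic gradients at the current weights without moving them (step=0 is an assumption used only for the check, not a training setting):

# recompute gradients at the current parameters; step=0 leaves the weights unchanged
output = model.forward(X_train)
model.backward(X_train, y_train, output, step=0)
difference = model.gradient_check(X_train, y_train)
print(difference)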
