Can't init the weights of my neural network PyTorch - python

I can't initialize the weights with MyNet.apply(init_weights).
This is my function:
def init_weights(net):
    if type(net) == torch.nn.Module:
        torch.nn.init.kaiming_uniform_(net.weight)
        net.bias.data.fill_(0.01)  # all biases set to 0.01
My neural net is the following:
class NeuralNet(torch.nn.Module):
    def __init__(self):
        super().__init__()  # Necessary for torch to detect this class as trainable
        # Here define network architecture
        self.layer1 = torch.nn.Linear(28**2, 32).to(device)  # Linear layer with 32 neurons
        self.layer2 = torch.nn.Linear(32, 64).to(device)     # Linear layer with 64 neurons
        self.layer3 = torch.nn.Linear(64, 128).to(device)    # Linear layer with 128 neurons
        self.output = torch.nn.Linear(128, 1).to(device)     # Linear layer with 1 output neuron (binary output)

    def forward(self, x):
        # Here define architecture behavior
        x = torch.sigmoid(self.layer1(x)).to(device)  # x = torch.nn.functional.relu(self.layer1(x))
        x = torch.sigmoid(self.layer2(x)).to(device)
        x = torch.sigmoid(self.layer3(x)).to(device)
        return torch.sigmoid(self.output(x)).to(device)  # Binary output
type(net) prints as Linear, so execution never enters the if statement, and if I remove the check it produces the following error:
AttributeError: 'NeuralNet' object has no attribute 'weight'

You should initialize only the weights of the linear layers:
def init_weights(net):
    if type(net) == torch.nn.Linear:
        torch.nn.init.kaiming_uniform_(net.weight)
        net.bias.data.fill_(0.01)  # all biases set to 0.01
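For completeness, a minimal sketch of how the corrected function is used (my own illustration; it reuses the NeuralNet class from the question and assumes a `device` as defined there): Module.apply calls the function on every submodule, so the type check against torch.nn.Linear matches each linear layer in turn.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # assumed, as in the question

MyNet = NeuralNet()           # NeuralNet as defined in the question
MyNet.apply(init_weights)     # apply() visits every submodule (layer1..layer3, output) and the module itself
print(MyNet.layer1.bias[:4])  # every bias entry should now be 0.01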

Related

How to implement a custom non-trainable PCA layer on Keras

I am trying to develop a non-trainable custom PCA layer where, starting from a float input of size 256, the output is a two-dimensional vector obtained by applying PCA inside the network as the last layer. Before this layer I need one Dense layer. Below is my PCA implementation via TensorFlow and the custom layer:
class TF_PCA:
    def __init__(self, X):
        self.X = X
        self.u = None
        self.singular_values = None
        self.sigma = None

    def fit(self):
        # Perform SVD
        self.singular_values, self.u, _ = tf.linalg.svd(self.X)
        # Create sigma matrix
        self.sigma = tf.linalg.diag(self.singular_values)

    def reduce(self, n_dimensions=None, keep_info=None):
        if keep_info:
            # Normalize singular values
            normalized_singular_values = self.singular_values / sum(self.singular_values)
            # Create the aggregated ladder of kept information per dimension
            ladder = np.cumsum(normalized_singular_values)
            # Get the first index which is above the given information threshold
            index = next(idx for idx, value in enumerate(ladder) if value >= keep_info) + 1
            n_dimensions = index
        # Cut out the relevant part from sigma
        sigma = tf.slice(self.sigma, [0, 0], [self.X.shape[1], n_dimensions])
        # PCA
        pca = tf.matmul(self.u, sigma)
        return pca


class CustomPCALayer(tf.keras.layers.Layer):
    def __init__(self, num_outputs):
        super(CustomPCALayer, self).__init__()
        self.num_outputs = num_outputs
        self.total = tf.Variable(initial_value=tf.zeros((num_outputs,)), trainable=False)

    def call(self, inputs):
        tf_pca = TF_PCA(inputs)
        tf_pca.fit()
        pca = tf_pca.reduce(n_dimensions=self.num_outputs)
        return tf.convert_to_tensor(pca, dtype=tf.float32)
Once I had implemented this, I also implemented the neural network:
in_dim = (256,)
out_dim = 2

def build_network(inputs, out_dim):
    x = Dense(256)(inputs)
    x = Dropout(0.2)(x)
    x = CustomPCALayer(out_dim)(x)  # non-trainable layer
    return x

inputs = Input(shape=in_dim)
network = build_network(inputs, out_dim)
model = Model(inputs=inputs, outputs=[network], name="network")
opt = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=opt,
              loss="mse",
              metrics=[tf.keras.metrics.CosineSimilarity()])
As input and output I have something like this:
x = tf.random.uniform((300, 256))
y = tf.random.uniform((300, 2))
If I try to train, it immediately fails with the error NotImplementedError: SVD gradient has not been implemented for input with unknown inner matrix shape.
I would like to understand what is wrong with my architecture in order to solve the error and have a non-trainable layer that performs PCA.
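A small illustration of where the unknown shape comes from (my own sketch, reusing the TF_PCA class above; this is not a fix): outside the Keras model the SVD input has a fully known shape, while inside the compiled model the layer receives a symbolic tensor of shape (None, 256), so the inner matrix shape that the SVD gradient needs is only partially known.
import numpy as np
import tensorflow as tf

# Sketch only (not from the original post): with a concrete tensor the SVD and
# the reduction work, because tf.linalg.svd sees a fully known (300, 256) shape.
x_static = tf.random.uniform((300, 256))
tf_pca = TF_PCA(x_static)
tf_pca.fit()
print(tf_pca.reduce(n_dimensions=2).shape)   # (300, 2)

# Inside the Keras model, CustomPCALayer is called on a symbolic tensor of shape
# (None, 256); when model.fit tries to backpropagate through tf.linalg.svd, the
# partially unknown inner matrix shape triggers the NotImplementedError above.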

PyTorch: Sizes of tensors must match on 2 input neural network

I am attempting to recreate a 2 input neural network from this article: https://towardsdatascience.com/moving-from-keras-to-pytorch-f0d4fff4ce79
I have copied the network described in the post and adjusted it so that it fits my data. The first input is from GloVe word embeddings, while the other consists of numerical features about the text data.
class Net(nn.Module):
    def __init__(self, hidden_size, lin_size, embedding_matrix=embedding_weights):
        super(Net, self).__init__()
        # Initialize some parameters for your model
        self.hidden_size = hidden_size
        drp = 0.1
        # Layer 1: Embeddings.
        self.embedding = nn.Embedding(size_of_vocabulary, pretrained_embedding_dim)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        # Layer 2: Dropout1D(0.1)
        self.embedding_dropout = nn.Dropout2d(0.1)
        # Layer 3: Bidirectional CuDNNLSTM
        self.lstm = nn.LSTM(pretrained_embedding_dim, hidden_size, bidirectional=True, batch_first=True)
        # Layer 4: Bidirectional CuDNNGRU
        self.gru = nn.GRU(hidden_size*2, hidden_size, bidirectional=True, batch_first=True)
        # Layer 7: A dense layer
        self.linear = nn.Linear(hidden_size*6 + X2_train.shape[1], lin_size)
        self.relu = nn.ReLU()
        # Layer 8: A dropout layer
        self.dropout = nn.Dropout(drp)
        # Layer 9: Output dense layer with one output for our Binary Classification problem.
        self.out = nn.Linear(lin_size, 1)

    def forward(self, x):
        '''
        Here x[0] represents the first element of the input that is going to be passed.
        We are going to pass a tuple where the first one contains the sequences (x[0])
        and the second one is an additional feature vector (x[1]).
        '''
        h_embedding = self.embedding(x[0].long())
        h_embedding = torch.squeeze(self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))
        # print("emb", h_embedding.size())
        h_lstm, _ = self.lstm(h_embedding)
        # print("lst", h_lstm.size())
        h_gru, hh_gru = self.gru(h_lstm)
        hh_gru = hh_gru.view(-1, 2*self.hidden_size)
        print("gru", h_gru.size())
        print("h_gru", hh_gru.size())
        # Layer 5: is defined dynamically as an operation on tensors.
        avg_pool = torch.mean(h_gru, 1)
        max_pool, _ = torch.max(h_gru, 1)
        print("avg_pool", avg_pool.size())
        print("max_pool", max_pool.size())
        # The extra features you want to give to the model
        f = torch.tensor(x[1], dtype=torch.float).cuda()
        print("f", f.size())
        # Layer 6: A concatenation of the last state, maximum pool, average pool and
        # additional features
        conc = torch.cat((hh_gru, avg_pool, max_pool, f), 1)
        # print("conc", conc.size())
        # Passing conc through linear and relu ops
        conc = self.relu(self.linear(conc))
        conc = self.dropout(conc)
        out = self.out(conc)
        # Return the final output
        return out
And during runtime I get an error on the concatenation line:
RuntimeError: Sizes of tensors must match except in dimension 0. Got 33164 and 20 (The offending index is 0)
From the dimensions of the outputs, I can see where the problem lies, but I am not sure how I can fix it.
The data inputs to the network are:
torch.Size([20, 150])
torch.Size([33164, 40])
The sizes of the layer outputs are:
gru torch.Size([20, 150, 80])
h_gru torch.Size([20, 80])
avg_pool torch.Size([20, 80])
max_pool torch.Size([20, 80])
f torch.Size([33164, 40])
For the example above, the batch size is 20, hidden_size is 40, the number of rows in the numerical data features is 33164, and its feature size is 40.
Thanks in advance for any help.
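The printed sizes show the mismatch: x[0] is a 20-sample batch while x[1] still holds the full 33164-row feature matrix, so torch.cat fails along the batch dimension. Below is a rough sketch (my own, with dummy data and an assumed `model = Net(...)` instance) of batching the sequences and the numerical features together so both inputs share the same batch size of 20:
import torch
from torch.utils.data import TensorDataset, DataLoader

# Dummy stand-ins with the shapes from the question (100 is a made-up vocabulary size).
seqs   = torch.randint(0, 100, (33164, 150))        # tokenised sequences, one row per sample
feats  = torch.rand(33164, 40)                      # numerical features, one row per sample
labels = torch.randint(0, 2, (33164, 1)).float()

loader = DataLoader(TensorDataset(seqs, feats, labels), batch_size=20, shuffle=True)

for batch_seqs, batch_feats, batch_labels in loader:
    # batch_seqs: (20, 150), batch_feats: (20, 40) -> torch.cat(..., dim=1) now lines up
    out = model((batch_seqs, batch_feats))          # `model` is a Net instance on the GPU (assumed)
    # ... compute the loss against batch_labels, backward(), optimizer step as usual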

Trainable Matrix multiplication Layer

I'm trying to build a (custom) trainable matrix-multiplication layer in TensorFlow, but things aren't working out... More precisely, my model should look like this:
x -> A(x) x
where A(x) is a feed-forward network with values in the n x n matrices (and thus depends on the input x), and A(x) x is a matrix-by-vector multiplication.
Here's what I've coded-up:
class custom_layer(tf.keras.layers.Layer):
    def __init__(self, units=16, input_dim=32):
        super(custom_layer, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.Tw1 = self.add_weight(name='Weights_1',
                                   shape=(input_shape[-1], input_shape[-1]),
                                   initializer='GlorotUniform',
                                   trainable=True)
        self.Tw2 = self.add_weight(name='Weights_2',
                                   shape=(input_shape[-1], (self.units)**2),
                                   initializer='GlorotUniform',
                                   trainable=True)
        self.Tb = self.add_weight(name='biases',
                                  shape=(input_shape[-1],),
                                  initializer='GlorotUniform',  # previously 'ones'
                                  trainable=True)

    def call(self, input):
        # Build vector-valued feed-forward network
        ffNN = tf.matmul(input, self.Tw1) + self.Tb
        ffNN = tf.nn.relu(ffNN)
        ffNN = tf.matmul(ffNN, self.Tw2)
        # Map to matrix
        ffNN = tf.reshape(ffNN, [self.units, self.units])
        # Multiply matrix-valued function with input data
        x_out = tf.matmul(ffNN, input)
        # Return output
        return x_out
Now I build the model:
input_layer = tf.keras.Input(shape=[2])
output_layer = custom_layer(2)(input_layer)
model = tf.keras.Model(inputs=[input_layer], outputs=[output_layer])

# Compile Model
#----------------#
# Define Optimizer
optimizer_on = tf.keras.optimizers.SGD(learning_rate=10**(-1))
# Compile
model.compile(loss='mse',
              optimizer=optimizer_on,
              metrics=['mse'])

# Fit Model
#----------------#
model.fit(data_x, data_y, epochs=(10**1), verbose=0)
and then I get this error message:
InvalidArgumentError: Input to reshape is a tensor with 128 values, but the requested shape has 4
[[node model_62/reconfiguration_unit_70/Reshape (defined at <ipython-input-176-0b494fa3fc75>:46) ]] [Op:__inference_distributed_function_175181]
Errors may have originated from an input operation.
Input Source operations connected to node model_62/reconfiguration_unit_70/Reshape:
model_62/reconfiguration_unit_70/MatMul_1 (defined at <ipython-input-176-0b494fa3fc75>:41)
Function call stack:
distributed_function
Thoughts:
It seems like something is wrong with the network dimensions, but I can't figure out what is wrong or how to repair it...
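For what it's worth, here is a sketch of where I think the shapes clash (my own reading, not a confirmed fix). The input has shape (batch, 2), so after the two matmuls ffNN has shape (batch, units**2) = (batch, 4); the 128 values in the error correspond to a batch of 32 times 4, while tf.reshape(ffNN, [self.units, self.units]) asks for only 4 values. Keeping the batch dimension and using a batched matrix-vector product avoids that:
import tensorflow as tf

# Sketch of an alternative call() for the custom layer (assumes the weights
# defined in build() above; units must equal the input dimension).
def call(self, input):
    ffNN = tf.matmul(input, self.Tw1) + self.Tb             # (batch, input_dim)
    ffNN = tf.nn.relu(ffNN)
    ffNN = tf.matmul(ffNN, self.Tw2)                        # (batch, units**2)
    A = tf.reshape(ffNN, [-1, self.units, self.units])      # (batch, units, units)
    return tf.linalg.matvec(A, input)                       # batched A(x) @ x, shape (batch, units)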

How deep a neural network is required for 12 inputs ranging from -5000 to 5000 in A3C reinforcement learning

I am trying to use A3C with LSTM for an environment whose states have 12 inputs ranging from -5000 to 5000.
I am using an LSTM layer of size 12, then 2 fully connected hidden layers of size 256, then one fully connected layer for the 3-dimensional action output and one for the value function.
The reward is in the range (-1, 1).
However, during initial training I am unable to get good results.
My question is: is this neural network good enough for this kind of environment?
Below is the code for the actor-critic:
class ActorCritic(torch.nn.Module):
    def __init__(self, params):
        super(ActorCritic, self).__init__()
        self.state_dim = params.state_dim
        self.action_space = params.action_dim
        self.hidden_size = params.hidden_size
        state_dim = params.state_dim
        self.lstm = nn.LSTMCell(state_dim, state_dim)
        self.lstm.bias_ih.data.fill_(0)
        self.lstm.bias_hh.data.fill_(0)
        lst = [state_dim]
        for i in range(params.layers):
            lst.append(params.hidden_size)
        self.hidden = nn.ModuleList()
        for k in range(len(lst)-1):
            self.hidden.append(nn.Linear(lst[k], lst[k+1]))
        for layer in self.hidden:
            layer.apply(init_weights)
        self.critic_linear = nn.Linear(params.hidden_size, 1)
        self.critic_linear.apply(init_weights)
        self.actor_linear = nn.Linear(params.hidden_size, self.action_space)
        self.actor_linear.apply(init_weights)
        self.train()

    def forward(self, inputs):
        inputs, (hx, cx) = inputs
        inputs = inputs.reshape(1, -1)
        hx, cx = self.lstm(inputs, (hx, cx))
        x = hx
        for layer in self.hidden:
            x = torch.tanh(layer(x))
        return self.critic_linear(x), self.actor_linear(x), (hx, cx)


class Params():
    def __init__(self):
        self.lr = 0.0001
        self.gamma = 0.99
        self.tau = 1.
        self.num_processes = os.cpu_count()
        self.state_dim = 12
        self.action_dim = 3
        self.hidden_size = 256
        self.layers = 2
        self.epochs = 10
        self.lstm_layers = 1
        self.lstm_size = self.state_dim
        self.num_steps = 20
        self.window = 50
Since you have only 12 inputs, make sure you don't use too many parameters, and also try changing the activation function.
I don't use Torch, so I can't fully follow the model architecture.
Why is your first layer an LSTM? Is your data a time series?
Try using only Dense layers:
1. one Dense layer with 12 neurons plus the output layer
2. two Dense layers with 12 neurons each plus the output layer
As for the activation function, use leaky ReLU, since your data goes down to -5000, or you can make your data positive only by adding 5000 to all data samples.
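A minimal PyTorch sketch of that suggestion (my own interpretation, not code from the post): Dense layers only, LeakyReLU activations, and the 12 inputs rescaled out of the raw -5000..5000 range before they reach the network.
import torch
import torch.nn as nn

class SimpleActorCritic(nn.Module):
    def __init__(self, state_dim=12, hidden=12, action_dim=3):
        super().__init__()
        self.body = nn.Sequential(
            nn.Linear(state_dim, hidden), nn.LeakyReLU(),
            nn.Linear(hidden, hidden), nn.LeakyReLU(),
        )
        self.actor = nn.Linear(hidden, action_dim)   # policy head
        self.critic = nn.Linear(hidden, 1)           # value head

    def forward(self, state):
        x = (state + 5000.0) / 10000.0               # crude rescale of -5000..5000 to roughly 0..1
        x = self.body(x)
        return self.critic(x), self.actor(x)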

Tensorflow gradients do not exist for bias in custom layer

I've built an input convex neural network in TensorFlow, following this arXiv paper; it is a scalar-output feed-forward model. The first hidden layer is dense and the subsequent layers are custom layers that take two inputs: the output from the previous layer (the kernel input) and the model input (the passthrough input). Separate weights are applied to each. This allows a positive-weights regularizer to be applied to the kernel weights but not to the passthrough weights. I calculate the regularizer and add it with self.add_loss in the call method of the custom layer. I'm also using custom activation functions: squared leaky ReLU and leaky ReLU.
When I am training this network I am able to calculate a gradient for the bias in the first dense layer, but I get a warning that no gradient exists for the bias in the custom layer. When I add @tf.function to my activation functions the warning goes away, but the gradient is 0. Furthermore, loss.numpy() throws an error when I use @tf.function and run in a local Jupyter notebook (but not in Colab).
Any ideas why the bias gradient exists for the dense layer but not for the custom layer, and how to calculate the bias gradient for all layers? A minimal working example is provided in this Colab notebook. Much appreciated!
Below is my custom layer. It's very similar to the standard dense layer.
# Imports assumed (not shown in the original post):
import tensorflow as tf
from tensorflow.keras import activations, constraints, initializers, regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.python.framework import dtypes, tensor_shape


class DensePartiallyConstrained(Layer):
    '''
    A custom layer inheriting from the `tf.keras.layers.Layer` class.
    This class is a fully-connected layer with two inputs. This allows
    for different constraints on the weights of each input. This enables
    a passthrough of the inputs to each hidden layer to have no
    weight constraints while the input from the previous layer can have
    a positive constraint. It also allows for different initializations
    of the weight values for each input.
    Most of this code and documentation was borrowed from the
    `tf.keras.layers.Dense` documentation on GitHub (thanks!).
    '''
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 passthrough_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_constraint=None,
                 passthrough_constraint=None,
                 bias_constraint=None,
                 activity_regularizer=None,
                 regularizer_constant=1.0,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(DensePartiallyConstrained, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
        self.units = int(units)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.passthrough_initializer = initializers.get(passthrough_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.passthrough_constraint = constraints.get(passthrough_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # This is for add_loss in the call() method
        self.regularizer_constant = regularizer_constant
        # What does this do?
        self.supports_masking = True
        self.kernel_input_spec = InputSpec(min_ndim=2)
        self.passthrough_input_spec = InputSpec(min_ndim=2)

    def build(self, input_shape):
        # Input shapes provided as list [kernel, passthrough]
        kernel_input_shape, passthrough_input_shape = input_shape
        # Check for proper datatype
        dtype = dtypes.as_dtype(self.dtype or K.floatx())
        if not (dtype.is_floating or dtype.is_complex):
            raise TypeError('Unable to build `DensePartiallyConstrained` layer with non-floating point '
                            'dtype %s' % (dtype,))
        # Check kernel input dimensions
        kernel_input_shape = tensor_shape.TensorShape(kernel_input_shape)
        if tensor_shape.dimension_value(kernel_input_shape[-1]) is None:
            raise ValueError('The last dimension of the inputs to `DensePartiallyConstrained` '
                             'should be defined. Found `None`.')
        kernel_last_dim = tensor_shape.dimension_value(kernel_input_shape[-1])
        self.kernel_input_spec = InputSpec(min_ndim=2,
                                           axes={-1: kernel_last_dim})
        # Check passthrough input dimensions
        passthrough_input_shape = tensor_shape.TensorShape(passthrough_input_shape)
        if tensor_shape.dimension_value(passthrough_input_shape[-1]) is None:
            raise ValueError('The last dimension of the inputs to `DensePartiallyConstrained` '
                             'should be defined. Found `None`.')
        passthrough_last_dim = tensor_shape.dimension_value(passthrough_input_shape[-1])
        self.passthrough_input_spec = InputSpec(min_ndim=2,
                                                axes={-1: passthrough_last_dim})
        # Add weights to kernel (between layer connections)
        self.kernel = self.add_weight(name='kernel',
                                      shape=[kernel_last_dim, self.units],
                                      initializer=self.kernel_initializer,
                                      constraint=self.kernel_constraint,
                                      dtype=self.dtype,
                                      trainable=True)
        # Add weight to input passthrough
        self.passthrough = self.add_weight(name='passthrough',
                                           shape=[passthrough_last_dim, self.units],
                                           initializer=self.passthrough_initializer,
                                           constraint=self.passthrough_constraint,
                                           dtype=self.dtype,
                                           trainable=True)
        # Add weights to bias
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=[self.units,],
                                        initializer=self.bias_initializer,
                                        constraint=self.bias_constraint,
                                        dtype=self.dtype,
                                        trainable=True)
        else:
            self.bias = None
        self.built = True
        super(DensePartiallyConstrained, self).build(input_shape)

    def call(self, inputs):
        # Inputs provided as list [kernel, passthrough]
        kernel_input, passthrough_input = inputs
        # Calculate weights regularizer
        self.add_loss(self.regularizer_constant * tf.reduce_sum(tf.square(tf.math.maximum(tf.negative(self.kernel), 0.0))))
        # Calculate layer output
        outputs = tf.add(tf.matmul(kernel_input, self.kernel), tf.matmul(passthrough_input, self.passthrough))
        if self.use_bias:
            outputs = tf.add(outputs, self.bias)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
And my activation functions:
#@tf.function
def squared_leaky_ReLU(x, alpha = 0.2):
    return tf.square(tf.maximum(x, alpha * x))

#@tf.function
def leaky_ReLU(x, alpha = 0.2):
    return tf.maximum(x, alpha * x)
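For context, a rough sketch of how layers like these might be wired together, based on the description above (my own illustration with assumed layer sizes, not the code from the Colab notebook): the first hidden layer is a plain Dense layer, and each DensePartiallyConstrained layer takes the previous output as its kernel input and the model input as its passthrough input.
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

x_in = Input(shape=(2,))                                   # input dimension is an assumption
z = Dense(16, activation=squared_leaky_ReLU)(x_in)         # first hidden layer: ordinary Dense
z = DensePartiallyConstrained(16, activation=squared_leaky_ReLU)([z, x_in])
f_out = DensePartiallyConstrained(1, activation=leaky_ReLU)([z, x_in])    # scalar output
model = Model(inputs=x_in, outputs=f_out)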
Edit:
With a TensorFlow update I can now access loss.numpy() when using @tf.function with my activation functions. This returns 0 gradients for the bias in all of my custom layers.
I'm beginning to think that the lack of gradient for the bias terms in the custom layer might have something to do with my loss function:
[image: minimax loss]
where
[image: regularizer]
is the regularization for the weights in the custom layer kernel only. The loss for g(x) is based on the gradient with respect to the inputs, so it doesn't contain any information about the bias (the bias in f(x) updates normally). Still, if this is the case, I don't understand why the bias in the first hidden dense layer of g(y) is updated? The networks are identical other than f(x) having a positive constraint on the kernel weights.
