Functions within Keras 'Recurrent' layer - python

I am trying to follow an implementation of an attention decoder:
from keras.layers.recurrent import Recurrent
...
class AttentionDecoder(Recurrent):
    ...
    #######################################################
    # The functionality of init, build and call is clear
    #######################################################
    def __init__(self, units, output_dim,
                 activation='tanh',
                 ...
    def build(self, input_shape):
        ...
    def call(self, x):
        self.x_seq = x
        ...
        return super(AttentionDecoder, self).call(x)
    ##################################################################
    # What is the purpose of the 'get_initial_state' and 'step' functions?
    # Do these functions override the Recurrent base class functions?
    ##################################################################
    def get_initial_state(self, inputs):
        # apply the matrix on the first time step to get the initial s0
        s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))
        # following keras.layers.recurrent: initialize a vector of shape
        # (batch_size, output_dim)
        y0 = K.zeros_like(inputs)    # (samples, timesteps, input_dims)
        y0 = K.sum(y0, axis=(1, 2))  # (samples, )
        y0 = K.expand_dims(y0)       # (samples, 1)
        y0 = K.tile(y0, [1, self.output_dim])  # (samples, output_dim)
        return [y0, s0]

    def step(self, x, states):
        ytm, stm = states
        ...
        return yt, [yt, st]
The AttentionDecoder class inherits from Recurrent, the abstract base class for recurrent layers (keras.layers.recurrent.Recurrent).
How do the get_initial_state and step functions work within the class (who calls them, and when)? If these functions are related to the base class Recurrent, where can I find the relevant documentation?
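For context, the base class itself drives both methods. The following is a paraphrased sketch of how Recurrent.call worked in the Keras 2.0.x-era keras.layers.recurrent module (abbreviated from memory, not the verbatim implementation): it asks the subclass for the initial states and then hands step to the backend RNN loop.

# Paraphrased sketch of keras.layers.recurrent.Recurrent.call (Keras 2.0.x era)
def call(self, inputs, mask=None, training=None, initial_state=None):
    # The base class asks the subclass for its initial states, which is
    # why overriding get_initial_state customizes them.
    if self.stateful:
        initial_states = self.states
    else:
        initial_states = self.get_initial_state(inputs)
    # K.rnn invokes self.step once per timestep, threading the states it
    # returns into the next invocation.
    last_output, outputs, states = K.rnn(self.step, inputs, initial_states,
                                         go_backwards=self.go_backwards,
                                         mask=mask, unroll=self.unroll)
    return outputs if self.return_sequences else last_output

So both functions do override Recurrent hooks: the super(AttentionDecoder, self).call(x) at the end of AttentionDecoder.call triggers exactly this loop, and the subclass only needs to supply get_initial_state and step.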

Related

nn.LSTM not working together with functional_call for calculating the gradient

I have the following code:
import torch
from torch.nn.utils.stateless import functional_call
import torch.autograd as autograd
import torch.nn as nn

# This is the model
class Encoder(nn.Module):
    def __init__(self, action_dim, z_dim, skill_length):
        super().__init__()
        print(action_dim)
        self.lin1 = nn.Linear(action_dim, action_dim)
        self.lstm = nn.LSTM(input_size=action_dim, hidden_size=z_dim, batch_first=True)
        self.lin2 = nn.Linear(z_dim, z_dim)

    def forward(self, skill):
        a, b, c = skill.shape
        skill = skill.reshape(-1, skill.shape[-1])
        embed = self.lin1(skill)
        embed = embed.reshape(a, b, c)
        mean, _ = self.lstm(embed)
        mean = mean[:, -1, :]
        mean = self.lin2(mean)
        return mean

# This is the initialization function
def pars(model):
    params = {}
    for name, param in model.named_parameters():
        if len(param.shape) == 1:
            init = torch.nn.init.constant_(param, 0)
        else:
            init = torch.nn.init.orthogonal_(param)
        params[name] = nn.Parameter(init)
    return params

# Initializing the model
model = Encoder(4, 2, 5)
x = torch.rand(3, 5, 4)
params = pars(model)

# Running the model with functional_call and calculating the gradient
samp = functional_call(model, params, x)
grad_f = autograd.grad(torch.mean(samp), params.values(),
                       retain_graph=True, allow_unused=True)
print(grad_f)
# grad_f has gradients for the linear layers, but None for the LSTM layer.

# Running the model without functional_call and calculating the gradient
samp = model(x)
grad = autograd.grad(torch.mean(samp), model.parameters(), retain_graph=True)
print(grad)
# grad has gradients for all layers, i.e., the linears and the lstm.
I know the problem is with the LSTM layer, because when I use a linear layer (nn.Linear) in its place, the gradient flows to it as expected. Unfortunately, I do not know how to resolve this problem. I'd appreciate any help.
Edit: I heavily edited the code provided to simplify the example further. This code can be copied and run.
Update Dec 11, 2022
class Encoder(nn.Module):
    def __init__(self, action_dim, z_dim, skill_length):
        super().__init__()
        print(action_dim)
        self.lin1 = nn.Linear(action_dim, action_dim)
        self.lstm = nn.LSTM(input_size=action_dim, hidden_size=z_dim, batch_first=True)
        self.lin2 = nn.Linear(z_dim, z_dim)

    def forward(self, skill):
        a, b, c = skill.shape
        skill = skill.reshape(-1, skill.shape[-1])
        embed = self.lin1(skill)
        embed = embed.reshape(a, b, c)
        mean, _ = self.lstm(embed)
        pdb.set_trace()  # requires `import pdb`; `params` is the global dict from above
        grad1 = autograd.grad(mean.mean(), params.values(),
                              retain_graph=True, allow_unused=True)
        # This gives gradients for the self.lin1 layer, and None for the LSTM
        grad2 = autograd.grad(mean.mean(), self.parameters(),
                              retain_graph=True, allow_unused=True)
        # This gives gradients for the LSTM, but None for the self.lin1 layer
        mean = mean[:, -1, :]
        mean = self.lin2(mean)
        return mean
When I run it the regular way, without functional_call, calling the model directly, autograd.grad(mean.mean(), self.parameters(), allow_unused=True, retain_graph=True) returns gradients for both the self.lin1 layer and the LSTM layer.
I don't know if this information is useful, but I'm putting it out there just in case.
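A plausible explanation (an assumption based on how nn.LSTM is implemented, not a confirmed diagnosis): RNN modules cache their weight tensors in an internal _flat_weights list at construction time and pass that list to the backend kernel, so the tensors substituted by functional_call are never actually used, and their gradients come back as None. Under that assumption, a hedged workaround sketch is to rebuild the cached list from the module's current attributes on each forward pass; note it leans on the private RNNBase attributes _flat_weights / _flat_weights_names, so it may break across PyTorch versions.

import torch
import torch.nn as nn
import torch.autograd as autograd
from torch.nn.utils.stateless import functional_call

class Encoder(nn.Module):
    def __init__(self, action_dim, z_dim):
        super().__init__()
        self.lin1 = nn.Linear(action_dim, action_dim)
        self.lstm = nn.LSTM(input_size=action_dim, hidden_size=z_dim, batch_first=True)
        self.lin2 = nn.Linear(z_dim, z_dim)

    def forward(self, skill):
        embed = self.lin1(skill)
        # Rebuild the LSTM's cached weight list from the module's current
        # attributes so it sees whatever tensors functional_call swapped in.
        self.lstm._flat_weights = [getattr(self.lstm, name)
                                   for name in self.lstm._flat_weights_names]
        mean, _ = self.lstm(embed)
        return self.lin2(mean[:, -1, :])

model = Encoder(4, 2)
x = torch.rand(3, 5, 4)
params = {name: p.detach().clone().requires_grad_()
          for name, p in model.named_parameters()}
samp = functional_call(model, params, x)
grad_f = autograd.grad(samp.mean(), params.values(), allow_unused=True)
print(grad_f)  # if the assumption holds, the LSTM entries are no longer None

The newer torch.func.functional_call may also be worth trying, since it was written with reparametrization cases like this in mind.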

Clarification for self.forward function in Python

I am not able to understand the line sample_losses = self.forward(output, y) defined under the class Loss.
Which "forward" function is it calling, given that a forward function is defined separately in all three classes, i.e. Layer_Dense, Activation_ReLU and Activation_Softmax?
class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        print(self.weights)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

class Activation_Softmax:
    def forward(self, inputs):
        exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))
        probabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)
        self.output = probabilities

class Loss:
    def calculate(self, output, y):
        sample_losses = self.forward(output, y)
        data_loss = np.mean(sample_losses)
        return data_loss
In PyTorch, self.forward() is similar to the __call__ method, but with registered hooks: it is the method that gets run when the instance itself is called. These methods are inherited from nn.Module.
https://gist.github.com/nathanhubens/5a9fc090dcfbf03759068ae0fc3df1c9
Or refer to the source code:
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/module.py#L485
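Note, though, that the snippet in the question is plain NumPy, so nn.Module is not involved there: self.forward resolves dynamically on the instance, and Loss.calculate simply expects a subclass to provide forward. A minimal hypothetical subclass illustrating this (the mean-squared-error formula is just an example, not from the original code):

import numpy as np

class Loss:
    def calculate(self, output, y):
        sample_losses = self.forward(output, y)  # dispatched to the subclass
        data_loss = np.mean(sample_losses)
        return data_loss

class Loss_MSE(Loss):
    def forward(self, output, y):
        # per-sample squared error, averaged over the output dimensions
        return np.mean((output - y) ** 2, axis=-1)

loss = Loss_MSE()
print(loss.calculate(np.array([[0.1, 0.9]]), np.array([[0.0, 1.0]])))  # 0.01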

Tensorflow 2.0: flat_map() to flatten Dataset of Dataset returns cardinality -2

I am trying to run the following code (as given in the TensorFlow documentation) to create windows of my data and then flatten the resulting dataset of datasets.
window_size = 5
range_ds = tf.data.Dataset.range(100000)  # assumed source dataset, as in the TF docs example
windows = range_ds.window(window_size, shift=1)
for sub_ds in windows.take(5):
    print(sub_ds)

flat_windows = windows.flat_map(lambda x: x)
The problem is that flat_windows.cardinality().numpy() returns -2 as the cardinality, which creates problems for me during training. I tried looking for ways to set the cardinality of a dataset but couldn't find anything. I also tried other ways of flattening a dataset of datasets, but again without success.
Edit-1: The problem during training is that the shape is unknown (at the Linear and Dense layers) when I train the subclassed model (given below). The model trains well when I run it eagerly (through tf.config.run_functions_eagerly(True)), but that is slow. Therefore I want the input shapes to be known for the model training.
Neural Network
loss_tracker = tf.keras.metrics.Mean(name="loss")  # assumed: the metric referenced below (not shown in the original)

class NeuralNetworkModel(tf.keras.Model):
    def __init__(self):
        super(NeuralNetworkModel, self).__init__()
        self.encoder = Encoder()

    def train_step(self, inputs):
        X = inputs[0]
        Y = inputs[1]
        with tf.GradientTape() as tape:
            enc_X = self.encoder(X)
            enc_Y = self.encoder(Y)
            # loss:
            loss = tf.norm(enc_Y - enc_X, axis=[0, 1], ord='fro')
        # Compute gradients
        trainable_vars = self.encoder.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Compute our own metrics
        loss_tracker.update_state(loss)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {"loss": loss_tracker.result()}

    @property
    def metrics(self):
        # We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch
        # or at the start of `evaluate()`.
        # If you don't implement this property, you have to call
        # `reset_states()` yourself at the time of your choosing.
        return [loss_tracker]

    def test_step(self, inputs):
        X = inputs[0]
        Y = inputs[1]
        Psi_X = self.encoder(X)
        Psi_Y = self.encoder(Y)
        # loss:
        loss = tf.norm(Psi_Y - Psi_X, axis=[0, 1], ord='fro')
        # Compute our own metrics
        loss_tracker.update_state(loss)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {"loss": loss_tracker.result()}

class Encoder(tf.keras.Model):
    def __init__(self):
        super(Encoder, self).__init__(dtype='float64', name='Encoder')
        self.input_layer = DenseLayer(128)
        self.hidden_layer1 = DenseLayer(128)
        self.hidden_layer2 = DenseLayer(64)
        self.hidden_layer3 = DenseLayer(64)
        self.output_layer = LinearLayer(64)

    def call(self, input_data, training):
        fx = self.input_layer(input_data)
        fx = self.hidden_layer1(fx)
        fx = self.hidden_layer2(fx)
        fx = self.hidden_layer3(fx)
        return self.output_layer(fx)

class LinearLayer(tf.keras.layers.Layer):
    def __init__(self, units):
        super(LinearLayer, self).__init__(dtype='float64')
        self.units = units

    def build(self, input_shape):
        input_dim = input_shape[-1]
        self.w = self.add_weight(shape=(input_dim, self.units),
                                 initializer="random_normal",
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

class DenseLayer(tf.keras.layers.Layer):
    def __init__(self, units):
        super(DenseLayer, self).__init__(dtype='float64')
        self.units = units

    def build(self, input_shape):
        input_dim = input_shape[-1]
        self.w = self.add_weight(shape=(input_dim, self.units),
                                 initializer="random_normal",
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)

    def call(self, inputs):
        x = tf.matmul(inputs, self.w) + self.b
        return tf.nn.elu(x)
I was wondering about this as well. It turns out that -2 is tf.data.UNKNOWN_CARDINALITY (https://www.tensorflow.org/api_docs/python/tf/data#UNKNOWN_CARDINALITY), which indicates that TF doesn't know how many elements flat_map returns per item.
I just asked "Windowing a TensorFlow dataset without losing cardinality information?" to see if anyone knows a way to window datasets without losing cardinality.
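In the meantime, if you can compute the number of windows by hand, one option (a sketch assuming TF 2.2+, where tf.data.experimental.assert_cardinality is available) is to reassert the cardinality after the flat_map:

import tensorflow as tf

range_ds = tf.data.Dataset.range(100)  # hypothetical example source
window_size = 5
windows = range_ds.window(window_size, shift=1, drop_remainder=True)
# Batch each sub-dataset so every window becomes a single tensor element.
flat_windows = windows.flat_map(lambda w: w.batch(window_size))

# For this source the window count is known in closed form:
n_windows = 100 - window_size + 1  # 96
flat_windows = flat_windows.apply(
    tf.data.experimental.assert_cardinality(n_windows))
print(flat_windows.cardinality().numpy())  # 96 instead of -2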

How to set layer weights during training tensorflow

In every forward pass of the model, I want to apply l2 normalization to the softmax layer's columns and then set the weights back, as per the imprinted weights paper and this PyTorch implementation. I am using layer.set_weights() to set the normalized weights inside the call() function of the model, but this implementation only works with eager execution, as something goes wrong with layer.set_weights() when building the graph.
Here is the implementation of the model in TF 1.15:
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import Model
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense

class Extractor(Model):
    def __init__(self, input_shape):
        super(Extractor, self).__init__()
        self.basenet = ResNet50(include_top=False, weights="imagenet",
                                pooling="avg", input_shape=input_shape)

    def call(self, x):
        x = self.basenet(x)
        return x

class Embedding(Model):
    def __init__(self, num_nodes, norm=True):
        super(Embedding, self).__init__()
        self.fc = Dense(num_nodes, activation="relu")
        self.norm = norm

    def call(self, x):
        x = self.fc(x)
        if self.norm:
            x = tf.nn.l2_normalize(x)
        return x

class Classifier(Model):
    def __init__(self, n_classes, norm=True, bias=False):
        super(Classifier, self).__init__()
        self.n_classes = n_classes
        self.norm = norm
        self.bias = bias

    def build(self, inputs_shape):
        self.prediction = Dense(self.n_classes,
                                activation="softmax", use_bias=False)

    def call(self, x):
        if self.norm:
            w = self.prediction.trainable_weights
            if w:
                w = tf.nn.l2_normalize(w, axis=2)
                self.prediction.set_weights(w)
        x = self.prediction(x)
        return x

class Net(Model):
    def __init__(self, input_shape, n_classes, num_nodes, norm=True,
                 bias=False):
        super(Net, self).__init__()
        self.n_classes = n_classes
        self.num_nodes = num_nodes
        self.norm = norm
        self.bias = bias
        self.extractor = Extractor(input_shape)
        self.embedding = Embedding(self.num_nodes, norm=self.norm)
        self.classifier = Classifier(self.n_classes, norm=self.norm,
                                     bias=self.bias)

    def call(self, x):
        x = self.extractor(x)
        x = self.embedding(x)
        x = self.classifier(x)
        return x
The weight normalization happens in the call step of the Classifier class, where I call .set_weights() after normalizing the weights.
Creating the model with model = Net(input_shape, n_classes, num_nodes) and calling model(x) works, but model.predict() and model.fit() give me errors about .get_weights(). I can train the model in eager mode with a gradient tape, but it is extremely slow.
Is there another way I can set the weights of a Dense layer during training in each forward call that still lets me use the model outside of eager mode? By eager mode I mean with tf.enable_eager_execution() at the start of the program.
Here is the error I get when I use model.predict(x) instead:
TypeError: len is not well defined for symbolic Tensors. (imprint_net_1/classifier/l2_normalize:0) Please call `x.shape` rather than `len(x)` for shape information.
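One graph-compatible alternative worth considering (a sketch, not the exact semantics asked for: a kernel constraint renormalizes after each optimizer update rather than on every forward pass) is to let Keras keep the kernel's columns l2-normalized via tf.keras.constraints.UnitNorm:

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.constraints import UnitNorm

n_classes = 10  # hypothetical
# axis=0 normalizes each column of the kernel, i.e. each class's weight
# vector, after every optimizer update, entirely inside the graph.
prediction = Dense(n_classes,
                   activation="softmax",
                   use_bias=False,
                   kernel_constraint=UnitNorm(axis=0))

If the normalization must happen on the forward pass itself, another option is a custom layer that normalizes its kernel symbolically in call, e.g. tf.matmul(x, tf.nn.l2_normalize(self.w, axis=0)), instead of writing NumPy values back with set_weights().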

Embed trainable bijector into Keras model

I am trying to implement normalizing flows embedded in a Keras model. In all the examples I can find, such as the documentation of MAF, the bijectors that constitute the normalizing flow are embedded in a TransformedDistribution and exposed directly for training, etc.
I am trying to embed this TransformedDistribution in a Keras Model to match the architecture of my other models, which inherit from keras Model.
Unfortunately, all my attempts (see the code below) so far fail to transfer the trainable variables inside the transformed distribution to the Keras Model.
I have tried making the bijector inherit from tf.keras.layers.Layer, which did not change anything.
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfb = tfp.bijectors

class Flow(tfb.Bijector, tf.Module):
    """
    tf.Module to register trainable_variables
    """
    def __init__(self, d, init_sigma=0.1, **kwargs):
        super(Flow, self).__init__(
            dtype=tf.float32,
            forward_min_event_ndims=0,
            inverse_min_event_ndims=0,
            **kwargs
        )
        # Shape of the flow goes from Rd to Rd
        self.d = d
        # Weights/Variables initializer
        self.init_sigma = init_sigma
        w_init = tf.random_normal_initializer(stddev=self.init_sigma)
        # Variables
        self.u = tf.Variable(
            w_init(shape=[1, self.d], dtype=tf.float32),
            dtype=tf.float32,
            name='u',
            trainable=True,
        )

    def _forward(self, x):
        return x

    def _inverse(self, y):
        return y

class Flows(tf.keras.Model):
    def __init__(self, d=2, shape=(100, 2), n_flows=10):
        super(Flows, self).__init__()
        # Parameters
        self.d = d
        self.shape = shape
        self.n_flows = n_flows
        # Base distribution - MF = Multivariate normal diag
        base_distribution = tfd.MultivariateNormalDiag(
            loc=tf.zeros(shape=shape, dtype=tf.float32)
        )
        # Flows as chain of bijectors
        flows = []
        for n in range(n_flows):
            flows.append(Flow(self.d, name=f"flow_{n + 1}"))
        bijector = tfb.Chain(list(reversed(flows)))
        self.flow = tfd.TransformedDistribution(
            distribution=base_distribution,
            bijector=bijector
        )

    def call(self, *inputs):
        return self.flow.bijector.forward(*inputs)

    def log_prob(self, *inputs):
        return self.flow.log_prob(*inputs)

    def sample(self, num):
        return self.flow.sample(num)

q = Flows()
# Call to instantiate variables
q(tf.zeros(q.shape))
# Prints no trainable params
print(q.summary())
# Prints the expected trainable params
print(q.flow.trainable_variables)
Any idea if this is even possible? Thanks!
I bumped into this issue as well. It seems to be caused by incompatibility issues between TFP and TF 2.0 (a couple of relevant issues: https://github.com/tensorflow/probability/issues/355 and https://github.com/tensorflow/probability/issues/946).
As a workaround, you need to add the (trainable) variables of your transformed distribution / bijector as an attribute of your Keras Model:
class Flows(tf.keras.Model):
    def __init__(self, d=2, shape=(100, 2), n_flows=10):
        super(Flows, self).__init__()
        # Parameters
        self.d = d
        self.shape = shape
        self.n_flows = n_flows
        # Base distribution - MF = Multivariate normal diag
        base_distribution = tfd.MultivariateNormalDiag(
            loc=tf.zeros(shape=shape, dtype=tf.float32)
        )
        # Flows as chain of bijectors
        flows = []
        for n in range(n_flows):
            flows.append(Flow(self.d, name=f"flow_{n + 1}"))
        bijector = tfb.Chain(list(reversed(flows)))
        self.flow = tfd.TransformedDistribution(
            distribution=base_distribution,
            bijector=bijector
        )
        # issue: https://github.com/tensorflow/probability/issues/355, https://github.com/tensorflow/probability/issues/946
        # need to add the bijector's trainable variables as an attribute (the name does not matter)
        # otherwise this layer has zero trainable variables
        self._variables = self.flow.variables  # https://github.com/tensorflow/probability/issues/355

    def call(self, *inputs):
        return self.flow.bijector.forward(*inputs)

    def log_prob(self, *inputs):
        return self.flow.log_prob(*inputs)

    def sample(self, num):
        return self.flow.sample(num)
After adding this, your model should have trainable variables and weights to optimize.
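As a quick sanity check (a sketch assuming the Flow class above, where each flow contributes a single variable u):

q = Flows()
q(tf.zeros(q.shape))               # build the model once
print(len(q.trainable_variables))  # expected: n_flows, i.e. one `u` per Flow
q.summary()                        # the trainable parameter count should now be non-zero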
