I am writing a custom model with tf.keras, and in a recurrent node I need to get the value of my 1D input as an int.
That recurrent node needs to build a 1xN tensor whose elements are the result of N iterations of a function f(x).
So I created a numpy array of size N which is filled one element at a time at each iteration, and then I convert the numpy array to a tensor.
The problem is that I can't get the value of my 1D tensor as an int.
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers

def f(x):
    return 3*x

class myLayer(layers.Layer):
    def __init__(self, units=1, input_dim=1):
        super(myLayer, self).__init__()
        self.units = units
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(initial_value=w_init(shape=(input_dim, units),
                                                  dtype='float32'),
                             trainable=False)

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=False,
        )

    ####### IMPORTANT PART HERE ######
    def call(self, inputs):
        # In this example N = 20
        # Define numpy array
        x = np.zeros(20)
        # Set its first value to my 1D input # ERROR HERE
        x[0] = inputs[0]
        # Assign the other elements of x
        for i in range(1, 20):
            x[i] = f(x[i-1])
        # Cast to tensor
        return tf.constant(x, shape=(1, 20))
class Linear(layers.Layer):
    def __init__(self, units=1, input_dim=20):
        super(Linear, self).__init__()
        self.units = units
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(initial_value=w_init(shape=(input_dim, units),
                                                  dtype='float32'),
                             trainable=True)
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(initial_value=b_init(shape=(units,),
                                                  dtype='float32'),
                             trainable=True)

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

def build_model():
    model = tf.keras.Sequential([
        myLayer(),
        Linear()
    ])
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])
    #model.build([1])
    return model
class PrintDot(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        if epoch % 100 == 0: print(epoch)
        print('.', end='')

train_X = np.linspace(0, 99, num=100)
train_y = 2*train_X
train_X = train_X / np.linalg.norm(train_X)

model = build_model()
#print(model.summary())
epochs = 10
history = model.fit(train_X, train_y, epochs=epochs, validation_split=0.2, verbose=0, callbacks=[PrintDot()])

hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
print(hist.tail())
# x = tf.ones((1,1)) * 0.21
# horse_layer = Horseshoe()
# y = horse_layer(x)
# print(y)
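As an aside, one possible graph-friendly rework of the call method (a sketch, not a tested fix): since N is fixed when the model is built, the loop can stay in Python but operate on tensors directly, which avoids both the numpy buffer and the need to read the input as a Python int.

def f(x):
    return 3*x

def iterate(inputs, n=20):
    # Sketch: build the 1xN tensor symbolically so it also works on the
    # symbolic inputs that Keras passes to call(). N = 20 as in the question.
    x = tf.reshape(inputs[0], [])  # scalar seed, mirrors x[0] = inputs[0]
    xs = [x]
    for _ in range(n - 1):         # static Python loop, unrolled into the graph
        x = f(x)
        xs.append(x)
    return tf.reshape(tf.stack(xs), (1, n))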
Related
I train the following model based on GRU; note that I am passing the argument stateful=True to the GRU builder.
class LearningToSurpriseModel(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, rnn_units):
        super().__init__(self)
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(rnn_units,
                                       stateful=True,
                                       return_sequences=True,
                                       return_state=True,
                                       reset_after=True
                                       )
        self.dense = tf.keras.layers.Dense(vocab_size)

    def call(self, inputs, states=None, return_state=False, training=False):
        x = inputs
        x = self.embedding(x, training=training)
        if states is None:
            states = self.gru.get_initial_state(x)
        x, states = self.gru(x, initial_state=states, training=training)
        x = self.dense(x, training=training)
        if return_state:
            return x, states
        else:
            return x

    @tf.function
    def train_step(self, inputs):
        [defining here my training step]
I instantiate my model:

model = LearningToSurpriseModel(
    vocab_size=len(ids_from_chars.get_vocabulary()),
    embedding_dim=embedding_dim,
    rnn_units=rnn_units
)

[compile and do stuff]
The custom callback below resets states manually at the end of each epoch.

gru_layer = model.layers[1]

class CustomCallback(tf.keras.callbacks.Callback):
    def __init__(self, gru_layer):
        self.gru_layer = gru_layer
    def on_epoch_end(self, epoch, logs=None):
        self.gru_layer.reset_states()

model.fit(train_dataset, validation_data=validation_dataset,
          epochs=EPOCHS, callbacks=[EarlyS, CustomCallback(gru_layer)], verbose=1)
States will be reset to zero. I would like to follow the ideas in https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html to make states trainable. The implementation in that post seems to be based on plain TensorFlow and overrides native functions; maybe there is a more elegant way in Keras.
(1) How do I make states trainable?
(2) How do I combine trainable states and random initialization?
Based on the very good answer above, here is the full code for solving the case:
class CustomGRULayer(tf.keras.layers.Layer):
    def __init__(self, rnn_units, batch_size):
        super(CustomGRULayer, self).__init__()
        self.rnn_units = rnn_units
        self.batch_size = batch_size
        self.gru = tf.keras.layers.GRU(self.rnn_units,
                                       stateful=True,
                                       return_sequences=True,
                                       return_state=True,
                                       reset_after=True,
                                       )
        self.w = None

    def build(self, input_shape):
        w_init = tf.random_normal_initializer(mean=0.0, stddev=0.2)
        self.w = tf.Variable(
            initial_value=w_init(shape=(self.batch_size, self.rnn_units),
                                 dtype='float32'), trainable=True)

    def call(self, inputs):
        return self.gru(inputs, initial_state=self.w)
class LearningToSurpriseModel(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, rnn_units, batch_size):
        super().__init__(self)
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru_layer = CustomGRULayer(rnn_units=rnn_units, batch_size=batch_size)
        self.dense = tf.keras.layers.Dense(vocab_size)

    def call(self, inputs, states=None, return_state=False, training=False):
        x = inputs
        x = self.embedding(x, training=training)
        if states is None:
            states = self.gru_layer.gru.get_initial_state(x)
        x, states = self.gru_layer.gru(x, initial_state=states, training=training)
        x = self.dense(x, training=training)
        if return_state:
            return x, states
        else:
            return x

model = LearningToSurpriseModel(
    vocab_size=len(ids_from_chars.get_vocabulary()),
    embedding_dim=embedding_dim,
    rnn_units=rnn_units,
    batch_size=BATCH_SIZE
)

model.compile(optimizer='adam',
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
              )

EarlyS = EarlyStopping(monitor='val_loss', mode='min', restore_best_weights=True, patience=10, verbose=1)

# defining a custom callback for resetting states at the end of period only
gru_layer = model.layers[1]

class CustomCallback(tf.keras.callbacks.Callback):
    def __init__(self, gru_layer):
        self.gru_layer = gru_layer
    def on_epoch_end(self, epoch, logs=None):
        self.gru_layer.gru.reset_states(self.gru_layer.w)

model.fit(train_dataset, validation_data=validation_dataset, epochs=EPOCHS,
          callbacks=[EarlyS, CustomCallback(gru_layer)], verbose=1)
I am trying to learn how to write a custom layer; I followed the steps on keras.io.
Code -
class Linear(keras.layers.Layer):
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        print('inputs', inputs.shape)
        for index in range(inputs.shape[0]):
            ...
        return tf.matmul(inputs, self.w) + self.b
This shows the error -
TypeError: in user code:

    <ipython-input-3-314addf0c624>:39 call  *
        for index in range(inputs.shape[0]):
    /usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/operators/py_builtins.py:365 range_  **
        return _py_range(start_or_stop, stop, step)
    /usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/operators/py_builtins.py:390 _py_range
        return range(start_or_stop)

    TypeError: 'NoneType' object cannot be interpreted as an integer
When I run this Linear class on its own, it works fine, but when I run this layer as part of a trainable model, it shows this error.
How can I solve this? Thanks.
By default, the shape of inputs is [batch_size, width, height, channels], and when you create your model, batch_size is set to None.
import os
# os.environ['KERAS_BACKEND'] = 'theano'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # suppress Tensorflow messages
import tensorflow as tf
from keras.layers import *
from keras.models import *

class CustomLinear(Layer):
    def __init__(self, batch_size, units=32, input_dim=32):
        super(CustomLinear, self).__init__()
        self.batch_size = batch_size
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        print('inputs', inputs.shape)
        # for index in range(self.batch_size):
        #     print(index)
        return tf.matmul(inputs, self.w) + self.b

batch_size = 10
model = Sequential()
model.add(Input(shape=(2, 32)))
model.add(CustomLinear(batch_size=batch_size))  # inputs (None, 2, 32)

x = tf.random.normal((batch_size, 2, 32))  # dummy data
model(x)  # inputs (10, 2, 32)
Mostly, the batch size is not required for the calculations within the layer. But if you still need it, you can add an argument (e.g. batch_size) to your CustomLinear, define your batch_size beforehand, and access it inside the call method.
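Alternatively (a sketch, not part of the answer above; DynamicLinear is a hypothetical name): if the batch size is only needed at run time, tf.shape(inputs)[0] yields it as a dynamic tensor even when the static shape is None.

import tensorflow as tf

class DynamicLinear(tf.keras.layers.Layer):
    def __init__(self, units=32, input_dim=32):
        super().__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )

    def call(self, inputs):
        batch_size = tf.shape(inputs)[0]     # dynamic value, defined even
        tf.print("batch size:", batch_size)  # when the static shape is None
        return tf.matmul(inputs, self.w)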
I am trying to run the following code (as given in the TensorFlow documentation) to create windows of my data and then flatten the dataset of datasets.
window_size = 5
windows = range_ds.window(window_size, shift=1)
for sub_ds in windows.take(5):
    print(sub_ds)

flat_windows = windows.flat_map(lambda x: x)
The problem is that flat_windows.cardinality().numpy() reports the cardinality as -2, which is creating problems for me during training. I tried looking for ways to set the cardinality of a dataset but couldn't find anything. I also tried other ways of flattening a dataset of datasets, but again with no success.
Edit-1: The problem with training is that the shape is unknown (at the Linear and Dense layers) when I train a subclassed model (given below). The model trains well when I run it eagerly (through tf.config.run_functions_eagerly(True)), but that is slow. Therefore I want the shape of the input data to be known for model training.
Neural Network
class NeuralNetworkModel(tf.keras.Model):
    def __init__(self):
        super(NeuralNetworkModel, self).__init__()
        self.encoder = Encoder()

    def train_step(self, inputs):
        X = inputs[0]
        Y = inputs[1]
        with tf.GradientTape() as tape:
            enc_X = self.encoder(X)
            enc_Y = self.encoder(Y)
            # loss:
            loss = tf.norm(enc_Y - enc_X, axis=[0, 1], ord='fro')
        # Compute gradients
        trainable_vars = self.encoder.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Compute our own metrics
        loss_tracker.update_state(loss)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {"loss": loss_tracker.result()}

    @property
    def metrics(self):
        # We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch
        # or at the start of `evaluate()`.
        # If you don't implement this property, you have to call
        # `reset_states()` yourself at the time of your choosing.
        return [loss_tracker]

    def test_step(self, inputs):
        X = inputs[0]
        Y = inputs[1]
        Psi_X = self.encoder(X)
        Psi_Y = self.encoder(Y)
        # loss:
        loss = tf.norm(Psi_Y - Psi_X, axis=[0, 1], ord='fro')
        # Compute our own metrics
        loss_tracker.update_state(loss)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {"loss": loss_tracker.result()}
class Encoder(tf.keras.Model):
    def __init__(self):
        super(Encoder, self).__init__(dtype='float64', name='Encoder')
        self.input_layer = DenseLayer(128)
        self.hidden_layer1 = DenseLayer(128)
        self.hidden_layer2 = DenseLayer(64)
        self.hidden_layer3 = DenseLayer(64)
        self.output_layer = LinearLayer(64)

    def call(self, input_data, training):
        fx = self.input_layer(input_data)
        fx = self.hidden_layer1(fx)
        fx = self.hidden_layer2(fx)
        fx = self.hidden_layer3(fx)
        return self.output_layer(fx)

class LinearLayer(tf.keras.layers.Layer):
    def __init__(self, units):
        super(LinearLayer, self).__init__(dtype='float64')
        self.units = units

    def build(self, input_shape):
        input_dim = input_shape[-1]
        self.w = self.add_weight(shape=(input_dim, self.units),
                                 initializer="random_normal",
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

class DenseLayer(tf.keras.layers.Layer):
    def __init__(self, units):
        super(DenseLayer, self).__init__(dtype='float64')
        self.units = units

    def build(self, input_shape):
        input_dim = input_shape[-1]
        self.w = self.add_weight(shape=(input_dim, self.units),
                                 initializer="random_normal",
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)

    def call(self, inputs):
        x = tf.matmul(inputs, self.w) + self.b
        return tf.nn.elu(x)
I was wondering about this as well. It turns out that -2 is tf.data.UNKNOWN_CARDINALITY (https://www.tensorflow.org/api_docs/python/tf/data#UNKNOWN_CARDINALITY), which indicates that TF doesn't know how many elements flat_map returns per item.
I just asked Windowing a TensorFlow dataset without losing cardinality information? to see if anyone knows a way to window datasets without losing cardinality.
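In the meantime, one workaround sketch (my own, using tf.data.experimental.assert_cardinality, available in TF 2.3+): if you can compute the element count yourself, you can stamp it back onto the flattened dataset.

import tensorflow as tf

range_ds = tf.data.Dataset.range(20)  # stand-in for the original dataset
window_size = 5
windows = range_ds.window(window_size, shift=1, drop_remainder=True)
flat_windows = windows.flat_map(lambda x: x)

# With shift=1 and drop_remainder=True there are 20 - 5 + 1 = 16 windows,
# each contributing window_size elements after flat_map.
n_elements = (20 - window_size + 1) * window_size
flat_windows = flat_windows.apply(
    tf.data.experimental.assert_cardinality(n_elements))
print(flat_windows.cardinality().numpy())  # 80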
I want to create a Linear layer computing y = w*x + b, where w is a matrix [Mxd], x is a matrix [dx1], and b is the bias, a matrix [Mx1]; M is the number of units in the layer and d is the dimension of the vector x.
My code looks like:
class Linear(keras.layers.Layer):
    def __init__(self, units=32):
        super(Linear, self).__init__()
        self.units = units
        print(self.units)

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(self.units, input_shape[1]),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units, 1), initializer="random_normal", trainable=True
        )
        print(input_shape[1])

    def call(self, inputs):
        return tf.matmul(self.w, inputs) + self.b

    def get_output_shape_for(self, input_shape):
        return (self.units, 1)
And when I create the model I use:
input_tensor1 = Input((2,1))
L11 = Linear(32)(input_tensor1)
model = Model(inputs=input_tensor1, outputs=L11)
model.summary()
In this code, d=2 and M=32.
As I understand y = w*x + b, y should be a matrix [Mx1], since w is [Mxd], x is [dx1], and b is [Mx1]. But when I call model.summary(), the output shape is (None, 2, 1).
Can anyone explain this? Where is my code wrong?
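For what it's worth, a quick check outside Keras (a sketch; the batch size of 4 is an arbitrary choice of mine) shows that the batched matmul itself does produce [batch, M, 1], because tf.matmul broadcasts the rank-2 w over the batch dimension. Note also that in TF2 Keras the shape-inference hook is compute_output_shape; get_output_shape_for is the old Keras 1 name and is never called.

import tensorflow as tf

w = tf.random.normal((32, 2))    # [M, d]
x = tf.random.normal((4, 2, 1))  # [batch, d, 1]
b = tf.random.normal((32, 1))    # [M, 1]

y = tf.matmul(w, x) + b          # w broadcasts over the batch dimension
print(y.shape)                   # (4, 32, 1), i.e. [batch, M, 1]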
I've been trying to recreate a simple DNN using just the base Keras Layer class and writing everything from scratch. Everything seems to work just fine, but during the training loop I get this error:
AttributeError: 'SparseCategoricalCrossentropy' object has no attribute '_id'
I've tried changing the loss function to either CategoricalCrossentropy or SparseCategoricalCrossentropy (with from_logits True or False), but the error always pops up.
Here's the code:
import numpy as np
import tensorflow as tf
from tensorflow import keras

from utils import plot_image, plot_mnist_results, plot_value_array

class Flatten(keras.layers.Layer):
    def __init__(self):
        super(Flatten, self).__init__()

    def build(self, input_shape):
        self.output_size = np.prod(input_shape)

    def call(self, X):
        return tf.reshape(X, shape=(-1, self.output_size))

class Dense(keras.layers.Layer):
    def __init__(self, units, activation):
        super(Dense, self).__init__()
        self.units = units
        self.activation = activation

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name='kernel',
            dtype=tf.float64,
            initializer='glorot_normal',
            trainable=True,
            shape=(input_shape[-1], self.units)
        )
        self.bias = self.add_weight(
            name='bias',
            dtype=tf.float64,
            initializer=keras.initializers.Constant(0.1),
            trainable=True,
            shape=(1, self.units)
        )

    def call(self, X):
        return self.activation(tf.matmul(X, self.kernel) + self.bias)

class DNN(keras.models.Model):
    def __init__(self, units, activation):
        super(DNN, self).__init__()
        self.units = units
        self.activation = activation

    def build(self, input_shape):
        self.flatten = Flatten()
        self.hidden_layer = Dense(self.units, tf.nn.relu)
        self.output_layer = Dense(10, tf.nn.softmax)

    def call(self, X):
        print(self.hidden_layer(self.flatten(X)).shape)
        print(self.output_layer(self.hidden_layer(self.flatten(X))).shape)
        return self.output_layer(self.hidden_layer(self.flatten(X)))

# @tf.function
def train(model, loss, opt, X, y):
    with tf.GradientTape() as tape:
        gradients = tape.gradient(loss(model(X), y), model.trainable_variables)
        gradient_variables = zip(gradients, model.trainable_variables)
        opt.apply_gradients(gradient_variables)

mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

train_images = train_images / 255.0
test_images = test_images / 255.0

model = DNN(units=128, activation=tf.nn.relu)
opt = tf.optimizers.Adam(learning_rate=1e-3)

for epoch in range(3):
    for step in range(train_labels.shape[0]):
        loss = keras.losses.SparseCategoricalCrossentropy
        train(model, loss, opt, train_images[step, :, :], train_labels[step])

    train_loss = loss(model(train_images), train_labels)
    template = 'Epoch {}, Train loss: {:.5f}'
    print(template.format(epoch + 1, train_loss.numpy()))
I would expect the model to train successfully, but that doesn't seem to be the case. What am I doing wrong?
From the given code, I can see that you are using tf and keras intermixed in places like the ones given below.
opt = tf.optimizers.Adam(learning_rate=1e-3)
loss = keras.losses.SparseCategoricalCrossentropy
This can raise issues like the one you are seeing. For TensorFlow 2.0, you can use tf.keras uniformly in all the places where you currently use keras directly.
Also, I can see that you are instantiating the loss object inside the batch loop, which is not correct. You should instantiate it once, before starting your epoch loop.
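A sketch of the corrected pieces (my reconstruction, not code from the question): note that keras.losses.SparseCategoricalCrossentropy without parentheses binds the class rather than an instance, and the instantiated loss expects y_true as its first argument.

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()  # note the ()
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)

def train(model, loss_fn, opt, X, y):
    with tf.GradientTape() as tape:
        loss_value = loss_fn(y, model(X))  # y_true first, then y_pred
    gradients = tape.gradient(loss_value, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))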
Everything else seems fine. Hope this helps!