AttributeError: 'SparseCategoricalCrossentropy' object has no attribute '_id' - python

I've been trying to recreate a simple DNN using just the base Keras layer and writing everything from scratch. Everything seems to work just fine, but during the training loop I get this error:
AttributeError: 'SparseCategoricalCrossentropy' object has no attribute '_id'
I've tried changing the loss function to either CategoricalCrossentropy or SparseCategoricalCrossentropy (with from_logits set to True or False), but the error always pops up.
Here's the code:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from utils import plot_image, plot_mnist_results, plot_value_array

class Flatten(keras.layers.Layer):
    def __init__(self):
        super(Flatten, self).__init__()

    def build(self, input_shape):
        self.output_size = np.prod(input_shape)

    def call(self, X):
        return tf.reshape(X, shape=(-1, self.output_size))

class Dense(keras.layers.Layer):
    def __init__(self, units, activation):
        super(Dense, self).__init__()
        self.units = units
        self.activation = activation

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name='kernel',
            dtype=tf.float64,
            initializer='glorot_normal',
            trainable=True,
            shape=(input_shape[-1], self.units)
        )
        self.bias = self.add_weight(
            name='bias',
            dtype=tf.float64,
            initializer=keras.initializers.Constant(0.1),
            trainable=True,
            shape=(1, self.units)
        )

    def call(self, X):
        return self.activation(tf.matmul(X, self.kernel) + self.bias)

class DNN(keras.models.Model):
    def __init__(self, units, activation):
        super(DNN, self).__init__()
        self.units = units
        self.activation = activation

    def build(self, input_shape):
        self.flatten = Flatten()
        self.hidden_layer = Dense(self.units, tf.nn.relu)
        self.output_layer = Dense(10, tf.nn.softmax)

    def call(self, X):
        print(self.hidden_layer(self.flatten(X)).shape)
        print(self.output_layer(self.hidden_layer(self.flatten(X))).shape)
        return self.output_layer(self.hidden_layer(self.flatten(X)))

# @tf.function
def train(model, loss, opt, X, y):
    with tf.GradientTape() as tape:
        gradients = tape.gradient(loss(model(X), y), model.trainable_variables)
        gradient_variables = zip(gradients, model.trainable_variables)
        opt.apply_gradients(gradient_variables)

mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0

model = DNN(units=128, activation=tf.nn.relu)
opt = tf.optimizers.Adam(learning_rate=1e-3)

for epoch in range(3):
    for step in range(train_labels.shape[0]):
        loss = keras.losses.SparseCategoricalCrossentropy
        train(model, loss, opt, train_images[step, :, :], train_labels[step])
    train_loss = loss(model(train_images), train_labels)
    template = 'Epoch {}, Train loss: {:.5f}'
    print(template.format(epoch + 1, train_loss.numpy()))
I would expect the model to train successfully, but that doesn't seem to be the case. What am I doing wrong?

From the given code, I can see that you are mixing tf and keras in places like these:
opt = tf.optimizers.Adam(learning_rate=1e-3)
loss = keras.losses.SparseCategoricalCrossentropy
This can cause issues like the one you are seeing. For TensorFlow 2.0, use tf.keras consistently everywhere you currently use keras directly.
Also, inside the batch loop you only assign the loss class rather than creating a loss object. Instantiate the loss once, before the epoch loop, and call that instance inside the loop.
Everything else looks fine. Hope this helps!
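As a minimal sketch of what that could look like (the names train_step and loss_fn are illustrative, not from the original code), the loss is created once with tf.keras and then called as loss_fn(y_true, y_pred) inside the tape:

import tensorflow as tf

# Sketch: create the loss and optimizer objects once, outside the loops.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)

def train_step(model, X, y):
    with tf.GradientTape() as tape:
        # Keras losses expect (y_true, y_pred) and return a tensor,
        # which is what tape.gradient needs as its target.
        loss_value = loss_fn(y, model(X))
    gradients = tape.gradient(loss_value, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
    return loss_value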

Related

Custom layer in Keras

I am trying to learn how to write a custom layer; I followed the steps on keras.io.
Code -
class Linear(keras.layers.Layer):
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        print('inputs', inputs.shape)
        for index in range(inputs.shape[0]):
            ...
        return tf.matmul(inputs, self.w) + self.b
This shows the error -
TypeError: in user code:
<ipython-input-3-314addf0c624>:39 call *
for index in range(inputs.shape[0]):
/usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/operators/py_builtins.py:365 range_ **
return _py_range(start_or_stop, stop, step)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/autograph/operators/py_builtins.py:390 _py_range
return range(start_or_stop)
TypeError: 'NoneType' object cannot be interpreted as an integer
When I run this Linear class separately, it works fine, but when I run this layer as part of a trainable model, it shows this error.
How can I solve this? Thanks.
By default, the shape of inputs is [batch_size, width, height, channels], and when you create your model, batch_size is set to None.
import os
# os.environ['KERAS_BACKEND'] = 'theano'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # suppress Tensorflow messages
import tensorflow as tf
from keras.layers import *
from keras.models import *

class CustomLinear(Layer):
    def __init__(self, batch_size, units=32, input_dim=32):
        super(CustomLinear, self).__init__()
        self.batch_size = batch_size
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        print('inputs', inputs.shape)
        # for index in range(self.batch_size):
        #     print(index)
        return tf.matmul(inputs, self.w) + self.b

batch_size = 10
model = Sequential()
model.add(Input(shape=(2, 32)))
model.add(CustomLinear(batch_size=batch_size))  # inputs (None, 2, 32)

x = tf.random.normal((batch_size, 2, 32))  # dummy data
model(x)  # inputs (10, 2, 32)
In most cases, batch_size is not required for the calculations within the layer. But if you still need it, you can add an argument (e.g. batch_size) to your CustomLinear, define your batch_size beforehand, and access it inside the call function.
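If the batch size is only needed at runtime, another option (a sketch, not part of the original answer) is to read it dynamically with tf.shape, which yields a concrete value during execution even when the static dimension is None:

import tensorflow as tf

class DynamicBatchLinear(tf.keras.layers.Layer):
    # Sketch: reads the runtime batch size instead of relying on the
    # static (possibly None) inputs.shape[0].
    def call(self, inputs):
        batch_size = tf.shape(inputs)[0]  # dynamic scalar tensor, defined at runtime
        # If per-example iteration is really needed, iterate over
        # tf.range(batch_size) inside the graph instead of Python's range().
        return inputs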

How to set layer weights during training tensorflow

In every forward pass of the model, I want to apply L2 normalization to the softmax layer's columns and then set the weights back, as per the imprinted weights paper and this pytorch implementation. I am using layer.set_weights() to set the normalized weights during the call() function of the model, but this implementation only works with eager execution, as something goes wrong with layer.set_weights() when building the graph.
Here is the implementation of the model in tf 1.15:
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import Model
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense

class Extractor(Model):
    def __init__(self, input_shape):
        super(Extractor, self).__init__()
        self.basenet = ResNet50(include_top=False, weights="imagenet",
                                pooling="avg", input_shape=input_shape)

    def call(self, x):
        x = self.basenet(x)
        return(x)

class Embedding(Model):
    def __init__(self, num_nodes, norm=True):
        super(Embedding, self).__init__()
        self.fc = Dense(num_nodes, activation="relu")
        self.norm = norm

    def call(self, x):
        x = self.fc(x)
        if self.norm:
            x = tf.nn.l2_normalize(x)
        return x

class Classifier(Model):
    def __init__(self, n_classes, norm=True, bias=False):
        super(Classifier, self).__init__()
        self.n_classes = n_classes
        self.norm = norm
        self.bias = bias

    def build(self, inputs_shape):
        self.prediction = Dense(self.n_classes,
                                activation="softmax", use_bias=False)

    def call(self, x):
        if self.norm:
            w = self.prediction.trainable_weights
            if w:
                w = tf.nn.l2_normalize(w, axis=2)
                self.prediction.set_weights(w)
        x = self.prediction(x)
        return x

class Net(Model):
    def __init__(self, input_shape, n_classes, num_nodes, norm=True,
                 bias=False):
        super(Net, self).__init__()
        self.n_classes = n_classes
        self.num_nodes = num_nodes
        self.norm = norm
        self.bias = bias
        self.extractor = Extractor(input_shape)
        self.embedding = Embedding(self.num_nodes, norm=self.norm)
        self.classifier = Classifier(self.n_classes, norm=self.norm,
                                     bias=self.bias)

    def call(self, x):
        x = self.extractor(x)
        x = self.embedding(x)
        x = self.classifier(x)
        return x
The weight normalization can be found in the call step of the Classifier class, where I call .set_weights() after normalizing the weights.
Creating the model with model = Net(input_shape, n_classes, num_nodes) and using model(x) works, but model.predict() and model.fit() give me errors about .get_weights(). I can train the model in eager mode with gradient tape, but it is extremely slow.
Is there another way I can set the weights of a Dense layer during training in each forward call that lets me use the model outside of eager mode? When I say eager mode, I mean with tf.enable_eager_execution() at the start of the program.
Here is the error I get when I use model.predict(x) instead:
TypeError: len is not well defined for symbolic Tensors. (imprint_net_1/classifier/l2_normalize:0) Please call `x.shape` rather than `len(x)` for shape information.
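One possible workaround, sketched here under the assumption that using the normalized kernel in the forward pass is acceptable instead of writing it back with set_weights(): keep the normalization symbolic, so it also works when the graph is built. Note that the stored variable itself is left unnormalized in this sketch.

import tensorflow as tf
from tensorflow.keras.layers import Layer

class NormalizedClassifier(Layer):
    # Sketch only: applies the L2-normalized kernel on the fly instead of
    # calling set_weights() inside call(), so no eager-only API is needed.
    def __init__(self, n_classes):
        super(NormalizedClassifier, self).__init__()
        self.n_classes = n_classes

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name="kernel",
            shape=(input_shape[-1], self.n_classes),
            initializer="glorot_uniform",
            trainable=True)

    def call(self, x):
        # Normalize each class column of the kernel before the matmul.
        w = tf.nn.l2_normalize(self.kernel, axis=0)
        return tf.nn.softmax(tf.matmul(x, w))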

Get tensor element as int

I am writing a custom model with tf.keras, and in a recurrent node I need to get the value of my 1D input as an int.
That recurrent node needs to build a 1xN tensor whose elements are the result of N iterations of a function f(x).
So I created a numpy array of size N which is filled with one element at each iteration, and then I convert the numpy array to a tensor.
The problem is that I can't get the value of my 1D tensor as an int.
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers

def f(x):
    return 3*x

class myLayer(layers.Layer):
    def __init__(self, units=1, input_dim=1):
        super(myLayer, self).__init__()
        self.units = units
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(initial_value=w_init(shape=(input_dim, units),
                                                  dtype='float32'),
                             trainable=False)

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=False,
        )

    ####### IMPORTANT PART HERE ######
    def call(self, inputs):
        # In this example N = 20
        # Define numpy array
        x = np.zeros(20)
        # Set its first value to my 1D input # ERROR HERE
        x[0] = inputs[0]
        # Assign the other elements of x
        for i in range(1, 20):
            x[i] = f(x[i-1])
        # Cast to tensor
        return tf.constant(x, shape=(1, 20))

class Linear(layers.Layer):
    def __init__(self, units=1, input_dim=20):
        super(Linear, self).__init__()
        self.units = units
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(initial_value=w_init(shape=(input_dim, units),
                                                  dtype='float32'),
                             trainable=True)
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(initial_value=b_init(shape=(units,),
                                                  dtype='float32'),
                             trainable=True)

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

def build_model():
    model = tf.keras.Sequential([
        myLayer(),
        Linear()
    ])
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])
    # model.build([1])
    return model

class PrintDot(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        if epoch % 100 == 0: print(epoch)
        print('.', end='')

train_X = np.linspace(0, 99, num=100)
train_y = 2*train_X
train_X = train_X / np.linalg.norm(train_X)

model = build_model()
# print(model.summary())
epochs = 10
history = model.fit(train_X, train_y, epochs=epochs, validation_split=0.2, verbose=0, callbacks=[PrintDot()])
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
print(hist.tail())

# x = tf.ones((1,1)) * 0.21
# horse_layer = Horseshoe()
# y = horse_layer(x)
# print(y)
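A possible way around the conversion (a sketch, not part of the question, assuming the layer receives a (batch,) or (batch, 1) input): since N is a fixed Python constant, the recurrence can stay entirely in TensorFlow ops, so the symbolic input never has to be read as a numpy value or an int.

import tensorflow as tf
from tensorflow.keras import layers

def f(x):
    return 3 * x  # same toy recurrence, expressed with tensor ops

class RecurrenceLayer(layers.Layer):
    # Sketch: builds the (batch, n) tensor by unrolling the loop in the graph.
    def __init__(self, n=20):
        super(RecurrenceLayer, self).__init__()
        self.n = n

    def call(self, inputs):
        x = tf.reshape(inputs, (-1,))     # shape (batch,), still a tensor
        steps = [x]
        for _ in range(self.n - 1):       # n is a Python int, so this unrolls
            steps.append(f(steps[-1]))
        return tf.stack(steps, axis=1)    # shape (batch, n)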

Debugging neural network dropout problem for the probability not lying inside [0,1]

I tried to add a dropout rate to my neural network (NN) using torch and I got a strange error at the end. How can I fix it?
The idea is that I wrote the NN inside a function to make it easier to call. The function is the following:
(I personally think the problem lies inside the class of the NN, but for the sake of having a working example I'm including everything.)
def train_neural_network(data_train_X, data_train_Y, batch_size, learning_rate, graph=True, dropout=0.0):
    input_size = len(data_test_X.columns)
    hidden_size = 200
    num_classes = 4
    num_epochs = 120
    batch_size = batch_size
    learning_rate = learning_rate

    # The class of NN
    class NeuralNet(nn.Module):
        def __init__(self, input_size, hidden_size, num_classes, p=dropout):
            super(NeuralNet, self).__init__()
            self.fc1 = nn.Linear(input_size, hidden_size)
            self.fc2 = nn.Linear(hidden_size, hidden_size)
            self.fc3 = nn.Linear(hidden_size, num_classes)

        def forward(self, x, p=dropout):
            out = F.relu(self.fc1(x))
            out = F.relu(self.fc2(out))
            out = nn.Dropout(out, p)  # drop
            out = self.fc3(out)
            return out

    # Prepare data
    X_train = torch.from_numpy(data_train_X.values).float()
    Y_train = torch.from_numpy(data_train_Y.values).float()

    # Loading data
    train = torch.utils.data.TensorDataset(X_train, Y_train)
    train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size)

    net = NeuralNet(input_size, hidden_size, num_classes)
    # Loss
    criterion = nn.CrossEntropyLoss()
    # Optimiser
    optimiser = torch.optim.SGD(net.parameters(), lr=learning_rate)

    # Proper training
    total_step = len(train_loader)
    loss_values = []
    for epoch in range(num_epochs+1):
        net.train()
        train_loss = 0.0
        for i, (predictors, results) in enumerate(train_loader, 0):
            # Forward pass
            outputs = net(predictors)
            results = results.long()
            results = results.squeeze_()
            loss = criterion(outputs, results)
            # Backward and optimise
            optimiser.zero_grad()
            loss.backward()
            optimiser.step()
            # Update loss
            train_loss += loss.item()
        loss_values.append(train_loss / batch_size)
    print('Finished Training')
    return net
And when I call the function:
net = train_neural_network(data_train_X = data_train_X, data_train_Y = data_train_Y, batch_size = batch_size, learning_rate = learning_rate, dropout = 0.1)
The error is the following:
net = train_neural_network(data_train_X = data_train_X, data_train_Y = data_train_Y, batch_size = batch_size, learning_rate = learning_rate, dropout = 0.1)
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/dropout.py in __init__(self, p, inplace)
8 def __init__(self, p=0.5, inplace=False):
9 super(_DropoutNd, self).__init__()
---> 10 if p < 0 or p > 1:
11 raise ValueError("dropout probability has to be between 0 and 1, "
12 "but got {}".format(p))
RuntimeError: bool value of Tensor with more than one value is ambiguous
Why do you think there is an error? Before adding the dropout rate, everything was working. Additional points for you if you know how to implement a bias inside my network, for example on the hidden layer; I can't find any example online.
Change your architecture to this:
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes, p=dropout):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, num_classes)
        self.dropout = nn.Dropout(p=p)

    def forward(self, x):
        out = F.relu(self.fc1(x))
        out = F.relu(self.fc2(out))
        out = self.dropout(self.fc3(out))
        return out
Let me know if it works.
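On the bias follow-up question (not addressed in the answer above): nn.Linear already includes a bias term by default, so the hidden layers in the model have one unless it is explicitly disabled. A quick check:

import torch.nn as nn

fc_with_bias = nn.Linear(200, 200)                 # bias=True is the default
fc_without_bias = nn.Linear(200, 200, bias=False)
print(fc_with_bias.bias.shape)                     # torch.Size([200])
print(fc_without_bias.bias)                        # None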

How can I deal with this code error which happens in my custom layer in Keras?

I want to make a custom layer in Keras.
In this example, I use a variable to multiply the tensor, but I get this error:
in /keras/engine/training_arrays.py, line 304, in predict_loop
outs[i][batch_start:batch_end] = batch_out
ValueError: could not broadcast input array from shape (36) into shape (2)
I have actually checked this file, but found nothing. Is there something wrong in my custom layer?
# the definition of mylayer
from keras import backend as K
import keras
from keras.engine.topology import Layer

class mylayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(mylayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(1,), dtype='float32',
                                      trainable=True, initializer='uniform')
        super(mylayer, self).build(input_shape)

    def call(self, inputs, **kwargs):
        return self.kernel * inputs[0]

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1])

# the test of mylayer
from mylayer import mylayer
from tensorflow import keras as K
import numpy as np
from keras.layers import Input, Dense, Flatten
from keras.models import Model

x_train = np.random.random((2, 3, 4, 3))
y_train = np.random.random((2, 36))
print(x_train)

x = Input(shape=(3, 4, 3))
y = Flatten()(x)
output = mylayer((36, ))(y)
model = Model(inputs=x, outputs=output)
model.summary()
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=2)

hist = model.predict(x_train, batch_size=2)
print(hist)
print(model.get_layer(index=1).get_weights())
# So, is there something wrong in my custom layer?
In particular, training this net works fine, but when I try to use predict, it fails.
The shape of self.kernel * inputs[0] is (36,), but the expected output shape is (?, 36). Change it to:
def call(self, inputs, **kwargs):
    return self.kernel * inputs
If you want to output the weights of mylayer, you should use index=2 (index 1 is the Flatten layer).
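For intuition, here is a quick standalone check (illustrative values only) of why multiplying the (1,)-shaped kernel by the full batch keeps the (batch, 36) shape, while indexing inputs[0] drops the batch dimension and triggers the broadcast error:

import numpy as np

kernel = np.array([0.5], dtype='float32')       # same shape as self.kernel: (1,)
batch = np.ones((2, 36), dtype='float32')       # flattened batch from the model

print((kernel * batch).shape)                   # (2, 36) -- batch dimension kept
print((kernel * batch[0]).shape)                # (36,)  -- the shape Keras rejects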
