I am trying to implement the Graph Convolution Layer described in the following paper using a Keras custom layer: GCNN.
When I try to train my model, it gives me the following error:
Traceback (most recent call last):
File "main.py", line 35, in <module>
model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=50, batch_size=32)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 1010, in fit
self._make_train_function()
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 509, in _make_train_function
loss=self.total_loss)
File "/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/optimizers.py", line 256, in get_updates
grads = self.get_gradients(loss, params)
File "/usr/local/lib/python2.7/dist-packages/keras/optimizers.py", line 91, in get_gradients
raise ValueError('An operation has `None` for gradient. '
ValueError: An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.
I don't know how to get rid of this problem.
Can someone briefly explain what I should do?
I have gone through the official Keras documentation about writing custom layers, but it doesn't cover this. Link
Following is the code for my custom layer.
class GraphConvolutionalLayer(Layer):
    def __init__(self, A, num_input_features, num_output_features, **kwargs):
        self.A = A
        self.num_input_features = num_input_features
        self.num_output_features = num_output_features
        self.num_vertices = A.get_shape().as_list()[0]
        self.input_spec = (self.num_vertices, num_input_features)
        super(GraphConvolutionalLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.k0 = self.add_weight(name='k0',
                                  shape=(self.num_output_features, self.num_input_features),
                                  initializer='uniform',
                                  trainable=True)
        self.k1 = self.add_weight(name='k1',
                                  shape=(self.num_output_features, self.num_input_features),
                                  initializer='uniform',
                                  trainable=True)
        self.H = tf.einsum('ab,cd->abcd', tf.convert_to_tensor(self.k0, dtype=tf.float32), tf.eye(self.num_vertices))
        self.built = True

    def call(self, Vin):
        Vin2 = tf.reshape(tf.transpose(Vin, [0, 2, 1]), [Vin.get_shape().as_list()[1] * Vin.get_shape().as_list()[2], -1])
        H_tmp = tf.reshape(tf.transpose(self.H, [0, 2, 1, 3]), [self.num_output_features, self.num_vertices, self.num_vertices * self.num_input_features])
        Vout = tf.transpose(K.dot(H_tmp, Vin2), [2, 1, 0])
        return Vout

    def compute_output_shape(self, input_shape):
        return (self.num_vertices, self.num_output_features)
Following is the code for the main file.
main_input = Input(shape=train_images[0].shape)
Vout1 = GraphConvolutionalLayer(A, 1, 4)(main_input)
Vout2 = GraphConvolutionalLayer(A, 4, 8)(Vout1)
Vout3 = Flatten()(Vout2)
Vout4 = Dense(10, activation='sigmoid')(Vout3)
print(train_images.shape, train_labels.shape)
model = Model(inputs=main_input, outputs=Vout4)
print(model.summary())
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=50, batch_size=32)
Here I used uniform as the initializer. When I changed it, I didn't get any error. I don't know why this happened, but I was able to solve my error just by changing that line.
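For reference, a minimal sketch of the change, assuming for example 'glorot_uniform' as the replacement (any standard Keras initializer string works the same way); the rest of build() stays as above:

self.k0 = self.add_weight(name='k0',
                          shape=(self.num_output_features, self.num_input_features),
                          initializer='glorot_uniform',  # was 'uniform'
                          trainable=True)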
As the error states, some of your operations are non-differentiable. It's not easy to say exactly why it happens. For example, take a look at:
List of Differentiable Ops in Tensorflow
How to make sure your computation graph is differentiable
Edit: Consider this example, where I use the standard CIFAR-10 data.
class GraphConvolutionalLayer(layers.Layer):
    def __init__(self, A, num_input_features, num_output_features, **kwargs):
        #self.A = A
        self.num_input_features = num_input_features
        self.num_output_features = num_output_features
        self.num_vertices = A
        self.input_spec = (self.num_vertices, num_input_features)
        super(GraphConvolutionalLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.k0 = self.add_weight(name='k0',
                                  shape=(self.num_output_features, self.num_input_features),
                                  initializer='uniform',
                                  trainable=True)
        self.H = tf.einsum('ab,cd->abcd', tf.convert_to_tensor(self.k0, dtype=tf.float32), tf.eye(self.num_vertices))
        self.H = tf.reshape(self.H, [32*32, 3])
        self.built = True

    def call(self, Vin):
        Vin2 = tf.reshape(Vin, [Vin.get_shape().as_list()[1] * Vin.get_shape().as_list()[1], Vin.get_shape().as_list()[-1]])
        Vin2 = tf.transpose(Vin2)
        Vout = tf.matmul(self.H, Vin2)
        return Vout

def input_fn():
    train, test = tf.keras.datasets.cifar10.load_data()
    dataset = tf.data.Dataset.from_tensor_slices((train[0], train[1]))
    dataset = dataset.batch(1)
    return dataset
main_input = layers.Input(shape=[32, 32, 3])
Vout1 = GraphConvolutionalLayer(32, 3, 1)(main_input)
Vout3 = layers.Flatten()(Vout1)
Vout4 = layers.Dense(10, activation='sigmoid')(Vout3)
model = Model(inputs=main_input, outputs=Vout4)
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
model.fit(input_fn(), epochs=50, steps_per_epoch=10)
In this case gradients are computed, so the problem is clearly not in how you construct GraphConvolutionalLayer but in some internal operation that depends on the data. You need to check every op one by one with your data shapes.
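One rough way to locate the offending op is to ask TensorFlow for the gradients directly and look for None entries. This is only a sketch for TF 1.x graph mode, assuming the model has already been compiled so that model.total_loss exists:

import tensorflow as tf

# gradients of the compiled loss w.r.t. every trainable weight;
# a None entry points to a weight whose path through the graph is broken
grads = tf.gradients(model.total_loss, model.trainable_weights)
for w, g in zip(model.trainable_weights, grads):
    print(w.name, 'None gradient' if g is None else 'ok')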
P.S. You can try substituting einsum with matmul, since the former is essentially a syntactic wrapper around the latter.
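For the specific einsum used above, which is just an outer product of the kernel with the identity matrix, an equivalent formulation is tf.tensordot with axes=0. A small sketch, where k0 and num_vertices stand in for the layer's weight and vertex count:

# 'ab,cd->abcd' contracts no axes, i.e. an outer product,
# so tensordot over zero axes produces the same rank-4 tensor
H1 = tf.einsum('ab,cd->abcd', k0, tf.eye(num_vertices))
H2 = tf.tensordot(k0, tf.eye(num_vertices), axes=0)  # same values, same shape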
Related
I have two concatenated models, a VAE and a GAN, and I need to apply a sharpening filter to the output of the VAE using a custom layer before feeding its output to the GAN. My model is as follows:
Here is the code for building the models:
from keras.layers import Input, Layer
i = Input(shape=(1,256,256))
x = vae(i)
x1 = Sharpen(num_outputs=(x.shape))(x) # MY CUSTOM LAYER
x2 = GAN(x1)
g = Model(i, [x1, x2])
and the code for the custom layer (taken from here) is as follows:
class Sharpen(Layer):
    def __init__(self, num_outputs):
        super(Sharpen, self).__init__()
        self.num_outputs = num_outputs

    def build(self, input_shape):
        self.kernel = np.array([[-2, -2, -2],
                                [-2, 17, -2],
                                [-2, -2, -2]])
        self.kernel = tf.expand_dims(self.kernel, 0)
        self.kernel = tf.expand_dims(self.kernel, 0)
        self.kernel = tf.cast(self.kernel, tf.float32)

    def call(self, input_shape):
        return Convolution2D(input_shape, self.kernel, subsample=(1, 1), border_mode='same')
The error message I get is this:
File "C:\Users\User\AppData\Local\Temp\ipykernel_16036\1078305279.py", line 1, in <module>
x1 = Sharpen(num_outputs=(x.shape))(x)
File "C:\Users\User\anaconda3\envs\keras122_tf115__GPU\lib\site-
packages\keras\engine\topology.py", line 572, in __call__
self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
File "C:\Users\User\anaconda3\envs\keras122_tf115__GPU\lib\site-
packages\keras\engine\topology.py", line 635, in add_inbound_node
Node.create_node(self, inbound_layers, node_indices, tensor_indices)
File "C:\Users\User\anaconda3\envs\keras122_tf115__GPU\lib\site-
packages\keras\engine\topology.py", line 166, in create_node
output_tensors = to_list(outbound_layer.call(input_tensors[0], mask=input_masks[0]))
TypeError: call() got an unexpected keyword argument 'mask'
Note: I am using TF 1.15, Keras 1.2.2, and Python 3.7.
Please, any help is highly appreciated.
I am trying to train my neural network. The forward pass through the model works, but I can't calculate the loss. The output and the target have the same dimensions.
I tried to use torch.stack, but I can't, because the size of each input is (252, x), where x is the same across the 252 elements but differs between inputs.
I use a custom Dataset:
class MusicDataSet(Dataset):
    def __init__(self, transform=None):
        self.ms, self.target, self.tam = sd.cargarDatos()
        self.mean, self.std = self.NormalizationValues()

    def __len__(self):
        return self.tam

    def __getitem__(self, idx):
        # Normalize
        inp = (self.ms[idx]-self.mean)/self.std
        inp = torch.from_numpy(inp).float()
        inp = inp.t()
        inp = inp.to('cuda')

        target = torch.from_numpy(self.target[idx])
        target = target.long()
        target = target.t()
        target = target.to('cuda')

        return inp, target
I must say that the list can't be cast with something like target = torch.Tensor() or torch.stack() because of the (252, x) shapes, as I have already said.
def music_collate_fn(batch):
    data = [item[0] for item in batch]
    data = pad_sequence(data, batch_first=True)
    target = [item[1] for item in batch]  # item[1] is the target; item[0] is the input
    target = pad_sequence(target, batch_first=True)
    return data, target
musicSet = mds.MusicDataSet()
train_loader = torch.utils.data.DataLoader(musicSet,batch_size=50, collate_fn = music_collate_fn, shuffle=False)
input_dim = 252
hidden_dim = (512,1024,512)
output_dim = 88
mlp = rn.MLP(input_dim, hidden_dim, output_dim).to(device)
optimizer = torch.optim.RMSprop(mlp.parameters(), lr = learning_rate)
criterion = nn.CrossEntropyLoss()
for batch_idx, (x, y) in enumerate(train_loader):
    outputs = mlp(x.to(device))
    loss = criterion(outputs, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
The sizes of the output and the target are the same:
output: torch.Size([50, 288, 88])
target: torch.Size([50, 288, 88])
But the following error appears when I try to calculate the loss:
File "<ipython-input-205-3c47d7aa11a4>", line 32, in <module>
loss = criterion(outputs, y)
File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\loss.py", line 904, in forward
ignore_index=self.ignore_index, reduction=self.reduction)
File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\functional.py", line 1970, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "C:\ProgramData\Anaconda3\lib\site-packages\torch
\nn\functional.py", line 1800, in nll_loss
out_size, target.size()))
ValueError: Expected target size (50, 88), got torch.Size([50, 288, 88])
I think you are using CrossEntropyLoss incorrectly. See the documentation here.
In particular, if the input is of shape [N x C x d], then the target should be of shape [N x d], and the values in the target are integers between 0 and C-1, i.e. you can just provide the class labels; it is not required to one-hot encode the target variable. The error message also states the same.
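As a concrete illustration with the shapes from the question, here is a sketch assuming the [50, 288, 88] target is a one-hot encoding over the 88 classes:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()

outputs = torch.randn(50, 288, 88)          # model output: (N, d, C)
targets_onehot = torch.zeros(50, 288, 88)   # one-hot targets as described
targets_onehot[..., 0] = 1

# CrossEntropyLoss expects input (N, C, d) and integer class labels of shape (N, d)
loss = criterion(outputs.permute(0, 2, 1),       # -> (50, 88, 288)
                 targets_onehot.argmax(dim=-1))  # -> (50, 288)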
I build a subclassed model with multiple outputs and use TensorFlow datasets (custom-defined datasets) as input.
I use Keras fit to train the model.
When I only use the train dataset, it runs. But once I pass a dataset of the same type as validation input, it fails like this:
'Error when checking model target: expected no data, but got:'
The data format is like 'tuple(data, (target[0], target[1]))'.
tensorflow-gpu==1.12, tensorflow.keras
Error information:
File "/home/god/anaconda3/envs/tensorflow_n/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1574, in fit
steps=validation_steps)
File "/home/god/anaconda3/envs/tensorflow_n/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 992, in _standardize_user_data
class_weight, batch_size)
File "/home/god/anaconda3/envs/tensorflow_n/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1154, in _standardize_weights
exception_prefix='target')
File "/home/god/anaconda3/envs/tensorflow_n/lib/python3.6/site-packages/tensorflow/python/keras/engine/training_utils.py", line 257, in standardize_input_data
'expected no data, but got:', data)
ValueError: ('Error when checking model target: expected no data, but got:', (<tf.Tensor 'IteratorGetNext_1:1' shape=(16, 16, 513) dtype=float32>, <tf.Tensor 'IteratorGetNext_1:2' shape=(16, 16, 513) dtype=float32>))
Simplified code that causes the same error:
import tensorflow as tf
class Model(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense1 = tf.keras.layers.Dense(units=16)
        self.dense2 = tf.keras.layers.Dense(units=16)

    def compute_output_shape(self, input_shape):
        return input_shape, input_shape

    def call(self, inputs, training=None, mask=None):
        out1 = self.dense1(inputs)
        out2 = self.dense2(inputs)
        return out1, out2

train_dataset = tf.data.Dataset.from_tensor_slices(
    (tf.constant(0., shape=[1024, 16]),
     (tf.constant(0., shape=[1024, 16]), tf.constant(0., shape=[1024, 16])))).repeat().batch(32)
valid_dataset = tf.data.Dataset.from_tensor_slices(
    (tf.constant(0., shape=[128, 16]),
     (tf.constant(0., shape=[128, 16]), tf.constant(0., shape=[128, 16])))).repeat(1).batch(32)

model = Model()
model.compile(
    optimizer=tf.train.AdamOptimizer(learning_rate=1e-4),
    loss=[tf.keras.losses.mse, tf.keras.losses.mse],
    loss_weights=[1, 1]
)
model.fit(
    train_dataset,
    validation_data=valid_dataset,
    epochs=10,
    steps_per_epoch=30,
    validation_steps=4,
)
Keras is not PyTorch; you should not subclass Model unless you have a very advanced reason.
inputs = Input(input_shape)
out1 = Dense(16)(inputs)
out2 = Dense(16)(inputs)
model = tf.keras.Model(inputs, [out1,out2])
Separate x and y when training:
x_train = your_tuple[0]
y_train = your_tuple[1]
model.fit(x_train, y_train, ....)
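Put together for the simplified code in the question, a sketch of the functional version, reusing the train_dataset and valid_dataset defined there (dataset elements of the form (x, (y1, y2)) can be passed to fit directly; whether this alone resolves the validation error depends on your TF version):

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense

# functional equivalent of the two-output subclassed model
inputs = Input(shape=(16,))
out1 = Dense(16)(inputs)
out2 = Dense(16)(inputs)
model = tf.keras.Model(inputs, [out1, out2])

model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4),
              loss=[tf.keras.losses.mse, tf.keras.losses.mse],
              loss_weights=[1, 1])

model.fit(train_dataset,
          validation_data=valid_dataset,
          epochs=10, steps_per_epoch=30, validation_steps=4)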
This is a piece of code I got from GitHub for a hierarchical attention network. The code was originally written for Keras 1.2.2, and now I have to change it to work with Keras 2.0.5; however, it produces error messages that I cannot solve.
The original code is the following:
MAX_SENT_LENGTH = 100
MAX_SENTS = 20
MAX_NB_WORDS = 276176
EMBEDDING_DIM = 128
VALIDATION_SPLIT = 0.1
# Feed the data
# Here you have source data
x_train = np.load('./data/X_full_train_data.npy')
y_train = np.load('./data/X_full_train_labels.npy')
x_val = np.load('./data/X_full_test_data.npy')
y_val = np.load('./data/X_full_test_labels.npy')
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y_train)))
x_train = x_train[shuffle_indices]
y_train = y_train[shuffle_indices]
shuffle_indices = np.random.permutation(np.arange(len(y_val)))
x_val = x_train[shuffle_indices]
y_val = y_train[shuffle_indices]
with open("./data/W.npy", "rb") as fp:
embedding_weights = np.load(fp)
# here you feed embeding matrix
embedding_layer = Embedding(MAX_NB_WORDS,
EMBEDDING_DIM,
weights=[embedding_weights],
input_length=MAX_SENT_LENGTH,
trainable=True)
# building Hierachical Attention network
class AttLayer(Layer):
    def __init__(self, **kwargs):
        self.init = initializers.get('normal')
        super(AttLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = self.init((input_shape[-1],))
        self.trainable_weights = [self.W]
        super(AttLayer, self).build(input_shape)

    def call(self, x, mask=None):
        eij = K.tanh(K.dot(x, self.W))
        ai = K.exp(eij)
        weights = ai/K.sum(ai, axis=1).dimshuffle(0, 'x')
        weighted_input = x*weights.dimshuffle(0, 1, 'x')
        ret = weighted_input.sum(axis=1)
        return ret

    #def get_output_shape_for(self, input_shape):
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
sentence_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)
l_dense = TimeDistributed(Dense(200))(l_lstm)
l_att = AttLayer()(l_lstm)
sentEncoder = Model(sentence_input, l_att)
review_input = Input(shape=(MAX_SENTS,MAX_SENT_LENGTH), dtype='int32')
review_encoder = TimeDistributed(sentEncoder)(review_input)
l_lstm_sent = Bidirectional(GRU(100, return_sequences=True))(review_encoder)
l_dense_sent = TimeDistributed(Dense(200))(l_lstm_sent)
l_att_sent = AttLayer()(l_lstm_sent)
preds = Dense(3, activation='softmax')(l_att_sent)
model = Model(input=review_input, output=preds)
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['categorical_accuracy'])
print("model fitting - Hierachical attention network")
print(model.summary())
model.fit(x_train, y_train, nb_epoch=10, batch_size=32, validation_data=(x_val,y_val))
predictions = model.predict(x_val)
score, acc = model.evaluate(x_val, y_val,batch_size=32)
Then I get the following error:
textClassifierHATT.py:235: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.
model.fit(x_train, y_train, nb_epoch=10, batch_size=32, validation_data=(x_val,y_val))
Traceback (most recent call last):
File "textClassifierHATT.py", line 235, in <module>
model.fit(x_train, y_train, nb_epoch=10, batch_size=32, validation_data=(x_val,y_val))
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/training.py", line 1575, in fit
self._make_train_function()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/training.py", line 960, in _make_train_function
loss=self.total_loss)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 87, in wrapper
return func(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/optimizers.py", line 226, in get_updates
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/optimizers.py", line 226, in <listcomp>
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/backend/theano_backend.py", line 275, in int_shape
raise TypeError('Not a Keras tensor:', x)
TypeError: ('Not a Keras tensor:', Elemwise{add,no_inplace}.0)
The Keras model compiles successfully in model.compile(), but it errors out in model.fit(), and I totally don't understand why this error occurs. Can anyone tell me how to modify it so that it runs with Keras 2.0? Thanks a lot.
The problem is in the build method of your custom layer. According to Keras' documentation, you need to create the weights with the self.add_weight function:
def build(self, input_shape):
    assert len(input_shape) == 3
    self.W = self.add_weight(name='kernel',
                             shape=(input_shape[-1],),
                             initializer='normal',
                             trainable=True)
    super(AttLayer, self).build(input_shape)
That and a few API changes:
The input and output parameters changed to Model(inputs=..., outputs=...)
The nb_epoch parameter in fit is now called epochs
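Applied to the script above, those two changes look like this; the rest of the training code stays the same:

model = Model(inputs=review_input, outputs=preds)
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['categorical_accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val))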
The data provided for training is not a tensor but is fed as a NumPy array. Try converting the NumPy arrays to tensors using:
import tensorflow as tf
tf.convert_to_tensor(
    value, dtype=None, dtype_hint=None, name=None
)
Then pass them to the model for training.
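For example, a sketch applied to the arrays loaded above (whether this resolves the original error depends on how the rest of the pipeline consumes the data):

import tensorflow as tf

x_train = tf.convert_to_tensor(x_train, dtype=tf.float32)
y_train = tf.convert_to_tensor(y_train, dtype=tf.float32)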
I'm trying to adapt the Lasagne tutorial to an LSTM model. This is what my code looks like at the moment:
def build_lstm(input_var=None):
    num_inputs, num_units, num_classes = 4978, 300, 127

    l_inp = lasagne.layers.InputLayer((None, None, num_inputs))
    batchsize, seqlen, _ = l_inp.input_var.shape
    l_lstm = lasagne.layers.LSTMLayer(l_inp, num_units=num_units)
    l_shp = lasagne.layers.ReshapeLayer(l_lstm, (-1, num_units))
    l_dense = lasagne.layers.DenseLayer(l_shp, num_units=num_classes)
    l_out = lasagne.layers.ReshapeLayer(l_dense, (batchsize, seqlen, num_classes))
    return l_out
train_in, test_in, train_out, test_out = build_dataset()
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
myLSTMNetwork = build_lstm()
#Loss evaluation
prediction = lasagne.layers.get_output(myLSTMNetwork)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
I can build my model without errors, but after inserting the code related to loss evaluation I get this error:
File "code.py", line 110, in build_lstm
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/lasagne/objectives.py", line 146, in categorical_crossentropy
return theano.tensor.nnet.categorical_crossentropy(predictions, targets)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/theano/tensor/nnet/nnet.py", line 1906, in categorical_crossentropy
raise TypeError('rank mismatch between coding and true distributions')
TypeError: rank mismatch between coding and true distributions
My output layer should use batchsize * seqlen as the parameter, not batchsize, seqlen.
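A minimal sketch of that change, assuming the rest of build_lstm stays as above, so that the network's output is 2-D and matches the ivector targets expected by categorical_crossentropy:

# collapse batch and time into one axis: (batchsize * seqlen, num_classes)
l_out = lasagne.layers.ReshapeLayer(l_dense, (batchsize * seqlen, num_classes))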