I am writing a custom layer using Keras that returns a tensor of zeros the first three times it is invoked and passes its inputs through unchanged afterwards. The code is the following:
class MyLayer(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.__iteration = 0
        self.__returning_zeros = None

    def build(self, input_shape):
        self.__returning_zeros = tf.zeros(shape=input_shape, dtype=tf.float32)

    def call(self, inputs):
        self.__iteration += 1
        if self.__iteration <= 3:
            return self.__returning_zeros
        else:
            return inputs
Unfortunately, if I try to build a model using this layer like this:
def build_model(input_shape, num_classes):
    input_layer = keras.Input(shape=input_shape, name='input')
    conv1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", name='conv1')(input_layer)
    maxpool1 = layers.MaxPooling2D(pool_size=(2, 2), name='maxpool1')(conv1)
    conv2 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", name='conv2')(maxpool1)
    mylayer = MyLayer()(conv2)
    maxpool2 = layers.MaxPooling2D(pool_size=(2, 2), name='maxpool2')(mylayer)
    flatten = layers.Flatten(name='flatten')(maxpool2)
    dropout = layers.Dropout(0.5, name='dropout')(flatten)
    dense = layers.Dense(num_classes, activation="softmax", name='dense')(dropout)
    return keras.Model(inputs=(input_layer,), outputs=dense)
I get the following error message
File "customlayerkeras.py", line 25, in build
self.__returning_zeros = tf.zeros(shape=input_shape, dtype=tf.float32)
ValueError: Cannot convert a partially known TensorShape (None, 13, 13, 64) to a Tensor.
It seems that, despite using the build method as suggested in the documentation, I am not able to retrieve the correct shape of the input.
How can I fix this problem?
EDIT:
I was overcomplicating the problem; the simplest solution is to just multiply the inputs by zero, like this:
def call(self, inputs):
    self.__iteration += 1
    if self.__iteration <= 3:
        return inputs * 0
    else:
        return inputs
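A side note (not in the original edit): tf.zeros_like achieves the same thing a bit more explicitly, producing a zero tensor with the same batch-including shape and dtype as the inputs, with no state needed in build. A minimal sketch:

def call(self, inputs):
    self.__iteration += 1
    if self.__iteration <= 3:
        return tf.zeros_like(inputs)
    return inputs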
Pretty sure you don't need the batch dimension, so you can do something like this:
class MyLayer(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.__iteration = 0
        self.__returning_zeros = None

    def build(self, input_shape):
        self.__returning_zeros = tf.zeros(shape=input_shape[1:], dtype=tf.float32)

    def call(self, inputs):
        self.__iteration += 1
        if self.__iteration <= 3:
            return inputs * self.__returning_zeros
            # or: return tf.repeat(self.__returning_zeros[None, ...], tf.shape(inputs)[0], axis=0)
        else:
            return inputs
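A quick eager-mode check of the fixed layer (a minimal sketch; the shapes are arbitrary): the first three calls yield all-zero outputs, later calls pass the inputs through.

import tensorflow as tf

layer = MyLayer()
x = tf.random.uniform((2, 13, 13, 64))
for i in range(4):
    y = layer(x)
    # sum of |y| is 0.0 for the first three calls, non-zero afterwards
    print(i + 1, float(tf.reduce_sum(tf.abs(y))))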
So I'm implementing the Generator of a GAN and I need the architecture shown below:
The problem is when I try to reshape the output of the Linear layer after BatchNorm and ReLU (Dense in the figure, as they used TensorFlow), it throws the error: TypeError: reshape(): argument 'input' (position 1) must be Tensor, not int
I understand the error but I can't find its solution.
Is there any other way to reshape within nn.Sequential instead of calling torch explicitly?
class Generator(nn.Module):
    def __init__(self, z_dim=100, im_chan=1, hidden_dim=64, rdim=9216):
        super(Generator, self).__init__()
        self.z_dim = z_dim
        self.gen = nn.Sequential(
            nn.Linear(z_dim, rdim),
            nn.BatchNorm2d(rdim, momentum=0.9),
            nn.ReLU(inplace=True),
            torch.reshape(rdim, (6, 6, 256)),  # ----> this line raises the TypeError
            self.make_gen_block(rdim, hidden_dim * 2),
            self.make_gen_block(hidden_dim * 2, hidden_dim),
            self.make_gen_block(hidden_dim, im_chan, final_layer=True),
        )

    def make_gen_block(self, input_channels, output_channels, kernel_size=1, stride=2, final_layer=False):
        if not final_layer:
            return nn.Sequential(
                nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride),
                nn.BatchNorm2d(output_channels),
                nn.ReLU(inplace=True)
            )
        else:
            return nn.Sequential(
                nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride),
                nn.Tanh()
            )

    def unsqueeze_noise(self, noise):
        return noise.view(len(noise), self.z_dim, 1, 1)

    def forward(self, noise):
        x = self.unsqueeze_noise(noise)
        return self.gen(x)

def get_noise(n_samples, z_dim, device='cpu'):
    return torch.randn(n_samples, z_dim, device=device)

# Testing the Gen arch
gen = Generator()
num_test = 100

# test the hidden block
test_hidden_noise = get_noise(num_test, gen.z_dim)
test_hidden_block = gen.make_gen_block(6, 6, kernel_size=1, stride=2)
test_uns_noise = gen.unsqueeze_noise(test_hidden_noise)
hidden_output = test_hidden_block(test_uns_noise)
In nn.Sequential, torch.nn.Unflatten can help you achieve the reshape operation.
For nn.Linear, the input shape is (N, *, H_in) and the output shape is (N, *, H_out). Note that the feature dimension is last, so unsqueeze_noise() is not useful here.
Based on the network structure, the arguments passed to make_gen_block are wrong.
I have checked the following code:
import torch
from torch import nn

class Generator(nn.Module):
    def __init__(self, z_dim=100, im_chan=1, hidden_dim=64, rdim=9216):
        super(Generator, self).__init__()
        self.z_dim = z_dim
        self.gen = nn.Sequential(
            nn.Linear(z_dim, rdim),
            nn.BatchNorm1d(rdim, momentum=0.9),  # use BN1d
            nn.ReLU(inplace=True),
            nn.Unflatten(1, (256, 6, 6)),
            self.make_gen_block(256, hidden_dim * 2, kernel_size=2),  # note arguments
            self.make_gen_block(hidden_dim * 2, hidden_dim, kernel_size=2),  # note kernel_size
            self.make_gen_block(hidden_dim, im_chan, kernel_size=2, final_layer=True),  # note kernel_size
        )

    def make_gen_block(self, input_channels, output_channels, kernel_size=1, stride=2, final_layer=False):
        if not final_layer:
            return nn.Sequential(
                nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride),
                nn.BatchNorm2d(output_channels),
                nn.ReLU(inplace=True)
            )
        else:
            return nn.Sequential(
                nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride),
                nn.Tanh()
            )

    def forward(self, x):
        return self.gen(x)

def get_noise(n_samples, z_dim, device='cpu'):
    return torch.randn(n_samples, z_dim, device=device)

gen = Generator()
num_test = 100
input_noise = get_noise(num_test, gen.z_dim)
output = gen(input_noise)
assert output.shape == (num_test, 1, 48, 48)
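As a sanity check on the kernel_size=2 choices (an aside, not from the original answer): ConvTranspose2d produces output size (in - 1) * stride + kernel_size, so each block exactly doubles the spatial size, taking 6 → 12 → 24 → 48.

# ConvTranspose2d output size: (in - 1) * stride + kernel_size
for size in (6, 12, 24):
    print((size - 1) * 2 + 2)  # -> 12, 24, 48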
I am creating a series of custom TensorFlow (version 2.4.1) layers and am running into a problem where the model summary shows zero trainable parameters. Below is a series of examples showing how everything is fine until I add the last custom layer.
Here are the imports and custom classes:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Input, ReLU,
                                     Layer)

class basic_conv_stack(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_conv_stack, self).__init__()
        self.conv1 = Conv2D(filters, kernel_size, strides, padding='same')
        self.bn1 = BatchNormalization()
        self.relu = ReLU()

    def call(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        return x

class basic_residual(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_residual, self).__init__()
        self.bcs1 = basic_conv_stack(filters, kernel_size, strides)
        self.bcs2 = basic_conv_stack(filters, kernel_size, strides)

    def call(self, x):
        x = self.bcs1(x)
        x = self.bcs2(x)
        return x

class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
Now, if I do the following, everything works out ok and I get 300 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_conv_stack(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print (model.summary())
Similarly, if I do the following, I get 1,230 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_residual(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print (model.summary())
However, if I try the basic_module class, I get zero trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_module(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print (model.summary())
Does anyone know why this is happening?
Edit to add:
I discovered that the layers used in the call must be initialized in the class's init for things to work properly. So if I change the basic module to this:
class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.clayers = [basic_residual(filters, kernel_size, strides) for _ in range(4)]

    def call(self, x):
        for idx in range(4):
            x = self.clayers[idx](x)
        return x
Everything works fine. I don't know why this is the case, so I'll leave this question open in case someone can explain why.
You have to initialize the class instances with the required parameters (filters, kernel_size, strides) in the predefined basic_module; note that these hyper-parameters determine the shapes of the trainable weights. As for the why: in your original code, self.res holds the basic_residual class itself, and fresh instances are created inside call, so Keras never registers them as sublayers and their weights are not tracked.
# >>> a = basic_module
# >>> a
# <class '__main__.basic_module'>
# >>> a = basic_module(10, 3, 1)
# >>> a
# <__main__.basic_module at 0x7f6123eed510>

class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual  # <--- the class, not an instance
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
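A quick way to confirm the diagnosis (my sketch, using the fixed basic_module from the question's edit): instantiating the module and calling it once builds the sublayers, after which trainable_weights is non-empty.

import tensorflow as tf

layer = basic_module(10, 3, 1)           # the fixed version from the question's edit
_ = layer(tf.zeros((1, 128, 128, 3)))    # a single call builds every sublayer
print(len(layer.trainable_weights) > 0)  # True once the sublayers are tracked attributes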
When training Neural Networks for classification in TensorFlow/Keras, is it possible to set the bias term in the output layer to non-trainable?
It looks like layer.trainable = False will freeze both the kernel and the bias in this layer. Is it possible to only freeze the bias, but still update the kernel?
Here is a hacky solution using initializers and constraints; it only works for models that have not yet been initialized. If your model has already been initialized, you will need to replace the layers in order to add the initializers and constraints. See https://github.com/keras-team/keras/issues/13100.
Biases with different values
class ConstantTensorInitializer(tf.keras.initializers.Initializer):
    """Initializes tensors to `t`."""

    def __init__(self, t):
        self.t = t

    def __call__(self, shape, dtype=None):
        return self.t

    def get_config(self):
        return {'t': self.t}

class ConstantTensorConstraint(tf.keras.constraints.Constraint):
    """Constrains tensors to `t`."""

    def __init__(self, t):
        self.t = t

    def __call__(self, w):
        return self.t

    def get_config(self):
        return {'t': self.t}

# Example
biases = tf.constant([0.1, 0.2, 0.3, 0.4])
layer = Conv2D(
    4,
    (3, 3),
    use_bias=True,
    bias_initializer=ConstantTensorInitializer(biases),
    bias_constraint=ConstantTensorConstraint(biases)
)
Biases with the same value
class ConstantValueConstraint(tf.keras.constraints.Constraint):
    """Constrains the elements of the tensor to `value`."""

    def __init__(self, value):
        self.value = value

    def __call__(self, w):
        return w * 0 + self.value

    def get_config(self):
        return {'value': self.value}

# Example
layer = Conv2D(
    4,
    (3, 3),
    use_bias=True,
    bias_initializer=tf.keras.initializers.Constant(0.1),
    bias_constraint=ConstantValueConstraint(0.1)
)
You can set use_bias to False for any layer, e.g.:
model.add(layers.Conv2D(64, (3, 3), use_bias=False))
Note that this removes the bias term entirely rather than freezing it at a fixed value.
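Another option (my sketch, not from either answer, assuming TF2's tf.keras): a custom dense-style layer that registers its bias with trainable=False, so the optimizer updates the kernel but never touches the bias.

import tensorflow as tf

class DenseFrozenBias(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name='kernel', shape=(input_shape[-1], self.units),
            initializer='glorot_uniform', trainable=True)
        # the bias is registered as a non-trainable weight, so the
        # optimizer never updates it while the kernel still trains
        self.bias = self.add_weight(
            name='bias', shape=(self.units,),
            initializer='zeros', trainable=False)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel) + self.bias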
I'm trying to implement a CReLU layer in Keras.
One option that seems to work is to use a Lambda layer:
def _crelu(x):
    x = tf.nn.crelu(x, axis=-1)
    return x

def _conv_bn_crelu(x, n_filters, kernel_size):
    x = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Lambda(_crelu)(x)
    return x
But I wonder whether the Lambda layer introduces some overhead in the training or inference process?
My second attempt is to create a Keras layer that is a wrapper around tf.nn.crelu:
class CRelu(Layer):
    def __init__(self, **kwargs):
        super(CRelu, self).__init__(**kwargs)

    def build(self, input_shape):
        super(CRelu, self).build(input_shape)

    def call(self, x):
        x = tf.nn.crelu(x, axis=-1)
        return x

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        output_shape[-1] = output_shape[-1] * 2
        output_shape = tuple(output_shape)
        return output_shape

def _conv_bn_crelu(x, n_filters, kernel_size):
    x = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = CRelu()(x)
    return x
Which version will be more efficient?
I'm also looking forward to a pure Keras implementation, if that's possible.
I don't think there is a significant difference between the two implementations speed-wise.
The Lambda implementation is actually the simplest, but writing a custom Layer as you have done is usually better, especially with regard to model saving and loading (the get_config method).
In this case it doesn't matter much, as CReLU is trivial and doesn't require saving and restoring parameters. You can store the axis parameter though, as in the code below; this way it will be retrieved automatically when the model is loaded.
class CRelu(Layer):
    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super(CRelu, self).__init__(**kwargs)

    def build(self, input_shape):
        super(CRelu, self).build(input_shape)

    def call(self, x):
        x = tf.nn.crelu(x, axis=self.axis)
        return x

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        output_shape[-1] = output_shape[-1] * 2
        output_shape = tuple(output_shape)
        return output_shape

    def get_config(self):
        config = {'axis': self.axis}
        base_config = super(CRelu, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
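As for the pure-Keras implementation asked about: CReLU is just the concatenation of ReLU(x) and ReLU(-x) along the channel axis, so a sketch built from stock layers (my addition, assuming the TF-backed Keras imports used above) could look like this:

from tensorflow.keras import layers

def crelu_block(x, axis=-1):
    # CReLU(x) = concat(ReLU(x), ReLU(-x)) along the channel axis
    pos = layers.ReLU()(x)
    neg = layers.ReLU()(layers.Lambda(lambda t: -t)(x))  # negate via Lambda for portability
    return layers.Concatenate(axis=axis)([pos, neg])

Whichever variant you use, remember to pass custom_objects={'CRelu': CRelu} to keras.models.load_model when reloading a model saved with the custom layer.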
Thanks for looking at my question.
For example: the final output is the sum of two matrices A and B, like this:
output = keras.layers.add([A, B])
Now I want to introduce a new parameter x to change the output.
I want to make newoutput = A*x + B*(1-x),
where x is a trainable parameter in my network.
What should I do?
Please help me, thanks very much!
EDIT (part of the code):
conv1 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(input)
drop1 = Dropout(0.5)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(drop1)
conv2 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
drop2 = Dropout(0.5)(conv2)
up1 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop2))
# the line I want to change:
merge = add([drop2, up1])
# this layer simply adds the drop2 and up1 layers; now I want a trainable
# parameter x to adjust the weighting of those two layers
I tried to use the code, but some questions still occurred:
1. How can I use my own layer?
merge = MyLayer()(drop2, up1)
or some other way?
2. What is the meaning of out_dim?
Those parameters are all 3-dimensional matrices; what is the meaning of out_dim?
Thank you... T.T
EDIT 2 (solved):
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np
from keras.layers import add

class MyLayer(Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self._x = K.variable(0.5)
        self.trainable_weights = [self._x]
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        A, B = x
        result = add([self._x * A, (1 - self._x) * B])
        return result

    def compute_output_shape(self, input_shape):
        return input_shape[0]
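One caveat on the solved edit (my note, not from the original post): in TF2-era Keras, trainable_weights is a read-only property, so the assignment above raises an error there; registering the scalar with add_weight, as the answer below does, is the supported route. A minimal TF2-style sketch of the same build:

# TF2-style build() for the same layer (assumption: tf.keras, not standalone keras):
def build(self, input_shape):
    self._x = self.add_weight(name='x', shape=(),
                              initializer=tf.keras.initializers.Constant(0.5),
                              trainable=True)
    super(MyLayer, self).build(input_shape)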
You have to create a custom class which inherits from Layer and create the trainable parameter using self.add_weight(...). You can find examples of this here and there.
For your example, the layer would look somewhat like this:
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np

class MyLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create trainable weight variables for this layer.
        self._A = self.add_weight(name='A',
                                  shape=(input_shape[1], self.output_dim),
                                  initializer='uniform',
                                  trainable=True)
        self._B = self.add_weight(name='B',
                                  shape=(input_shape[1], self.output_dim),
                                  initializer='uniform',
                                  trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self._A) + K.dot(1 - x, self._B)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
Edit: Just based on the names, I (wrongly) assumed that x is the layer's input and that you want to optimize A and B. But, as you stated, you want to optimize x. For this, you can do something like this:
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np

class MyLayer(Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self._x = self.add_weight(name='x',
                                  shape=(1,),
                                  initializer='uniform',
                                  trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        A, B = x
        # scalar blend: elementwise multiplication broadcasts the (1,)-shaped
        # weight across A and B, implementing A*x + B*(1-x)
        return self._x * A + (1 - self._x) * B

    def compute_output_shape(self, input_shape):
        return input_shape[0]
Edit 2: You can call this layer using
merge = MyLayer()([drop2, up1])