When training Neural Networks for classification in TensorFlow/Keras, is it possible to set the bias term in the output layer to non-trainable?
It looks like layer.trainable = False will freeze both the kernel and the bias in this layer. Is it possible to only freeze the bias, but still update the kernel?
A hacky solution for uninitialized models: use initializers and constraints. A constraint is applied after every gradient update, so resetting the bias to a constant value on each step effectively freezes it while the kernel keeps training.
If your model has already been initialized, you will need to replace the layers in order to add the initializers and constraints. See https://github.com/keras-team/keras/issues/13100.
Biases with different values
class ConstantTensorInitializer(tf.keras.initializers.Initializer):
    """Initializes tensors to `t`."""

    def __init__(self, t):
        self.t = t

    def __call__(self, shape, dtype=None):
        return self.t

    def get_config(self):
        return {'t': self.t}


class ConstantTensorConstraint(tf.keras.constraints.Constraint):
    """Constrains tensors to `t`."""

    def __init__(self, t):
        self.t = t

    def __call__(self, w):
        return self.t

    def get_config(self):
        return {'t': self.t}
# Example
biases = tf.constant([0.1, 0.2, 0.3, 0.4])
layer = Conv2D(
    4,
    (3, 3),
    use_bias=True,
    bias_initializer=ConstantTensorInitializer(biases),
    bias_constraint=ConstantTensorConstraint(biases)
)
Biases with the same value
class ConstantValueConstraint(tf.keras.constraints.Constraint):
    """Constrains the elements of the tensor to `value`."""

    def __init__(self, value):
        self.value = value

    def __call__(self, w):
        return w * 0 + self.value

    def get_config(self):
        return {'value': self.value}
# Example
layer = Conv2D(
    4,
    (3, 3),
    use_bias=True,
    bias_initializer=tf.keras.initializers.Constant(0.1),
    bias_constraint=ConstantValueConstraint(0.1)
)
Alternatively, you can set use_bias to False for any layer; note this removes the bias term entirely rather than freezing it, e.g.
model.add(layers.Conv2D(64, (3, 3), use_bias=False))
Related
I am writing a custom layer using Keras that returns a tensor of zeros the first three times it is invoked and passes the inputs through unchanged afterwards. The code is the following:
class MyLayer(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.__iteration = 0
        self.__returning_zeros = None

    def build(self, input_shape):
        self.__returning_zeros = tf.zeros(shape=input_shape, dtype=tf.float32)

    def call(self, inputs):
        self.__iteration += 1
        if self.__iteration <= 3:
            return self.__returning_zeros
        else:
            return inputs
Unfortunately if I try to build a model using this layer like this
def build_model(input_shape, num_classes):
    input_layer = keras.Input(shape=input_shape, name='input')
    conv1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", name='conv1')(input_layer)
    maxpool1 = layers.MaxPooling2D(pool_size=(2, 2), name='maxpool1')(conv1)
    conv2 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", name='conv2')(maxpool1)
    mylayer = MyLayer()(conv2)
    maxpool2 = layers.MaxPooling2D(pool_size=(2, 2), name='maxpool2')(mylayer)
    flatten = layers.Flatten(name='flatten')(maxpool2)
    dropout = layers.Dropout(0.5, name='dropout')(flatten)
    dense = layers.Dense(num_classes, activation="softmax", name='dense')(dropout)
    return keras.Model(inputs=(input_layer,), outputs=dense)
I get the following error message
File "customlayerkeras.py", line 25, in build
self.__returning_zeros = tf.zeros(shape=input_shape, dtype=tf.float32)
ValueError: Cannot convert a partially known TensorShape (None, 13, 13, 64) to a Tensor.
It seems that, despite using the build function as suggested in the documentation, I am not able to retrieve the correct shape of the input.
How can I fix this problem?
EDIT:
I was overcomplicating the problem. The best solution is to just multiply the inputs by zero, like this:
def call(self, inputs):
    self.__iteration += 1
    if self.__iteration <= 3:
        return inputs * 0
    else:
        return inputs
Pretty sure you don't need the batch dimension, so you can do something like this:
class MyLayer(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.__iteration = 0
        self.__returning_zeros = None

    def build(self, input_shape):
        self.__returning_zeros = tf.zeros(shape=input_shape[1:], dtype=tf.float32)

    def call(self, inputs):
        self.__iteration += 1
        if self.__iteration <= 3:
            return inputs * self.__returning_zeros
            # or: return tf.repeat(self.__returning_zeros[None, ...], tf.shape(inputs)[0], axis=0)
        else:
            return inputs
I'm following the Quantization aware training comprehensive guide and struggling with QAT for custom layers, working with tf=2.6.0, py=3.9.7.
Below is a toy example of my problem:
I wrote a simple custom layer that implements Conv2D
class MyConv(tf.keras.layers.Layer):
    '''custom conv2d'''
    def __init__(self, filt=1, name=None, **kwargs):
        super(MyConv, self).__init__(name=name, **kwargs)
        self.filt = filt

    def get_config(self):
        config = super().get_config().copy()
        config.update({"filt": self.filt})
        return config

    def build(self, shape):
        self.conv = tf.keras.layers.Conv2D(self.filt, 1, padding="same")

    def call(self, input):
        return self.conv(input)
I've created a small model with that layer, then recursively passed over its layers and annotated them using tfmot.quantization.keras.quantize_annotate_layer (each custom layer could have more custom sub-layers that need to be quantized). Then I applied tfmot.quantization.keras.quantize_apply to the annotated model. The resulting model consists of all the quantized layers, except for my custom layer, which had not been quantized.
I'll note that when I replace the custom layer MyConv with the code below, as in the comprehensive guide, the quantization works:

class MyConv(tf.keras.layers.Conv2D):
    pass

Please help me solve this issue. Could it be some issue with my QuantizeConfig?
Below is my full code:
import tensorflow as tf
import tensorflow_model_optimization as tfmot


class MyConv(tf.keras.layers.Layer):
    '''custom conv2d'''
    def __init__(self, filt=1, name=None, **kwargs):
        super(MyConv, self).__init__(name=name, **kwargs)
        self.filt = filt

    def get_config(self):
        config = super().get_config().copy()
        config.update({"filt": self.filt})
        return config

    def build(self, shape):
        self.conv = tfmot.quantization.keras.quantize_annotate_layer(
            tf.keras.layers.Conv2D(self.filt, 1, padding="same"))

    def call(self, input):
        return self.conv(input)


def get_toy_model():
    input = tf.keras.Input((10, 10, 1), name='input')
    x = tf.keras.layers.Conv2D(1, 3, padding="same")(input)
    x = tf.keras.layers.ReLU()(x)
    x = MyConv()(x)
    for _ in range(2):
        y = tf.keras.layers.Conv2D(1, 3, padding="same")(x)
        y = tf.keras.layers.ReLU()(y)
    out = tf.keras.layers.Conv2D(1, 3, padding="same")(y)
    return tf.keras.Model(input, out, name='toy_Conv2D')
LastValueQuantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer
MovingAverageQuantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer


class DefaultCostumeQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
    # Configure how to quantize weights.
    def get_weights_and_quantizers(self, layer):
        return []

    # Configure how to quantize activations.
    def get_activations_and_quantizers(self, layer):
        return []

    def set_quantize_weights(self, layer, quantize_weights):
        pass

    def set_quantize_activations(self, layer, quantize_activations):
        pass

    # Configure how to quantize outputs (may be equivalent to activations).
    def get_output_quantizers(self, layer):
        return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
            num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]

    def get_config(self):
        return {}
def recursive_depth_layers(layer):
    for l in list(layer.__dict__.values()):
        if isinstance(l, tf.keras.layers.Layer):
            recursive_depth_layers(l)
        if isinstance(l, (tf.keras.layers.Dense, tf.keras.layers.Conv2D,
                          tf.keras.layers.ReLU, tf.keras.layers.LeakyReLU,
                          tf.keras.layers.Activation)):
            ql = tfmot.quantization.keras.quantize_annotate_layer(l, DefaultCostumeQuantizeConfig())
            ql._name += "_" + l.name
            return ql


def apply_quantization(layer):
    # regular layer
    if isinstance(layer, (tf.keras.layers.Dense, tf.keras.layers.Conv2D,
                          tf.keras.layers.ReLU, tf.keras.layers.LeakyReLU,
                          tf.keras.layers.Activation)):
        l = tfmot.quantization.keras.quantize_annotate_layer(layer, DefaultCostumeQuantizeConfig())
        l._name += '_' + layer.name
        return l
    if layer.__module__ == "__main__":
        # custom layer
        recursive_depth_layers(layer)
        l = tfmot.quantization.keras.quantize_annotate_layer(layer, DefaultCostumeQuantizeConfig())
        l._name += '_' + layer.name
        return l
    return layer
model = get_toy_model()
model.summary()

annotated_model = tf.keras.models.clone_model(model, clone_function=apply_quantization)
annotated_model.summary()

quantize_scope = tfmot.quantization.keras.quantize_scope
with quantize_scope({'DefaultCostumeQuantizeConfig': DefaultCostumeQuantizeConfig, 'MyConv': MyConv}):
    quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)

quant_aware_model._name += "_quant"
quant_aware_model.summary()
quant_aware_model.compile()
I am creating a series of custom Tensorflow (version 2.4.1) layers and am running into a problem where the model summary shows zero trainable parameters. Below is a series of examples showing how everything is fine until I add in the last custom layer.
Here are the imports and custom classes:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Input, ReLU,
                                     Layer)


class basic_conv_stack(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_conv_stack, self).__init__()
        self.conv1 = Conv2D(filters, kernel_size, strides, padding='same')
        self.bn1 = BatchNormalization()
        self.relu = ReLU()

    def call(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        return x


class basic_residual(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_residual, self).__init__()
        self.bcs1 = basic_conv_stack(filters, kernel_size, strides)
        self.bcs2 = basic_conv_stack(filters, kernel_size, strides)

    def call(self, x):
        x = self.bcs1(x)
        x = self.bcs2(x)
        return x


class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
Now, if I do the following, everything works out ok and I get 300 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_conv_stack(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print (model.summary())
Similarly, if I do the following, I get 1,230 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_residual(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print (model.summary())
However, if I try the basic_module class, I get zero trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_module(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print (model.summary())
Does anyone know why this is happening?
Edit to add:
I discovered that the layers used in call must be instantiated in the class's __init__ for things to work properly. So if I change basic_module to this:
class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.clayers = [basic_residual(filters, kernel_size, strides) for _ in range(4)]

    def call(self, x):
        for idx in range(4):
            x = self.clayers[idx](x)
        return x
Everything works fine. I don't know why this is the case, so I'll leave this question open in case someone can answer the why of this question.
You have to instantiate the class with the required parameters (filters, kernel_size, strides); in the predefined basic_module, self.res holds the class itself rather than an instance. Because the instances are then created inside call, Keras never tracks their weights, which is why the trainable-parameter count is zero. Also note that these hyper-parameters determine the shapes of the trainable weights.
# >>> a = basic_module
# >>> a
# __main__.basic_module
# >>> a = basic_module(10, 3, 1)
# >>> a
# <__main__.basic_module at 0x7f6123eed510>

class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual  # <--- a class reference, not an instance
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
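As a quick sanity check (a sketch of mine, not from the original answer), the list-based version from the asker's edit reports its weights, because the sub-layers are instantiated once in __init__ and therefore tracked:

input_layer = Input((128, 128, 3))
conv = basic_module(10, 3, 1)(input_layer)  # the fixed, list-based basic_module
model = Model(input_layer, conv)
model.summary()  # trainable parameter count is now non-zero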
I'm trying to implement a CReLU layer in Keras.
One option that seems to work is to use a Lambda layer:
def _crelu(x):
    x = tf.nn.crelu(x, axis=-1)
    return x

def _conv_bn_crelu(x, n_filters, kernel_size):
    x = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Lambda(_crelu)(x)
    return x
But I wonder whether the Lambda layer introduces some overhead during training or inference?
My second attempt is to create a Keras layer that is a wrapper around tf.nn.crelu:
class CRelu(Layer):
    def __init__(self, **kwargs):
        super(CRelu, self).__init__(**kwargs)

    def build(self, input_shape):
        super(CRelu, self).build(input_shape)

    def call(self, x):
        x = tf.nn.crelu(x, axis=-1)
        return x

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        output_shape[-1] = output_shape[-1] * 2
        output_shape = tuple(output_shape)
        return output_shape

def _conv_bn_crelu(x, n_filters, kernel_size):
    x = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = CRelu()(x)
    return x
Which version will be more efficient?
I'm also looking for a pure Keras implementation, if that's possible.
I don't think there is a significant difference between the two implementations speed-wise.
The Lambda implementation is actually the simplest, but writing a custom Layer as you have done is usually better, especially for model saving and loading (the get_config method).
In this case it doesn't matter much, as CReLU is trivial and doesn't require saving and restoring parameters. You can, however, store the axis parameter as in the code below; this way it will be restored automatically when the model is loaded.
class CRelu(Layer):
    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super(CRelu, self).__init__(**kwargs)

    def build(self, input_shape):
        super(CRelu, self).build(input_shape)

    def call(self, x):
        x = tf.nn.crelu(x, axis=self.axis)
        return x

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        output_shape[-1] = output_shape[-1] * 2
        output_shape = tuple(output_shape)
        return output_shape

    def get_config(self):
        config = {'axis': self.axis}
        base_config = super(CRelu, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
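As for a pure Keras implementation: CReLU is just ReLU applied to x and to -x, concatenated along the channel axis, so it can be composed from stock layers. A minimal sketch (the function name is mine):

from tensorflow.keras.layers import Concatenate, Lambda, ReLU

def crelu_block(x, axis=-1):
    # CReLU(x) = concat(relu(x), relu(-x)) along the given axis
    pos = ReLU()(x)
    neg = ReLU()(Lambda(lambda t: -t)(x))
    return Concatenate(axis=axis)([pos, neg])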
Thanks for looking at my question.
For example, the final output is the sum of two matrices A and B, like this:

output = keras.layers.add([A, B])

Now I want to introduce a new parameter x to change the output.
I want to make newoutput = A*x + B*(1-x),
where x is a trainable parameter in my network.
What should I do?
Please help me ~ thanks very much!
Edit (part of code):
conv1 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(input)
drop1 = Dropout(0.5)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(drop1)
conv2 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
drop2 = Dropout(0.5)(conv2)
up1 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop2))
# The line I want to change:
merge = add([drop2, up1])
# This layer simply adds the drop2 and up1 layers. Now I want to add a trainable
# parameter x to adjust the weight of those two layers.
I tried to use the code, but some questions still remain:
1. How can I use my own layer? Like this,

merge = MyLayer()(drop2, up1)

or some other way?
2. What is the meaning of output_dim? Those parameters are all 3-dim matrices; what is the meaning of output_dim?
thank you... T.T
Edit 2 (solved):
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np
from keras.layers import add

class MyLayer(Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self._x = K.variable(0.5)
        self.trainable_weights = [self._x]
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        A, B = x
        result = add([self._x * A, (1 - self._x) * B])
        return result

    def compute_output_shape(self, input_shape):
        return input_shape[0]
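(A side note: in recent tf.keras versions, trainable_weights is a read-only property, so the assignment above will fail there; the add_weight approach shown in the answer below is the portable way to register the parameter.)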
You have to create a custom class which inherits from Layer and create the trainable parameter using self.add_weight(...). You can find an example of this here and there.
For your example, the layer would look something like this:
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np

class MyLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create trainable weight variables for this layer.
        self._A = self.add_weight(name='A',
                                  shape=(input_shape[1], self.output_dim),
                                  initializer='uniform',
                                  trainable=True)
        self._B = self.add_weight(name='B',
                                  shape=(input_shape[1], self.output_dim),
                                  initializer='uniform',
                                  trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self._A) + K.dot(1 - x, self._B)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
Edit: Based on the names, I (wrongly) assumed that x is the layer's input and you want to optimize A and B. But, as you stated, you want to optimize x. For this, you can do something like this:
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np

class MyLayer(Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self._x = self.add_weight(name='x',
                                  shape=(1,),
                                  initializer='uniform',
                                  trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        A, B = x
        # Elementwise blend of the two inputs; a dot product with a
        # (1,)-shaped weight would not broadcast over the feature maps.
        return self._x * A + (1 - self._x) * B

    def compute_output_shape(self, input_shape):
        return input_shape[0]
Edit 2: You can call this layer using

merge = MyLayer()([drop2, up1])