Thanks for looking at my question.

For example: the final output is the sum of two matrices A and B, like this:

output = keras.layers.add([A, B])

Now I want to introduce a new parameter x to change the output. I want to make

newoutput = Ax + B(1-x)

where x is a trainable parameter in my network. What should I do? Please help me ~ thanks very much!
Edit (part of the code):
conv1 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(input)
drop1 = Dropout(0.5)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(drop1)
conv2 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
drop2 = Dropout(0.5)(conv2)
up1 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop2))

# the line I want to change:
merge = add([drop2, up1])
# this layer simply adds the drop2 and up1 layers. Now I want to add a trainable
# parameter x to adjust the weight of those two layers.
I tried to use the code below, but some questions remain:

1. How do I call my own layer? Like this:

merge = Mylayer()(drop2, up1)

or some other way?

2. What is the meaning of out_dim? The inputs are all 3-dimensional tensors; what does out_dim refer to?

Thank you... T.T
Edit 2 (solved):
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers import add

class MyLayer(Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self._x = K.variable(0.5)
        self.trainable_weights = [self._x]
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        A, B = x
        result = add([self._x * A, (1 - self._x) * B])
        return result

    def compute_output_shape(self, input_shape):
        return input_shape[0]
You have to create a custom class which inherits from Layer and create the trainable parameter using self.add_weight(...). You can find examples of this here and there.

For your example, the layer would look something like this:
from keras import backend as K
from keras.engine.topology import Layer

class MyLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create trainable weight variables for this layer.
        self._A = self.add_weight(name='A',
                                  shape=(input_shape[1], self.output_dim),
                                  initializer='uniform',
                                  trainable=True)
        self._B = self.add_weight(name='B',
                                  shape=(input_shape[1], self.output_dim),
                                  initializer='uniform',
                                  trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self._A) + K.dot(1 - x, self._B)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
Edit: Based on the names alone I (wrongly) assumed that x is the layer's input and that you want to optimize A and B. But, as you stated, you want to optimize x. For this, you can do something like this:
from keras import backend as K
from keras.engine.topology import Layer

class MyLayer(Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self._x = self.add_weight(name='x',
                                  shape=(1,),
                                  initializer='uniform',
                                  trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        A, B = x
        # elementwise multiplication broadcasts the scalar weight over the tensors;
        # K.dot would require matching matrix dimensions here
        return self._x * A + (1 - self._x) * B

    def compute_output_shape(self, input_shape):
        return input_shape[0]
Edit 2: You can call this layer using

merge = MyLayer()([drop2, up1])
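For completeness, here is a minimal sketch (not from the original answer) of wiring the layer into a functional model; the input shape and the two conv branches are placeholders for illustration:

from keras.layers import Input, Conv2D
from keras.models import Model

# hypothetical shapes, just to show the wiring
inp = Input(shape=(32, 32, 16))
a = Conv2D(16, 3, padding='same')(inp)
b = Conv2D(16, 3, padding='same')(inp)

merge = MyLayer()([a, b])  # x is learned along with the conv weights
model = Model(inputs=inp, outputs=merge)
model.compile(optimizer='adam', loss='mse')
model.summary()  # MyLayer should show exactly 1 trainable parameter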
Related
I am writing a custom Keras layer that returns a tensor of zeros the first three times it is invoked and passes its input through unchanged afterwards. The code is the following:
class MyLayer(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.__iteration = 0
        self.__returning_zeros = None

    def build(self, input_shape):
        self.__returning_zeros = tf.zeros(shape=input_shape, dtype=tf.float32)

    def call(self, inputs):
        self.__iteration += 1
        if self.__iteration <= 3:
            return self.__returning_zeros
        else:
            return inputs
Unfortunately, if I try to build a model using this layer like this:
def build_model(input_shape, num_classes):
    input_layer = keras.Input(shape=input_shape, name='input')
    conv1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", name='conv1')(input_layer)
    maxpool1 = layers.MaxPooling2D(pool_size=(2, 2), name='maxpool1')(conv1)
    conv2 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", name='conv2')(maxpool1)
    mylayer = MyLayer()(conv2)
    maxpool2 = layers.MaxPooling2D(pool_size=(2, 2), name='maxpool2')(mylayer)
    flatten = layers.Flatten(name='flatten')(maxpool2)
    dropout = layers.Dropout(0.5, name='dropout')(flatten)
    dense = layers.Dense(num_classes, activation="softmax", name='dense')(dropout)
    return keras.Model(inputs=(input_layer,), outputs=dense)
I get the following error message:
File "customlayerkeras.py", line 25, in build
self.__returning_zeros = tf.zeros(shape=input_shape, dtype=tf.float32)
ValueError: Cannot convert a partially known TensorShape (None, 13, 13, 64) to a Tensor.
It seems that, despite using the build function as suggested in the documentation, I am not able to retrieve the correct shape of the input. How can I fix this problem?
EDIT:
I was overcomplicating the problem. The best solution is to just multiply the inputs by zero, like this:
def call(self, inputs):
    self.__iteration += 1
    if self.__iteration <= 3:
        return inputs * 0
    else:
        return inputs
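An equivalent option (my suggestion, not from the original post) is tf.zeros_like, which makes the intent explicit and follows the input's dynamic shape, batch dimension included:

def call(self, inputs):
    self.__iteration += 1
    if self.__iteration <= 3:
        # builds a zero tensor with the exact (dynamic) shape of the input
        return tf.zeros_like(inputs)
    else:
        return inputs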
Pretty sure you don't need the batch dimension, so you can do something like this:
class MyLayer(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.__iteration = 0
        self.__returning_zeros = None

    def build(self, input_shape):
        # drop the (unknown) batch dimension; keep only the per-sample shape
        self.__returning_zeros = tf.zeros(shape=input_shape[1:], dtype=tf.float32)

    def call(self, inputs):
        self.__iteration += 1
        if self.__iteration <= 3:
            return inputs * self.__returning_zeros
            # or: return tf.repeat(self.__returning_zeros[None, ...], tf.shape(inputs)[0], axis=0)
        else:
            return inputs
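A quick eager-mode check of the behaviour on dummy data (my addition; shapes are illustrative). Note that the Python counter only increments while executing eagerly; inside a tf.function-traced model the branch would be frozen at trace time:

import tensorflow as tf

layer = MyLayer()
x = tf.random.normal((2, 13, 13, 64))

for step in range(4):
    out = layer(x)
    print(step, float(tf.reduce_sum(tf.abs(out))))  # 0.0 for the first three calls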
I am creating a series of custom TensorFlow (version 2.4.1) layers and am running into a problem where the model summary shows zero trainable parameters. Below is a series of examples showing how everything is fine until I add in the last custom layer.
Here are the imports and custom classes:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Input, ReLU,
                                     Layer)

class basic_conv_stack(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_conv_stack, self).__init__()
        self.conv1 = Conv2D(filters, kernel_size, strides, padding='same')
        self.bn1 = BatchNormalization()
        self.relu = ReLU()

    def call(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        return x

class basic_residual(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_residual, self).__init__()
        self.bcs1 = basic_conv_stack(filters, kernel_size, strides)
        self.bcs2 = basic_conv_stack(filters, kernel_size, strides)

    def call(self, x):
        x = self.bcs1(x)
        x = self.bcs2(x)
        return x

class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
Now, if I do the following, everything works out ok and I get 300 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_conv_stack(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print(model.summary())
Similarly, if I do the following, I get 1,230 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_residual(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print(model.summary())
However, if I try the basic_module class, I get zero trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_module(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print(model.summary())
Does anyone know why this is happening?
Edit to add:
I discovered that the layers used in call must be instantiated in the class's __init__ for things to work properly. So if I change basic_module to this:
class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.clayers = [basic_residual(filters, kernel_size, strides)
                        for _ in range(4)]

    def call(self, x):
        for idx in range(4):
            x = self.clayers[idx](x)
        return x
everything works fine. I don't know why this is the case, so I'll leave this question open in case someone can explain why.
You have to initialize the class instances with the required parameters (filters, kernel_size, strides) rather than storing the bare basic_residual class on basic_module. Also, note that these hyperparameters are what determine the trainable weight properties.
>>> a = basic_module
>>> a
__main__.basic_module

>>> a = basic_module(10, 3, 1)
>>> a
<__main__.basic_module at 0x7f6123eed510>
class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual  # <--- the bare class, not an instance
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
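A quick check (my addition) that the fixed version from the question's edit actually registers its weights with the model:

input_layer = Input((128, 128, 3))
conv = basic_module(10, 3, 1)(input_layer)  # the version with sublayers created in __init__
model = Model(input_layer, conv)
print(len(model.trainable_weights))  # now non-zero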
I am trying to create a custom layer in Keras which has two trainable variables. I pass these two variables to another function, inside which it needs to get the ndim and shape of the variables, but this throws an error. My custom layer code is:
class MyLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create trainable weight variables for this layer.
        assert len(input_shape) >= 3
        input_dim = input_shape[1:]
        print(input_shape)
        self.kernel1 = self.add_weight(
            shape=(self.output_dim[0], input_dim[0]),
            name='kernel1',
            initializer='uniform',
            trainable=True)
        print(self.kernel1)
        self.kernel2 = self.add_weight(
            shape=(self.output_dim[1], input_dim[1]),
            name='kernel2',
            initializer='uniform',
            trainable=True)
        print(self.kernel2)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        print(x.shape)
        mat1 = np.array(self.kernel1)
        print(K.shape(mat1))
        # print(mat1.ndim)
        mat2 = np.array(self.kernel2)
        print(K.shape(mat2))
        # print(mat2.ndim)
        output1 = Myoperation(x, mat1, 1)
        output = Myoperation(output1, mat2, 2)
        return output

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
Inside the function Myoperation, it needs to calculate

m2 = list(np.shape(M))[1]  # M is mat1 or mat2
The error is:

IndexError: list index out of range

If we use mat1.shape to check the shape, we get:

TypeError: Expected binary or unicode string, got

Please help.
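Not part of the original post, but a likely cause: np.array(...) wraps the symbolic weight tensor, so np.shape on it cannot see the dimensions. A sketch of one way around it, assuming Myoperation only needs the static shapes, is to pass the weight tensors directly and read their shapes with K.int_shape:

def call(self, x):
    # K.int_shape returns the static shape as a plain Python tuple
    m2 = K.int_shape(self.kernel1)[1]  # what np.shape(M)[1] was meant to return
    print(m2)
    output1 = Myoperation(x, self.kernel1, 1)  # pass the tensor, not np.array(...)
    output = Myoperation(output1, self.kernel2, 2)
    return output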
I'm trying to implement a CReLU layer in Keras. One option that seems to work is to use a Lambda layer:
def _crelu(x):
    x = tf.nn.crelu(x, axis=-1)
    return x

def _conv_bn_crelu(x, n_filters, kernel_size):
    x = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Lambda(_crelu)(x)
    return x
But I wonder whether the Lambda layer introduces some overhead in the training or inference process? My second attempt is to create a Keras layer that wraps tf.nn.crelu:
class CRelu(Layer):
    def __init__(self, **kwargs):
        super(CRelu, self).__init__(**kwargs)

    def build(self, input_shape):
        super(CRelu, self).build(input_shape)

    def call(self, x):
        x = tf.nn.crelu(x, axis=-1)
        return x

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        output_shape[-1] = output_shape[-1] * 2
        output_shape = tuple(output_shape)
        return output_shape

def _conv_bn_crelu(x, n_filters, kernel_size):
    x = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = CRelu()(x)
    return x
Which version will be more efficient? I am also looking for a pure Keras implementation, if that's possible.
I don't think there is a significant difference between the two implementations speed-wise.

The Lambda implementation is actually the simplest, but writing a custom Layer as you have done is usually better, especially for model saving and loading (via the get_config method).

In this case it doesn't matter much, as CReLU is trivial and doesn't require saving and restoring parameters. You can, however, store the axis parameter as in the code below; this way it will be retrieved automatically when the model is loaded.
class CRelu(Layer):
    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super(CRelu, self).__init__(**kwargs)

    def build(self, input_shape):
        super(CRelu, self).build(input_shape)

    def call(self, x):
        x = tf.nn.crelu(x, axis=self.axis)
        return x

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        output_shape[-1] = output_shape[-1] * 2
        output_shape = tuple(output_shape)
        return output_shape

    def get_config(self):
        # get_config takes no extra arguments
        config = {'axis': self.axis}
        base_config = super(CRelu, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
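As for a pure Keras implementation (my sketch, not from the original answer): CReLU is just the ReLU of x concatenated with the ReLU of -x, which the backend API can express directly without any tf.* calls:

from keras import backend as K
from keras.layers import Lambda

def crelu(x, axis=-1):
    # CReLU(x) = concat(relu(x), relu(-x)) along the given axis
    return Lambda(lambda t: K.concatenate([K.relu(t), K.relu(-t)], axis=axis))(x)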
I want to make a custom layer in Keras. In this example, I use a variable to multiply the tensor, but I get the following error in keras/engine/training_arrays.py, line 304, in predict_loop:

outs[i][batch_start:batch_end] = batch_out
ValueError: could not broadcast input array from shape (36) into shape (2)

I have checked that file but found nothing. Is there something wrong in my custom layer?
# the definition of mylayer
from keras import backend as K
from keras.engine.topology import Layer

class mylayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(mylayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(1,),
                                      dtype='float32',
                                      trainable=True,
                                      initializer='uniform')
        super(mylayer, self).build(input_shape)

    def call(self, inputs, **kwargs):
        return self.kernel * inputs[0]

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1])
# the test of mylayer
from mylayer import mylayer
import numpy as np
from keras.layers import Input, Flatten
from keras.models import Model

x_train = np.random.random((2, 3, 4, 3))
y_train = np.random.random((2, 36))
print(x_train)

x = Input(shape=(3, 4, 3))
y = Flatten()(x)
output = mylayer((36,))(y)

model = Model(inputs=x, outputs=output)
model.summary()
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=2)

hist = model.predict(x_train, batch_size=2)
print(hist)
print(model.get_layer(index=1).get_weights())
So is there something wrong in my custom layer? In particular, training works fine, but it fails when I try to use predict.
The shape of self.kernel * inputs[0] is (36,), but the expected shape is (?, 36): inputs is the whole batch tensor, so inputs[0] selects only the first sample. Change it:
def call(self, inputs, **kwargs):
    return self.kernel * inputs
If you want to output the weights of mylayer, you should set index=2 (index 1 is the Flatten layer).
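For example (illustrative):

# index 0 is the Input layer, index 1 is Flatten, index 2 is mylayer
print(model.get_layer(index=2).get_weights())  # the learned scalar kernel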