TensorFlow layer subclass input shape - python

I am trying to make a custom layer by subclassing TensorFlow's layers.Layer.
The layer is meant to be an IIR filter, so it uses values from the input and computes an output sequence, something like this:
y[i] = a0 * x[i] + a1 * x[i - 1] + b1 * y[i - 1]
where x is the input and y is the output. I define the class this way:
class IIR(keras.layers.Layer):
    def __init__(self, input_dim):
        super(IIR, self).__init__()
        self.input_dim = 60
        self.b0 = tf.Variable(tf.constant([uniform(-1, 1)]))
        self.b1 = tf.Variable(tf.constant([uniform(-1, 1)]))
        self.b2 = tf.Variable(tf.constant([uniform(-1, 1)]))
        self.a1 = tf.Variable(tf.constant([uniform(-1, 1)]))
        self.a2 = tf.Variable(tf.constant([uniform(-1, 1)]))

    def call(self, inputs):
        order = 3
        init_dim = [0, 1, 2]
        output_sequence = tf.constant(np.zeros((self.input_dim)), dtype=tf.float32)
        outt = np.zeros(self.input_dim)
        outt[0] = inputs[0]
        outt[1] = inputs[1]
        outt[2] = inputs[2]
        for i in range(2, self.input_dim):
            outt[i] = (self.b0*inputs[i] + self.b1*inputs[i-1] + self.b2*inputs[i-2]
                       - self.a1*outt[i-1] - self.a2*outt[i-2])
        output_sequence = tf.constant(outt)
        return output_sequence
but I keep getting the error
ValueError: Exception encountered when calling layer "iir_13" (type IIR).
in user code:
File "<ipython-input-37-0717fc982e73>", line 17, in call *
outt[0] = inputs[:][0]
ValueError: setting an array element with a sequence.
Call arguments received:
• inputs=tf.Tensor(shape=(None, 60), dtype=float32)
and so on. The shape of the input is (None, 60) (I'm setting 60 just for testing purposes), and I am assuming None will be replaced by the batch size when training? How can I get access to the values of the input? What is the actual shape of the input? Is this the right approach?
EDIT: I am trying to implement this in a model, something like this:
model = keras.Sequential()
model.add(keras.layers.Input(shape=60))
model.add(IIR(input_dim=60))
model.add(keras.layers.Dense(8, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')

Not sure what exactly you want to do, but I would recommend using TensorFlow operations only. Here is an example:
import tensorflow as tf

class IIR(tf.keras.layers.Layer):
    def __init__(self, input_dim):
        super(IIR, self).__init__()
        self.input_dim = input_dim
        # Filter coefficients as trainable variables.
        self.b0 = tf.Variable(tf.random.uniform((1,), minval=-1, maxval=1))
        self.b1 = tf.Variable(tf.random.uniform((1,), minval=-1, maxval=1))
        self.b2 = tf.Variable(tf.random.uniform((1,), minval=-1, maxval=1))
        self.a1 = tf.Variable(tf.random.uniform((1,), minval=-1, maxval=1))
        self.a2 = tf.Variable(tf.random.uniform((1,), minval=-1, maxval=1))

    def call(self, inputs):
        # Each TensorArray entry holds one time step for the whole batch.
        output_sequence = tf.TensorArray(dtype=tf.float32, size=0,
                                         dynamic_size=True, clear_after_read=False)
        output_sequence = output_sequence.write(0, inputs[:, 0])
        output_sequence = output_sequence.write(1, inputs[:, 1])
        for i in range(2, self.input_dim):
            output_sequence = output_sequence.write(
                i, self.b0*inputs[:, i] + self.b1*inputs[:, i-1]
                + self.b2*inputs[:, i-2] - self.a1*output_sequence.read(i-1)
                - self.a2*output_sequence.read(i-2))
        # stack() yields shape (input_dim, batch); transpose back to (batch, input_dim)
        # so values stay aligned with their batch rows.
        result = tf.transpose(output_sequence.stack())
        # The reshape is a no-op here, but it restores the static (None, 60) shape.
        return tf.reshape(result, tf.shape(inputs))

iir = IIR(input_dim=60)
tf.print(iir(tf.random.normal((2, 60))).shape)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(shape=60))
model.add(IIR(input_dim=60))
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
print(model.summary())
TensorShape([2, 60])
Model: "sequential_21"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
iir_80 (IIR)                 (None, 60)                5
dense_20 (Dense)             (None, 8)                 488
dense_21 (Dense)             (None, 1)                 9
=================================================================
Total params: 502
Trainable params: 502
Non-trainable params: 0
_________________________________________________________________
None
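As a quick smoke test (my addition, on made-up random data, not from the original answer), you can check that the model trains end to end and that the five filter coefficients receive gradients:
import numpy as np

# Dummy data matching the (None, 60) input of the model above.
x_dummy = np.random.normal(size=(16, 60)).astype(np.float32)
y_dummy = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)

model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(x_dummy, y_dummy, epochs=1, batch_size=8)
print(model.layers[0].trainable_variables)  # the five filter coefficients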

Related

Custom activation function in Tensorflow with trainable params

I am trying to implement a custom version of the PReLU activation function in TensorFlow. The custom thing about this activation is that the knee of the ReLU is smoothed. I got the equation from this paper.
Here is the code:
from keras import backend as K
import tensorflow as tf

def SMU_LeakyPRElu(x, alpha=2.5, u=1.0):
    return ((1+alpha)*x) + ((1-alpha)*x)*(tf.math.erf(u*(1-alpha)*x))
from keras.layers import Layer

class SMU_LeakyPRElu(Layer):
    def __init__(self, alpha=2.5, u=1.0, trainable=False, **kwargs):
        super(SMU_LeakyPRElu, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = alpha
        self.u = u
        self.trainable = trainable

    def build(self, input_shape):
        self.alpha_factor = K.variable(self.alpha,
                                       dtype=K.floatx(),
                                       name='alpha_factor')
        self.u_factor = K.variable(self.u,
                                   dtype=K.floatx(),
                                   name='u_factor')
        if self.trainable:
            self._trainable_weights.append(self.alpha_factor)
            self._trainable_weights.append(self.u_factor)
        super(SMU_LeakyPRElu, self).build(input_shape)

    def call(self, inputs, mask=None):
        return SMU_LeakyPRElu(inputs, self.alpha_factor, self.u_factor)

    def get_config(self):
        config = {'alpha': self.get_weights()[0] if self.trainable else self.alpha,
                  'u': self.get_weights()[1] if self.trainable else self.u,
                  'trainable': self.trainable}
        base_config = super(SMU_LeakyPRElu, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape
x = tf.random.normal((1, 10, 4))
print(x)

input_shape = (1, 10, 4)
input_layer = tf.keras.layers.Input(shape=input_shape[1:], name="input_layer")
layer_1 = tf.keras.layers.Conv1D(2, 1, padding='valid', input_shape=input_shape[:1])(input_layer)
layer_2 = SMU_LeakyPRElu(alpha=2.5, u=1.0, trainable=True)(layer_1)

model = tf.keras.models.Model(input_layer, layer_2, name="model")
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
              loss="categorical_crossentropy", run_eagerly=True)
print(model.summary())

result = model.predict(x)
print(result)
print(result.shape)
I implemented this code using an example from this post at Data Science SE.
Error:
tf.Tensor(
[[[ 1.0467066 -1.1833347 1.5384735 2.078511 ]
[-1.6025988 -0.30846047 0.8019808 0.3113866 ]
[ 0.58313304 -0.90643036 -0.3926888 -0.6210553 ]
[ 0.16505387 -0.5930619 0.6983522 -0.12211661]
[ 0.06077941 -0.11117186 -1.2540722 -0.32234746]
[ 0.41838828 0.7090619 0.30999053 0.10459523]
[ 0.35603598 -0.2695868 -0.17901018 -0.09100233]
[ 1.2746769 0.8311447 0.02825974 -0.48021472]
[-1.536545 -0.24765234 -0.36437735 -1.1891246 ]
[ 0.7531206 -0.56109476 -0.65761757 0.19102335]]], shape=(1, 10, 4), dtype=float32)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-50-c9d490dfd533> in <module>
5 input_layer = tf.keras.layers.Input(shape=input_shape[1:], name="input_layer")
6 layer_1 = tf.keras.layers.Conv1D(2, 1,padding = 'valid', input_shape=input_shape[:1])(input_layer)
----> 7 layer_2 = SMU_LeakyPRElu(alpha=2.5,u=1.0,trainable=True)(layer_1)
8
9 model = tf.keras.models.Model(input_layer, layer_2, name="model")
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/type_spec.py in type_spec_from_value(value)
888 3, "Failed to convert %r to tensor: %s" % (type(value).__name__, e))
889
--> 890 raise TypeError(f"Could not build a TypeSpec for {value} of "
891 f"unsupported type {type(value)}.")
892
TypeError: Could not build a TypeSpec for <__main__.SMU_LeakyPRElu object at 0x7fde698f7850> of unsupported type <class '__main__.SMU_LeakyPRElu'>.
I don't understand this error. How should I implement this function as a custom activation function with trainable parameters alpha and u?
The problem is that you have named your activation function and the custom layer you created the same thing. I refactored your code for you.
Code:
import tensorflow as tf
from typing import Optional
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Layer
from tensorflow.keras.optimizers import Adam


class SMULeakyPReLU(Layer):
    """``SMULeakyPReLU``."""

    def __init__(self,
                 alpha: float = 2.5,
                 u: float = 1.,
                 trainable: bool = False,
                 **kwargs):
        super().__init__(**kwargs)
        self.alpha = alpha
        self.u = u
        self.trainable = trainable

    def build(self, input_shape: tf.TensorShape):
        super().build(input_shape)
        self.alpha_factor = tf.Variable(
            self.alpha,
            dtype=tf.float32,
            trainable=self.trainable,
            name="alpha_factor")
        self.u_factor = tf.Variable(
            self.u,
            dtype=tf.float32,
            trainable=self.trainable,
            name="u_factor")

    def call(self,
             inputs: tf.Tensor,
             mask: Optional[tf.Tensor] = None
             ) -> tf.Tensor:
        # Same functional form as the question: (1+a)x + (1-a)x * erf(u(1-a)x)
        fst = (1. + self.alpha_factor) * inputs
        snd = (1. - self.alpha_factor) * inputs
        trd = tf.math.erf(self.u_factor * (1. - self.alpha_factor) * inputs)
        return fst + snd * trd

    def get_config(self):
        config = {
            "alpha": self.get_weights()[0] if self.trainable else self.alpha,
            "u": self.get_weights()[1] if self.trainable else self.u,
            "trainable": self.trainable
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
Test
# fake data
x = tf.random.normal((1, 10, 4))

# create network
input_layer = Input(shape=x.shape[1:], name="input_layer")
layer_1 = Conv1D(2, 1, padding="valid")(input_layer)
layer_2 = SMULeakyPReLU(alpha=2.5, u=1.0, trainable=True)(layer_1)

# create model
model = Model(input_layer, layer_2, name="model")

# compile model and summary
model.compile(
    optimizer=Adam(learning_rate=5e-4),
    loss="categorical_crossentropy",
    run_eagerly=True)
print(model.summary())

# forward pass
result = model.predict(x)
print(result)
print(result.shape)
# Model: "model"
# _________________________________________________________________
# Layer (type)                          Output Shape      Param #
# =================================================================
# input_layer (InputLayer)              [(None, 10, 4)]   0
#
# conv1d_1 (Conv1D)                     (None, 10, 2)     10
#
# smu_leaky_p_re_lu_1 (SMULeakyPReLU)   (None, 10, 2)     2
#
# =================================================================
# Total params: 12
# Trainable params: 12
# Non-trainable params: 0
# _________________________________________________________________
# None
# 1/1 [==============================] - 0s 13ms/step
# [[[-1.6503611e+01 -3.5051659e+01]
# [ 4.0098205e-02 1.5923592e+00]
# [-1.4898951e+00 7.5487376e-05]
# [ 3.1900513e+01 2.8786476e+01]
# [ 1.9207695e+01 3.6511238e+01]
# [-6.8302655e-01 -4.7705490e-02]
# [ 9.6008554e-03 7.5611029e+00]
# [ 4.7136435e-01 2.5528276e+00]
# [ 2.6859209e-01 3.3496175e+00]
# [ 1.4372441e+01 3.4978668e+01]]]
# (1, 10, 2)
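As a quick sanity check (my addition, not part of the original answer), the layer's output can be compared with the standalone function from the question for fixed alpha and u:
# Consistency check: the layer should match the question's functional form.
def smu_leaky_prelu_fn(x, alpha=2.5, u=1.0):
    return ((1 + alpha) * x) + ((1 - alpha) * x) * tf.math.erf(u * (1 - alpha) * x)

z = tf.random.normal((1, 10, 4))
layer = SMULeakyPReLU(alpha=2.5, u=1.0)
tf.debugging.assert_near(layer(z), smu_leaky_prelu_fn(z))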

How can I match a Decoder's input to a Pretrained Resnet18 Encoder?

I am trying to build a custom decoder with skip connections to run with a pretrained ResNet18 encoder for an image segmentation task. The total number of classes is 150.
The ResNet18 encoder has an fc output of 512. In order to match the encoder's output to the decoder's input, I am trying to set the conv layers of the decoder so that they match the output from the encoder, i.e. [151, 512, 1, 1]. However, no matter what combination of layers I make, I am not able to match the input and output tensors.
Here is the relevant part of the decoder code:
class ResNet18Transpose(nn.Module):
    def __init__(self, transblock, layers, num_classes=150):
        self.inplanes = 512
        super(ResNet18Transpose, self).__init__()
        self.deconv1 = self._make_transpose(transblock, 256 * transblock.expansion, layers[0], stride=2)
        self.deconv2 = self._make_transpose(transblock, 128 * transblock.expansion, layers[1], stride=2)
        self.deconv3 = self._make_transpose(transblock, 64 * transblock.expansion, layers[2], stride=2)
        self.deconv4 = self._make_transpose(transblock, 32 * transblock.expansion, layers[3], stride=2)
        self.skip0 = self._make_skip_layer(64, 64 * transblock.expansion)
        self.skip1 = self._make_skip_layer(128, 64 * transblock.expansion)
        self.skip2 = self._make_skip_layer(256, 64 * transblock.expansion)
        self.skip3 = self._make_skip_layer(512, 128 * transblock.expansion)
        self.inplanes = 64
        self.final_conv = self._make_transpose(transblock, 64 * transblock.expansion, 3)
        self.final_deconv = nn.ConvTranspose2d(self.inplanes * transblock.expansion, num_classes,
                                               kernel_size=2, stride=2, padding=0, bias=True)
        self.out6_conv = nn.Conv2d(1024, num_classes, kernel_size=1, stride=1, bias=True)
        self.out5_conv = nn.Conv2d(128 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
        self.out4_conv = nn.Conv2d(128 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
        self.out3_conv = nn.Conv2d(64 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
        self.out2_conv = nn.Conv2d(32 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
The forward block of the decoder is as follows:
def forward(self, x, labels=None, sparse_mode=False, use_skip=True):
    [in0, in1, in2, in3, in4] = x
    if labels:
        [lab0, lab1, lab2, lab3, lab4] = labels
    out6 = self.out6_conv(in4)
    if sparse_mode:
        if labels:
            mask4 = (lab4 == 0).unsqueeze(1).repeat(1, in4.shape[1], 1, 1).type(in4.dtype)
        else:
            mask4 = (torch.argmax(out6, dim=1) == 0).unsqueeze(1).repeat(1, in4.shape[1], 1, 1).type(in4.dtype)
        in4 = in4 * mask4
    # upsample 1
    x = self.deconv1(in4)
    out5 = self.out5_conv(x)
    if sparse_mode:
        if labels:
            mask3 = (lab3 == 0).unsqueeze(1).repeat(1, in3.shape[1], 1, 1).type(in3.dtype)
        else:
            mask3 = (torch.argmax(out5, dim=1) == 0).unsqueeze(1).repeat(1, in3.shape[1], 1, 1).type(in3.dtype)
        in3 = in3 * mask3
    if use_skip:
        x = x + self.skip3(in3)
    # upsample 2
    x = self.deconv2(x)
    out4 = self.out4_conv(x)
    if sparse_mode:
        if labels:
            mask2 = (lab2 == 0).unsqueeze(1).repeat(1, in2.shape[1], 1, 1).type(in2.dtype)
        else:
            mask2 = (torch.argmax(out4, dim=1) == 0).unsqueeze(1).repeat(1, in2.shape[1], 1, 1).type(in2.dtype)
        in2 = in2 * mask2
    if use_skip:
        x = x + self.skip2(in2)
    # upsample 3
    x = self.deconv3(x)
    out3 = self.out3_conv(x)
    if sparse_mode:
        if labels:
            mask1 = (lab1 == 0).unsqueeze(1).repeat(1, in1.shape[1], 1, 1).type(in1.dtype)
        else:
            mask1 = (torch.argmax(out3, dim=1) == 0).unsqueeze(1).repeat(1, in1.shape[1], 1, 1).type(in1.dtype)
        in1 = in1 * mask1
    if use_skip:
        x = x + self.skip1(in1)
    # upsample 4
    x = self.deconv4(x)
    out2 = self.out2_conv(x)
    if sparse_mode:
        if labels:
            mask0 = (lab0 == 0).unsqueeze(1).repeat(1, in0.shape[1], 1, 1).type(in0.dtype)
        else:
            mask0 = (torch.argmax(out2, dim=1) == 0).unsqueeze(1).repeat(1, in0.shape[1], 1, 1).type(in0.dtype)
        in0 = in0 * mask0
    if use_skip:
        x = x + self.skip0(in0)
    # final
    x = self.final_conv(x)
    out1 = self.final_deconv(x)
    return [out6, out5, out4, out3, out2, out1]
I get the following error:
File "/project/xfu/aamir/Golden-QGN/models/resnet.py", line 447, in forward
out6 = self.out6_conv(in4)
File "/project/xfu/aamir/anaconda3/envs/QGN/lib/python3.8/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/project/xfu/aamir/anaconda3/envs/QGN/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 419, in forward
return self._conv_forward(input, self.weight)
File "/project/xfu/aamir/anaconda3/envs/QGN/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 415, in _conv_forward
return F.conv2d(input, weight, self.bias, self.stride,
RuntimeError: Given groups=1, weight of size [151, 1024, 1, 1], expected input[8, 512, 8, 8] to have 1024 channels, but got 512 channels instead
Exception in thread Thread-1:
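For what it's worth (my own illustration, not part of the original post), the error is just the Conv2d contract: a layer's in_channels must equal the channel dimension of the incoming tensor, and a small snippet reproduces it exactly:
import torch
import torch.nn as nn

# out6_conv declares in_channels=1024, but the encoder feature map has 512.
conv = nn.Conv2d(1024, 151, kernel_size=1)
x = torch.randn(8, 512, 8, 8)
try:
    conv(x)
except RuntimeError as e:
    print(e)  # ... expected input[8, 512, 8, 8] to have 1024 channels ...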
If I change the Decoder Layers as follows:
self.out6_conv = nn.Conv2d(512, num_classes, kernel_size=1, stride=1, bias=True)
self.out5_conv = nn.Conv2d(256 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
self.out4_conv = nn.Conv2d(128 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
self.out3_conv = nn.Conv2d(64 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
self.out2_conv = nn.Conv2d(32 * transblock.expansion, num_classes, kernel_size=1, stride=1, bias=True)
I get the following error. Note that the input tensor has also changed.
File "/project/xfu/aamir/anaconda3/envs/QGN/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 415, in _conv_forward
return F.conv2d(input, weight, self.bias, self.stride,
RuntimeError: Given groups=1, weight of size [64, 64, 3, 3], expected input[8, 32, 128, 128] to have 64 channels, but got 32 channels instead
I also tried altering the Resnet18 Encoder's fc output to 1024 instead of 512 as follows:
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model.load_state_dict(load_url(model_urls['resnet18']))
    model.fc = torch.nn.Linear(1024, 150)
    return model
But nothing seems to work. I also could not find a skip-connection-based decoder for ResNet18 on GitHub or elsewhere on the internet. Any help will be appreciated.
NOTE: I only want to work with ResNet18. Also, the images fed to the network through the dataloader work perfectly fine with a ResNet50 encoder + custom decoder. I also tried changing the crop size from 0 to 128 to 256 to 512, but in vain.
Here is the code that is being run in models.py for image segmentation. I tried using the set_trace() method to debug the code. The code stops running just before the (pred, pred_quad) = self.decoder(fmap, labels_scaled) line in the following code:
class SegmentationModule(SegmentationModuleBase):
    def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None, quad_sup=False, running_avg_param=0.99):
        super(SegmentationModule, self).__init__()
        self.encoder = net_enc
        self.decoder = net_dec
        self.crit = crit
        if deep_sup_scale:
            if deep_sup_scale < 0:
                self.adapt_weights = True
                self.running_avg_param = running_avg_param
                deep_sup_scale = 1
            else:
                self.adapt_weights = False
            self.loss_weights = [(deep_sup_scale**(i+1)) for i in range(5)]
        self.quad_sup = quad_sup

    def forward(self, feed_dict, *, segSize=None):
        inputs = feed_dict['img_data'].cuda()
        if segSize is None:  # training
            labels_orig_scale = feed_dict['seg_label_0'].cuda()
            labels_scaled = []
            fmap = self.encoder(inputs, return_feature_maps=True)
            if self.quad_sup:
                labels_scaled.append(feed_dict['seg_label_1'].cuda())
                labels_scaled.append(feed_dict['seg_label_2'].cuda())
                labels_scaled.append(feed_dict['seg_label_3'].cuda())
                labels_scaled.append(feed_dict['seg_label_4'].cuda())
                labels_scaled.append(feed_dict['seg_label_5'].cuda())
                (pred, pred_quad) = self.decoder(fmap, labels_scaled)
            else:
                pred = self.decoder(fmap)
            loss = self.crit(pred, labels_orig_scale)
            if self.quad_sup:
                loss_orig = loss
                for i in range(len(pred_quad)):
                    loss_quad = self.crit(pred_quad[i], labels_scaled[i])
                    loss = loss + loss_quad * self.loss_weights[i]
                    if self.adapt_weights:
                        self.loss_weights[i] = self.running_avg_param * self.loss_weights[i] + \
                            (1 - self.running_avg_param) * (loss_quad/loss_orig).data.cpu().numpy()
            acc = self.pixel_acc(pred, labels_orig_scale, self.quad_sup)
            return loss, acc
        else:  # inference
            if 'qtree' in feed_dict:
                labels_scaled = [feed_dict['qtree'][l].cuda() for l in range(1, 6)]
            else:
                labels_scaled = None
            pred = self.decoder(self.encoder(inputs, return_feature_maps=True), labels_scaled, segSize=segSize)
            return pred
So here is how I resolved the error:
1. I disabled the skip connections. I am not sure why the decoder doesn't work with skip connections.
2. I had given an incorrect layers argument, [6, 3, 4, 3], to the decoder; for ResNet18 the layers should be [2, 2, 2, 2], as in the sketch below.
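For concreteness, a minimal sketch of the corrected instantiation (my addition; TransBasicBlock is a stand-in name for whatever transpose block class the decoder is actually built with):
# Hypothetical usage with the corrected block counts for ResNet18.
decoder = ResNet18Transpose(TransBasicBlock, layers=[2, 2, 2, 2], num_classes=150)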

How to combine RNN with CNN

I am working on a program that performs subtraction using two images from MNIST. In doing so, we are using an algorithm that combines a CNN and an RNN.
class CRNN_Model(tf.keras.Model):
    def __init__(self):
        super(CRNN_Model, self).__init__()
        self.hidden_size = 256
        self.batch_size = 128
        self.sequence_size = 2
        self.output_size = 19
        self.img_size = 28
        # cnn
        self.reshape = tf.keras.layers.Reshape((10000, 28, 28, 1))
        self.conv1 = tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu', input_shape=(28, 28, 1))
        self.maxpooling1 = tf.keras.layers.MaxPooling2D()
        self.conv2 = tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu')
        self.maxpooling2 = tf.keras.layers.MaxPooling2D()
        self.flatten1 = tf.keras.layers.Flatten()
        self.hidden = tf.keras.layers.Dense(self.hidden_size, activation='relu')
        # rnn
        self.rnn_cell = tf.keras.layers.SimpleRNNCell(self.hidden_size, activation=None)

    def call(self, x):
        # cnn
        hidden_list = []
        for i in range(2):
            x = x[0][i]
            x = self.conv1(x)
            x = self.maxpooling1(x)
            x = self.conv2(x)
            x = self.maxpooling2(x)
            x = self.flatten1(x)
            x = self.hidden(x)
            hidden_list.append(x)
        self.hidden_list = tf.transpose(hidden_list, perm=[1, 0, 2])
        # rnn
        self.initial_state = self.rnn_cell.zero_state(self.batch_size, tf_float32)
        state = self.initial_state
        outputs = []
        for t in range(self.sequence_size):
            (output, state) = self.rnn_cell(self.hidden_list[:, t, :], state)
            outputs.append(output)
        self.outputs = outputs
        self.prediction = tf.keras.layers.Dense(self.outputs[-1], self.output_size)
        self.pred_output = tf.keras.activations.softmax(self.prediction)
        return self.pred_output
However, an error occurs at the input to the first conv1 layer.
ValueError: Input 0 of layer conv2d_111 is incompatible with the layer: : expected min_ndim=4, found ndim=3. Full shape received: (28, 28, 1)
The shape of the data we are giving is as follows.
print(np.shape(train_x))
print(np.shape(train_y))
print(np.shape(test_x))
print(np.shape(test_y))
(10000, 2, 28, 28, 1)
→ (number of samples × images per sample × width × height × channels)
(10000, 1)
(2000, 2, 28, 28, 1)
(2000, 1)
How should I feed the data?
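For reference (my addition, not part of the original question), the expected min_ndim=4 error comes from x = x[0][i], which indexes away the batch dimension; slicing along the sequence axis keeps it:
import tensorflow as tf

# Sketch: keep the batch axis when picking the i-th image of each pair.
inputs = tf.random.normal((128, 2, 28, 28, 1))  # (batch, images, h, w, c)
x_0 = inputs[:, 0]                              # (128, 28, 28, 1): valid Conv2D input
print(x_0.shape)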

Subclass definitions of TensorFlow models with customizable hidden layers

I am learning about Model subclass definitions in TensorFlow.
A pretty straightforward definition is something like this:
class MyNetwork1(tf.keras.Model):
    def __init__(self, num_classes=10):
        super().__init__()
        self.num_classes = num_classes
        self.input_layer = tf.keras.layers.Flatten()
        self.hidden_1 = tf.keras.layers.Dense(128, activation='relu')
        self.hidden_2 = tf.keras.layers.Dense(64, activation='relu')
        self.output_layer = tf.keras.layers.Dense(self.num_classes, activation='softmax')

    def call(self, input_tensor):
        x = self.input_layer(input_tensor)
        x = self.hidden_1(x)
        x = self.hidden_2(x)
        x = self.output_layer(x)
        return x
After building the model,
Model1 = MyNetwork1()
Model1.build((None, 28, 28, 1))
It will look like
Model: "my_network1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten (Flatten) multiple 0
_________________________________________________________________
dense (Dense) multiple 100480
_________________________________________________________________
dense_1 (Dense) multiple 8256
_________________________________________________________________
dense_2 (Dense) multiple 650
=================================================================
Total params: 109,386
Trainable params: 109,386
Non-trainable params: 0
Since this method cannot customize the number of neurons and the activation type per layer, I tried to edit it a little bit. I tried the following definition:
class MyNetwork2(tf.keras.Model):
    def __init__(self, num_classes=2, hidden_dimensions=[100],
                 hidden_activations=['relu']):
        super().__init__()
        self.inputlayer = tf.keras.layers.Flatten()
        i = 0
        self.hidden_layers = []
        for d, a in zip(hidden_dimensions, hidden_activations):
            i += 1
            setattr(self, 'hidden_' + str(i),
                    tf.keras.layers.Dense(d, activation=a))
            self.hidden_layers.append('self.hidden_' + str(i) + '(x)')
        self.outputlayer = tf.keras.layers.Dense(num_classes, activation='softmax')
        self.num_layers = len(hidden_dimensions) + 2

    def call(self, inputtensor):
        x = self.inputlayer(inputtensor)
        for h in self.hidden_layers:
            # print(h)
            x = eval(h, {}, x)
        x = self.outputlayer(x)
        return x
In this code, I tried to do the same as in the previous definition.
Model2 = MyNetwork2(num_classes=10, hidden_dimensions=[128, 64],
                    hidden_activations=['relu', 'relu'])
Model2.build((None, 28, 28, 1))
However, I faced the following error:
TypeError: Only integers, slices (`:`), ellipsis (`...`), tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid indices, got 'self'
How can I fix this error to achieve my goal?
Seems like a very complicated way to do things. If you use a dictionary for a variable number of layers instead of eval, everything works fine.
class MyNetwork2(tf.keras.Model):
    def __init__(self, num_classes=2, hidden_dimensions=[100],
                 hidden_activations=['relu']):
        super(MyNetwork2, self).__init__()
        self.inputlayer = tf.keras.layers.Flatten()
        self.hidden_layers = dict()
        for i, (d, a) in enumerate(zip(hidden_dimensions, hidden_activations)):
            self.hidden_layers['hidden_' + str(i)] = tf.keras.layers.Dense(d, activation=a)
        self.outputlayer = tf.keras.layers.Dense(num_classes, activation='softmax')
        self.num_layers = len(hidden_dimensions) + 2
Running example:
import tensorflow as tf
import numpy as np

class MyNetwork2(tf.keras.Model):
    def __init__(self, num_classes=2, hidden_dimensions=[100],
                 hidden_activations=['relu']):
        super(MyNetwork2, self).__init__()
        self.inputlayer = tf.keras.layers.Flatten()
        self.hidden_layers = dict()
        for i, (d, a) in enumerate(zip(hidden_dimensions, hidden_activations)):
            self.hidden_layers['hidden_' + str(i)] = tf.keras.layers.Dense(d, activation=a)
        self.outputlayer = tf.keras.layers.Dense(num_classes, activation='softmax')
        self.num_layers = len(hidden_dimensions) + 2

    def call(self, inputtensor, training=None, **kwargs):
        x = self.inputlayer(inputtensor)
        for k, v in self.hidden_layers.items():
            x = v(x)
        x = self.outputlayer(x)
        return x

Model2 = MyNetwork2(num_classes=10, hidden_dimensions=[128, 64],
                    hidden_activations=['relu', 'relu'])
Model2.build((None, 28, 28, 1))
Model2(np.random.uniform(0, 1, (1, 28, 28, 1)).astype(np.float32))
<tf.Tensor: shape=(1, 10), dtype=float32, numpy=
array([[0.14969216, 0.10196744, 0.0874036 , 0.08350615, 0.18459582,
0.07227989, 0.08263624, 0.08537506, 0.10291573, 0.04962786]],
dtype=float32)>
The hidden layers as a dictionary:
Model2.hidden_layers
{'hidden_0': <tensorflow.python.keras.layers.core.Dense at 0x1891b5c13a0>,
'hidden_1': <tensorflow.python.keras.layers.core.Dense at 0x1891b5c1d00>}
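As a side note (my addition), Keras also auto-tracks layers stored in plain Python lists, so the same idea works without a dictionary; a minimal sketch:
import numpy as np
import tensorflow as tf

class MyNetwork3(tf.keras.Model):
    def __init__(self, num_classes=2, hidden_dimensions=(100,),
                 hidden_activations=('relu',)):
        super().__init__()
        self.inputlayer = tf.keras.layers.Flatten()
        # A list attribute of layers is tracked just like a dict attribute.
        self.hidden = [tf.keras.layers.Dense(d, activation=a)
                       for d, a in zip(hidden_dimensions, hidden_activations)]
        self.outputlayer = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, inputtensor):
        x = self.inputlayer(inputtensor)
        for layer in self.hidden:
            x = layer(x)
        return self.outputlayer(x)

Model3 = MyNetwork3(num_classes=10, hidden_dimensions=[128, 64],
                    hidden_activations=['relu', 'relu'])
print(Model3(np.random.uniform(0, 1, (1, 28, 28, 1)).astype(np.float32)).shape)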

How to print all activation shapes (more detailed than summary()) for Tensorflow V2 keras model?

I've spent a lot of time with TensorFlow v0.x and v1.x, and now I'm trying the TensorFlow 2 Keras model API. model.summary() looks easy and convenient, but it lacks detail.
Here's a toy example. Let's say I define custom layers and models as below (in a functional API style and a subclassing style).
Please see below. I want to see the primitive layers inside the custom layers, but .summary() only shows shallow information (only the direct child layers).
Toy custom layers (layers are just toy definitions):
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import (Dense, Conv2D, BatchNormalization)

class LayerA(tf.keras.layers.Layer):
    def __init__(self, num_outputs, **kwargs):
        super(LayerA, self).__init__(**kwargs)
        self.num_outputs = num_outputs
        self.dense1 = Dense(64)
        self.dense2 = Dense(128)
        self.dense3 = Dense(num_outputs)
        self.bn = BatchNormalization(trainable=True)

    def call(self, inputs, training=True):
        x = self.dense1(inputs)
        x = self.bn(x, training=training)
        x = self.dense1(inputs)
        x = self.bn(x, training=training)
        x = self.dense1(inputs)
        x = self.bn(x, training=training)
        return x

class LayerB(tf.keras.layers.Layer):
    def __init__(self, num_outputs, **kwargs):
        super(LayerB, self).__init__(**kwargs)
        self.num_outputs = num_outputs
        self.dense = Dense(64)
        self.bn = BatchNormalization(trainable=True)

    def call(self, inputs, training=True):
        x = self.dense(inputs)
        x = self.bn(x, training=training)
        return x
Model definition with functional API:
inputs = tf.keras.Input(shape=(28), name='input')
x = LayerA(7, name='layer_a')(inputs)
x = LayerB(13, name='layer_b')(x)
x = tf.reduce_max(x, 1)
model_func = keras.Model(inputs=inputs, outputs=x, name='model')
model_func.summary()

# Results:
# Layer (type)                 Output Shape              Param #
# =================================================================
# input (InputLayer)           [(None, 28)]              0
# _________________________________________________________________
# layer_a (LayerA)             (None, 64)                2112
# layer_b (LayerB)             (None, 64)                4416
# tf_op_layer_Max_1 (TensorFlo [(None,)]                 0
# =================================================================
# Total params: 6,528
# Trainable params: 6,272
# Non-trainable params: 256
Model definition by subclassing:
class ModelA(tf.keras.Model):
    def __init__(self):
        super(ModelA, self).__init__()
        self.block_1 = LayerA(7, name='layer_a')
        self.block_2 = LayerB(13, name='layer_b')

    def call(self, inputs):
        x = self.block_1(inputs)
        x = self.block_2(x)
        x = tf.reduce_max(x, 1)
        return x

model_subclass = ModelA()
y = model_subclass(inputs)
model_subclass.summary()

### Result:
# Layer (type)                 Output Shape              Param #
# =================================================================
# layer_a (LayerA)             (None, 64)                2112
# layer_b (LayerB)             (None, 64)                4416
# =================================================================
# Total params: 6,528
# Trainable params: 6,016
# Non-trainable params: 512
How can I print all the activation shapes of the Conv and Dense layers in the model? For example:
layer_a/dense_1 (None, ...)
layer_a/dense_2 (None, ...)
layer_b/dense_1 (None, ...)
layer_b/maybe-even-deeper-layer/conv2d_1 (None, ...)
... etc ...
In TensorFlow v0.x or v1.x, I would do something like:
for n in tf.get_default_graph().as_graph_def().node:
    print(n.name, n.shape)
Is there a way to print more details for a Keras model?
The summary will not do this automatically, so you have to adapt. You can, for instance, create a recursive summary:
def full_summary(layer):
    # check if this layer has layers
    if hasattr(layer, 'layers'):
        print('summary for ' + layer.name)
        layer.summary()
        print('\n\n')
        for l in layer.layers:
            full_summary(l)
Use it as:
full_summary(my_model)
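If you prefer a single flat listing instead of nested summaries (my own variation, not from the answer), tf.Module.submodules, which every Keras layer and model inherits, yields all nested layers, however deep:
# Walk every nested layer and print whatever shape info it exposes.
for sub in model_func.submodules:
    if isinstance(sub, tf.keras.layers.Layer):
        try:
            print(sub.name, sub.output_shape)
        except AttributeError:
            # Layers reused with different input shapes have no single output shape.
            print(sub.name, '(no unique output shape)')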
