How do I retrieve the name of a layer inside a hook function? - python

I have a neural network:

class ConvNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.trunk = nn.ModuleList()
        self.trunk.add_module('conv1', nn.Conv2d(3, 10, 3))
        self.classifier = nn.Linear(30, 2)

    def forward(self, x):
        out = self.classifier(self.trunk.conv1(x))
        return out

model = ConvNet()
I registered a forward hook:

def hook(module, input, output):
    print(module, input[0].shape, output.shape)

handle = model.trunk.conv1.register_forward_hook(hook)
How do I retrieve the name of this layer, i.e. 'conv1', inside the hook function? module._get_name() returns Conv2d, and module.__class__ returns <class 'torch.nn.modules.conv.Conv2d'>. How do I get just 'conv1'?
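A module does not store the name it was registered under, so the name has to be supplied from outside. A common workaround (a sketch, not part of the question) is to walk model.named_modules() and bind each qualified name into its hook with a closure:

def make_hook(name):
    # bind the registered name into the hook via a closure
    def hook(module, input, output):
        print(name, input[0].shape, output.shape)
    return hook

handles = []
for name, module in model.named_modules():
    if isinstance(module, nn.Conv2d):
        # name is the qualified path, e.g. 'trunk.conv1';
        # name.split('.')[-1] gives just 'conv1'
        handles.append(module.register_forward_hook(make_hook(name)))

named_modules() yields ('', model) first and then every submodule with its dotted path, so filtering by type (or by the name itself) picks out exactly the layers to hook.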

Related

How can I combine a Huggingface tokenizer and a BERT-based model in onnx?

Problem description:
I have a model based on BERT, with a classifier layer on top. I want to export it to ONNX, but to avoid issues on the side of the 'user' of the onnx model, I want to export the entire pipeline, including tokenization, as an ONNX model. However, this requires a plain string as the input type, which I believe ONNX does not support.
The Model:
class BertClassifier(nn.Module):
    """
    Class defining the classifier model with a BERT encoder and a single fully connected classifier layer.
    """
    def __init__(self, dropout=0.5, num_labels=24):
        super(BertClassifier, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(768, num_labels)
        self.relu = nn.ReLU()
        self.best_score = 0

    def forward(self, input_id, mask):
        _, pooled_output = self.bert(input_ids=input_id, attention_mask=mask, return_dict=False)
        output = self.relu(self.linear(self.dropout(pooled_output)))
        return output
The Tokenizer:
def get_tokenizer(chosen_model):
    # chosen_model = 'bert_base_uncased'
    return AutoTokenizer.from_pretrained(chosen_model)
Combined Pipeline:
class OnnxBertModel(nn.Module):
    """
    Model wrapper for onnx. Allows user to only provide a string as input. Output is a list of class probabilities
    """
    def __init__(self, dropout=0.5, num_labels=24):
        super(OnnxBertModel, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(768, num_labels)
        self.relu = nn.ReLU()
        self.best_score = 0
        self.tokenizer = get_tokenizer('bert-base-uncased')

    def forward(self, input_string):
        input_tokens = self.tokenizer(input_string,
                                      padding='max_length', max_length=512, truncation=True,
                                      return_tensors="pt")
        mask = input_tokens['attention_mask']
        input_id = input_tokens['input_ids'].squeeze(1)
        _, pooled_output = self.bert(input_ids=input_id, attention_mask=mask, return_dict=False)
        output = self.relu(self.linear(self.dropout(pooled_output)))
        return output
Additional code to export:
model = OnnxBertModel(num_labels=len(labels))
torch.onnx.export(model, ex_string, 'tryout.onnx', export_params=True, do_constant_folding=False)
The last call does not work due to the string typing.
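The standard ONNX operator set has no string tokenization ops, so the usual workaround (a sketch of one option, not from the question) is to export only the tensor-input BertClassifier and keep the tokenizer outside the graph, on the consumer side:

import torch

# a minimal export sketch, assuming the BertClassifier and get_tokenizer above
model = BertClassifier(num_labels=24)
model.eval()

tokenizer = get_tokenizer('bert-base-uncased')
encoded = tokenizer("example input", padding='max_length', max_length=512,
                    truncation=True, return_tensors="pt")

torch.onnx.export(
    model,
    (encoded['input_ids'], encoded['attention_mask']),  # example tensor inputs
    'bert_classifier.onnx',
    export_params=True,
    input_names=['input_ids', 'attention_mask'],
    output_names=['logits'],
    dynamic_axes={'input_ids': {0: 'batch'}, 'attention_mask': {0: 'batch'}},
)

If tokenization really must live inside the exported artifact, the onnxruntime-extensions package provides custom tokenizer operators, but those go beyond the standard opset and require the consumer to load the extensions library.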

python class call function without indicating the function name

I was learning pytorch and encountered a case I could not understand. Here is a class called MLP, with an __init__ function and a forward function. When I pass X as a parameter to the MLP instance net, without calling net.forward(X), the forward function seems to be called automatically. Why is this the case?
import torch
from torch import nn
from torch.nn import functional as F

class MLP(nn.Module):
    def __init__(self):
        super().__init__()  # nn.Module's params
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

X = torch.rand(2, 20)
net = MLP()
net(X)
"""
output of net(X)
tensor([[ 0.0614, -0.0143, -0.0546, 0.1173, -0.1838, -0.1843, 0.0861, 0.1152,
0.0990, 0.1818],
[-0.0483, -0.0196, 0.0720, 0.1243, 0.0261, -0.2727, -0.0480, 0.1391,
-0.0685, 0.2025]], grad_fn=<AddmmBackward0>)
"""
My initial guess was that forward is the only function in MLP that receives a parameter, but after I added another function that takes the same parameter, calling net(X) still chooses the forward function:
class MLP(nn.Module):
    def __init__(self):
        super().__init__()  # nn.Module's params
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward2(self, X):
        print("hello")
        return self.out((self.hidden(X)))

    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

net = MLP()
net(X)
net.forward(X)
net.forward2(X)
then I got
>>> net.forward(X)
tensor([[-0.1273, -0.0338, -0.1412, -0.1321, -0.1213, 0.0589, 0.0752, 0.0066,
-0.0057, -0.1374],
[-0.1660, -0.0044, -0.1765, -0.0451, -0.0386, 0.0824, 0.0486, -0.1293,
0.0511, -0.1285]], grad_fn=<AddmmBackward0>)
>>> net.forward2(X)
hello
tensor([[-0.2027, -0.2304, -0.3597, -0.3741, -0.5000, -0.2698, 0.2464, 0.1709,
-0.2262, -0.1462],
[-0.1168, -0.0417, -0.3584, -0.3133, -0.2366, -0.1521, 0.2428, 0.0043,
-0.1296, -0.2021]], grad_fn=<AddmmBackward0>)
>>> net(X)
tensor([[-0.1273, -0.0338, -0.1412, -0.1321, -0.1213, 0.0589, 0.0752, 0.0066,
-0.0057, -0.1374],
[-0.1660, -0.0044, -0.1765, -0.0451, -0.0386, 0.0824, 0.0486, -0.1293,
0.0511, -0.1285]], grad_fn=<AddmmBackward0>)
What did I miss? I'd really appreciate any help!
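What makes net(X) work is Python's __call__ protocol: nn.Module defines __call__, and calling an instance dispatches to that method, which runs any registered hooks and then invokes forward. A minimal sketch of the mechanism (not PyTorch's actual implementation, which also handles hooks and other bookkeeping):

class ModuleSketch:
    def __call__(self, *args, **kwargs):
        # calling the instance always dispatches to forward,
        # no matter what other methods the class defines
        return self.forward(*args, **kwargs)

class MyNet(ModuleSketch):
    def forward(self, x):
        return x * 2

net = MyNet()
print(net(21))  # 42 -- net(21) went through __call__ to forward

So forward2 is never chosen because __call__ names forward explicitly; having a matching signature plays no role.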

Fail to quantize custom layer - Quantization Aware Training

I'm following the Quantization Aware Training comprehensive guide and struggling with QAT for custom layers, working with tf=2.6.0, py=3.9.7.
Below is a toy example of my problem:
I wrote a simple custom layer that implements Conv2D:

class MyConv(tf.keras.layers.Layer):
    '''custom conv2d'''
    def __init__(self, filt=1, name=None, **kwargs):
        super(MyConv, self).__init__(name=name)
        self.filt = filt
        super(MyConv, self).__init__(**kwargs)

    def get_config(self):
        config = super().get_config().copy()
        config.update({"filt": self.filt})
        return config

    def build(self, shape):
        self.conv = tf.keras.layers.Conv2D(self.filt, 1, padding="same")

    def call(self, input):
        return self.conv(input)
I've created a small model with that layer, then recursively passed over its layers and annotated them using tfmot.quantization.keras.quantize_annotate_layer (each custom layer may have more custom sub-layers that need to be quantized). Then I applied tfmot.quantization.keras.quantize_apply to the annotated model. The resulting model contains all the quantized layers, except for my custom layer, which had not been quantized.
I'll note that when I replace the custom layer MyConv with the code below, as in the comprehensive guide, the quantization works.

class MyConv(tf.keras.layers.Conv2D):
    pass
Please help me solve this issue. Might be some issue with my QuantizeConfig?
Below is my full code:
import tensorflow as tf
import tensorflow_model_optimization as tfmot

class MyConv(tf.keras.layers.Layer):
    '''custom conv2d'''
    def __init__(self, filt=1, name=None, **kwargs):
        super(MyConv, self).__init__(name=name)
        self.filt = filt
        super(MyConv, self).__init__(**kwargs)

    def get_config(self):
        config = super().get_config().copy()
        config.update({"filt": self.filt})
        return config

    def build(self, shape):
        self.conv = tfmot.quantization.keras.quantize_annotate_layer(
            tf.keras.layers.Conv2D(self.filt, 1, padding="same"))

    def call(self, input):
        return self.conv(input)

def get_toy_model():
    input = tf.keras.Input((10, 10, 1), name='input')
    x = tf.keras.layers.Conv2D(1, 3, padding="same")(input)
    x = tf.keras.layers.ReLU()(x)
    x = MyConv()(x)
    for _ in range(2):
        y = tf.keras.layers.Conv2D(1, 3, padding="same")(x)
        y = tf.keras.layers.ReLU()(y)
    out = tf.keras.layers.Conv2D(1, 3, padding="same")(y)
    return tf.keras.Model(input, out, name='toy_Conv2D')

LastValueQuantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer
MovingAverageQuantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer

class DefaultCostumeQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
    # Configure how to quantize weights.
    def get_weights_and_quantizers(self, layer):
        return []

    # Configure how to quantize activations.
    def get_activations_and_quantizers(self, layer):
        return []

    def set_quantize_weights(self, layer, quantize_weights):
        pass

    def set_quantize_activations(self, layer, quantize_activations):
        pass

    # Configure how to quantize outputs (may be equivalent to activations).
    def get_output_quantizers(self, layer):
        return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
            num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]

    def get_config(self):
        return {}

def recursive_depth_layers(layer):
    for l in list(layer.__dict__.values()):
        if isinstance(l, tf.keras.layers.Layer):
            recursive_depth_layers(l)
        if isinstance(l, (tf.keras.layers.Dense, tf.keras.layers.Conv2D, tf.keras.layers.ReLU,
                          tf.keras.layers.LeakyReLU, tf.keras.layers.Activation)):
            ql = tfmot.quantization.keras.quantize_annotate_layer(l, DefaultCostumeQuantizeConfig())
            ql._name += "_" + l.name
            return ql

def apply_quantization(layer):
    # regular layer
    if isinstance(layer, (tf.keras.layers.Dense, tf.keras.layers.Conv2D, tf.keras.layers.ReLU,
                          tf.keras.layers.LeakyReLU, tf.keras.layers.Activation)):
        l = tfmot.quantization.keras.quantize_annotate_layer(layer, DefaultCostumeQuantizeConfig())
        l._name += '_' + layer.name
        return l
    if layer.__module__ == "__main__":
        # custom layer
        recursive_depth_layers(layer)
        l = tfmot.quantization.keras.quantize_annotate_layer(layer, DefaultCostumeQuantizeConfig())
        l._name += '_' + layer.name
        return l
    return layer

model = get_toy_model()
model.summary()

annotated_model = tf.keras.models.clone_model(model, clone_function=apply_quantization)
annotated_model.summary()

quantize_scope = tfmot.quantization.keras.quantize_scope
with quantize_scope({'DefaultCostumeQuantizeConfig': DefaultCostumeQuantizeConfig, 'MyConv': MyConv}):
    quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)

quant_aware_model._name += "_quant"
quant_aware_model.summary()
quant_aware_model.compile()
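One likely culprit (an assumption on my part, not something confirmed in the question): quantize_apply does not descend into sub-layers that a custom layer creates inside build(), and the empty DefaultCostumeQuantizeConfig tells it that MyConv has no weights to quantize. A sketch of a config that explicitly exposes the inner conv's kernel, following the custom-layer pattern from the comprehensive guide (it assumes self.conv is already built when quantize_apply runs):

class MyConvQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
    # Expose the inner Conv2D's kernel so quantize_apply wraps it.
    def get_weights_and_quantizers(self, layer):
        return [(layer.conv.kernel,
                 LastValueQuantizer(num_bits=8, symmetric=True,
                                    narrow_range=False, per_axis=False))]

    def set_quantize_weights(self, layer, quantize_weights):
        # receives the quantized variables in the same order as above
        layer.conv.kernel = quantize_weights[0]

    def get_activations_and_quantizers(self, layer):
        return []

    def set_quantize_activations(self, layer, quantize_activations):
        pass

    def get_output_quantizers(self, layer):
        return [MovingAverageQuantizer(num_bits=8, per_axis=False,
                                       symmetric=False, narrow_range=False)]

    def get_config(self):
        return {}

With this, MyConv would be annotated via quantize_annotate_layer(layer, MyConvQuantizeConfig()) in apply_quantization (dropping the quantize_annotate_layer call inside build()), and MyConvQuantizeConfig added to the quantize_scope dict.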

TF2.6: ValueError: Model cannot be saved because the input shapes have not been set

I want to create a custom model using transfer learning in Google Colab.
import tensorflow as tf
from tensorflow.keras.layers import Conv2D
from tensorflow.python.keras.applications.xception import Xception

class MyModel(tf.keras.Model):
    def __init__(self, input_shape, num_classes=5, dropout_rate=0.5):
        super(MyModel, self).__init__()
        self.weight_dict = {}
        self.weight_dict['backbone'] = Xception(input_shape=input_shape, weights='imagenet', include_top=False)
        self.weight_dict['outputs'] = Conv2D(num_classes, (1, 1), padding="same", activation="softmax")
        self.build((None,) + input_shape)

    def call(self, inputs, training=False):
        self.weight_dict['backbone'].trainable = False
        x = self.weight_dict['backbone'](inputs)
        x = self.weight_dict['outputs'](x)
        return x

model = MyModel(input_shape=(256, 256, 3))
model.save('./saved')
However, I encounter this error:
ValueError: Model `<__main__.MyModel object at 0x7fc66134bdd0>` cannot be saved because the input shapes have not been set. Usually, input shapes are automatically determined from calling `.fit()` or `.predict()`. To manually set the shapes, call `model.build(input_shape)`.
Yes, there is no call to .fit() or .predict(). But there is a call to .build in the __init__() method of the class. What am I to do?
If the layer has not been built, compute_output_shape will call build on the layer. This assumes that the layer will later be used with inputs that match the input shape provided.
Working code is shown below:
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.applications.xception import Xception

class MyModel(tf.keras.Model):
    def __init__(self, input_shape, num_classes=5, dropout_rate=0.5):
        super(MyModel, self).__init__()
        self.weight_dict = {}
        self.weight_dict['backbone'] = Xception(input_shape=input_shape, weights='imagenet', include_top=False)
        self.weight_dict['outputs'] = Conv2D(num_classes, (1, 1), padding="same", activation="softmax")
        self.build((None,) + input_shape)

    def call(self, inputs, training=False):
        self.weight_dict['backbone'].trainable = False
        x = self.weight_dict['backbone'](inputs)
        x = self.weight_dict['outputs'](x)
        return x

input_shape = (256, 256, 3)
model = MyModel(input_shape)
model.compute_output_shape(input_shape=(None, 256, 256, 3))
model.save('./saved')
Output:
2.6.0
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5
83689472/83683744 [==============================] - 1s 0us/step
INFO:tensorflow:Assets written to: ./saved/assets
For more information you can refer here.
If you build e.g. a GNN with multiple inputs of variable size, the proposal of TFer2 won't work. Specifying the TensorSpecs in the decorator AND using tf.saved_model.save instead works:
import keras.layers
import tensorflow as tf
import unittest

class TestModel(keras.Model):
    def __init__(self):
        super(TestModel, self).__init__()
        self.w = tf.Variable(initial_value=tf.initializers.he_normal()(shape=[10, 10]))

    # This decorator defines the inputs' sizes of the network call
    @tf.function(input_signature=[tf.TensorSpec([10, None], tf.float32), tf.TensorSpec([10, None], tf.float32)])
    def __call__(self, x, y):
        return tf.matmul(self.w, x) + tf.matmul(self.w, y)

class SaveAndLoadTest(unittest.TestCase):
    def __init__(self):
        super(SaveAndLoadTest, self).__init__()
        x = tf.ones([10, 5])
        y = tf.ones([10, 5])
        model = TestModel()
        z = model(x, y)
        tf.saved_model.save(model, "/tmp/test_model/")  # saving this way works
        with self.assertRaises(ValueError):
            model.save("/tmp/test_model/")  # saving a model this way fails, regardless of assigning TensorSpecs to tf.function
        model_loaded = tf.saved_model.load("/tmp/test_model/")
        z_loaded = model_loaded(x, y)
        self.assertTrue((z_loaded.numpy() == z.numpy()).all())  # making sure the outputs are the same

if __name__ == "__main__":
    SaveAndLoadTest()
    print("Success.")

2x nested Tensorflow custom layers results in zero trainable parameters

I am creating a series of custom Tensorflow (version 2.4.1) layers and am running into a problem where the model summary shows zero trainable parameters. Below is a series of examples showing how everything is fine until I add in the last custom layer.
Here are the imports and custom classes:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Input, ReLU,
                                     Layer)

class basic_conv_stack(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_conv_stack, self).__init__()
        self.conv1 = Conv2D(filters, kernel_size, strides, padding='same')
        self.bn1 = BatchNormalization()
        self.relu = ReLU()

    def call(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        return x

class basic_residual(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_residual, self).__init__()
        self.bcs1 = basic_conv_stack(filters, kernel_size, strides)
        self.bcs2 = basic_conv_stack(filters, kernel_size, strides)

    def call(self, x):
        x = self.bcs1(x)
        x = self.bcs2(x)
        return x

class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
Now, if I do the following, everything works out ok and I get 300 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_conv_stack(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print(model.summary())
Similarly, if I do the following, I get 1,230 trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_residual(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print(model.summary())
However, if I try the basic_module class, I get zero trainable parameters:
input_layer = Input((128, 128, 3))
conv = basic_module(10, 3, 1)(input_layer)
model = Model(input_layer, conv)
print(model.summary())
Does anyone know why this is happening?
Edit to add:
I discovered that the layers used in call must be initialized in the class's __init__ for things to work properly. So if I change basic_module to this:
class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.clayers = [basic_residual(filters, kernel_size, strides) for _ in range(4)]

    def call(self, x):
        for idx in range(4):
            x = self.clayers[idx](x)
        return x
Everything works fine. I don't know why this is the case, so I'll leave this question open in case someone can explain why.
You have to initialize the class instances with the required parameters, such as filters, kernel_size, and strides, in the predefined basic_module. Also note that these hyper-parameters determine the trainable weight properties.
# >>> a = basic_module
# >>> a
# __main__.basic_module
# >>> a = basic_module(10, 3, 1)
# >>> a
# <__main__.basic_module at 0x7f6123eed510>
class basic_module(Layer):
    def __init__(self, filters, kernel_size, strides):
        super(basic_module, self).__init__()
        self.res = basic_residual  # <--- the class itself, not an instance
        self.args = (filters, kernel_size, strides)

    def call(self, x):
        for _ in range(4):
            x = self.res(*self.args)(x)
        return x
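The underlying reason (my explanation, not part of the original answer): Keras only tracks the variables of sub-layers that are assigned as attributes, and self.res(*self.args)(x) builds a brand-new basic_residual on every call, so its weights never become part of the outer layer. A minimal demonstration of the tracking rule, independent of the classes above:

import tensorflow as tf
from tensorflow.keras.layers import Dense, Layer

class Untracked(Layer):
    def call(self, x):
        # a fresh Dense is created on every call; its weights are
        # never registered on this layer
        return Dense(4)(x)

class Tracked(Layer):
    def __init__(self):
        super().__init__()
        self.dense = Dense(4)  # assigned in __init__, so Keras tracks it

    def call(self, x):
        return self.dense(x)

x = tf.ones((1, 3))
u, t = Untracked(), Tracked()
u(x)
t(x)
print(len(u.trainable_weights))  # 0
print(len(t.trainable_weights))  # 2 (kernel + bias)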
