Unknown behaviour of hooks in PyTorch

I have a straightforward and simple CNN below,
# create a dummy deep net
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(2, 3, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv3 = nn.Conv2d(3, 1, kernel_size=3, stride=1, padding=1, bias=True)
        self.seq = nn.Sequential(
            nn.Conv2d(1, 5, kernel_size=3, stride=1, padding=1, bias=True),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(5, 1, kernel_size=3, stride=1, padding=1, bias=True),
        )
        self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.conv3(self.conv2(out))
        out = out + x
        out = self.seq(x)
        return out
Five forward-pass hooks have been applied, one to each top-level layer:
Hooked 0 to Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
Hooked 1 to Conv2d(2, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
Hooked 2 to Conv2d(3, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
Hooked 3 to Sequential(
(0): Conv2d(1, 5, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.2, inplace=True)
(2): Conv2d(5, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
Hooked 4 to LeakyReLU(negative_slope=0.2, inplace=True)
These hooks have been created using the following class:
# ------------------ The Hook class begins to calculate each layer's stats
class Hook():
    def __init__(self, module, backward=False):
        if backward == False:
            self.hook = module.register_forward_hook(self.hook_fn)
        else:
            self.hook = module.register_backward_hook(self.hook_fn)
        self.inputMean = []
        self.outputMean = []

    def hook_fn(self, module, input, output):
        self.inputMean.append(input[0][0, ...].mean().item())  # calculate only for the 1st image in the batch
        print('\nIn hook class input {}'.format(input[0].size()))
        self.outputMean.append(output[0][0, ...].mean().item())
        print('In hook class output {}'.format(output[0].size()))

# create hooks on each layer
hookF = []
for i, layer in enumerate(list(net.children())):
    print('Hooked to {}'.format(layer))
    hookF.append(Hook(layer))
Please note that between Hook 1 and Hook 2 there is no ReLU (self.conv3(self.conv2(out))). Thus the OUTPUT of HOOK 1 is the INPUT to HOOK 2, and the two should be identical. BUT THIS DOES NOT TURN OUT TO BE THE CASE. WHY? Below is the output for HOOK 1 and HOOK 2:
Hook of layer 1 (HOOK on layer 1 which is self.conv2)
... OutputMean: [0.2381615787744522, 0.2710852324962616, 0.30706286430358887, 0.26064932346343994, 0.24395985901355743]
Hook of layer 2 (HOOK on layer 2 which is self.conv3)
InputMean: [0.13127394020557404, 0.1611362248659134, 0.1457807868719101, 0.17380955815315247, 0.1537724733352661], OutputMean: ...
These two values should have been the same but do not turn out to be.
------ The Full code is shown below -------
import torch
import torch.nn as nn

# create a dummy deep net
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(2, 3, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv3 = nn.Conv2d(3, 1, kernel_size=3, stride=1, padding=1, bias=True)
        self.seq = nn.Sequential(
            nn.Conv2d(1, 5, kernel_size=3, stride=1, padding=1, bias=True),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(5, 1, kernel_size=3, stride=1, padding=1, bias=True),
        )
        self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.conv3(self.conv2(out))
        out = out + x
        out = self.seq(x)
        return out

net = Net()
print(net)
criterion = nn.MSELoss()

# ------------------ The Hook class begins to calculate each layer's stats
class Hook():
    def __init__(self, module, backward=False):
        if backward == False:
            self.hook = module.register_forward_hook(self.hook_fn)
        else:
            self.hook = module.register_backward_hook(self.hook_fn)
        self.inputMean = []
        self.outputMean = []

    def hook_fn(self, module, input, output):
        self.inputMean.append(input[0][0, ...].mean().item())  # calculate only for the 1st image in the batch
        print('\nIn hook class input {}'.format(input[0].size()))
        self.outputMean.append(output[0][0, ...].mean().item())
        print('In hook class output {}'.format(output[0].size()))

# create hooks on each layer
hookF = []
for i, layer in enumerate(list(net.children())):
    print('Hooked to {}'.format(layer))
    hookF.append(Hook(layer))

optimizer = torch.optim.Adam(net.parameters())

# Do 5 forward passes
for _ in range(5):
    print('Iteration --------')
    data = torch.rand(2, 1, 10, 10) * 10
    print('Input mean is {}'.format(data[0, ...].mean()))
    target = data.clone()
    out = net(data)
    loss = criterion(out, target)
    print('backward')
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

for i, h in enumerate(hookF):
    print('\n Hook of layer {}'.format(i))
    print('InputMean: {}, OutputMean: {}'.format(h.inputMean, h.outputMean))
    h.hook.remove()

The problem is that in a forward hook on a Conv2d layer, input is a tuple (of length one here) while output is a plain torch.Tensor. Therefore input[0][0,...] first picks the tensor out of the tuple and then takes the first image in the batch, whereas output[0][0,...] takes the first image along dim 0 of the tensor and then indexes into that image again.
You just need to change output[0][0,...] to output[0,...].
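A minimal sketch of the corrected hook_fn, assuming the rest of the Hook class from the question stays unchanged:

def hook_fn(self, module, input, output):
    # input is a tuple of tensors: unpack it first, then take the 1st image in the batch
    self.inputMean.append(input[0][0, ...].mean().item())
    print('\nIn hook class input {}'.format(input[0].size()))
    # output is already a tensor: index the batch dimension directly
    self.outputMean.append(output[0, ...].mean().item())
    print('In hook class output {}'.format(output.size()))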

Related

Dimension error: encoder input is different from decoder output

I am new to autoencoders and I cannot understand why I am getting this error.
class Autoencoder(Model):
    def __init__(self, name="autoencoder"):
        super(Autoencoder, self).__init__()
        self.encoder_input = tf.keras.Input(shape=(128, 219, 1))
        self.encoder = layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2, kernel_initializer=HeNormal())(self.encoder_input)
        self.encoder_output = layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2, kernel_initializer=HeNormal())(self.encoder)
        self.encoder_model = tf.keras.Model(self.encoder_input, self.encoder_output)
        # ------------------------------------------
        # The Autoencoder has a decoder which we can make using normal layers
        # Here we use the Functional API which requires 3 things (Input, model and output)
        # ------------------------------------------
        self.decoder_input = tf.keras.Input(shape=(32, 55, 8))
        self.decoder = layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same', kernel_initializer=HeNormal())(self.decoder_input)
        self.decoder_second = layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same', kernel_initializer=HeNormal())(self.decoder)
        self.decoder_output = layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same', kernel_initializer=HeNormal())(self.decoder_second)
        self.decoder_model = tf.keras.Model(self.decoder_input, self.decoder_output)

    # ------------------------------------------
    # The forward pass
    def call(self, x):
        # Encode the inputs x using the property defined above
        # In the previous notebook we created the encoder and decoder as Sequential Models
        # but now since we used the Functional API we need to call the Model objects we created above
        encoded = self.encoder_model(x)
        # Decode the encoded vector using the property defined above
        decoded = self.decoder_model(encoded)
        # Return the decoded (28,28) data
        return decoded

autoencoder = Autoencoder()
I defined the encoder part and then the decoder part, but after calling fit on the model I am getting
ValueError: Dimensions must be equal, but are 220 and 219 for '{{node mean_squared_error/SquaredDifference}} = SquaredDifference[T=DT_FLOAT](autoencoder_3/model_7/conv2d_15/Sigmoid, IteratorGetNext:1)' with input shapes: [?,128,220,1], [?,128,219,1].
Am I doing something wrong?
The input image is of shape (128, 219, 1).
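The mismatch comes from the spatial arithmetic of the strided layers rather than from the Model subclassing itself. A small sketch of that arithmetic (width axis only, assuming the padding='same', strides=2 layers from the code above):

import math

w = 219
# encoder: two Conv2D layers with padding='same', strides=2
w = math.ceil(w / 2)   # 110
w = math.ceil(w / 2)   # 55
# decoder: two Conv2DTranspose layers with padding='same', strides=2
w = w * 2              # 110
w = w * 2              # 220  -> no longer matches the original width of 219

Because 219 is not divisible by 4, the decoder ends up one column wider than the input; padding the input to a multiple of 4 or cropping the decoder output are the usual ways to reconcile the two shapes.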

Pass an arbitrary image size to a CNN in PyTorch

I'm trying to train a LeNet model in PyTorch. The idea is to feed images of any size into it, so I started with nn.AdaptiveAvgPool2d, but the error comes as
mat1 dim 1 must match mat2 dim 0
Here is my code
class LeNet5(nn.Module):
    def __init__(self, num_classes=10):
        super(LeNet5, self).__init__()
        self.conv_1 = nn.Conv2d(
            in_channels=1, out_channels=32, kernel_size=5, bias=False
        )
        self.relu_1 = nn.ReLU(inplace=True)
        self.maxpool_1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv_2 = nn.Conv2d(
            in_channels=32, out_channels=256, kernel_size=5, bias=False
        )
        self.relu_2 = nn.ReLU(inplace=True)
        self.maxpool_2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        self.flatten = nn.Flatten()
        self.fc_1 = nn.Linear(in_features=4096, out_features=120, bias=False)
        self.fc_2 = nn.Linear(in_features=120, out_features=84)
        self.fc_3 = nn.Linear(in_features=84, out_features=num_classes)

    def forward(self, input):
        conv_1_output = self.conv_1(input)
        relu_1_output = self.relu_1(conv_1_output)
        maxpool_1_output = self.maxpool_1(relu_1_output)
        conv_2_output = self.conv_2(maxpool_1_output)
        relu_2_output = self.relu_2(conv_2_output)
        maxpool_2_output = self.maxpool_2(relu_2_output)
        flatten_output = self.flatten((self.avgpool(maxpool_2_output).view(maxpool_2_output.shape[0], -1)))
        fc_1_output = self.fc_1(flatten_output)
        fc_2_output = self.fc_2(fc_1_output)
        fc_3_output = self.fc_3(fc_2_output)
        return fc_3_output
If you read the documentation on AdaptiveAvgPool2d, this is what it says: "we specify the output size, and the stride and kernel size are automatically selected to adapt to the needs". More info is available in the PyTorch documentation.
Hence your spatial dimensions are reduced by AdaptiveAvgPool2d, not the depth of the feature maps. With output_size=1 the spatial dimensions become 1x1 while the depth is still 256, so the first fully connected layer should be
self.fc_1 = nn.Linear(in_features=256, out_features=120, bias=False) and not self.fc_1 = nn.Linear(in_features=4096, out_features=120, bias=False)
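A quick sketch to confirm the behaviour, assuming the LeNet5 class from the question with fc_1 changed to in_features=256 (the input sizes below are arbitrary examples):

import torch

model = LeNet5(num_classes=10)
# After AdaptiveAvgPool2d(output_size=1) the flattened feature size is always 256,
# so inputs of different spatial sizes all pass through the same fully connected layers.
for size in [(32, 32), (64, 48), (100, 100)]:
    x = torch.randn(2, 1, *size)
    print(size, model(x).shape)  # expected: torch.Size([2, 10]) for each size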

Pruning using PyTorch on a complicated model

So I am trying to use torch.nn.utils.prune.global_unstructured.
I did it on a simple model and that worked: passing model.conv2 or other layers works there. Now I am trying to do it on a model whose layers are nested, and I get errors such as:
AttributeError: 'CNN' object has no attribute 'conv1'
and other errors. I tried everything to access this nested conv1, but I couldn't.
You can find the model code below:
class CNN(nn.Module):
    def __init__(self):
        """CNN Builder."""
        super(CNN, self).__init__()
        self.conv_layer = nn.Sequential(
            # Conv Layer block 1
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Conv Layer block 2
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            # Conv Layer block 3
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.fc_layer = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Linear(4096, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(512, 100)
        )

    def forward(self, x):
        """Perform forward."""
        # conv layers
        x = self.conv_layer(x)
        # flatten
        x = x.view(x.size(0), -1)
        # fc layer
        x = self.fc_layer(x)
        return x
How can I apply pruning on this model?
Your modules are not named 'conv1' or 'conv2'; you can see the actual names using the named_modules generator. From the code above, you have a 'conv_layer' Sequential, which can be indexed as model.conv_layer[0] to access its first convolution. You can iterate over the modules to build a tuple of (module, parameter name) pairs like:
parameters_to_prune = (
    (model.conv1, 'weight'),
    (model.conv2, 'weight'),
    (model.fc1, 'weight'),
    (model.fc2, 'weight'),
    (model.fc3, 'weight'),
)
and pass this in. See for more: https://colab.research.google.com/github/pytorch/tutorials/blob/gh-pages/_downloads/f40ae04715cdb214ecba048c12f8dddf/pruning_tutorial.ipynb#scrollTo=UVFjM079F0Oi
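For the nested CNN above, a minimal sketch of building parameters_to_prune automatically (assuming model = CNN() from the question; the 20% amount is just an example value):

import torch.nn as nn
import torch.nn.utils.prune as prune

model = CNN()

# Collect every Conv2d and Linear module, however deeply nested, together with
# the name of the parameter to prune.
parameters_to_prune = [
    (module, 'weight')
    for module in model.modules()
    if isinstance(module, (nn.Conv2d, nn.Linear))
]

prune.global_unstructured(
    parameters_to_prune,
    pruning_method=prune.L1Unstructured,
    amount=0.2,  # prune 20% of all collected weights globally
)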
Use this method to see the names of the layers:
for layer_name, param in model.named_parameters():
    print(f"layer name: {layer_name} has {param.shape}")
and then locate the corresponding modules and pass them to the prune method, e.g. prune.random_unstructured(module, name="weight", amount=0.3)
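For this particular model, a minimal sketch of that call (indexing into the Sequential container, and assuming model = CNN() as defined in the question):

import torch.nn.utils.prune as prune

# conv_layer[0] is the first Conv2d inside the nn.Sequential container
prune.random_unstructured(model.conv_layer[0], name="weight", amount=0.3)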
When printing the inner layers' names like this, you can see that when nn.Sequential is used, the inner layers cannot be accessed directly as attributes, since their names are like xxx.0, xxx.1, etc. (and xxx.0.weight, xxx.0.bias for the parameters), which is not valid attribute syntax in Python. So rewriting the code to separate the layers out of nn.Sequential may be a reasonable choice, although it is more complex.

Converting model declaration in Keras (removing Sequential) into a new one without Sequential returns different shape

I'm a newbie with Python, TensorFlow and Keras.
I have modified this code:
def build_base_network(input_shape):
    seq = Sequential()
    nb_filter = [6, 12]
    kernel_size = 3
    # convolutional layer 1
    seq.add(Convolution2D(nb_filter[0], kernel_size, kernel_size, input_shape=input_shape,
                          border_mode='valid', dim_ordering='th'))
    seq.add(Activation('relu'))
    seq.add(MaxPooling2D(pool_size=(2, 2)))
    seq.add(Dropout(.25))
    # convolutional layer 2
    seq.add(Convolution2D(nb_filter[1], kernel_size, kernel_size, border_mode='valid', dim_ordering='th'))
    seq.add(Activation('relu'))
    seq.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    seq.add(Dropout(.25))
    # flatten
    seq.add(Flatten())
    seq.add(Dense(128, activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(50, activation='relu'))
    return seq
Writing my own version:
def build_base_network(input_shape):
    inputs = Input(shape=input_shape)
    nb_filter = [6, 12]
    kernel_size = 3
    conv1 = Conv2D(nb_filter[0], (kernel_size, kernel_size), activation='relu', padding="valid", data_format='channels_first')(inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    drop1 = Dropout(.25)(pool1)
    # convolutional layer 2
    conv2 = Conv2D(nb_filter[1], (kernel_size, kernel_size), activation='relu', padding="valid", data_format="channels_first")(drop1)
    pool2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(conv2)
    drop2 = Dropout(.25)(pool2)
    # flatten
    dense1 = Dense(128, activation='relu')(drop2)
    drop3 = Dropout(0.1)(dense1)
    dense2 = Dense(50, activation='relu')(drop3)
    model = Model(inputs=inputs, outputs=dense2)
    return model
I call it with this code:
input_dim = x_train.shape[2:]
img_a = Input(shape=input_dim)
img_b = Input(shape=input_dim)
base_network = build_base_network(input_dim)
feat_vecs_a = base_network(img_a)
feat_vecs_b = base_network(img_b)
The unmodified code (the first one) returns this shape: (None, 50)
The modified code (my own version) returns this shape: (None, 12, 12, 50)
I haven't modified any other piece of code; the function build_base_network is the only one I have changed.
By the way, input_dim is (1, 56, 46).
What am I doing wrong?
You forgot a Flatten operation. Without it, the Dense layers are applied to a 4-D tensor and act only on the last axis, which is why the output shape is (None, 12, 12, 50) instead of (None, 50):
pool2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(conv2)
drop2 = Dropout(.25)(pool2)
#flatten
dense1 = Dense(128, activation='relu')(drop2)
should then be
pool2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(conv2)
drop2 = Dropout(.25)(pool2)
#flatten
flatten1 = Flatten()(drop2)
dense1 = Dense(128, activation='relu')(flatten1)
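As a quick sanity check (a sketch assuming the corrected build_base_network above and the (1, 56, 46) input shape from the question):

model = build_base_network((1, 56, 46))
print(model.output_shape)  # expected: (None, 50) once the Flatten layer is in place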

How do I optimize a TensorFlow CNN?

I'm very new to TensorFlow, so I apologize if my question comes off as ignorant.
I have a very simple CNN in TensorFlow that takes images and outputs another image. With just a batch size of 5, it takes minutes to run between epochs and it often crashes after 5 epochs. (I'm using Python 3.6.5 on my Mac with 16 GB of RAM.)
This is a snippet of my program
learning_rate = 0.01
inputs_ = tf.placeholder(tf.float32, (None, 224, 224, 3), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 224, 224, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs=inputs_, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 224x224x32
maxpool1 = tf.layers.max_pooling2d(conv1, pool_size=(2,2), strides=(2,2), padding='same')
# Now 112x112x32
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 112x112x32
maxpool2 = tf.layers.max_pooling2d(conv2, pool_size=(2,2), strides=(2,2), padding='same')
# Now 56x56x32
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 56x56x32
maxpool3 = tf.layers.max_pooling2d(conv3, pool_size=(2,2), strides=(2,2), padding='same')
# Now 28x28x32
conv4 = tf.layers.conv2d(inputs=maxpool3, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool4 = tf.layers.max_pooling2d(conv4, pool_size=(2,2), strides=(2,2), padding='same')
# Now 14x14x32
conv5 = tf.layers.conv2d(inputs=maxpool4, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool5 = tf.layers.max_pooling2d(conv5, pool_size=(2,2), strides=(2,2), padding='same')
# Now 7x7x32
conv6 = tf.layers.conv2d(inputs=maxpool5, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv6, pool_size=(2,2), strides=(2,2), padding='same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_images(encoded, size=(7,7), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 7x7x16
conv7 = tf.layers.conv2d(inputs=upsample1, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_images(conv7, size=(14,14), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 14x14x16
conv8 = tf.layers.conv2d(inputs=upsample2, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_images(conv8, size=(28,28), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 28x28x32
conv9 = tf.layers.conv2d(inputs=upsample3, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
upsample4 = tf.image.resize_images(conv9, size=(56,56), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 56x56x32
conv10 = tf.layers.conv2d(inputs=upsample4, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 56x56x32
upsample5 = tf.image.resize_images(conv10, size=(112,112), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 112x112x32
conv11 = tf.layers.conv2d(inputs=upsample5, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 112x112x32
upsample6 = tf.image.resize_images(conv11, size=(224,224), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 224x224x32
conv12 = tf.layers.conv2d(inputs=upsample6, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 224x224x32
logits = tf.layers.conv2d(inputs=conv12, filters=1, kernel_size=(3,3), padding='same', activation=None)
#Now 224x224x1
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
imagelist = ... #array of all images with 3 channels
imagelabellist = ... #array of all images with 1 channel
epochs = 15
for e in range(epochs):
    imgs_large = imagelist
    imgs_target_large = imagelabellist
    shaped_imgs = tf.image.resize_images(imgs_large, [224, 224])
    shaped_imgs_target = tf.image.resize_images(imgs_target_large, [224, 224])
    # Get images from the batch
    imgs = sess.run(shaped_imgs)
    imgs_target = sess.run(shaped_imgs_target)
    batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs, targets_: imgs_target})
This is the output of the training run:
epoch: #1
0 minutes between epoch
epoch: #2
3 minutes between epoch
epoch: #3
3 minutes between epoch
epoch: #4
12 minutes between epoch
epoch: #5
...
I'm open to any suggestions on how to fix this issue. Thank you.
tf.image.resize_images is a graph op, so every call inside your loop appends more nodes to the graph (that explains the increasing run time). To check this, add sess.graph.finalize() before your training loop; if nodes are still being added, it will throw an error.
If you move resize_images outside of the loop, that should fix the issue.
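A minimal sketch of that restructuring (assuming a tf.Session named sess with variables already initialized, and imagelist / imagelabellist as in the question):

# Build and run the resize ops once, outside the training loop,
# so no new graph nodes are created per epoch.
shaped_imgs = tf.image.resize_images(imagelist, [224, 224])
shaped_imgs_target = tf.image.resize_images(imagelabellist, [224, 224])
sess.graph.finalize()  # any later graph modification will now raise an error

imgs = sess.run(shaped_imgs)
imgs_target = sess.run(shaped_imgs_target)

epochs = 15
for e in range(epochs):
    batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs, targets_: imgs_target})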
