I have a simple convolution network:
import torch.nn as nn
import torch.nn.functional as F

class model(nn.Module):
    def __init__(self, ks=1):
        super(model, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=ks, stride=1)
        self.fc1 = nn.Linear(8*8*32*ks, 64)
        self.fc2 = nn.Linear(64, 64)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
cnn = model(1)
Since the kernel size is 1 and the number of output channels is 32, I assumed there would be 32*1*1 weights in this layer. But when I ask PyTorch for the shape of the weight matrix with cnn.conv1.weight.shape, it returns torch.Size([32, 4, 1, 1]). Why should the number of input channels matter for the weights of a Conv2d layer?
Am I missing something?
It matters because you are doing a 2D convolution over the images, which means the depth of each filter (kernel) must equal the number of in_channels (PyTorch sets this for you), so the true kernel size is [in_channels, 1, 1]. Put differently, out_channels is the number of kernels, so the number of weights = number of kernels * size of each kernel = out_channels * in_channels * kernel_height * kernel_width; here that is 32 * 4 * 1 * 1 = 128. That is how a 2D convolution operates on a 3D (multi-channel) input.
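A quick sanity check against the model above (a minimal sketch, assuming the definition from the question):

cnn = model(1)
print(cnn.conv1.weight.shape)    # torch.Size([32, 4, 1, 1])
print(cnn.conv1.weight.numel())  # 128 = 32 * 4 * 1 * 1 (the bias adds another 32 parameters)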
I have this class that inherits from tf.keras.Model:
import tensorflow as tf
from tensorflow.keras.layers import Dense

class Actor(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.linear1 = Dense(128, activation='relu')
        self.linear2 = Dense(256, activation='relu')
        self.linear3 = Dense(3, activation='softmax')

    # model override method
    def call(self, state):
        x = tf.convert_to_tensor(state)
        x = self.linear1(x)
        x = self.linear2(x)
        x = self.linear3(x)
        return x
it is used like this:
prob = self.actor(np.array([state]))
It works with a (5,) input and returns a (1, 3) tensor, which is what I expect:
state: (5,) data: [0.50267935 0.50267582 0.50267935 0.50268406 0.5026817 ]
prob: (1, 3) data: tf.Tensor([[0.29540768 0.3525798 0.35201252]], shape=(1, 3), dtype=float32)
However, if I pass a higher-dimensional input, it returns a higher-dimensional tensor:
state: (5, 3) data: [[0.50789109 0.49648439 0.49651666]
[0.5078905 0.49648391 0.49648928]
[0.50788815 0.49648356 0.49643452]
[0.50788677 0.4964834 0.49640713]
[0.50788716 0.49648329 0.49635237]]
prob: (1, 5, 3) data: tf.Tensor(
[[[0.34579638 0.342928 0.3112757 ]
[0.34579614 0.34292707 0.31127676]
[0.34579575 0.34292522 0.31127906]
[0.3457955 0.3429243 0.31128016]
[0.34579512 0.34292242 0.3112824 ]]], shape=(1, 5, 3), dtype=float32)
But I need it to still be (1, 3). I have never used raw Keras models implemented like this. What can I do to fix it?
TensorFlow 2.9.1 with Keras 2.9.0.
It looks like you are working on a reinforcement learning problem. Try adding a Flatten layer (or a Reshape layer) at the beginning of your model:
class Actor(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.linear1 = Dense(128, activation='relu')
        self.linear2 = Dense(256, activation='relu')
        self.linear3 = Dense(3, activation='softmax')

    # model override method
    def call(self, state):
        x = tf.convert_to_tensor(state)
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.linear2(x)
        x = self.linear3(x)
        return x
Also note how the Dense layer is designed; from the documentation:
Note: If the input to the layer has a rank greater than 2, then Dense computes the dot product between the inputs and the kernel along the last axis of the inputs and axis 0 of the kernel (using tf.tensordot). For example, if input has dimensions (batch_size, d0, d1), then we create a kernel with shape (d1, units), and the kernel operates along axis 2 of the input, on every sub-tensor of shape (1, 1, d1) (there are batch_size * d0 such sub-tensors). The output in this case will have shape (batch_size, d0, units).
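As a rough shape check (a sketch assuming the (5, 3) state from the question and the Actor class with the Flatten layer above):

import numpy as np

state = np.random.rand(5, 3).astype(np.float32)   # stand-in (5, 3) state
actor = Actor()                                    # Actor with the Flatten layer
prob = actor(np.array([state]))                    # input shape (1, 5, 3)
print(prob.shape)                                  # (1, 3): Flatten turns (1, 5, 3) into (1, 15) before the Dense layers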
I am trying to learn to build a U-Net architecture from scratch. I have written this code, but when I try to run it to check the output of the encoder part, I have issues. When you run the code below, you get:
import torch
import torch.nn as nn
batch = 1
channels = 3
width = 512 # same as height
image = torch.randn(batch, channels, width, width)
enc = Encoder(channels)
enc(image)
RuntimeError: Given groups=1, weight of size [128, 64, 3, 3], expected input[1, 3, 512, 512] to have 64 channels, but got 3 channels instead
Below is the code:
class ConvolutionBlock(nn.Module):
    '''
    The basic convolution block: Convolution -> ReLU -> Convolution -> ReLU
    '''
    def __init__(self, in_channels, out_channels, upsample: bool = False):
        '''
        args:
            upsample: If True, use ConvTranspose2d (i.e. the block is used in the decoder part) instead of MaxPooling.
        Note: batch norm was introduced after U-Net, so the authors could not have used it. It might still be useful.
        '''
        super().__init__()
        self.network = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),  # padding=1 keeps the output width/height equal to the input
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(),
            # The paper says the transposed convolution halves the number of feature channels
            nn.MaxPool2d(kernel_size=2, stride=2) if not upsample else nn.ConvTranspose2d(out_channels, out_channels // 2, kernel_size=2)
        )

    def forward(self, feature_map_x):
        '''
        feature_map_x can be the image itself or the output of a previous block
        '''
        return self.network(feature_map_x)
class Encoder(nn.Module):
    '''
    The contracting path of U-Net.
    '''
    def __init__(self, image_channels: int = 1, repeat: int = 4):
        '''
        In U-Net, the features start at 64 and keep doubling until the bottleneck is reached.
        '''
        super().__init__()
        in_channels = [image_channels, 64, 128, 256, 512]
        out_channels = [64, 128, 256, 512, 1024]
        self.layers = nn.ModuleList(
            [ConvolutionBlock(in_channels=in_channels[i], out_channels=out_channels[i]) for i in range(repeat + 1)]
        )

    def forward(self, feature_map_x):
        for layer in self.layers:
            out = layer(feature_map_x)
        return out
EDIT: Running the code below gives me the expected info too:
in_ = [3, 64, 128, 256, 512]
ou_ = [64, 128, 256, 512, 1024]
width = 512

from torchsummary import summary
for i in range(5):
    cb = ConvolutionBlock(in_[i], ou_[i])
    summary(cb, (in_[i], width, width))
    print('#' * 50)
There was a logic mistake in the forward method of Encoder.
I did:
for layer in self.layers:
    out = layer(feature_map_x)
return out
but each layer was supposed to receive the output of the previous layer; instead, the loop kept feeding the original feature_map_x to every layer. The fix is to reassign feature_map_x:
for layer in self.layers:
    feature_map_x = layer(feature_map_x)
return feature_map_x
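With that fix applied, a quick shape check (a sketch assuming the 512x512 RGB input from the question; each of the five blocks halves the spatial size):

image = torch.randn(1, 3, 512, 512)
enc = Encoder(image_channels=3)
out = enc(image)
print(out.shape)  # expected: torch.Size([1, 1024, 16, 16]), since 512 / 2**5 = 16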
I am very new to deep learning. I am working on the CIFAR10 dataset and have created the CNN model below.
class Net2(nn.Module):
    def __init__(self):
        super(Net2, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 5, 1)
        self.fc1 = nn.Linear(32 * 5 * 5, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

net2 = Net2().to(device)
My assignment requirements are to create a model with:
Convolutional layer with 32 filters, kernel size of 5x5 and stride of 1.
Max Pooling layer with kernel size of 2x2 and default stride.
ReLU Activation Layers.
Linear layer with output of 512.
ReLU Activation Layers.
A linear layer with output of 10.
Which I think I have written. But I suspect I am going down the wrong path. Please help me write the correct model, and explain the reasoning behind the arguments to the Conv2d and Linear layers.
The error which I am getting from my code is as below:
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [32, 3, 5, 5], but got 2-dimensional input of size [1024, 3072] instead
Please help me!
There are two problems with the code:
Flattening of input
x = x.view(x.size(0), -1)
The convolutional layer expects a four dimensional input of dimensions (N, C, H, W), where N is the batch size, C = 3 is the number of channels, and (H, W) is the dimension of the image. By using the above statement, you are flattening your (1024, 3, 32, 32) input to (1024, 3072).
Number of input features in the first linear layer
self.fc1 = nn.Linear(32 * 5 * 5, 512)
The output dimensions of the convolutional layer for a (1024, 3, 32, 32) input will be (1024, 32, 28, 28), and after applying the 2 x 2 maxpooling, it is (1024, 32, 14, 14). So the number of input features for the linear layer should be 32 x 14 x 14 = 6272.
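Putting both fixes together, here is a sketch of a corrected model (the class name Net2Fixed is just for illustration): the flattening happens after the convolution and pooling, and fc1 takes 6272 input features.

import torch.nn as nn
import torch.nn.functional as F

class Net2Fixed(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 5, 1)        # 32 filters, 5x5 kernel, stride 1
        self.fc1 = nn.Linear(32 * 14 * 14, 512)    # 6272 features after conv + 2x2 max pooling
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # (N, 3, 32, 32) -> (N, 32, 28, 28) -> (N, 32, 14, 14)
        x = x.view(x.size(0), -1)                        # flatten only after the convolutional part
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x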
I am working on sentiment analysis and want to classify the output into 4 classes. For the loss I am using cross-entropy.
The problem is that PyTorch's cross-entropy loss needs an input of shape (batch_size, output), which I am having trouble with.
I am using a batch size of 12 and a sequence length of 32.
import torch.nn as nn

class RNN(nn.Module):
    def __init__(self, hidden_dim=256, input_size=32, num_layers=1, num_classes=4, vocab_size=len(vocab_to_int)+1, embedding_dim=100):
        super().__init__()
        self.input_size = input_size
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.num_classes = num_classes
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers)
        self.fc1 = nn.Linear(hidden_dim, 50)
        self.fc2 = nn.Linear(50, 4)

    def forward(self, x, hidden):
        x = self.embedding(x)
        x = x.view(32, 12, 100)
        x, hidden = self.lstm(x, hidden)
        x = x.contiguous().view(-1, 256)
        x = self.fc1(x)  # output shape [384, 50]
        x = self.fc2(x)  # output shape [384, 4]
        return x, hidden

    def init_hidden(self, batch_size=12):
        weight = next(self.parameters()).data
        hidden = (weight.new(self.num_layers, 12, self.hidden_dim).zero_().cuda(),
                  weight.new(self.num_layers, 12, self.hidden_dim).zero_().cuda())
        return hidden
According to the CrossEntropyLoss docs:
input has to be a Tensor of size (C) for unbatched input, (minibatch,C) [for batched input] [...]
The code you provided is only the RNN class and not the data processing and the actual call to CrossEntropyLoss, but the error you stated in the comments makes me think that you didn't reshape the labels tensor to have the same size as the output from the neural network. Therefore, you'd be calculating the loss of a tensor with size (384, 4) against another tensor which I infer is of size (12, 32). Your labels tensor should be of size (384) to match the first dimension of the neural network output.
Also, you don't have to manually reshape your tensors, you can reshape them after the forward() call through the torch.nn.utils.rnn.pack_padded_sequence() function. If you do apply this function to both the output of the neural network and the labels, you will have a tensor of size (384, 4) that PyTorch can handle in the call to CrossEntropyLoss. See the note in the pack_padded_sequence() function docs for more details.
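As a minimal sketch of the shape fix, assuming random stand-in data with the batch size of 12, sequence length of 32, and 4 classes from the question:

import torch
import torch.nn as nn

output = torch.randn(384, 4)             # network output: 12 * 32 = 384 rows, one per token
labels = torch.randint(0, 4, (12, 32))   # stand-in labels, one class per token

criterion = nn.CrossEntropyLoss()
loss = criterion(output, labels.view(-1))  # labels flattened to (384,) to match the output's first dimension
print(loss.item())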
I am a newbie in neural networks. I have trained my model and now I want to test it. I wrote this code with Google's help, but it does not work.
The problem is that I do not understand where the 4th dimension comes from.
The code is the following:
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
transform = transforms.Compose([
    transforms.Resize(32),
    transforms.CenterCrop(32),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
main_path = 'D:/RTU/dataset/ready_dataset_2classes'
train_data_path = main_path + '/train'
#test_data_path = main_path + '/test'
weigths_path = 'D:/RTU/dataset/weights_done/weights_noise_original037-97%.pt'
train_data = torchvision.datasets.ImageFolder(root=train_data_path, transform=transform)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # convolutional layer (sees a 32x32x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees a 16x16x16 tensor)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        # convolutional layer (sees an 8x8x32 tensor)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (64 * 4 * 4 -> 500)
        self.fc1 = nn.Linear(64 * 4 * 4, 500)
        # linear layer (500 -> 250)
        self.fc2 = nn.Linear(500, 250)
        # linear layer (250 -> 2)
        self.fc3 = nn.Linear(250, 2)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten image input
        x = x.view(-1, 64 * 4 * 4)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add 2nd hidden layer, with relu activation function
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = self.fc3(x)
        return x
# Disable grad
with torch.no_grad():
    # Retrieve item
    index = 1
    item = train_data[index]
    image = item[0]
    true_target = item[1]

    # Loading the saved model
    mlp = Net()
    optimizer = optim.SGD(mlp.parameters(), lr=0.01)
    epoch = 5
    valid_loss_min = np.Inf
    checkpoint = torch.load(weigths_path, map_location=torch.device('cpu'))
    mlp.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    valid_loss_min = checkpoint['valid_loss_min']
    mlp.eval()

    # Generate prediction
    prediction = mlp(image)

    # Predicted class value using argmax
    predicted_class = np.argmax(prediction)

    # Reshape image
    #image = image.reshape(28, 28, 1)

    # Show result
    plt.imshow(image, cmap='gray')
    plt.title(f'Prediction: {predicted_class} - Actual target: {true_target}')
    plt.show()
The code seems to work up until "mlp.eval()", and then I get an error: Expected 4-dimensional input for 4-dimensional weight [16, 3, 3, 3], but got 3-dimensional input of size [3, 32, 32] instead.
What am I doing wrong?
When you train neural nets, you feed small batches of input data to your model. Even though it is not clearly spelled out when writing layers in PyTorch, if you look at the Conv2d documentation you can see that the layers expect 4D arrays of shape (N, C, H, W), with N corresponding to the batch size and C to the number of channels (here 3, because you are using RGB images).
So when testing your model once it is trained, the test data should be built the same way before being fed to the network.
Thus, if you want to feed one image to your network, you must reshape it properly:
myimage = myimage.reshape(-1, 3, 32, 32)
print(myimage.shape)
# (1, 3, 32, 32)
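Applied to the test script above, a minimal sketch of the fix (reusing the image, mlp, and true_target variables already defined there, and using unsqueeze as an alternative to reshape):

with torch.no_grad():
    input_batch = image.unsqueeze(0)              # (3, 32, 32) -> (1, 3, 32, 32)
    prediction = mlp(input_batch)                 # output shape (1, 2), one score per class
    predicted_class = prediction.argmax(dim=1).item()
print(f'Prediction: {predicted_class} - Actual target: {true_target}')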