How to define a PyTorch fully connected model more simply and conveniently? - python

I am a beginner with PyTorch, and I want to build a fully connected model. The model is very simple, like:
def forward(self, x):
    x = self.relu(self.fc1(x))
    x = self.relu(self.fc2(x))
    return self.fc3(x)
But when I want to add some layers or adjust the hidden layers, I find I have to write a lot of redundant code like:
def forward(self, x):
    x = self.relu(self.fc1(x))
    x = self.relu(self.fc2(x))
    x = self.relu(self.fc3(x))
    x = self.relu(self.fc4(x))
    x = self.relu(self.fc5(x))
    ...
    return self.fcn(x)
Besides, if I want to change one layer's number of features, I also have to change the adjacent layer.
So I want to know a more graceful way (maybe more Pythonic and easier for adjusting hyperparameters).
I tried to write code like:
def __init__(self):
    super().__init__()
    self.hidden_num = [2881, 5500, 2048, 20]  # i just want to change here! to try some new structure
    self.fc = [nn.Linear(self.hidden_num[i], self.hidden_num[i + 1]).to(DEVICE) for i in range(len(self.hidden_num) - 1)]
    self.relu = nn.ReLU()

def forward(self, x):
    for i in range(len(self.fc)):
        x = self.fc[i](x)
        if i != (len(self.fc) - 1):
            x = self.relu(x)
    return x
But this way doesn't work: the model can't be built.
So could anyone tell me how to define a fully connected model like the above?
(So I can adjust the model layers only by adjusting the list named hidden_num.)

If you want to keep the same approach, then you can use nn.ModuleList to properly register all linear layers inside the module's __init__:
class Model(nn.Module):
    def __init__(self, hidden_num=[2881, 5500, 2048, 20]):
        super().__init__()
        self.fc = nn.ModuleList([
            nn.Linear(hidden_num[i], hidden_num[i+1])
            for i in range(len(hidden_num) - 1)])

    def forward(self, x):
        for i, m in enumerate(self.fc):
            x = m(x)
            if i != len(self.fc) - 1:  # no activation after the last layer
                x = torch.relu(x)
        return x
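As a quick sanity check (a minimal sketch, assuming torch and the Model class above are in scope), the layers held by nn.ModuleList now show up among the module's parameters, which is exactly what a plain Python list fails to do:

import torch

model = Model()                        # default hidden_num=[2881, 5500, 2048, 20]
print(len(list(model.parameters())))   # 6: weight and bias for each of the 3 Linear layers
out = model(torch.randn(4, 2881))      # batch of 4 samples with 2881 features
print(out.shape)                       # torch.Size([4, 20])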
However, you may want to handle the logic inside the __init__ function once. One alternative is to use nn.Sequential.
class Model(nn.Module):
    def __init__(self, hidden_num=[2881, 5500, 2048, 20]):
        super().__init__()
        fc = []
        for i in range(len(hidden_num) - 1):
            fc.append(nn.Linear(hidden_num[i], hidden_num[i+1]))
            if i != len(hidden_num) - 2:  # no activation after the last layer
                fc.append(nn.ReLU())
        self.fc = nn.Sequential(*fc)

    def forward(self, x):
        x = self.fc(x)
        return x
Ideally, you would inherit from nn.Sequential directly to avoid rewriting the forward function, which is unnecessary in this case:
class Model(nn.Sequential):
    def __init__(self, hidden_num=[2881, 5500, 2048, 20]):
        fc = []
        for i in range(len(hidden_num) - 1):
            fc.append(nn.Linear(hidden_num[i], hidden_num[i+1]))
            if i != len(hidden_num) - 2:  # no activation after the last layer
                fc.append(nn.ReLU())
        super().__init__(*fc)
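With this version, trying a new structure only means passing a different hidden_num list, which is exactly what the question asked for. A minimal sketch (the sizes below are just an illustration):

model = Model(hidden_num=[2881, 4096, 1024, 256, 20])  # add or resize layers here only
x = torch.randn(8, 2881)
y = model(x)   # shape: (8, 20)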

Related

Using a list when creating a PyTorch NN module

This code runs fine and creates a simple feed-forward neural network. Each layer (torch.nn.Linear) is assigned to an instance attribute using self.
class MultipleRegression3L(torch.nn.Module):
    def __init__(self, num_features):
        super(MultipleRegression3L, self).__init__()
        self.layer_1 = torch.nn.Linear(num_features, 16)
        ## more layers
        self.relu = torch.nn.ReLU()

    def forward(self, inputs):
        x = self.relu(self.layer_1(inputs))
        x = self.relu(self.layer_2(x))
        x = self.relu(self.layer_3(x))
        x = self.layer_out(x)
        return (x)

    def predict(self, test_inputs):
        return self.forward(test_inputs)
However, when I tried to store the layers using a list:
class MultipleRegression(torch.nn.Module):
    def __init__(self, num_features, params):
        super(MultipleRegression, self).__init__()
        number_of_layers = 3 if not 'number_of_layers' in params else params['number_of_layers']
        number_of_neurons_in_each_layer = [16, 32, 16] if not 'number_of_neurons_in_each_layer' in params else params['number_of_neurons_in_each_layer']
        activation_function = "relu" if not 'activation_function' in params else params['activation_function']
        self.layers = []
        v1 = num_features
        for i in range(0, number_of_layers):
            v2 = number_of_neurons_in_each_layer[i]
            self.layers.append(torch.nn.Linear(v1, v2))
            v1 = v2
        self.layer_out = torch.nn.Linear(v2, 1)
        if activation_function == "relu":
            self.act_func = torch.nn.ReLU()
        else:
            raise Exception("Activation function %s is not supported" % (activation_function))

    def forward(self, inputs):
        x = self.act_func(self.layers[0](inputs))
        for i in range(1, len(self.layers)):
            x = self.act_func(self.layers[i](x))
        x = self.layer_out(x)
        return (x)
The two models do not behave the same way. What can be wrong here?
PyTorch needs to register the submodules that make up the model (so their parameters are tracked), and a plain Python list does not do that. Using self.layers = torch.nn.ModuleList() fixed the problem.
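A minimal sketch of the fix: only the relevant lines of the __init__ above change, everything else stays the same.

        self.layers = torch.nn.ModuleList()   # registers each appended layer with the module
        v1 = num_features
        for i in range(0, number_of_layers):
            v2 = number_of_neurons_in_each_layer[i]
            self.layers.append(torch.nn.Linear(v1, v2))
            v1 = v2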

set variable network layers based on parameters in pytorch

I want to make the following network definition parametric. The number of continuous and discrete columns varies for different data. I first pass the whole input data, which in this case is 110-dimensional, through a linear layer with a ReLU activation. The output size for each categorical field of my data varies based on a previous one-hot encoding data transformation, so I need to define an nn.Linear(110, number of encodings) for each of them.
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.lin1 = nn.Linear(110, 110)
        self.lin_numerical = nn.Linear(110, 6)
        self.lin_cat_job = nn.Linear(110, 9)
        self.lin_cat_sex = nn.Linear(110, 2)
        self.lin_cat_incomeclass = nn.Linear(110, 7)

    def forward(self, x):
        x = torch.relu(self.lin1(x))
        x_numerical = f.leaky_relu(self.lin_numerical(x))
        x_cat1 = f.gumbel_softmax(self.lin_cat_job(x), tau=0.2)
        x_cat2 = f.gumbel_softmax(self.lin_cat_sex(x), tau=0.2)
        x_cat3 = f.gumbel_softmax(self.lin_cat_incomeclass(x), tau=0.2)
        x_final = torch.cat((x_numerical, x_cat1, x_cat2, x_cat3), 1)
        return x_final
I have managed to change the __init__ part, using a discrete_columns input, which is an OrderedDict whose keys and values are the name and the number of one-hot encodings of each categorical field of my data, and continuous_columns, which is just a list with the names of the continuous columns. But I have no idea how to edit the forward part:
class Generator(nn.Module):
    def __init__(self, input_dim, continuous_columns, discrete_columns):
        super(Generator, self).__init__()
        self._input_dim = input_dim
        self._discrete_columns = discrete_columns
        self._num_continuous_columns = len(continuous_columns)
        self.lin1 = nn.Linear(self._input_dim, self._input_dim)
        self.lin_numerical = nn.Linear(self._input_dim, self._num_continuous_columns)
        for key, value in self._discrete_columns.items():
            setattr(self, "lin_cat_{}".format(key), nn.Linear(self._input_dim, value))

    def forward(self, x):
        x = torch.relu(self.lin1(x))
        x_numerical = f.leaky_relu(self.lin_numerical(x))
        ####
        # This is the problematic part
        #####
        return x
You don't need to use setattr, and honestly you should not, since you'd then also need getattr; it brings more trouble than it solves if there is any other way to do the job.
Here is what I'd do for this task instead:
        self.lin_cat = nn.ModuleDict()
        for key, value in self._discrete_columns.items():
            self.lin_cat[key] = nn.Linear(self._input_dim, value)
            # setattr(self, "lin_cat_{}".format(key), nn.Linear(self._input_dim, value))

    def forward(self, x):
        x = torch.relu(self.lin1(x))
        x_numerical = f.leaky_relu(self.lin_numerical(x))
        x_cat = []
        for key in self.lin_cat:
            x_cat.append(f.gumbel_softmax(self.lin_cat[key](x), tau=0.2))
        x_final = torch.cat((x_numerical, *x_cat), 1)
        return x_final
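For illustration, a hypothetical instantiation could look like this (the column names and the OrderedDict contents below are made up, chosen only to mirror the sizes of the original hard-coded model):

from collections import OrderedDict
import torch

discrete_columns = OrderedDict([("job", 9), ("sex", 2), ("incomeclass", 7)])
continuous_columns = ["age", "hours", "capital_gain", "capital_loss", "education_num", "fnlwgt"]

gen = Generator(input_dim=110, continuous_columns=continuous_columns, discrete_columns=discrete_columns)
out = gen(torch.randn(4, 110))   # shape: (4, 6 + 9 + 2 + 7) = (4, 24)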

Clarification for self.forward function in Python

I am not able to understand this sample_losses = self.forward(output, y) defined under the class Loss.
Which "forward" function is it calling, given that a forward function is previously defined for all three classes, i.e. Layer_Dense, Activation_ReLU and Activation_Softmax?
class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        print(self.weights)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

class Activation_Softmax:
    def forward(self, inputs):
        exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))
        probabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)
        self.output = probabilities

class Loss:
    def calculate(self, output, y):
        sample_losses = self.forward(output, y)
        data_loss = np.mean(sample_losses)
        return data_loss
self.forward() is similar to the __call__ method but with registered hooks. It is used so that a method of the class is run directly when the instance is called. These methods are inherited from nn.Module.
https://gist.github.com/nathanhubens/5a9fc090dcfbf03759068ae0fc3df1c9
Or refer to the source code:
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/module.py#L485
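In the snippet above, Loss.calculate relies on a subclass providing a forward method. A minimal sketch (the mean-squared-error subclass below is hypothetical, just to show where self.forward comes from):

import numpy as np

class Loss_MSE(Loss):
    def forward(self, output, y):
        # one loss value per sample; calculate() above then averages them with np.mean
        return np.mean((output - y) ** 2, axis=-1)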

How to define the loss function using the output of intermediate layers?

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.encoder = nn.Linear(300, 100)
        self.dense1 = nn.Sequential(nn.Linear(100, 10), nn.ReLU())
        self.dense2 = nn.Sequential(nn.Linear(10, 5), nn.ReLU())
        self.dense3 = nn.Sequential(nn.Linear(5, 1))

    def forward(self, x):
        x = self.encoder(x)
        x = self.dense1(x)
        x = self.dense2(x)
        x = self.dense3(x)
        return x
I am working on a regression problem, and I need to use the output of the dense2 layer to calculate the loss.
The output of the dense2 layer is 5-dimensional (5x1).
I am using PyTorch.
Dataset: suppose I am using 300 features and I need to predict some score (a floating value).
Input: 300 features
Output: some floating value
In general, your nn.Module's forward can return as many elements as you like. Moreover, you don't have to use them anywhere; there is no mechanism that checks that. PyTorch's philosophy is to build the computational graph on the fly.
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.encoder = nn.Linear(300, 100)
        self.dense1 = nn.Sequential(nn.Linear(100, 10), nn.ReLU())
        self.dense2 = nn.Sequential(nn.Linear(10, 5), nn.ReLU())
        self.dense3 = nn.Sequential(nn.Linear(5, 1))

    def forward(self, x):
        enc_output = self.encoder(x)
        dense1_output = self.dense1(enc_output)
        dense2_output = self.dense2(dense1_output)
        dense3_output = self.dense3(dense2_output)
        return dense3_output, dense2_output
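The extra return value can then be used in the training loop however your loss requires. A minimal sketch (the way the two terms are combined below is an assumption for illustration only):

import torch
import torch.nn as nn

model = Model()
criterion = nn.MSELoss()

x = torch.randn(32, 300)   # batch of 32 samples with 300 features
y = torch.randn(32, 1)     # target scores
pred, dense2_out = model(x)

# hypothetical combination: main regression loss plus an illustrative penalty on dense2's output
loss = criterion(pred, y) + 0.1 * dense2_out.pow(2).mean()
loss.backward()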

How to implement current pytorch activation functions with parameters?

I am looking for a simple way to use an activation function that exists in the PyTorch library, but with some sort of parameter, for example:
Tanh(x/10)
The only way I came up with while looking for a solution was implementing the custom function completely from scratch. Is there any better/more elegant way to do this?
Edit: I am looking for some way to append to my model the function Tanh(x/10) rather than plain Tanh(x). Here is the relevant code block:
self.model = nn.Sequential()
for i in range(len(self.layers) - 1):
    self.model.add_module("linear_layer_" + str(i), nn.Linear(self.layers[i], self.layers[i + 1]))
    if activations == None:
        self.model.add_module("activation_" + str(i), nn.Tanh())
    else:
        if activations[i] == "T":
            self.model.add_module("activation_" + str(i), nn.Tanh())
        elif activations[i] == "R":
            self.model.add_module("activation_" + str(i), nn.ReLU())
        else:
            # no activation
            pass
Instead of defining it as a specific function, you could inline it in a custom layer.
For instance your solution could look like:
import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 3)
        self.fc3 = nn.Softmax()

    def forward(self, x):
        return self.fc3(self.fc2(torch.tanh(self.fc1(x) / 10)))
where torch.tanh(self.fc1(x) / 10) is inlined in the forward function of your module.
You can create a layer with the multiplying parameter:
import torch
import torch.nn as nn

class CustomTanh(nn.Module):
    # the init method takes the parameter:
    def __init__(self, multiplier):
        super().__init__()  # required so the module is properly initialized
        self.multiplier = multiplier

    # the forward applies it:
    def forward(self, x):
        x = self.multiplier * x
        return torch.tanh(x)
Add it to your models with CustomTanh(1/10) instead of nn.Tanh().
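In the loop from the question, that would for example become:

if activations[i] == "T":
    self.model.add_module("activation_" + str(i), CustomTanh(1 / 10))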
