I have a question regarding Python classes and methods.
I have one class with three methods (including __init__), and the output of each method is fed into the next.
The code below works up to method a1, but when I execute a2, I get the error below, and I have no idea why.
AttributeError: 'numpy.ndarray' object has no attribute 'a2'
I am attaching the code below.
class Optimizer:
    def __init__(self, x_input, y_input, w1, w2, w3, b1, b2, b3, learn_rate):
        self.x = x_input
        self.y = y_input
        self.w1 = w1
        self.w2 = w2
        self.w3 = w3
        self.b1 = b1
        self.b2 = b2
        self.b3 = b3
        self.learn_rate = 2
        print("x input\n", self.x)
        print("y input", self.y)

    def a1(self):
        layer_1 = sigmoid(np.dot(self.w1, self.x) + self.b1)
        print("layer_1", layer_1)
        return layer_1

    def a2(self):
        layer_2 = sigmoid(np.dot(self.w2, self.layer_1) + self.b2)
        print("layer_2", layer_2)
        return layer_2
The line

trial = Optimizer(x,y,w1,w2,w3,b1,b2,b3,2).a1()

works fine with no error, but the line

trial = Optimizer(x,y,w1,w2,w3,b1,b2,b3,2).a1().a2()

gives the AttributeError. Could anyone give me some insight?
Thank you
⚠️ Chaining calls like this is generally considered bad practice in Python; consider separate calls like this instead (note that a2 would then need to accept layer_1 as a parameter):
optimizer = Optimizer(x,y,w1,w2,w3,b1,b2,b3,2)
layer_1 = optimizer.a1()
layer_2 = optimizer.a2(layer_1)
Return self from a1()
sigmoid returns a NumPy array, so if you want to chain the calls, you need to return self. To keep the result of sigmoid around, store it in an attribute:
def a1(self):
    self.layer_1 = sigmoid(np.dot(self.w1, self.x) + self.b1)
    print("layer_1", self.layer_1)
    return self
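For the chained call to work end to end, a2() needs the same treatment (a minimal sketch, assuming a1() has already run and stored self.layer_1):

def a2(self):
    # uses the layer_1 stored by a1(); assumes a1() was called first
    self.layer_2 = sigmoid(np.dot(self.w2, self.layer_1) + self.b2)
    print("layer_2", self.layer_2)
    return self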
I am trying to build a ReLU layer:
import numpy as np

class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)
        out = x.copy
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dout[self.mask] = 0
        dx = dout
        return dx

activation = Relu()
out = activation.forward(np.random.rand(3, 2))
print(out)
error message is :
5 self.mask = (x<=0)
6 out = x.copy
----> 7 out[self.mask] = 0
8 return out
9 def backward(self, dout):
TypeError: 'builtin_function_or_method' object does not support item assignment
I am using code from my textbook, but I cannot execute the ReLU code. Is this an environment setup problem?
You forgot to add parentheses to copy. Right now you have the copy method itself assigned to out; it was only referenced, not called.
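A quick way to see the difference (a tiny sketch, not from the original post):

import numpy as np

x = np.zeros(3)
print(x.copy)    # <built-in method copy of numpy.ndarray ...> -- the method object itself
print(x.copy())  # [0. 0. 0.] -- a new array you can index and assign into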
Here is the corrected code.
import numpy as np

class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dout[self.mask] = 0
        dx = dout
        return dx

activation = Relu()
out = activation.forward(np.random.rand(3, 2))
print(out)
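One thing to watch, beyond the question itself: backward() zeroes entries of dout in place, so pass a copy if you need the upstream gradient unchanged. A small illustrative sketch:

dout = np.ones((3, 2))
dx = activation.backward(dout.copy())  # the original dout stays intact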
I am not able to understand the line sample_losses = self.forward(output, y) defined under the class Loss.
Which "forward" function is it calling, given that a forward function is defined separately in all three classes, i.e. Layer_Dense, Activation_ReLU, and Activation_Softmax?
class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        print(self.weights)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

class Activation_Softmax:
    def forward(self, inputs):
        exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))
        probabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)
        self.output = probabilities

class Loss:
    def calculate(self, output, y):
        sample_losses = self.forward(output, y)
        data_loss = np.mean(sample_losses)
        return data_loss
In PyTorch, self.forward() is similar to the __call__ method but with registered hooks; it is what runs when you call an instance directly. These methods are inherited from nn.Module.
https://gist.github.com/nathanhubens/5a9fc090dcfbf03759068ae0fc3df1c9
Or refer to the source code:
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/module.py#L485
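Note that the snippet in the question is plain NumPy, not PyTorch: there, Loss itself defines no forward(), so self.forward resolves by ordinary Python attribute lookup to the forward() defined on a subclass of Loss. A hypothetical sketch of such a subclass, continuing from the question's Loss class (the name and body are illustrative, not from the question):

import numpy as np

class Loss_CategoricalCrossentropy(Loss):
    def forward(self, y_pred, y_true):
        # clip predictions so log(0) never occurs
        y_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
        # confidence assigned to the correct class of each sample
        correct_confidences = y_pred_clipped[range(len(y_pred_clipped)), y_true]
        return -np.log(correct_confidences)

loss_function = Loss_CategoricalCrossentropy()
# loss_function.calculate(output, y) now runs the forward() above via self.forward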
I'm trying to fade out a Keras layer stepwise over several batches. For that I wrote a custom layer, DecayingSkip, which I add residually to another layer; in other words, I'm trying to implement a fading-out skip connection.
However, the code does not seem to work correctly: the model compiles and trains, but the layer activation is not faded out as expected. What am I doing wrong?
class DecayingSkip(Layer):
    def __init__(self, fade_out_at_batch, **kwargs):
        self.fade_out_at_batch = K.variable(fade_out_at_batch)
        self.btch_cnt = K.variable(0)
        super(DecayingSkip, self).__init__(**kwargs)

    def call(self, x):
        self.btch_cnt = self.btch_cnt + 1.0
        return K.switch(
            self.btch_cnt >= self.fade_out_at_batch,
            x * 0,
            x * (1.0 - ((1.0 / self.fade_out_at_batch) * self.btch_cnt))
        )

def add_fade_out(fadeOutLayer, layer, fade_out_at_batch):
    cnn_match = Conv2D(filters=int(layer.shape[-1]), kernel_size=1, activation=bounded_relu)(fadeOutLayer)
    fadeOutLayer = DecayingSkip(fade_out_at_batch=fade_out_at_batch, name=name + '_fade_out')(cnn_match)
    return Add()([fadeOutLayer, layer])
In another attempt, I tried using a TensorFlow variable that I changed within the session, like:
def add_fade_out(fadeOutLayer, layer):
    fadeOutLayer = Conv2D(filters=int(layer.shape[-1]), kernel_size=1, activation='relu')(fadeOutLayer)
    alph = K.variable(1.0, name='alpha')
    fadeOutLayer = Lambda(lambda x: x * alph)(fadeOutLayer)
    return Add()([fadeOutLayer, layer])

sess = K.get_session()
lw = sess.graph.get_tensor_by_name("alpha:0")
sess.run(K.tf.assign(lw, new_value))
This did not work either. Why?
I think I found the solution. I changed the call function of the layer to:
def call(self, x):
    self.btch_cnt = K.tf.assign_add(self.btch_cnt, 1)
    K.get_session().run(self.btch_cnt)
    return K.switch(
        self.btch_cnt >= self.fade_out_at_batch,
        x * 0,
        x * (1.0 - ((1.0 / self.fade_out_at_batch) * self.btch_cnt))
    )
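An alternative that avoids running the session inside call() (a sketch under the same old-style Keras/TF1 assumptions, not from the original answer) is to keep the scale in a K.variable and decay it from a callback once per batch:

from keras.callbacks import Callback
import keras.backend as K

class FadeOutCallback(Callback):
    def __init__(self, alpha, fade_out_at_batch):
        super(FadeOutCallback, self).__init__()
        self.alpha = alpha  # the K.variable used by the Lambda layer
        self.fade_out_at_batch = float(fade_out_at_batch)
        self.batches_seen = 0

    def on_batch_end(self, batch, logs=None):
        self.batches_seen += 1
        new_value = max(0.0, 1.0 - self.batches_seen / self.fade_out_at_batch)
        K.set_value(self.alpha, new_value)  # update happens between batches, outside the graph

This pairs with the second attempt in the question: the Lambda layer keeps multiplying by alpha, and K.set_value moves alpha toward zero as training progresses.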
I am looking for a simple way to use an activation function that exists in the PyTorch library, but with some sort of parameter, for example:
Tanh(x/10)
The only solution I came up with was implementing the custom function completely from scratch. Is there a better or more elegant way to do this?
Edit:
I am looking for a way to append to my model the function Tanh(x/10) rather than plain Tanh(x). Here is the relevant code block:
self.model = nn.Sequential()
for i in range(len(self.layers) - 1):
    self.model.add_module("linear_layer_" + str(i), nn.Linear(self.layers[i], self.layers[i + 1]))
    if activations is None:
        self.model.add_module("activation_" + str(i), nn.Tanh())
    else:
        if activations[i] == "T":
            self.model.add_module("activation_" + str(i), nn.Tanh())
        elif activations[i] == "R":
            self.model.add_module("activation_" + str(i), nn.ReLU())
        else:
            # no activation
            pass
Instead of defining it as a separate function, you can inline it in a custom module.
For instance, your solution could look like:
import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 3)
        self.fc3 = nn.Softmax(dim=1)

    def forward(self, x):
        return self.fc3(self.fc2(torch.tanh(self.fc1(x) / 10)))
where torch.tanh(self.fc1(x) / 10) is inlined in the forward function of the module.
You can create a module that takes the multiplier as a parameter:
import torch
import torch.nn as nn

class CustomTanh(nn.Module):
    # the init method takes the parameter:
    def __init__(self, multiplier):
        super(CustomTanh, self).__init__()  # required for nn.Module subclasses
        self.multiplier = multiplier

    # the forward calls it:
    def forward(self, x):
        x = self.multiplier * x
        return torch.tanh(x)
Add it to your models with CustomTanh(1/10) instead of nn.Tanh().
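Hypothetical usage in the questioner's loop, replacing the nn.Tanh() branch:

if activations[i] == "T":
    self.model.add_module("activation_" + str(i), CustomTanh(1/10))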
class neuralNetwork:
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        self.inodes = input_nodes
        self.hnodes = hidden_nodes
        self.onodes = output_nodes
        self.lr = learning_rate
        pass

    def train():
        pass

    def query():
        pass

input_nodes = 3
hidden_nodes = 3
output_nodes = 3
learning_rate = 0.3

n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

import numpy
self.wih = (numpy.random.rand(self.hnodes, self.inodes) - 0.5)
self.who = (numpy.random.rand(self.onodes, self.hnodes) - 0.5)
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-20-8474973d8450> in <module>()
----> 1 self.wih = (numpy.random.rand(self.hnodes, self.inodes) - 0.5)
2 self.who = (numpy.random.rand(self.onodes, self.hnodes) - 0.5)
NameError: name 'self' is not defined
Why is self not defined?
How can I fix this error? I have looked it over several times but still can't find a solution, even though it is written this way in a tutorial. Help would be appreciated.
self is passed into the call to an instance method; outside the class, you address the instance itself:
nnet = neuralNetwork(input_nodes,hidden_nodes,output_nodes, learning_rate)
nnet.wih = (numpy.random.rand(nnet.hnodes, nnet.inodes) - 0.5)
nnet.who = (numpy.random.rand(nnet.onodes, nnet.hnodes) - 0.5)
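Alternatively, and closer to what the tutorial presumably intends, create the weight matrices inside __init__, where self is defined, so every instance gets them automatically (a sketch based on the question's class):

import numpy

class neuralNetwork:
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        self.inodes = input_nodes
        self.hnodes = hidden_nodes
        self.onodes = output_nodes
        self.lr = learning_rate
        # link weight matrices: wih (input -> hidden) and who (hidden -> output)
        self.wih = numpy.random.rand(self.hnodes, self.inodes) - 0.5
        self.who = numpy.random.rand(self.onodes, self.hnodes) - 0.5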
Briefly, on how self works: it's not magical. Take an example class:
class Foo(object):
    def __init__(self, name):
        self.name = name

    def speak(self):
        return "Hello {}".format(self.name)
self is passed into speak when it's invoked as a bound method. One could name it this instead, but self is the Python convention.
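For example:

f = Foo("World")
print(f.speak())     # Hello World -- self is bound to f automatically
print(Foo.speak(f))  # the equivalent explicit call, passing the instance as self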