I am trying to write a function that performs gradient descent in Python. I know how to compute it without vectors, for example:
def gradient_descent(x, y):
    m_curr = b_curr = 0
    iterations = 10000
    n = len(x)
    learning_rate = 0.08
    for i in range(iterations):
        y_predicted = m_curr * x + b_curr
        cost = (1/n) * sum([val**2 for val in (y - y_predicted)])
        md = -(2/n) * sum(x * (y - y_predicted))
        bd = -(2/n) * sum(y - y_predicted)
        m_curr = m_curr - learning_rate * md
        b_curr = b_curr - learning_rate * bd
However, I'm having trouble when the parameters are vectors. Any help would be appreciated; I'm new to Python.
# computeMSEBatchGradient:
# weights - vector of weights (univariate linear = 2 weights)
# features - vector (or matrix) of feature values
# targets - vector of target values, same length as features
#
# returns average gradient over the batch of features
def computeMSEBatchGradient(weights,features,targets):
    # insert calculation of gradient here
    # return the gradient as a vector
    return gradient
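One possible vectorized version, as a sketch (my assumptions: weights holds the bias as its first entry, and features is either a 1-D vector or an (n, d) matrix): stack a column of ones onto the features so the bias is just another weight, and the whole gradient collapses to -(2/n) * X^T (y - Xw), the vector analogue of the md/bd lines above.

import numpy as np

def computeMSEBatchGradient(weights, features, targets):
    X = np.asarray(features, dtype=float)
    if X.ndim == 1:
        X = X[:, np.newaxis]                          # a plain vector becomes one feature column
    X = np.hstack([np.ones((X.shape[0], 1)), X])      # bias column of ones
    n = X.shape[0]
    residuals = np.asarray(targets, dtype=float) - X.dot(weights)  # y - y_predicted
    return -(2.0 / n) * X.T.dot(residuals)            # one partial derivative per weight

The update step then becomes a single vector operation: weights = weights - learning_rate * computeMSEBatchGradient(weights, features, targets).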
I am new to PyTorch and I would like to implement linear regression partly with PyTorch and partly on my own. I want to use squared features for my regression:
import torch

# init
x = torch.tensor([1, 2, 3, 4, 5])
y = torch.tensor([[1], [4], [9], [16], [25]])
w = torch.tensor([[0.5], [0.5], [0.5]], requires_grad=True)
iterations = 30
alpha = 0.01

def forward(X):
    # feature transformation [1, x, x^2]
    psi = torch.tensor([[1.0, X[0], X[0]**2]])
    for i in range(1, len(X)):
        psi = torch.cat((psi, torch.tensor([[1.0, X[i], X[i]**2]])), 0)
    return torch.matmul(psi, w)

def loss(y, y_hat):
    return ((y - y_hat)**2).mean()

for i in range(iterations):
    y_hat = forward(x)
    l = loss(y, y_hat)
    l.backward()
    with torch.no_grad():
        w -= alpha * w.grad
        w.grad.zero_()
    if i % 10 == 0:
        print(f'Iteration {i}: The weight is:\n{w.detach().numpy()}\nThe loss is:{l}\n')
When I execute my code, the regression doesn't learn the correct features and the loss keeps increasing. The output is the following:
Iteration 0: The weight is:
[[0.57 ]
[0.81 ]
[1.898]]
The loss is:25.450000762939453
Iteration 10: The weight is:
[[ 5529.5835]
[22452.398 ]
[97326.12 ]]
The loss is:210414632960.0
Iteration 20: The weight is:
[[5.0884394e+08]
[2.0662339e+09]
[8.9567642e+09]]
The loss is:1.7820802835250162e+21
Does somebody know why my model is not learning?
UPDATE
Is there a reason why it performs so poorly? I thought it was because of the small number of training examples, but even with 10 data points it does not perform well.
You should normalize your data. Also, since you're trying to fit x -> ax² + bx + c, c is essentially the bias. It would be wiser to remove it from the training data (I'm referring to psi here) and use a separate parameter for the bias.
What could be done:
- normalize your input data and targets with the mean and standard deviation;
- separate the parameters into w (a two-component weight tensor) and b (the bias);
- don't reconstruct psi on every forward pass, since x never changes;
- build psi with torch.stack([torch.ones_like(x), x, x**2], 1), though here we won't need the ones, as we've detached the bias from the weight tensor.
Here's how it would look:
x = torch.tensor([1, 2, 3, 4, 5]).float()
psi = torch.stack([x, x**2], 1).float()
psi = (psi - psi.mean(0)) / psi.std(0)

y = torch.tensor([[1], [4], [9], [16], [25]]).float()
y = (y - y.mean(0)) / y.std(0)

w = torch.tensor([[0.5], [0.5]], requires_grad=True)
b = torch.tensor([0.5], requires_grad=True)
iterations = 30
alpha = 0.02

def loss(y, y_hat):
    return ((y - y_hat)**2).mean()

for i in range(iterations):
    y_hat = torch.matmul(psi, w) + b
    l = loss(y, y_hat)
    l.backward()
    with torch.no_grad():
        w -= alpha * w.grad
        b -= alpha * b.grad
        w.grad.zero_()
        b.grad.zero_()
    if i % 10 == 0:
        print(f'Iteration {i}: The weight is:\n{w.detach().numpy()}\nThe loss is:{l}\n')
And the results:
Iteration 0: The weight is:
[[0.49954653]
[0.5004535 ]]
The loss is:0.25755801796913147
Iteration 10: The weight is:
[[0.49503425]
[0.5049657 ]]
The loss is:0.07994867861270905
Iteration 20: The weight is:
[[0.49056274]
[0.50943726]]
The loss is:0.028329044580459595
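A side note (my addition, not part of the answer above): the targets here were normalized in place, so if you need predictions back on the original scale, keep the normalization statistics around and invert the transform afterwards. A small sketch:

y_raw = torch.tensor([[1], [4], [9], [16], [25]]).float()
y_mean, y_std = y_raw.mean(0), y_raw.std(0)
y = (y_raw - y_mean) / y_std                      # train on normalized targets as above
# ... after the training loop ...
y_hat_original = (torch.matmul(psi, w) + b) * y_std + y_mean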
Currently I'm learning from Andrew Ng's course on Coursera called "Machine Learning". In exercise 5, we built a model that can predict digits, trained on the MNIST dataset. I completed this task successfully in Matlab, but I wanted to migrate the code to Python, just to see how different things are and maybe continue to play around with the model.
I managed to implement the cost function and the back propagation algorithm correctly. I know that because I compared the metrics with my working Matlab model, and it produces the same numbers.
Now, because in the course we train the model using fmincg, I tried to do the same using SciPy's fmin_cg function.
My problem is that the cost function takes extremely small steps and fails to converge.
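For reference, fmin_cg itself converges quickly when the cost and its analytic gradient are consistent, so a toy run like this (illustrative only, unrelated to the network code below) can help rule out the optimizer:

import numpy as np
import scipy.optimize as op

def f(v):
    return 0.5 * np.sum(v ** 2)   # a simple convex bowl

def fprime(v):
    return v                      # its exact gradient

x_opt = op.fmin_cg(f=f, x0=np.array([3.0, -4.0]), fprime=fprime, maxiter=50)
print(x_opt)                      # lands very close to [0, 0]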
Here is my code for the network:
import numpy as np
import utils
import scipy.optimize as op

class Network:
    def __init__(self, layers):
        self.layers = layers
        self.weights = self.generate_params()

    # Function for generating the theta multidimensional matrix
    def generate_params(self):
        theta = []
        epsilon = 0.12
        for i in range(len(self.layers) - 1):
            current_layer_units = self.layers[i]
            next_layer_units = self.layers[i + 1]
            theta_i = np.multiply(
                np.random.rand(next_layer_units, current_layer_units + 1),
                2 * epsilon - epsilon
            )
            # Appending the params to the theta matrix
            theta.append(theta_i)
        return theta

    # Function to append a bias row/column to matrix X
    def append_bias(self, X, d):
        m = X.shape[0]
        n = 1 if len(X.shape) == 1 else X.shape[1]
        if d == 'column':
            ones = np.ones((m, n + 1))
            ones[:, 1:] = X.reshape((m, n))
        elif d == 'row':
            ones = np.ones((m + 1, n))
            ones[1:, :] = X.reshape((m, n))
        return ones

    # Function for computing the gradient for 1 training example
    def back_prop(self, y, feed, theta):
        activations = feed["activations"]
        weighted_layers = feed["weighted_layers"]
        delta_output = activations[-1] - y.reshape(len(y), 1)
        current_delta = delta_output
        # Initializing gradients
        gradients = []
        for i, theta_i in enumerate(theta):
            gradients.append(np.zeros(theta_i.shape))
        # Performing delta calculations.
        # Here, we continue to propagate the delta values backwards
        # until we arrive at the second layer.
        for i in reversed(range(len(theta))):
            theta_i = theta[i]
            if i > 0:
                i_weighted_inputs = self.append_bias(weighted_layers[i - 1], 'row')
                t_theta_i = np.transpose(theta_i)
                delta_i = np.multiply(np.dot(t_theta_i, current_delta),
                                      utils.sigmoidGradient(i_weighted_inputs))
                delta_i = delta_i[1:]
                gradients[i] = current_delta * np.transpose(activations[i])
                # Setting current delta for the next layer
                current_delta = delta_i
            else:
                gradients[i] = current_delta * np.transpose(activations[i])
        return gradients

    # Function for computing the cost and the derivatives
    def compute_cost(self, theta, X, y, r12n=0):
        m = len(X)
        num_labels = self.layers[-1]
        costs = np.zeros(m)
        # Initializing gradients
        gradients = []
        for i, theta_i in enumerate(theta):
            gradients.append(np.zeros(theta_i.shape))
        # Iterating over the training set
        for i in range(m):
            inputs = X[i]
            observed = utils.create_output_vector(y[i], num_labels)
            feed = self.feed_forward(inputs)
            predicted = feed["activations"][-1]
            total_cost = 0
            for k, o in enumerate(observed):
                if o == 1:
                    total_cost += np.log(predicted[k])
                else:
                    total_cost += np.log(1 - predicted[k])
            cost = -1 * total_cost
            # Storing the cost for the i-th training example
            costs[i] = cost
            # Calculating the gradient for this training example
            # using the back propagation algorithm
            gradients_i = self.back_prop(observed, feed, theta)
            for j, gradient in enumerate(gradients_i):
                gradients[j] += gradient
        # Calculating the avg regularization term for the cost
        sum_of_theta = 0
        for i, theta_i in enumerate(theta):
            squared_theta = np.power(theta_i[:, 1:], 2)
            sum_of_theta += np.sum(squared_theta)
        r12n_avg = r12n * sum_of_theta / (2 * m)
        total_cost = np.sum(costs) / m + r12n_avg
        # Applying regularization terms to the gradients
        for i, theta_i in enumerate(theta):
            lambda_i = np.copy(theta_i)
            lambda_i[:, 0] = 0
            lambda_i = np.multiply((r12n / m), lambda_i)
            # Adding the r12n matrix to the gradient
            gradients[i] = gradients[i] / m + lambda_i
        return total_cost, gradients

    # Function for training the neural network using the conjugate gradient algorithm
    def train_cg(self, X, y, r12n=0, iterations=50):
        weights = self.weights

        def Cost(theta, X, y):
            theta = utils.roll_theta(theta, self.layers)
            cost, _ = self.compute_cost(theta, X, y, r12n)
            print(cost)
            return cost

        def Gradient(theta, X, y):
            theta = utils.roll_theta(theta, self.layers)
            _, gradient = self.compute_cost(theta, X, y, r12n)
            return utils.unroll_theta(gradient)

        unrolled_theta = utils.unroll_theta(weights)
        result = op.fmin_cg(f=Cost,
                            x0=unrolled_theta,
                            args=(X, y),
                            fprime=Gradient,
                            maxiter=iterations)
        self.weights = utils.roll_theta(result, self.layers)

    # Function for feeding forward through the network
    def feed_forward(self, X):
        # Useful variables
        activations = []
        weighted_layers = []
        weights = self.weights
        currentActivations = self.append_bias(X, 'row')
        activations.append(currentActivations)
        for i in range(len(self.layers) - 1):
            layer_weights = weights[i]
            weighted_inputs = np.dot(layer_weights, currentActivations)
            # Storing the weighted inputs
            weighted_layers.append(weighted_inputs)
            activation_nodes = []
            # If the next layer is not the output layer, we'd like to add a bias unit to it
            # (excluding the input and the output layer)
            if i < len(self.layers) - 2:
                activation_nodes = self.append_bias(utils.sigmoid(weighted_inputs), 'row')
            else:
                activation_nodes = utils.sigmoid(weighted_inputs)
            # Appending the layer of nodes to the activations array
            activations.append(activation_nodes)
            currentActivations = activation_nodes
        data = {
            "activations": activations,
            "weighted_layers": weighted_layers
        }
        return data

    def predict(self, X):
        data = self.feed_forward(X)
        output = data["activations"][-1]
        # Finding the max index in the output layer
        return np.argmax(output, axis=0)
Here is the invocation of the code:
import numpy as np
from network import Network
# %% Load data
X = np.genfromtxt('data/mnist_data.csv', delimiter=',')
y = np.genfromtxt('data/mnist_outputs.csv', delimiter=',').astype(int)
# %% Create network
num_labels = 10
input_layer = 400
hidden_layer = 25
output_layer = num_labels
layers = [input_layer, hidden_layer, output_layer]
# Create a new neural network
network = Network(layers)
# %% Train the network and save the weights
network.train_cg(X, y, r12n = 1, iterations = 20)
This is what the code emits after each iteration:
15.441233231650283
15.441116436313076
15.441192262452514
15.44122384651483
15.441231216030646
15.441232804294314
15.441233141284435
15.44123321255294
15.441233227614855
As you can see, the changes to the cost are very small.
I checked the shapes of the vectors and gradients, and they all seem fine, just like in my Matlab implementation. I'm not sure what I'm doing wrong here.
If you guys could help me, that'd be great :)
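One diagnostic that goes further than shape checks is a numerical gradient check. Here is a rough sketch, assuming the Network class and the utils.roll_theta / utils.unroll_theta helpers from above; the analytic and finite-difference values should agree to roughly 1e-4:

import numpy as np
import utils

def check_gradient(network, X, y, r12n=0, eps=1e-4, samples=10):
    theta = utils.unroll_theta(network.weights)
    _, grad = network.compute_cost(utils.roll_theta(theta, network.layers), X, y, r12n)
    grad = utils.unroll_theta(grad)
    for j in np.random.choice(len(theta), samples, replace=False):
        plus, minus = theta.copy(), theta.copy()
        plus[j] += eps
        minus[j] -= eps
        cost_plus, _ = network.compute_cost(utils.roll_theta(plus, network.layers), X, y, r12n)
        cost_minus, _ = network.compute_cost(utils.roll_theta(minus, network.layers), X, y, r12n)
        numeric = (cost_plus - cost_minus) / (2 * eps)
        print(j, numeric, grad[j])   # these two columns should nearly match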
This is not a question about a specific problem I am trying to solve. I am just trying to understand why a gradient is calculated by multiplying the layers (matrices) in a mostly backward fashion. I also didn't know that subtracting y from the prediction could give you something called a gradient.
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.T.dot(grad_y_pred)
I don't know what I thought PyTorch was doing to find the gradients. I figured it was some kind of algorithm that applied the power rule and followed other derivative rules somehow.
import numpy as np

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)

# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.dot(w1)
    h_relu = np.maximum(h, 0)
    y_pred = h_relu.dot(w2)

    # Compute and print loss
    loss = np.square(y_pred - y).sum()
    print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)      # d(loss)/d(y_pred): power rule on (y_pred - y)^2
    grad_w2 = h_relu.T.dot(grad_y_pred)   # chain rule through y_pred = h_relu.dot(w2)
    grad_h_relu = grad_y_pred.dot(w2.T)   # chain rule back into h_relu
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0                     # ReLU derivative: zero where the input was negative
    grad_w1 = x.T.dot(grad_h)             # chain rule through h = x.dot(w1)

    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
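For what it's worth, the two quoted lines are just the chain rule written in matrix form. With the loss and prediction defined as in the code,

$$L = \sum (y_{\text{pred}} - y)^2, \qquad y_{\text{pred}} = h_{\text{relu}} W_2,$$

differentiating gives

$$\frac{\partial L}{\partial y_{\text{pred}}} = 2\,(y_{\text{pred}} - y), \qquad \frac{\partial L}{\partial W_2} = h_{\text{relu}}^{\top}\,\frac{\partial L}{\partial y_{\text{pred}}}.$$

So grad_y_pred really is a derivative (the power rule applied to the squared error), and each transposed-matrix multiplication pushes that derivative one layer back, which is why the products run in reverse order.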
Assignment 1 from the CS231n course.
After the loss calculation, I'm asked to implement the gradient of the scores, where scores is a matrix with N (number of examples) rows and C (number of classes) columns.
This is the loss calculation:
z1 = X.dot(W1) + b1
a1 = np.maximum(0, z1) # pass through ReLU activation function
scores = a1.dot(W2) + b2
# compute the class probabilities
exp_scores = np.exp(scores)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # [N x K]
# average cross-entropy loss and regularization
correct_logprobs = -np.log(probs[range(N), y])
data_loss = np.sum(correct_logprobs) / N
reg_loss = 0.5 * reg * np.sum(W1 * W1) + 0.5 * reg * np.sum(W2 * W2)
loss = data_loss + reg_loss
and this is the gradient calculation (not mine, but it looks similar everywhere):
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# compute the gradient on scores
dscores = probs
dscores[range(N),y] -= 1 # The line I don't understand
dscores /= N
# W2 and b2
grads['W2'] = np.dot(a1.T, dscores)
grads['b2'] = np.sum(dscores, axis=0)
# next backprop into hidden layer
dhidden = np.dot(dscores, W2.T)
# backprop the ReLU non-linearity
dhidden[a1 <= 0] = 0
# finally into W,b
grads['W1'] = np.dot(X.T, dhidden)
grads['b1'] = np.sum(dhidden, axis=0)
# add regularization gradient contribution
grads['W2'] += reg * W2
grads['W1'] += reg * W1
My question is: why should I decrement dscores? And why is that a derivative?
I'm by no means an expert.
But what I think that line is basically doing is decrementing the gradient vector only at the correct classes, hence making it more negative, so that once you update the weights it will help the classifier predict the right class.
I may be wrong.
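To put some math behind that intuition (a standard derivation, not specific to this assignment): for one example with softmax probabilities $p_j = e^{s_j} / \sum_k e^{s_k}$ and cross-entropy loss $L = -\log p_y$,

$$\frac{\partial L}{\partial s_j} = p_j - \mathbf{1}[j = y],$$

which is exactly probs with 1 subtracted at the correct class; the division by N afterwards averages the gradient over the batch. So the decrement isn't a heuristic nudge, it is the derivative of the loss with respect to the scores.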
I am currently taking the Stanford CS231n course. When completing the softmax_loss function, I found it is not easy to write it in a fully vectorized way, especially the dW term. Below is my code. Can somebody optimize it? It would be appreciated.
import numpy as np

def softmax_loss_vectorized(W, X, y, reg):
    loss = 0.0
    dW = np.zeros_like(W)

    num_train = X.shape[0]
    num_classes = W.shape[1]

    scores = X.dot(W)
    scores -= np.max(scores, axis=1)[:, np.newaxis]  # shift for numerical stability
    exp_scores = np.exp(scores)
    sum_exp_scores = np.sum(exp_scores, axis=1)
    correct_class_score = scores[range(num_train), y]

    loss = np.sum(np.log(sum_exp_scores)) - np.sum(correct_class_score)

    exp_scores = exp_scores / sum_exp_scores[:, np.newaxis]

    # **maybe this can be rewritten with matrix operations**
    for i in range(num_train):
        dW += exp_scores[i] * X[i][:, np.newaxis]
        dW[:, y[i]] -= X[i]

    loss /= num_train
    loss += 0.5 * reg * np.sum(W * W)
    dW /= num_train
    dW += reg * W

    return loss, dW
Here's a vectorized implementation below. But I suggest you spend a little more time and get to the solution yourself. The idea is to construct a matrix of all the softmax values and subtract 1 at the correct-class elements.
def softmax_loss_vectorized(W, X, y, reg):
    num_train = X.shape[0]

    scores = X.dot(W)
    scores -= np.max(scores)
    correct_scores = scores[np.arange(num_train), y]

    # Compute the softmax per correct scores in bulk, and sum over its logs.
    exponents = np.exp(scores)
    sums_per_row = np.sum(exponents, axis=1)
    softmax_array = np.exp(correct_scores) / sums_per_row
    information_array = -np.log(softmax_array)
    loss = np.mean(information_array)

    # Compute the softmax over the whole scores matrix, which gives the coefficients
    # for the rows of X. Their linear combination is algebraically the dot product
    # with X transposed.
    all_softmax_matrix = (exponents.T / sums_per_row).T
    grad_coeff = np.zeros_like(scores)
    grad_coeff[np.arange(num_train), y] = -1
    grad_coeff += all_softmax_matrix
    dW = np.dot(X.T, grad_coeff) / num_train

    # Regularization
    loss += 0.5 * reg * np.sum(W * W)
    dW += reg * W

    return loss, dW
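A quick finite-difference spot check (my own illustration, assuming the vectorized function above is in scope) is a cheap way to confirm dW before trusting it:

import numpy as np

np.random.seed(0)
W = 0.01 * np.random.randn(5, 3)
X = np.random.randn(10, 5)
y = np.random.randint(3, size=10)

loss, dW = softmax_loss_vectorized(W, X, y, reg=0.1)

eps = 1e-5
i, j = 2, 1                                   # an arbitrary weight to probe
W_plus, W_minus = W.copy(), W.copy()
W_plus[i, j] += eps
W_minus[i, j] -= eps
numeric = (softmax_loss_vectorized(W_plus, X, y, 0.1)[0]
           - softmax_loss_vectorized(W_minus, X, y, 0.1)[0]) / (2 * eps)
print(numeric, dW[i, j])                      # should agree to several decimals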