How to use PyTorch's autograd efficiently with tensors?

In my previous question I found out how to use PyTorch's autograd to differentiate, and it worked:
#autograd
import torch
from torch.autograd import grad
import torch.nn as nn
import torch.optim as optim

class net_x(nn.Module):
    def __init__(self):
        super(net_x, self).__init__()
        self.fc1 = nn.Linear(1, 20)
        self.fc2 = nn.Linear(20, 20)
        self.out = nn.Linear(20, 4)

    def forward(self, x):
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        x = self.out(x)
        return x

nx = net_x()
r = torch.tensor([1.0], requires_grad=True)
print('r', r)
y = nx(r)
print('y', y)
print('')
for i in range(y.shape[0]):
    # prints the vector (dy_i/dr_0, dy_i/dr_1, ... dy_i/dr_n)
    print(grad(y[i], r, retain_graph=True))
>>>
r tensor([1.], requires_grad=True)
y tensor([ 0.1698, -0.1871, -0.1313, -0.2747], grad_fn=<AddBackward0>)
(tensor([-0.0124]),)
(tensor([-0.0952]),)
(tensor([-0.0433]),)
(tensor([-0.0099]),)
The problem that I currently have is that I have to differentiate a very large tensor and iterating through it like I'm currently doing (for i in range(y.shape[0])) is taking forever.
The reason I'm iterating is that, from my understanding, grad only knows how to propagate gradients from a scalar tensor, which y is not. So I need to compute the gradients with respect to each coordinate of y separately.
I know that TensorFlow is capable of differentiating tensors, from here:
tf.gradients(
    ys, xs, grad_ys=None, name='gradients', gate_gradients=False,
    aggregation_method=None, stop_gradients=None,
    unconnected_gradients=tf.UnconnectedGradients.NONE
)
"ys and xs are each a Tensor or a list of tensors. grad_ys is a list of Tensor, holding the gradients received by the ys. The list must be the same length as ys.
gradients() adds ops to the graph to output the derivatives of ys with respect to xs. It returns a list of Tensor of length len(xs) where each tensor is the sum(dy/dx) for y in ys and for x in xs."
And was hoping that there's a more efficient way to differentiate tensors in PyTorch.
For example:
a = range(100)
b = range(100)
c = range(100)
d = range(100)
my_tensor = torch.tensor([a, b, c, d])
t = range(100)

#derivative = grad(my_tensor, t) --> not working
#Instead what I'm currently doing:
for i in range(len(t)):
    a_grad = grad(a[i], t[i], retain_graph=True)
    b_grad = grad(b[i], t[i], retain_graph=True)
    #etc.
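For what it's worth, grad does accept a non-scalar output when grad_outputs is supplied, which reproduces the summing behaviour of tf.gradients quoted above in a single backward pass. A minimal sketch with the net and r from the top of the question:

# Computes sum_i dy_i/dr in one backward pass, like tf.gradients.
v = torch.ones_like(y)   # plays the role of grad_ys
print(grad(y, r, grad_outputs=v, retain_graph=True))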
I was told that it might work if I could run autograd on the forward pass rather than the backwards pass, but from here it seems like it's not currently a feature PyTorch has.
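(As an aside, newer PyTorch releases, roughly 1.11 onwards, do expose forward-mode AD via torch.autograd.forward_ad; whether every op in a given model supports it depends on the version. A hedged sketch of getting all dy_i/dr in a single forward pass:)

import torch.autograd.forward_ad as fwAD

tangent = torch.ones_like(r)                 # direction to differentiate along
with fwAD.dual_level():
    dual_r = fwAD.make_dual(r, tangent)
    dual_y = nx(dual_r)
    jvp = fwAD.unpack_dual(dual_y).tangent   # dy_i/dr for every output at once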
Update 1:
@jodag mentioned that what I'm looking for might be just the diagonal of the Jacobian. I'm following the link he attached and trying out the faster method. However, this doesn't seem to work and gives me an error:
RuntimeError: grad can be implicitly created only for scalar outputs.
Code:
nx = net_x()
x = torch.rand(10, requires_grad=True)
x = torch.reshape(x, (10,1))
x = x.unsqueeze(1).repeat(1, 4, 1)
y = nx(x)
dx = torch.diagonal(torch.autograd.grad(torch.diagonal(y, 0, -2, -1), x), 0, -2, -1)

I believe I solved it using @jodag's advice -- to simply calculate the Jacobian and take the diagonal.
Consider the following network:
import torch
from torch.autograd import grad
import torch.nn as nn
import torch.optim as optim

class net_x(nn.Module):
    def __init__(self):
        super(net_x, self).__init__()
        self.fc1 = nn.Linear(1, 20)
        self.fc2 = nn.Linear(20, 20)
        self.out = nn.Linear(20, 4)  #a,b,c,d

    def forward(self, x):
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        x = self.out(x)
        return x

nx = net_x()

#input
t = torch.tensor([1.0, 2.0, 3.2], requires_grad=True)  #input vector
t = torch.reshape(t, (3, 1))  #reshape for batch
My approach so far was to iterate through the input since grad wants a scalar value as mentioned above:
#method 1
for timestep in t:
    y = nx(timestep)
    print(grad(y[0], timestep, retain_graph=True))  #0 for the first vector (i.e. "a"), 1 for the 2nd vector (i.e. "b")
>>>
(tensor([-0.0142]),)
(tensor([-0.0517]),)
(tensor([-0.0634]),)
Using the diagonal of the Jacobian seems more efficient and gives the same results:
#method 2
dx = torch.autograd.functional.jacobian(lambda t_: nx(t_), t)
dx = torch.diagonal(torch.diagonal(dx, 0, -1), 0)[0] #first vector
#dx = torch.diagonal(torch.diagonal(dx, 1, -1), 0)[0] #2nd vector
#dx = torch.diagonal(torch.diagonal(dx, 2, -1), 0)[0] #3rd vector
#dx = torch.diagonal(torch.diagonal(dx, 3, -1), 0)[0] #4th vector
dx
>>>
tensor([-0.0142, -0.0517, -0.0634])
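As a side note, newer PyTorch releases can batch the Jacobian computation itself, which may matter for large inputs. A hedged sketch (vectorize is flagged experimental in some releases, and torch.func requires PyTorch 2.x):

# Same Jacobian as method 2, but computed with vmap-based vectorization.
dx = torch.autograd.functional.jacobian(nx, t, vectorize=True)

# PyTorch 2.x alternative: per-sample Jacobians, skipping the cross-sample zeros.
# from torch.func import jacrev, vmap
# dx = vmap(jacrev(nx))(t)   # shape (3, 4, 1)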

Related

How to solve 1D heat equation using neural networks in pytorch

I want to solve 1D heat conduction using neural networks in PyTorch. The PDE representing the heat conduction is as follows:
du/dt = k d2u/dx2
where k is a constant, u represents temperature, and x is space. I also include a boundary condition like 0 temperature at x=0 and an initial condition at t=0. I am quite new to the field of PINNs and can only solve normal ODEs with them. The following code tries to solve a very simple ODE, du/dx = x, with a boundary condition of u = 0 at x = 0. The answer is simply u = x^2/2. The following code is a simple PINN that solves this ODE:
import torch
import torch.nn as nn
import numpy as np

# N is a Neural Network with three layers
N = nn.Sequential(nn.Linear(1, 50), nn.Sigmoid(), nn.Linear(50, 1, bias=False))
BC = 0.  # Boundary condition
g_f = lambda x: BC + x * N(x)  # a general function satisfying the BC
f = lambda x: x  # ODE: du/dx = x ----> u = x**2/2

# The loss function
def loss(x):
    x.requires_grad = True
    outputs = g_f(x)
    grdnt = torch.autograd.grad(outputs, x, grad_outputs=torch.ones_like(outputs),
                                create_graph=True)[0]
    return torch.mean((grdnt - f(x)) ** 2)

optimizer = torch.optim.LBFGS(N.parameters())
x = torch.Tensor(np.linspace(-4, 4, 100)[:, None])

# Run the optimizer
def closure():
    optimizer.zero_grad()
    l = loss(x)
    l.backward()
    return l

for i in range(10):
    optimizer.step(closure)

x_test = np.linspace(-4, 4, 100)[:, None]
with torch.no_grad():
    y_test = g_f(torch.Tensor(x_test)).numpy()
How to replicate the code with 1D heat conduction?
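(A possible starting point: the heat-equation residual needs both du/dt and d2u/dx2, and the second derivative can be taken by calling autograd.grad twice with create_graph=True. A minimal sketch in the style of the ODE code above; the two-input network N2 and the constant k are assumptions, x and t are fresh leaf tensors of shape (n, 1), and the boundary/initial conditions still need their own loss terms:)

N2 = nn.Sequential(nn.Linear(2, 50), nn.Sigmoid(), nn.Linear(50, 1, bias=False))
k = 1.0  # conduction constant (assumption)

def pde_loss(x, t):
    x.requires_grad = True
    t.requires_grad = True
    u = N2(torch.cat([x, t], dim=1))            # u(x, t)
    ones = torch.ones_like(u)
    u_t = torch.autograd.grad(u, t, grad_outputs=ones, create_graph=True)[0]
    u_x = torch.autograd.grad(u, x, grad_outputs=ones, create_graph=True)[0]
    u_xx = torch.autograd.grad(u_x, x, grad_outputs=torch.ones_like(u_x),
                               create_graph=True)[0]
    return torch.mean((u_t - k * u_xx) ** 2)    # residual of du/dt = k d2u/dx2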

Trying to update weighted matrices of RNN with pytorch but .grad returns None

I am trying to create my own RNN with pytorch and am following some simple tutorials on the .backward function. Once I run my code, I get "None" as the result for .grad and I cannot figure out why. It looks like, from this post, that it may be because I am setting up the inputs as tensors so they are getting detached? If so, I am not sure how to correct for this but ensure they can still be multiplied in the matrices.
import math
import numpy as np
import torch
from collections import deque

#set up the inputs
lists = deque()
for i in range(0, 13, 1):
    lists.append(range(i, i + 4))
x = np.array(lists)

# set up the y vector
y = []
for i in range(len(x)):
    y.append((x[i, 3]) + 1)

#set up the validation input
lists = deque()
for i in range(13, 19, 1):
    lists.append(range(i, i + 4))
x_val = np.array(lists)

#set up the validation y vector
y_val = []
for i in range(len(x_val)):
    y_val.append((x_val[i, 3]) + 1)

#set params
input_dimension = len(x[0])
hidden_dimension = 100
output_dimension = 1

#set up the weighted matrices
np.random.seed(99)
Wxh = np.random.uniform(0, 1, (hidden_dimension, input_dimension))   # weights from input to hidden layer
Whh = np.random.uniform(0, 1, (hidden_dimension, hidden_dimension))  # weights inside cell - recurrent
Why = np.random.uniform(0, 1, (output_dimension, hidden_dimension))  # weights from hidden to output layer

#set up the input tensor
xt = torch.tensor(x[[0]], dtype=torch.float)  #do I want to keep a float here? or force an int? think: float - understand why
Wxh_t = torch.tensor(Wxh, requires_grad=True).float()
Whh_t = torch.tensor(Whh, requires_grad=True).float()
Why_t = torch.tensor(Why, requires_grad=True).float()

loss = 0
for i in range(len(x)):
    xt = torch.tensor(x[[i]], dtype=torch.float)
    print(xt)
    current_affine_3 = torch.mm(xt, Wxh_t.T)
    hidden_t = torch.mm(h_prev_t, Whh_t.T)
    ht_t = torch.tanh(current_affine_3 + hidden_t)
    y_hat_t = torch.mm(ht_t, Why_t.T)
    loss += (y[i] - y_hat_t)**2
    print(y[i])
    print(loss)
    h_prev_t = ht_t

loss.backward
print(Wxh_t.grad)
loss.backward returns <bound method Tensor.backward of tensor([[18672.0215]], grad_fn=<AddBackward0>)>
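(That output is actually the clue: without parentheses, loss.backward only references the method and never runs backpropagation, which is why .grad stays None; h_prev_t also needs an initial value before the first loop iteration. A minimal sketch of the fix, with the zero initialization being an assumption:)

h_prev_t = torch.zeros(1, hidden_dimension)  # initial hidden state (assumption)
# ... forward loop accumulating loss, as above ...
loss.backward()        # note the parentheses: this actually computes gradients
print(Wxh_t.grad)      # now a tensor rather than None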
And if I view the weighted matrices, I notice something different than the tutorials. Instead of grad_fn=<AddBackward0> after calculating with a requires_grad = True tensor, I get grad_fn=<MmBackward0>. I assume it's because I am using torch.mm, but I'm not sure if this matters. This is an example of some code I was using for a tutorial:
x = torch.tensor(2., requires_grad = False)
w = torch.tensor(3., requires_grad = True)
b = torch.tensor(1., requires_grad = True)
print("x:", x)
print("w:", w)
print("b:", b)
# define a function of the above defined tensors
y = w * x + b
print("y:", y)
# take the backward() for y
y.backward()
# print the gradients w.r.t. above x, w, and b
print("x.grad:", x.grad)
print("w.grad:", w.grad)
print("b.grad:", b.grad)
Thank you!

Python: How to prevent Scipy's optimize.minimize function from changing the shape of initial guess x0?

I am trying to implement the optimization algorithm from Scipy. It works fine when I implement it without inputting the Jacobian gradient function. I believe the issue that I am getting when I input the gradient is because the minimize function itself is changing the shape of the initial guess x0. You can see this from the output of the code below.
Input:
import numpy as np
from costFunction import *
import scipy.optimize as op

def sigmoid(z):
    epsilon = np.finfo(z.dtype).eps
    g = 1/(1+np.exp(-z))
    g = np.clip(g, epsilon, 1-epsilon)
    return g

def costFunction(theta, X, y):
    m = y.size
    h = sigmoid(X @ theta)
    J = 1/(m)*(-y.T @ np.log(h) - (1-y).T @ np.log(1-h))
    grad = 1/m * X.T @ (h-y)
    print('Shape of theta is', np.shape(theta), '\n')
    print('Shape of gradient is', np.shape(grad), '\n')
    return J, grad

X = np.array([[1, 3], [5, 7]])
y = np.array([[1], [0]])
m, n = np.shape(X)
one_vec = np.ones((m, 1))
X = np.hstack((one_vec, X))
initial_theta = np.zeros((n+1, 1))

print('Running costFunction before executing minimize function...\n')
cost, grad = costFunction(initial_theta, X, y)  #To test the shape of gradient before calling minimize
print('Executing minimize function...\n')
Result = op.minimize(costFunction, initial_theta, args=(X, y), method='TNC', jac=True, options={'maxiter': 400})
Output:
Running costFunction before executing minimize function...
Shape of theta is (3, 1)
Shape of gradient is (3, 1)
Executing minimize function...
Shape of theta is (3,)
Shape of gradient is (3, 2)
Traceback (most recent call last):
  File "C:/Users/#####/minimizeshapechange.py", line 34, in <module>
    Result = op.minimize(costFunction,initial_theta,args=(X,y),method='TNC',jac=True,options={'maxiter':400})
  File "C:\Users\#####\anaconda3\lib\site-packages\scipy\optimize\_minimize.py", line 453, in minimize
    **options)
  File "C:\Users\#####\anaconda3\lib\site-packages\scipy\optimize\tnc.py", line 409, in _minimize_tnc
    xtol, pgtol, rescale, callback)
ValueError: tnc: invalid gradient vector from minimized function.
Process finished with exit code 1
I will not analyze your exact computations, but some remarks:
(1) Your gradient is broken!
scipy expects a partial derivative resulting in an array of shape equal to your x0.
your gradient is of shape (3,2), while (n+1, 1) is expected
compare with the example given in the tutorial, which uses scipy.optimize.rosen_der (der = derivative); a shape-safe sketch follows below
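A minimal sketch of a shape-safe version of the asker's cost function, assuming the sigmoid defined in the question (key points: reshape theta inside the function, return a scalar cost and a flat gradient):

def costFunction(theta, X, y):
    theta = theta.reshape(-1, 1)   # scipy hands theta in flattened to shape (n+1,)
    m = y.size
    h = sigmoid(X @ theta)
    J = (1/m * (-y.T @ np.log(h) - (1-y).T @ np.log(1-h))).item()
    grad = 1/m * X.T @ (h - y)
    return J, grad.ravel()         # gradient shape must match the flat theta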
(2) It seems your scipy-version is a bit older, because mine (0.19.0) is telling me:
ValueError: tnc: invalid gradient vector from minimized function.
Some supporting source-code from scipy:
if (PyArray_SIZE(arr_grad) != py_state->n)
{
    PyErr_SetString(PyExc_ValueError,
        "tnc: invalid gradient vector from minimized function.");
    goto failure;
Remark: This code above was changed / touched / introduced 5 years ago. If you really don't get this error while using your code listed (with removal of the import of costFunction), it seems you are using scipy < v0.13.0b1, which I do not recommend! I assume you are using some deprecated Windows-based unofficial distribution with outdated scipy. You should change that!
I had the same problem with Scipy trying to do the same thing as you. I don't understand exactly why this solves the problem but playing with array shapes until it worked gave me the following:
Gradient function defined as follows
def Gradient(theta, X, y):
    #Initializing variables
    m = len(y)
    theta = theta[:, np.newaxis]  #<---- THIS IS THE TRICK
    grad = np.zeros(theta.shape)
    #Vectorized computations
    z = X @ theta
    h = sigmoid(z)
    grad = (1/m)*(X.T @ (h - y))
    return grad  #<--- also works with grad.ravel()
Initial_theta initialized as
initial_theta = np.zeros((n+1))
initial_theta.shape
(3,)
i.e. a simple numpy array rather than a column vector.
Gradient function returns
Gradient(initial_theta,X,y).shape
(3,1) or (3,) depending on whether the function returns grad or grad.ravel
scipy.optimize called as
import scipy.optimize as opt
model = opt.minimize(fun = CostFunc, x0 = initial_theta, args = (X, y), method = 'TNC', jac = Gradient)
What does not work with Scipy
initial_theta of shape (3,1) using initial_theta = np.zeros((n+1))[:,np.newaxis] crashes the scipy.minimize function call.
ValueError: tnc: invalid gradient vector from minimized function.
If someone could clarify these points that would be great ! Thanks
Your costFunction code is wrong; maybe you should look at this:
def costFunction(theta, X, y):
    h_theta = sigmoid(X @ theta)
    J = (-y) * np.log(h_theta) - (1 - y) * np.log(1 - h_theta)
    return np.mean(J)
Please copy and paste each block (In 1, In 2, and so on) into a separate Jupyter cell.
In 1
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
filepath =('C:/Pythontry/MachineLearning/dataset/couresra/ex2data1.txt')
data =pd.read_csv(filepath,sep=',',header=None)
#print(data)
X = data.values[:,:2] #(100,2)
y = data.values[:,2:3] #(100,1)
#print(np.shape(y))
#In 2
#%% ==================== Part 1: Plotting ====================
postive_value = data.loc[data[2] == 1]
#print(postive_value.values[:,2:3])
negative_value = data.loc[data[2] == 0]
#print(len(postive_value))
#print(len(negative_value))
ax1 = postive_value.plot(kind='scatter',x=0,y=1,s=50,color='b',marker="+",label="Admitted") # S is line width #https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.scatter.html#matplotlib.axes.Axes.scatter
ax2 = negative_value.plot(kind='scatter',x=0,y=1,s=50,color='y',ax=ax1,label="Not Admitted")
ax1.set_xlabel("Exam 1 score")
ax2.set_ylabel("Exam 2 score")
plt.show()
#print(ax1 == ax2)
#print(np.shape(X))
# In 3
#============ Part 2: Compute Cost and Gradient ===========
[m,n] = np.shape(X) #(100,2)
print(m,n)
additional_coulmn = np.ones((m,1))
X = np.append(additional_coulmn,X,axis=1)
initial_theta = np.zeros((n+1), dtype=int)
print(initial_theta)
# In4
#Sigmoid and cost function
def sigmoid(z):
    g = np.zeros(np.shape(z))
    g = 1/(1+np.exp(-z))
    return g

def costFunction(theta, X, y):
    J = 0
    #print(theta)
    receive_theta = np.array(theta)[np.newaxis]  ##This command is used to create the 1D array
    #print(receive_theta)
    theta = np.transpose(receive_theta)
    #print(np.shape(theta))
    #grad = np.zeros(np.shape(theta))
    z = np.dot(X, theta)  # where z = theta*X
    #print(z)
    h = sigmoid(z)  #formula h(x) = g(z) where g = 1/(1+e^(-z))  #(100,1)
    #print(np.shape(h))
    #J = np.sum(((-y)*np.log(h)-(1-y)*np.log(1-h))/m)
    J = np.sum(np.dot((-y.T), np.log(h)) - np.dot((1-y).T, np.log(1-h)))/m
    #J = (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
    #error = h-y
    #print(np.shape(error))
    #print(np.shape(X))
    grad = np.dot(X.T, (h-y))/m
    #print(grad)
    return J, grad
#In5
[cost, grad] = costFunction(initial_theta, X, y)
print('Cost at initial theta (zeros):', cost)
print('Expected cost (approx): 0.693\n')
print('Gradient at initial theta (zeros): \n',grad)
print('Expected gradients (approx):\n -0.1000\n -12.0092\n -11.2628\n')
# In 6: Compute and display cost and gradient with non-zero theta
test_theta = [-24, 0.2, 0.2]
#test_theta_value = np.array([-24, 0.2, 0.2])[np.newaxis] #This command is used to create the 1D row array
#test_theta = np.transpose(test_theta_value) # Transpose
#test_theta = test_theta_value.transpose()
[cost, grad] = costFunction(test_theta, X, y)
print('\nCost at test theta: \n', cost)
print('Expected cost (approx): 0.218\n')
print('Gradient at test theta: \n',grad);
print('Expected gradients (approx):\n 0.043\n 2.566\n 2.647\n')
#IN6
# ============= Part 3: Optimizing using range =============
import scipy.optimize as opt
#initial_theta_initialize = np.array([0, 0, 0])[np.newaxis]
#initial_theta = np.transpose(initial_theta_initialize)
print ('Executing minimize function...\n')
# Working models
#result = opt.minimize(costFunction,initial_theta,args=(X,y),method='TNC',jac=True,options={'maxiter':400})
result = opt.fmin_tnc(func=costFunction, x0=initial_theta, args=(X, y))
# Not working model
#costFunction(initial_theta,X,y)
#model = opt.minimize(fun = costFunction, x0 = initial_theta, args = (X, y), method = 'TNC',jac = costFunction)
print('Thetas found by fmin_tnc function: ', result)
print('Cost at theta found : \n', cost)
print('Expected cost (approx): 0.203\n')
print('theta: \n', result[0])
print('Expected theta (approx):\n')
print(' -25.161\n 0.206\n 0.201\n')
Result:
Executing minimize function...
Thetas found by fmin_tnc function: (array([-25.16131854, 0.20623159, 0.20147149]), 36, 0)
Cost at theta found :
0.218330193827
Expected cost (approx): 0.203
theta:
[-25.16131854 0.20623159 0.20147149]
Expected theta (approx):
-25.161
0.206
0.201
scipy's fmin_tnc doesn't work well with column or row vectors; it expects the parameters to be in a flat array format.
Python Implementation of Andrew Ng’s Machine Learning Course (Part 2.1)
opt.fmin_tnc(func = costFunction, x0 = theta.flatten(),fprime = gradient, args = (X, y.flatten()))
What worked for me is to reshape y as a vector (1-D) rather than a matrix (2-D array). I simply used the following code and then reran SciPy's minimize function, and it worked.
y = np.reshape(y,100) #e.g., if your y variable has 100 data points.
A little bit late, but I also started the Andrew Ng assignment, implementing it in Python, and put a lot of effort into resolving the mentioned issue. Finally it works for me.
This blog helped me, but with one change in the fmin_tnc function call; refer below:
result = op.fmin_tnc(func=costFunction, x0=initial_theta, fprime=None, approx_grad=True, args=(X, y))
Got this info from here.

Compute the pairwise distance between each pair of the two collections of inputs in TensorFlow

I have two collections. One consists of m1 points in k dimensions and another one of m2 points in k dimensions. I need to calculate pairwise distance between each pair of the two collections.
Basically, having two matrices A of shape (m1, k) and B of shape (m2, k), I need to get a matrix C of shape (m1, m2).
I can easily do this in scipy by using distance.cdist and selecting one of many distance metrics, and I also can do this in TF in a loop, but I can't figure out how to do this with matrix manipulations even for Euclidean distance.
After a few hours I finally found how to do this in Tensorflow. My solution works only for Euclidean distance and is pretty verbose. I also do not have a mathematical proof (just a lot of handwaving, which I hope to make more rigorous):
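For reference, the identity the code below relies on is the expansion of the squared Euclidean distance:
||a_i - b_j||^2 = ||a_i||^2 - 2 a_i . b_j + ||b_j||^2
p1 tiles the squared row norms of A across columns, p2 tiles those of B across rows, and the final matmul supplies the cross term.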
import tensorflow as tf
import numpy as np
from scipy.spatial.distance import cdist

M1, M2, K = 3, 4, 2

# Scipy calculation
a = np.random.rand(M1, K).astype(np.float32)
b = np.random.rand(M2, K).astype(np.float32)
print(cdist(a, b, 'euclidean'), '\n')

# TF calculation
A = tf.Variable(a)
B = tf.Variable(b)
p1 = tf.matmul(
    tf.expand_dims(tf.reduce_sum(tf.square(A), 1), 1),
    tf.ones(shape=(1, M2))
)
p2 = tf.transpose(tf.matmul(
    tf.reshape(tf.reduce_sum(tf.square(B), 1), shape=[-1, 1]),
    tf.ones(shape=(M1, 1)),
    transpose_b=True
))
res = tf.sqrt(tf.add(p1, p2) - 2 * tf.matmul(A, B, transpose_b=True))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(res))
This will do it for tensors of arbitrary dimensionality (i.e. containing (..., N, d) vectors). Note that it isn't between collections (i.e. not like scipy.spatial.distance.cdist); it's instead within a single batch of vectors (i.e. like scipy.spatial.distance.pdist).
import tensorflow as tf
import string

def pdist(arr):
    """Pairwise Euclidean distances between vectors contained at the back of tensors.

    Uses expansion: (x - y)^T (x - y) = x^Tx - 2x^Ty + y^Ty

    :param arr: (..., N, d) tensor
    :returns: (..., N, N) tensor of pairwise distances between vectors in the second-to-last dim.
    :rtype: tf.Tensor
    """
    shape = tuple(arr.get_shape().as_list())
    rank_ = len(shape)
    N, d = shape[-2:]
    # Build a prefix from the array without the indices we'll use later.
    pref = string.ascii_lowercase[:rank_ - 2]
    # Outer product of points (..., N, N)
    xxT = tf.einsum('{0}ni,{0}mi->{0}nm'.format(pref), arr, arr)
    # Inner product of points. (..., N)
    xTx = tf.einsum('{0}ni,{0}ni->{0}n'.format(pref), arr, arr)
    # (..., N, N) inner products tiled.
    xTx_tile = tf.tile(xTx[..., None], (1,) * (rank_ - 1) + (N,))
    # Build the permuter. (sigh, no tf.swapaxes yet)
    permute = list(range(rank_))
    permute[-2], permute[-1] = permute[-1], permute[-2]
    # dists = (x^Tx - 2x^Ty + y^Ty)^(1/2). Note the axis swapping is necessary to 'pair' x^Tx and y^Ty
    return tf.sqrt(xTx_tile - 2 * xxT + tf.transpose(xTx_tile, permute))
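A quick sanity check against scipy, assuming TF 2.x eager execution (so .numpy() works without a session):

import numpy as np
from scipy.spatial.distance import pdist as scipy_pdist, squareform

pts = np.random.rand(5, 3).astype(np.float64)
print(pdist(tf.constant(pts)).numpy())   # TF result, shape (5, 5)
print(squareform(scipy_pdist(pts)))      # scipy reference for comparison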

Error in backpropagation python neural net

Darn thing just won't learn. Sometimes weights seem to become nan.
I haven't played with different numbers of hidden layers/inputs/outputs but the bug appears consistent across different sizes of hidden layer.
from __future__ import division
import numpy
import matplotlib.pyplot  # pyplot must be imported explicitly
import random

class Net:
    def __init__(self, *sizes):
        sizes = list(sizes)
        sizes[0] += 1
        self.sizes = sizes
        self.weights = [numpy.random.uniform(-1, 1, (sizes[i+1], sizes[i])) for i in range(len(sizes)-1)]

    @staticmethod
    def activate(x):
        return 1/(1+numpy.exp(-x))

    def y(self, x_):
        x = numpy.concatenate(([1], numpy.atleast_1d(x_.copy())))
        o = [x]  #o[i] is the (activated) output of hidden layer i, "hidden layer 0" is inputs
        for weight in self.weights[:-1]:
            x = weight.dot(x)
            x = Net.activate(x)
            o.append(x)
        o.append(self.weights[-1].dot(x))
        return o

    def __call__(self, x):
        return self.y(x)[-1]

    def delta(self, x, t):
        o = self.y(x)
        delta = [(o[-1]-t) * o[-1] * (1-o[-1])]
        for i, weight in enumerate(reversed(self.weights)):
            delta.append(weight.T.dot(delta[-1]) * o[-i-2] * (1-o[-i-2]))
        delta.reverse()
        return o, delta

    def train(self, inputs, outputs, epochs=100, rate=.1):
        for epoch in range(epochs):
            pairs = zip(inputs, outputs)
            random.shuffle(pairs)
            for x, t in pairs:  #shuffle? subset?
                o, d = self.delta(x, t)
                for layer in range(len(self.sizes)-1):
                    self.weights[layer] -= rate * numpy.outer(o[layer+1], d[layer])

n = Net(1, 4, 1)
x = numpy.linspace(0, 2*3.14, 10)
t = numpy.sin(x)
matplotlib.pyplot.plot(x, t, 'g')
matplotlib.pyplot.plot(x, map(n, x), 'r')
n.train(x, t)
print n.weights
matplotlib.pyplot.plot(x, map(n, x), 'b')
matplotlib.pyplot.show()
I haven't looked for a particular bug in your code, but can you please try the following things to narrow down your problem further? Otherwise it is very tedious to find the needle in the haystack.
1) Please try to use a real dataset to have an idea what to expect, e.g., MNIST, and/or standardize your data, because your weights may become NaN if they become too small.
2) Try different learning rates and plot the cost function vs. epochs to check if you are converging. It should look somewhat like this (note that I used minibatch learning and averaged the minibatch chunks for each epoch).
3) I see that you are using a sigmoid activation. Your implementation is correct, but to make it numerically more stable, replace 1.0 / (1.0 + np.exp(-z)) with expit(z) from scipy.special (the same function, but more robust); a tiny demonstration follows.
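For illustration (the exact warning text varies by NumPy version):

import numpy as np
from scipy.special import expit

z = np.array([-1000.0, 0.0, 1000.0])
print(expit(z))                    # [0.  0.5 1. ] -- no warnings
print(1.0 / (1.0 + np.exp(-z)))    # same values, but np.exp overflows for z = -1000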
4) Implement gradient checking, where you compare the analytical gradient to a numerically approximated one.
An even better approach that yields a more accurate approximation of the gradient is the symmetric (or centered) difference quotient given by the two-point formula (f(w + eps) - f(w - eps)) / (2 * eps), as sketched below.
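A minimal sketch of such a check; the helper name and the closure-style interface are illustrative, not from the original answer:

import numpy as np

def numeric_grad(f, w, eps=1e-5):
    """Centered-difference estimate of df/dw for a scalar-valued f()."""
    g = np.zeros_like(w)
    it = np.nditer(w, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = w[idx]
        w[idx] = orig + eps
        f_plus = f()                # loss with w[idx] nudged up
        w[idx] = orig - eps
        f_minus = f()               # loss with w[idx] nudged down
        w[idx] = orig               # restore the weight
        g[idx] = (f_plus - f_minus) / (2 * eps)
        it.iternext()
    return g

# Then compare against the analytical gradient, e.g.:
# assert np.allclose(numeric_grad(compute_loss, net.weights[0]), analytic_grad, atol=1e-4)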
PS: If you are interested and find it useful, I have a working vanilla NumPy neural net implemented here.
I fixed it! Thanks for all the suggestions. I worked out numeric partials and found that my o and deltas were correct, but I was multiplying the wrong ones. That's why I now take numpy.outer(d[layer+1], o[layer]) instead of numpy.outer(d[layer], o[layer+1]).
I was also skipping the update on one layer. That's why I changed for layer in range(self.hidden_layers) to for layer in range(self.hidden_layers+1).
I'll add that I caught a bug just before posting originally. My output layer delta was incorrect because my net (intentionally) doesn't activate the final outputs, but my delta was computed as though it did.
Debugged primarily with a one hidden layer, one hidden unit net, then moved to a 2 input, 3 hidden layers of 2 neurons each, 2 output model.
from __future__ import division
import numpy
import scipy
import scipy.special
import matplotlib.pyplot  # pyplot must be imported explicitly
#from pylab import *
#numpy.random.seed(23)

def nmap(f, x):
    return numpy.array(map(f, x))

class Net:
    def __init__(self, *sizes):
        self.hidden_layers = len(sizes)-2
        self.weights = [numpy.random.uniform(-1, 1, (sizes[i+1], sizes[i])) for i in range(self.hidden_layers+1)]

    @staticmethod
    def activate(x):
        return scipy.special.expit(x)
        #return 1/(1+numpy.exp(-x))

    @staticmethod
    def activate_(x):
        s = scipy.special.expit(x)
        return s*(1-s)

    def y(self, x):
        o = [numpy.array(x)]  #o[i] is the (activated) output of hidden layer i, "hidden layer 0" is inputs and not activated
        for weight in self.weights[:-1]:
            o.append(Net.activate(weight.dot(o[-1])))
        o.append(self.weights[-1].dot(o[-1]))
        #for weight in self.weights:
        #    o.append(Net.activate(weight.dot(o[-1])))
        return o

    def __call__(self, x):
        return self.y(x)[-1]

    def delta(self, x, t):
        x = numpy.array(x)
        t = numpy.array(t)
        o = self.y(x)
        #delta = [(o[-1]-t) * o[-1] * (1-o[-1])]
        delta = [o[-1]-t]
        for i, weight in enumerate(reversed(self.weights)):
            delta.append(weight.T.dot(delta[-1]) * o[-i-2] * (1-o[-i-2]))
        delta.reverse()  #surely i need this
        return o, delta

    def train(self, inputs, outputs, epochs=1000, rate=.1):
        errors = []
        for epoch in range(epochs):
            for x, t in zip(inputs, outputs):  #shuffle? subset?
                o, d = self.delta(x, t)
                for layer in range(self.hidden_layers+1):
                    grad = numpy.outer(d[layer+1], o[layer])
                    self.weights[layer] -= rate * grad
        return errors

    def rmse(self, inputs, outputs):
        return ((outputs - nmap(self, inputs))**2).sum()**.5/len(inputs)

n = Net(1, 8, 1)
X = numpy.linspace(0, 2*3.1415, 10)
T = numpy.sin(X)
Y = map(n, X)
Y = numpy.array([y[0,0] for y in Y])
matplotlib.pyplot.plot(X, T, 'g')
matplotlib.pyplot.plot(X, Y, 'r')
print 'output successful'
print n.rmse(X, T)
errors = n.train(X, T)
print 'tried to train successfully'
print n.rmse(X, T)
Y = map(n, X)
Y = numpy.array([y[0,0] for y in Y])
matplotlib.pyplot.plot(X, Y, 'b')  # fixed: was lowercase x, which is undefined here
matplotlib.pyplot.show()
