Simple Regression Example pyBrain - python

I am trying to make the simplest regression on pyBrain but somehow I'm failing.
The Neural Network should learn the function Y=3*X
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import FullConnection, FeedForwardNetwork, TanhLayer, LinearLayer, BiasUnit
import matplotlib.pyplot as plt
from numpy import *
n = FeedForwardNetwork()
n.addInputModule(LinearLayer(1, name = 'in'))
n.addInputModule(BiasUnit(name = 'bias'))
n.addModule(TanhLayer(1,name = 'tan'))
n.addOutputModule(LinearLayer(1, name = 'out'))
n.addConnection(FullConnection(n['bias'], n['tan']))
n.addConnection(FullConnection(n['in'], n['tan']))
n.addConnection(FullConnection(n['tan'], n['out']))
n.sortModules()
# initialize the backprop trainer and train
t = BackpropTrainer(n, learningrate = 0.1, momentum = 0.0, verbose = True)
#DATASET
DS = SupervisedDataSet( 1, 1 )
X = random.rand(100,1)*100
Y = X*3+random.rand(100,1)*5
for r in xrange(X.shape[0]):
    DS.appendLinked((X[r]),(Y[r]))
t.trainOnDataset(DS, 200)
plt.plot(X,Y,'.b')
X=[[i] for i in arange(0,100,0.1)]
Y=map(n.activate,X)
plt.plot(X,Y,'-g')
It doesn't learn anything. I have tried removing the hidden layer (since in this example we don't even need one) and the network started predicting NaNs.
What's going on?
EDIT: This is the code that solved my problem:
#DATASET
DS = SupervisedDataSet( 1, 1 )
X = random.rand(100,1)*100
Y = X*3+random.rand(100,1)*5
maxy = float(max(Y))
maxx = 100.0
for r in xrange(X.shape[0]):
    DS.appendLinked((X[r]/maxx),(Y[r]/maxy))
t.trainOnDataset(DS, 200)
plt.plot(X,Y,'.b')
X=[[i] for i in arange(0,100,0.1)]
Y=map(lambda x: n.activate(array(x)/maxx)*maxy,X)
plt.plot(X,Y,'-g')

The basic pybrain neurons (the tanh layer here) only output values between -1 and 1. Divide your Y by 300 (roughly the maximum possible value), and you'll get better results.
More generally, find the maximum Y for your dataset, and scale everything by that.
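A minimal sketch of that scaling idea, essentially what the EDIT above already does (x_new is a hypothetical new input, and 100.0 is the known maximum of X):

maxy = float(Y.max())                            # largest target in the training data
DS = SupervisedDataSet(1, 1)
for r in xrange(X.shape[0]):
    DS.appendLinked(X[r] / 100.0, Y[r] / maxy)   # inputs and targets scaled to roughly [0, 1]
t.trainOnDataset(DS, 200)
pred = n.activate([x_new / 100.0]) * maxy        # undo the scaling on the way out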

Related

Implementing STFT with Pytorch gives a slightly different result than the STFT with Librosa

I am trying to implement STFT with Pytorch. But the output from the Pytorch implementation is slightly off, when compared with the implementation from Librosa.
Librosa version
import numpy as np
from librosa.core import stft
import matplotlib.pyplot as plt
np.random.seed(3)
y = np.sin(2*np.pi*50*np.linspace(0,10,2048))+np.sin(2*np.pi*20*np.linspace(0,10,2048)) + np.random.normal(scale=1,size=2048)
S_stft = np.abs(stft(y, hop_length=512, n_fft=2048,center=False))
plt.plot(S_stft)
Pytorch version
import torch
from torch.autograd import Variable
from torch.nn.functional import conv1d
from scipy.signal.windows import hann
stride = 512
def create_filters(d,k,low=50,high=6000):
    x = np.arange(0, d, 1)
    wsin = np.empty((k,1,d), dtype=np.float32)
    wcos = np.empty((k,1,d), dtype=np.float32)
    start_freq = low
    end_freq = high
    # num_cycles = start_freq*d/44000.
    # scaling_ind = np.log(end_freq/start_freq)/k
    window_mask = hann(2048, sym=False) # same as 0.5-0.5*np.cos(2*np.pi*x/(k))
    for ind in range(k):
        wsin[ind,0,:] = window_mask*np.sin(2*np.pi*ind/k*x)
        wcos[ind,0,:] = window_mask*np.cos(2*np.pi*ind/k*x)
    return wsin,wcos
wsin, wcos = create_filters(2048,2048)
wsin_var = Variable(torch.from_numpy(wsin), requires_grad=False)
wcos_var = Variable(torch.from_numpy(wcos),requires_grad=False)
network_input = torch.from_numpy(y).float()
network_input = network_input.reshape(1,-1)
zx = np.sqrt(conv1d(network_input[:,None,:], wsin_var, stride=stride).pow(2)+conv1d(network_input[:,None,:], wcos_var, stride=stride).pow(2))
pytorch_Xs = zx.cpu().numpy()
plt.plot(pytorch_Xs[0,:1025,0])
My Question
The two graphs might look the same, but if I check the two outputs with np.allclose, we can see that they are slightly different.
np.allclose(S_stft, pytorch_Xs[0,:1025,0].reshape(1025,1))
output >>> False
Only when I raise the tolerance to 1e-5 does it return True:
np.allclose(S_stft, pytorch_Xs[0,:1025,0].reshape(1025,1),atol=1e-5)
output >>> True
What causes the difference in values? Is it because of the data conversion by using torch.from_numpy(y).float()?
I would like the difference to be less than 1e-7; 1e-8 would be even better.
The difference comes from the two libraries' default floating-point precision.
NumPy's default float is 64-bit (float64).
PyTorch's default float is 32-bit (float32), so torch.from_numpy(y).float() drops the signal to single precision.
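If that cast is the culprit, one way to check (a sketch, not verified against your data; it reuses y and create_filters from the question) is to keep the whole PyTorch pipeline in double precision:

import numpy as np
import torch
from torch.nn.functional import conv1d

wsin, wcos = create_filters(2048, 2048)              # float32 filters from the question
wsin64 = torch.from_numpy(wsin.astype(np.float64))   # promote the filters to float64
wcos64 = torch.from_numpy(wcos.astype(np.float64))
signal = torch.from_numpy(y).reshape(1, 1, -1)       # y is already float64; no .float() cast
zx64 = torch.sqrt(conv1d(signal, wsin64, stride=512).pow(2) +
                  conv1d(signal, wcos64, stride=512).pow(2))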

Simple Linear Regression Error in updating cost function and Theta Parameters

I wrote a piece of code to build a simple linear regression model using Python. However, I am having trouble getting the correct cost function and, most importantly, the correct theta parameters. The model is implemented from scratch, without using the scikit-learn module. I used Andrew Ng's notes from his ML Coursera course to create the model. The correct values of theta are [[-3.630291] [1.166362]].
Would be really grateful if someone could offer their expertise, and point out what I'm doing wrong.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Load The Dataset
dataset = pd.read_csv("Population vs Profit.txt", names=["Population", "Profit"])
print (dataset.head())
col = len(dataset.columns)
x = dataset.iloc[:,:col-1].values
y = dataset.iloc[:,col-1].values
#Visualizing The Dataset
plt.scatter(x, y, color="red", marker="x", label="Profit")
plt.title("Population vs Profit")
plt.xlabel("Population")
plt.ylabel("Profit")
plt.legend()
plt.show()
#Preprocessing Data
dataset.insert(0,"x0",1)
col = len(dataset.columns)
x = dataset.iloc[:,:col-1].values
b = np.zeros(col-1)
m = len(y)
costlist = []
alpha = 0.001
iteration = 10000
#Defining Functions
def hypothesis(x,b,y):
    h = x.dot(b.T) - y
    return h
def cost(x,b,y,m):
    j = np.sum(hypothesis(x,b,y)**2)
    j = j/(2*m)
    return j
print (cost(x,b,y,m))
def gradient_descent(x,b,y,m,alpha):
    for i in range (iteration):
        h = hypothesis(x,b,y)
        product = np.sum(h.dot(x))
        b = b - ((alpha/m)*product)
        costlist.append(cost(x,b,y,m))
    return b,cost(x,b,y,m)
b , mincost = gradient_descent(x,b,y,m,alpha)
print (b , mincost)
print (cost(x,b,y,m))
plt.plot(b,color="green")
plt.show()
The dataset I'm using is the following text.
6.1101,17.592
5.5277,9.1302
8.5186,13.662
7.0032,11.854
5.8598,6.8233
8.3829,11.886
7.4764,4.3483
8.5781,12
6.4862,6.5987
5.0546,3.8166
5.7107,3.2522
14.164,15.505
5.734,3.1551
8.4084,7.2258
5.6407,0.71618
5.3794,3.5129
6.3654,5.3048
5.1301,0.56077
6.4296,3.6518
7.0708,5.3893
6.1891,3.1386
20.27,21.767
5.4901,4.263
6.3261,5.1875
5.5649,3.0825
18.945,22.638
12.828,13.501
10.957,7.0467
13.176,14.692
22.203,24.147
5.2524,-1.22
6.5894,5.9966
9.2482,12.134
5.8918,1.8495
8.2111,6.5426
7.9334,4.5623
8.0959,4.1164
5.6063,3.3928
12.836,10.117
6.3534,5.4974
5.4069,0.55657
6.8825,3.9115
11.708,5.3854
5.7737,2.4406
7.8247,6.7318
7.0931,1.0463
5.0702,5.1337
5.8014,1.844
11.7,8.0043
5.5416,1.0179
7.5402,6.7504
5.3077,1.8396
7.4239,4.2885
7.6031,4.9981
6.3328,1.4233
6.3589,-1.4211
6.2742,2.4756
5.6397,4.6042
9.3102,3.9624
9.4536,5.4141
8.8254,5.1694
5.1793,-0.74279
21.279,17.929
14.908,12.054
18.959,17.054
7.2182,4.8852
8.2951,5.7442
10.236,7.7754
5.4994,1.0173
20.341,20.992
10.136,6.6799
7.3345,4.0259
6.0062,1.2784
7.2259,3.3411
5.0269,-2.6807
6.5479,0.29678
7.5386,3.8845
5.0365,5.7014
10.274,6.7526
5.1077,2.0576
5.7292,0.47953
5.1884,0.20421
6.3557,0.67861
9.7687,7.5435
6.5159,5.3436
8.5172,4.2415
9.1802,6.7981
6.002,0.92695
5.5204,0.152
5.0594,2.8214
5.7077,1.8451
7.6366,4.2959
5.8707,7.2029
5.3054,1.9869
8.2934,0.14454
13.394,9.0551
5.4369,0.61705
One issue is with your "product". It is currently a scalar when it should be a vector. I was able to get the values [-3.24044334 1.12719788] by rewriting your for-loop as follows:
def gradient_descent(x,b,y,m,alpha):
    for i in range (iteration):
        h = hypothesis(x,b,y)
        #product = np.sum(h.dot(x))
        xvalue = x[:,1]
        product = h.dot(xvalue)
        hsum = np.sum(h)
        b = b - ((alpha/m)* np.array([hsum , product]) )
        costlist.append(cost(x,b,y,m))
    return b,cost(x,b,y,m)
There is possibly another issue besides this, since it doesn't converge exactly to your expected answer. You should also make sure you are using the same alpha and iteration count as the reference solution.
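For what it's worth, the same update can be written fully vectorized, which avoids assembling the [hsum, product] array by hand and works for any number of features (a sketch using the question's variable names; whether it lands exactly on [-3.630291, 1.166362] still depends on alpha and the number of iterations):

def gradient_descent_vectorized(x, b, y, m, alpha, iterations):
    # x: (m, n) design matrix including the column of ones, b: (n,) parameters, y: (m,) targets
    for _ in range(iterations):
        h = x.dot(b) - y              # residuals
        grad = x.T.dot(h) / m         # gradient of the squared-error cost
        b = b - alpha * grad
    return b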

How to specify size for bernoulli distribution with pymc3?

In trying to make my way through Bayesian Methods for Hackers, which is in pymc, I came across this code:
first_coin_flips = pm.Bernoulli("first_flips", 0.5, size=N)
I've tried to translate this to pymc3 with the following, but it just returns a numpy array, rather than a tensor (?):
first_coin_flips = pm.Bernoulli("first_flips", 0.5).random(size=50)
The reason the size matters is that it's used later on in a deterministic variable. Here's the entirety of the code that I have so far:
import pymc3 as pm
import matplotlib.pyplot as plt
import numpy as np
import mpld3
import theano.tensor as tt
model = pm.Model()
with model:
    N = 100
    p = pm.Uniform("cheating_freq", 0, 1)
    true_answers = pm.Bernoulli("truths", p)
    print(true_answers)
    first_coin_flips = pm.Bernoulli("first_flips", 0.5)
    second_coin_flips = pm.Bernoulli("second_flips", 0.5)
    # print(first_coin_flips.value)
    # Create model variables
    def calc_p(true_answers, first_coin_flips, second_coin_flips):
        observed = first_coin_flips * true_answers + (1-first_coin_flips) * second_coin_flips
        # NOTE: Where I think the size param matters, since we're dividing by it
        return observed.sum() / float(N)
    calced_p = pm.Deterministic("observed", calc_p(true_answers, first_coin_flips, second_coin_flips))
    step = pm.Metropolis(model.free_RVs)
    trace = pm.sample(1000, tune=500, step=step)
pm.traceplot(trace)
html = mpld3.fig_to_html(plt.gcf())
with open("output.html", 'w') as f:
    f.write(html)
    f.close()
And the output:
The coin flips and uniform cheating_freq output look correct, but the observed doesn't look like anything to me, and I think it's because I'm not translating that size param correctly.
The pymc3 way to specify the size of a Bernoulli distribution is by using the shape parameter, like:
first_coin_flips = pm.Bernoulli("first_flips", 0.5, shape=N)
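Applied to the with-block from the question, that would look roughly like this (a sketch; shape=N is given to all three Bernoulli variables, which appears to be what the original pymc example intends):

with model:
    N = 100
    p = pm.Uniform("cheating_freq", 0, 1)
    true_answers = pm.Bernoulli("truths", p, shape=N)
    first_coin_flips = pm.Bernoulli("first_flips", 0.5, shape=N)
    second_coin_flips = pm.Bernoulli("second_flips", 0.5, shape=N)
    # elementwise over the N students, then averaged
    observed_prop = pm.Deterministic(
        "observed",
        (first_coin_flips * true_answers
         + (1 - first_coin_flips) * second_coin_flips).sum() / N)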

How to fit cubic equation on pybrain

I am trying to fit a neural network to a cubic equation, but after many attempts at changing the number of neurons in the hidden layer and increasing the number of epochs, I could only get this:
Could you guys help me on this?
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Preparing Data
dataset = pd.read_csv("C:\Users\gugub\Documents\projects\Cyrus\graph.data", header = None)
darray = np.array(dataset)
x = []
y = []
ds = SupervisedDataSet(1,1)
#Preparing True inputs and True Outputs
for i in range(43):
    e1 = darray[i,0]
    s1 = darray[i,1]
    x.append(e1)
    y.append(s1)
print(x)
print(y)
ds = SupervisedDataSet(1,1)
i = 0
for i in range(43):
    ds.addSample(x[i],y[i])
print(ds)
net = buildNetwork(ds.indim,30,ds.outdim,recurrent=True)
trainer = BackpropTrainer(net,learningrate=0.01,verbose=True)
trainer.trainOnDataset(ds,20000)
trainer.testOnData(verbose=True)
y1 = []
i = 0
for i in x:
    y1.append(net.activate(i))
plt.plot(x,y,'r')
plt.plot(x,y1,'b')
plt.show()
while True:
    e2 = int(raw_input(">"))
    s2 = [e2]
    print(net.activate(s2))
P.S. The red line on the graph is what the output should be, and the blue one is the function produced by my network.

initialize GMM using sklearn python

I wish to create an sklearn GMM object with a predefined set of means, weights, and covariances (on a grid).
I managed to do it:
from sklearn.mixture import GaussianMixture
import numpy as np
def get_grid_gmm(subdivisions=[10,10,10], variance=0.05):
    n_gaussians = reduce(lambda x, y: x*y, subdivisions)
    step = [1.0/(2*subdivisions[0]), 1.0/(2*subdivisions[1]), 1.0/(2*subdivisions[2])]
    means = np.mgrid[step[0]: 1.0-step[0]: complex(0,subdivisions[0]),
                     step[1]: 1.0-step[1]: complex(0,subdivisions[1]),
                     step[2]: 1.0-step[2]: complex(0,subdivisions[2])]
    means = np.reshape(means,[-1,3])
    covariances = variance*np.ones_like(means)
    weights = (1.0/n_gaussians)*np.ones(n_gaussians)
    gmm = GaussianMixture(n_components=n_gaussians, covariance_type='spherical')
    gmm.weights_ = weights
    gmm.covariances_ = covariances
    gmm.means_ = means
    return gmm
def main():
    xx = np.random.rand(100,3)
    gmm = get_grid_gmm()
    y = gmm.predict_proba(xx)
if __name__ == "__main__":
    main()
The problem is that it fails on the gmm.predict_proba() call that I need to use later on.
How can I overcome this?
UPDATE: I updated the code to be a complete example that shows the error.
UPDATE 2: I updated the code according to the comments and answers.
from sklearn.mixture import GaussianMixture
import numpy as np
def get_grid_gmm(subdivisions=[10,10,10], variance=0.05):
    n_gaussians = reduce(lambda x, y: x*y, subdivisions)
    step = [1.0/(2*subdivisions[0]), 1.0/(2*subdivisions[1]), 1.0/(2*subdivisions[2])]
    means = np.mgrid[step[0]: 1.0-step[0]: complex(0,subdivisions[0]),
                     step[1]: 1.0-step[1]: complex(0,subdivisions[1]),
                     step[2]: 1.0-step[2]: complex(0,subdivisions[2])]
    means = np.reshape(means,[3,-1])
    covariances = variance*np.ones(n_gaussians)
    cov_type = 'spherical'
    weights = (1.0/n_gaussians)*np.ones(n_gaussians)
    gmm = GaussianMixture(n_components=n_gaussians, covariance_type=cov_type)
    gmm.weights_ = weights
    gmm.covariances_ = covariances
    gmm.means_ = means
    from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
    gmm.precisions_cholesky_ = _compute_precision_cholesky(covariances, cov_type)
    gmm.precisions_ = gmm.precisions_cholesky_ ** 2
    return gmm
def main():
    xx = np.random.rand(100,3)
    gmm = get_grid_gmm()
    _, y = gmm._estimate_log_prob(xx)
    y = np.exp(y)
if __name__ == "__main__":
    main()
No more errors but _estimate_log_prob and predict_proba do not produce the same result for a fitted GMM. Why could that be?
Since you don't train the model but just use it for estimation, you don't need the object; you can use the same function it uses under the hood. You could try _estimate_log_gaussian_prob. That is what they do internally, I think.
Have a look at the source, in particular at the base class:
https://github.com/scikit-learn/scikit-learn/blob/ab93d657eb4268ac20c4db01c48065b5a1bfe80d/sklearn/mixture/base.py#L342
which calls the covariance-specific method, which in turn calls a function:
https://github.com/scikit-learn/scikit-learn/blob/ab93d657eb4268ac20c4db01c48065b5a1bfe80d/sklearn/mixture/gaussian_mixture.py#L671
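A rough sketch of that approach (it assumes the private helpers keep the signatures at the links above, that the import path sklearn.mixture.gaussian_mixture matches your sklearn version, and that means has shape (n_components, n_features)):

import numpy as np
from scipy.special import logsumexp
from sklearn.mixture.gaussian_mixture import (
    _compute_precision_cholesky, _estimate_log_gaussian_prob)

n_components, n_features = 1000, 3
means = np.random.rand(n_components, n_features)       # the grid centers would go here
covariances = 0.05 * np.ones(n_components)             # spherical variances
weights = np.full(n_components, 1.0 / n_components)

xx = np.random.rand(100, n_features)
prec_chol = _compute_precision_cholesky(covariances, 'spherical')
log_prob = _estimate_log_gaussian_prob(xx, means, prec_chol, 'spherical')

# what predict_proba does on top of this: add log weights, then normalize per sample
weighted = log_prob + np.log(weights)
resp = np.exp(weighted - logsumexp(weighted, axis=1, keepdims=True))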
