Regularized Logistic Regression in Python (Andrew Ng Course)

I'm starting my ML journey and I'm having trouble with this coding exercise.
Here is my code:
import numpy as np
import pandas as pd
import scipy.optimize as op
# Read the data and give it labels
data = pd.read_csv('ex2data2.txt', header=None, names=['Test1', 'Test2', 'Accepted'])
# Separate the features to make it fit into the mapFeature function
X1 = data['Test1'].values.T
X2 = data['Test2'].values.T
# This function makes more features (degree)
def mapFeature(x1, x2):
    degree = 6
    out = np.ones((x1.shape[0], sum(range(degree + 2))))
    curr_column = 1
    for i in range(1, degree + 1):
        for j in range(i + 1):
            out[:, curr_column] = np.power(x1, i - j) * np.power(x2, j)
            curr_column += 1
    return out
# Separate the data into training and target, also initialize theta
X = mapFeature(X1, X2)
y = np.matrix(data['Accepted'].values).T
m, n = X.shape
cols = X.shape[1]
theta = np.matrix(np.zeros(cols))
#Initialize the learningRate(sigma)
learningRate = 1
# Define the Sigmoid Function (Output between 0 and 1)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
def cost(theta, X, y, learningRate):
    # This is required to make the optimize function work
    theta = theta.reshape(-1, 1)
    error = sigmoid(X @ theta)
    first = np.multiply(-y, np.log(error))
    second = np.multiply(1 - y, np.log(1 - error))
    j = np.sum((first - second)) / m + (learningRate * np.sum(np.power(theta, 2)) / 2 * m)
    return j
# Define the gradient of the cost function
def gradient(theta, X, y, learningRate):
    # This is required to make the optimize function work
    theta = theta.reshape(-1, 1)
    error = sigmoid(X @ theta)
    grad = (X.T @ (error - y)) / m + ((learningRate * theta) / m)
    grad_no = (X.T @ (error - y)) / m
    grad[0] = grad_no[0]
    return grad
Result = op.minimize(fun=cost, x0=theta, args=(X, y, learningRate), method='TNC', jac=gradient)
opt_theta = np.matrix(Result.x)
def predict(theta, X):
    sigValue = sigmoid(X @ theta.T)
    p = sigValue >= 0.5
    return p
p = predict(opt_theta, X)
print('Train Accuracy: {:f}'.format(np.mean(p == y) * 100))
So, when learningRate = 1, the accuracy should be around 83.05%, but I'm getting 80.5%; and when learningRate = 0, the accuracy should be 91.52%, but I'm getting 87.28%.
So the question is: what am I doing wrong? Why is my accuracy below the expected answer?
Hope someone can guide me in the right direction. Thanks!
P.S.: Here is the dataset, maybe it can help:
https://raw.githubusercontent.com/TheGirlWhiteWithBandages/Machine-Learning-Algorithms/master/Logistic%20Regression/ex2data2.txt
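For reference, the regularized cost used in the course penalizes only theta[1:] and divides the penalty by 2*m (the same form the second code block below uses). A minimal NumPy sketch with illustrative names, not the poster's code:

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def reg_cost(theta, X, y, lam):
    # cross-entropy averaged over m samples; penalty divided by 2*m and
    # applied only to theta[1:] (theta[0] is not regularized)
    m = y.shape[0]
    h = sigmoid(X @ theta.reshape(-1, 1))
    cross_entropy = -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) / m
    penalty = lam * np.sum(np.square(theta.ravel()[1:])) / (2 * m)
    return cross_entropy + penalty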

Hey guys, I found a way to make it even better!
Here is the code:
import numpy as np
import pandas as pd
import scipy.optimize as op
from sklearn.preprocessing import PolynomialFeatures
# Read the data and give it labels
data = pd.read_csv('ex2data2.txt', header=None, names=['Test1', 'Test2', 'Accepted'])
# Separate the data into training and target
X = (data.iloc[:, 0:2]).values
y = (data.iloc[:, 2:3]).values
# Modify the features to a certain degree (Polynomial)
poly = PolynomialFeatures(6)
m = y.size
XX = poly.fit_transform(data.iloc[:, 0:2].values)
# Initialize Theta
theta = np.zeros(XX.shape[1])
# Define the Sigmoid Function (Output between 0 and 1)
def sigmoid(z):
    return (1 / (1 + np.exp(-z)))
# Define the Regularized cost function
def costFunctionReg(theta, reg, *args):
    # This is required to make the optimize function work
    h = sigmoid(XX @ theta)
    first = np.log(h).T @ -y
    second = np.log(1 - h).T @ (1 - y)
    J = (1 / m) * (first - second) + (reg / (2 * m)) * np.sum(np.square(theta[1:]))
    return J
# Define the Regularized gradient function
def gradientReg(theta, reg, *args):
    theta = theta.reshape(-1, 1)
    h = sigmoid(XX @ theta)
    grad = (1 / m) * (XX.T @ (h - y)) + (reg / m) * np.r_[[[0]], theta[1:]]
    return grad.flatten()
# Define the predict Function
def predict(theta, X):
    sigValue = sigmoid(X @ theta.T)
    p = sigValue >= 0.5
    return p
# A loop to test between different values for sigma (reg parameter)
for i, Sigma in enumerate([0, 1, 100]):
    # Optimize costFunctionReg
    res2 = op.minimize(costFunctionReg, theta, args=(Sigma, XX, y), method=None, jac=gradientReg)
    # Get the accuracy of the model
    accuracy = 100 * sum(predict(res2.x, XX) == y.ravel()) / y.size
    # Get the Error between different weights
    error1 = costFunctionReg(res2.x, Sigma, XX, y)
    # print the accuracy and error
    print('Train accuracy {}% with Lambda = {}'.format(np.round(accuracy, decimals=4), Sigma))
    print(error1)
Thanks for all your help!
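As a quick sanity check (a sketch, not part of the original post): PolynomialFeatures(6) on two input columns produces 28 columns, the same count as the hand-rolled mapFeature in the question (sum(range(6 + 2)) = 28).

from sklearn.preprocessing import PolynomialFeatures
import numpy as np

# Degree-6 expansion of two features: C(6 + 2, 2) = 28 columns, bias included
poly = PolynomialFeatures(6)
print(poly.fit_transform(np.array([[0.5, -0.5]])).shape)  # (1, 28)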

Try this out:
# import library
import pandas as pd
import numpy as np
dataset = pd.read_csv('ex2data2.csv',names = ['Test #1','Test #2','Accepted'])
# splitting to x and y variables for features and target variable
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,-1].values
print('x[0] ={}, y[0] ={}'.format(x[0],y[0]))
m, n = x.shape
print('#{} Number of training samples, #{} features per sample'.format(m,n))
# import library FeatureMapping
from sklearn.preprocessing import PolynomialFeatures
# We also add one column of ones to interpret theta 0 (x with power of 0 = 1) by setting include_bias to True
pf = PolynomialFeatures(degree = 6, include_bias = True)
x_poly = pf.fit_transform(x)
pd.DataFrame(x_poly).head(5)
m,n = x_poly.shape
# define theta as zero
theta = np.zeros(n)
# define hyperparameter λ
lambda_ = 1
# reshape (-1,1) because we just have one feature in y column
y = y.reshape(-1,1)
def sigmoid(z):
    return 1/(1+np.exp(-z))

def lr_hypothesis(x, theta):
    return np.dot(x, theta)

def compute_cost(theta, x, y, lambda_):
    theta = theta.reshape(n, 1)
    infunc1 = -y*(np.log(sigmoid(lr_hypothesis(x, theta)))) - ((1-y)*(np.log(1 - sigmoid(lr_hypothesis(x, theta)))))
    infunc2 = (lambda_*np.sum(theta[1:]**2))/(2*m)
    j = np.sum(infunc1)/m + infunc2
    return j

# gradient[0] corresponds to the gradient for theta(0)
# gradient[1:] corresponds to the gradient for theta(j), j > 0
def compute_gradient(theta, x, y, lambda_):
    gradient = np.zeros(n).reshape(n,)
    theta = theta.reshape(n, 1)
    infunc1 = sigmoid(lr_hypothesis(x, theta)) - y
    gradient_in = np.dot(x.transpose(), infunc1)/m
    gradient[0] = gradient_in[0, 0]  # theta(0)
    gradient[1:] = gradient_in[1:, 0] + (lambda_*theta[1:, ]/m).reshape(n-1,)  # theta(j); j > 0
    gradient = gradient.flatten()
    return gradient
You can now test your cost and gradient without optimization. The code below will optimize the model:
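(The loop below also calls predictor, confusion_matrix and acc, which this answer does not define. A minimal sketch of compatible helpers, assuming sklearn's confusion_matrix and the sigmoid defined above, could look like this; the original answer's helpers may differ.)

from sklearn.metrics import confusion_matrix

def predictor(x, theta):
    # predicted class labels: 1 where sigmoid(x @ theta) >= 0.5, else 0
    return (sigmoid(np.dot(x, theta)) >= 0.5).astype(int)

def acc(cm):
    # number correct (diagonal), number wrong (off-diagonal) and accuracy
    correct = np.trace(cm)
    wrong = cm.sum() - correct
    return correct, wrong, correct / cm.sum()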
# hyperparameters
m,n = x_poly.shape
# define theta as zero
theta = np.zeros(n)
# define hyperparameter λ
lambda_array = [0, 1, 10, 100]
import scipy.optimize as opt
for i in range(0, len(lambda_array)):
    # Train
    print('======================================== Iteration {} ===================================='.format(i))
    optimized = opt.minimize(fun=compute_cost, x0=theta, args=(x_poly, y, lambda_array[i]),
                             method='TNC', jac=compute_gradient)
    new_theta = optimized.x
    # Prediction
    y_pred_train = predictor(x_poly, new_theta)
    cm_train = confusion_matrix(y, y_pred_train)
    t_train, f_train, acc_train = acc(cm_train)
    print('With lambda = {}, {} correct, {} wrong ==========> accuracy = {}%'
          .format(lambda_array[i], t_train, f_train, acc_train*100))
Now you should see output like this:
=== Iteration 0 === With lambda = 0, 104 correct, 14 wrong ==========> accuracy = 88.13559322033898%
=== Iteration 1 === With lambda = 1, 98 correct, 20 wrong ==========> accuracy = 83.05084745762711%
=== Iteration 2 === With lambda = 10, 88 correct, 30 wrong ==========> accuracy = 74.57627118644068%
=== Iteration 3 === With lambda = 100, 72 correct, 46 wrong ==========> accuracy = 61.016949152542374%

Related

Unexpected value of cost function in Logistic regression

I've been trying to write Python code for logistic regression, but the results show an unexpectedly high value of the cost function. I created random variables X and Y and added a noise term to Y that flips its elements based on the probability theta. This is my code:
import numpy as np
from scipy.stats import bernoulli
rg = np.random.default_rng(100)
def data_generate(n, m, theta):
    X_0 = np.ones((n, 1))
    X = np.random.normal(loc=0.0, scale=1.0, size=(n, m))
    X = np.concatenate((X_0, X), axis=1)
    beta = rg.random((m+1, 1))
    Y = np.zeros((n, 1))
    P = 1.0/(1.0 + np.exp(-np.dot(X, beta)))
    for i in range(len(P)):
        if P[i] >= 0.5:
            Y[i] = 1
        else:
            Y[i] = 0
    # Noise addition
    noise = bernoulli.rvs(size=(n, 1), p=theta)
    for j in range(len(noise)):
        if noise[i] == 1:
            Y[i] = int(not(Y[i]))
        else:
            pass
    return X, Y, beta
def Gradient_Descent(X, Y, k, tollerence, learning_rate):
    n, m = np.shape(X)
    beta = rg.random((m, 1))
    costs = []
    initial_cost = 0.0
    for i in range(k):
        Y_pred = 1.0/(1.0 + np.exp(-np.dot(X, beta)))
        cost = np.mean(np.dot(Y.T, np.log(Y_pred)) + np.dot((1-Y).T, np.log(1-Y_pred)))
        if (abs(cost - initial_cost) <= tollerence):
            break
        else:
            beta = beta - learning_rate*(np.mean(np.dot(X.T, (Y_pred - Y))))
            initial_cost = cost
            costs.append(cost)
    return cost, beta, i
X = data_generate(200, 3, 0.1)[0]
Y = data_generate(200, 3, 0.1)[1]
Gradient_Descent(X, Y, 10000, 1e-6, 0.01)
# Output of code :
(-154.7689765716959,
array([[-0.02218003],
[-0.1182535 ],
[ 0.1169462 ],
[ 0.58610747]]),
14)
Please tell me what the problem with the code is.
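For comparison, the conventional logistic cost carries a leading minus sign and averages over the samples, so it comes out as a small positive number rather than a large negative one. A minimal sketch with illustrative names, not the poster's function:

import numpy as np

def logistic_cost(X, Y, beta):
    # negative log-likelihood averaged over the n samples
    P = 1.0 / (1.0 + np.exp(-np.dot(X, beta)))
    return -np.mean(Y * np.log(P) + (1 - Y) * np.log(1 - P))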

Implementation of backpropagation in Python

I'm following the Andrew Ng course on Machine Learning and I'm currently doing the week 5 exercise.
I've found myself stuck on the implementation of the backpropagation algorithm: the relative difference compared to the numerical gradient is very high (on the order of 1e-1), but I can't find any error in my implementation, so I'm gently asking if someone could take a look at it and explain what I did wrong.
Forward propagation:
def forward_propagation(thetas, X, history=False):
    activation_arr = []
    a = X  # X is the array of the first activation values
    for k in range(0, len(thetas)):
        a = add_intercept(a)  # add the bias unit
        a = sigmoid(a @ thetas[k].T)
        if history:
            activation_arr.append(a)
    return activation_arr if history else a
Backpropagation:
def gradient_nn(thetas, X, y, num_labels, reg_lambda=None):
    n_examples = X.shape[0]
    Y = np.zeros((       # creates a n_examples X num_labels matrix
        n_examples,      # n of examples
        num_labels
    ))
    for i in range(n_examples):
        Y[i, y[i, 0]] = 1  # the index corresponding to the correct label for each row has value = 1
    # add intercepted X to the activation array
    activation_arr = [add_intercept(X)] + forward_propagation(thetas, X, history=True)
    sigma = [activation_arr[-1] - Y]  # sigma^L = a^L - y
    delta = [sigma[-1].T @ activation_arr[-2]]  # find delta for the first row
    thetas_grad = []
    # Calculate sigma and delta
    for idx in range(1, len(thetas)):  # skip last iteration
        sigma = [
            (sigma[0] @ thetas[-idx][:, 1:]) * partial_derivative(activation_arr[-1-idx])
        ] + sigma
        delta = [
            sigma[0].T @ activation_arr[-2-idx]
        ] + delta
    return [np.sum(d) / n_examples for d in thetas_grad]
Partial derivative:
def partial_derivative(a):
    return a * (1 - a)  # element wise multiplication
Numerical gradient:
def compute_numerical_gradient(cost_function, thetas):
    # Unroll parameters
    nn_params = unroll_thetas(thetas)
    num_grad = np.zeros(nn_params.shape)
    perturb = np.zeros(nn_params.shape)
    shapes = [theta.shape for theta in thetas]
    epsilon = 1e-4  # not the one of random initialization
    for p in range(nn_params.shape[0]):
        # Set perturbation vector
        perturb[p] = epsilon
        minus_theta = nn_params - perturb
        plus_theta = nn_params + perturb
        # --- Roll params back in order to use the cost function ---
        minus_theta = roll_thetas(minus_theta, shapes)
        plus_theta = roll_thetas(plus_theta, shapes)
        # calculate the loss of the cost function
        minus_loss = cost_function(minus_theta)
        plus_loss = cost_function(plus_theta)
        # Compute Numerical Gradient
        num_grad[p] = (plus_loss - minus_loss) / (2 * epsilon)
        perturb[p] = 0
    num_grad = roll_thetas(num_grad, shapes)
    return [np.sum(num_g) for num_g in num_grad]
Cost function:
def J_nn(num_labels, reg_lambda=None):
    def non_reg_func(thetas, X, y):
        n_examples = X.shape[0]
        Y = np.zeros((       # creates a n_examples X num_labels matrix
            n_examples,      # n of examples
            num_labels
        ))
        for i in range(n_examples):
            Y[i, y[i, 0]] = 1  # the index corresponding to the correct label for each row has value = 1
        prediction = forward_propagation(thetas, X)
        return np.sum(np.sum(-Y * np.log(prediction) - (1 - Y) * np.log(1 - prediction))) / n_examples

    if reg_lambda is None:
        func = non_reg_func
    else:  # regularization
        def func(thetas, X, y):
            cost = non_reg_func(thetas, X, y)
            for theta in thetas:  # regularize for every layer
                theta = theta[1:]  # remove bias unit
                cost = cost + (reg_lambda / (2 * y.shape[0])) * np.sum(np.sum(theta[:, ] ** 2))
            return cost
    return func
Checking backpropagation with numerical gradient:
def check_nn_gradients(reg_lambda=None):
    """
    Creates a small neural network (max 8 x 8 x 7 x 8) and checks that
    the implementation of the backpropagation algorithm is good
    """
    # n_examples, sizes = random.randint(5, 10), [random.randint(2, 8), random.randint(2, 8), random.randint(1, 8)]
    n_examples, sizes = 5, [8, 8, 5, 4]
    n_labels = sizes[-1]  # Last size is equal to the number of labels
    init_epsilon = 0.0001
    thetas = random_init_thetas(sizes, init_epsilon)
    X = np.array(
        random_init_thetas([sizes[0]-1, n_examples], init_epsilon)
    ).squeeze()  # We squeeze it because random_init_thetas returns a 3D array, but we want X to be 2D
    y = np.array([random.randint(0, n_labels-1) for _ in X])
    y = y[:, np.newaxis]
    inner_cost = lambda _thetas: J_nn(n_labels, reg_lambda)(_thetas, X, y)
    gradients = gradient_nn(thetas, X, y, n_labels, 0)
    unrolled_gradients = unroll_thetas(gradients)
    print(unrolled_gradients)
    # finite difference method
    grad_checking_epsilon = 1e-4
    num_grad = compute_numerical_gradient(inner_cost, thetas)
    unrolled_num_grad = unroll_thetas(num_grad)
    print(unrolled_num_grad)
    diff = np.linalg.norm(unrolled_num_grad - unrolled_gradients) / np.linalg.norm(unrolled_num_grad + unrolled_gradients)
    return diff
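(The snippets above reference add_intercept, unroll_thetas and roll_thetas without showing them. A minimal sketch of helpers consistent with how they are called, purely illustrative:)

import numpy as np

def add_intercept(a):
    # prepend a column of ones (the bias unit)
    return np.hstack((np.ones((a.shape[0], 1)), a))

def unroll_thetas(thetas):
    # flatten a list of weight matrices into a single 1-D parameter vector
    return np.concatenate([np.asarray(t).ravel() for t in thetas])

def roll_thetas(nn_params, shapes):
    # rebuild the list of matrices from the flat vector, given their shapes
    thetas, start = [], 0
    for shape in shapes:
        size = int(np.prod(shape))
        thetas.append(nn_params[start:start + size].reshape(shape))
        start += size
    return thetas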

Neural network back propagation regression, how to correctly learn the cos function?

After Lutz Lehmann's suggestion, I discovered that it was a problem with the random weights and biases. I used np.random.seed(2021) to specify the random seed, and the error did not converge. But if I use np.random.seed(10) as the random seed, the error converges to a relatively small value by the 600th epoch.
Galletti_Lance's suggestion is correct: the activation function should be replaced with a periodic one. I expanded the interval of the sin function, and the learning error did not converge. Sure enough, it is overfitting.
input_data = np.arange(0, np.pi * 4, 0.1) # input
correct_data = np.sin(input_data) # correct answer
input_data = (input_data - np.pi*2) / np.pi
With np.random.seed(2021), learning the cos function, the 20000th epoch is as follows:
Epoch:0/20001 Error:0.2904405534384431
Epoch:200/20001 Error:0.2752981376571506
Epoch:400/20001 Error:0.27356300803051226
Epoch:600/20001 Error:0.27409878767315193
Epoch:800/20001 Error:0.2638216736165815
Epoch:1000/20001 Error:0.27196157366033213
Epoch:1200/20001 Error:0.2743520487664953
Epoch:1400/20001 Error:0.2589745966244678
Epoch:1600/20001 Error:0.2705289192984957
Epoch:1800/20001 Error:0.2689693217636388
....
Epoch:20000/20001 Error:0.2678723095120438
But if I use np.random.seed(10) as the random seed, the error converges to a relatively small value by the 600th epoch:
Epoch:0/20001 Error:0.283958515549615
Epoch:200/20001 Error:0.260819823215878
Epoch:400/20001 Error:0.23267630899157743
Epoch:600/20001 Error:0.0022589485429890047
Epoch:800/20001 Error:0.0007425256677052262
Epoch:1000/20001 Error:0.0003946220094805989
....
Epoch:2800/20001 Error:0.00011495288247859594
Epoch:3000/20001 Error:9.989662843897715e-05
....
Epoch:20000/20001 Error:4.6146397913360866e-05
With np.random.seed(10), learning the cos function, the 600th epoch is as follows:
I use neural network backpropagation regression to learn the cos function. When I learn the sin function, it works normally; if I change it to cos, it behaves abnormally. What is the problem?
correct_data = np.cos(input_data)
Related settings:
1.The activation function of the middle layer: sigmoid function
2.Excitation function of the output layer: identity function
3.Loss function: sum of squares error
4.Optimization algorithm: stochastic gradient descent method
5.Batch size: 1
My code is as follows:
import numpy as np
import matplotlib.pyplot as plt
# - Prepare to input and correct answer data -
input_data = np.arange(0, np.pi * 2, 0.1) # input
correct_data = np.cos(input_data) # correct answer
input_data = (input_data - np.pi) / np.pi # Converge the input to the range of -1.0-1.0
n_data = len(correct_data) # number of data
# - Each setting value -
n_in = 1 # The number of neurons in the input layer
n_mid = 3 # The number of neurons in the middle layer
n_out = 1 # The number of neurons in the output layer
wb_width = 0.01 # The spread of weights and biases
eta = 0.1 # learning coefficient
epoch = 2001
interval = 200 # Display progress interval practice
# -- middle layer --
class MiddleLayer:
    def __init__(self, n_upper, n):  # Initialize settings
        self.w = wb_width * np.random.randn(n_upper, n)  # weight (matrix)
        self.b = wb_width * np.random.randn(n)  # offset (vector)

    def forward(self, x):  # forward propagation
        self.x = x
        u = np.dot(x, self.w) + self.b
        self.y = 1 / (1 + np.exp(-u))  # Sigmoid function

    def backward(self, grad_y):  # Backpropagation
        delta = grad_y * (1 - self.y) * self.y  # Differentiation of Sigmoid function
        self.grad_w = np.dot(self.x.T, delta)
        self.grad_b = np.sum(delta, axis=0)
        self.grad_x = np.dot(delta, self.w.T)

    def update(self, eta):  # update of weight and bias
        self.w -= eta * self.grad_w
        self.b -= eta * self.grad_b
# - Output layer -
class OutputLayer:
    def __init__(self, n_upper, n):  # Initialize settings
        self.w = wb_width * np.random.randn(n_upper, n)  # weight (matrix)
        self.b = wb_width * np.random.randn(n)  # offset (vector)

    def forward(self, x):  # forward propagation
        self.x = x
        u = np.dot(x, self.w) + self.b
        self.y = u  # Identity function

    def backward(self, t):  # Backpropagation
        delta = self.y - t
        self.grad_w = np.dot(self.x.T, delta)
        self.grad_b = np.sum(delta, axis=0)
        self.grad_x = np.dot(delta, self.w.T)

    def update(self, eta):  # update of weight and bias
        self.w -= eta * self.grad_w
        self.b -= eta * self.grad_b
# - Initialization of each network layer -
middle_layer = MiddleLayer(n_in, n_mid)
output_layer = OutputLayer(n_mid, n_out)
# -- learn --
for i in range(epoch):
    # Randomly scramble the index values
    index_random = np.arange(n_data)
    np.random.shuffle(index_random)
    # Used for the display of results
    total_error = 0
    plot_x = []
    plot_y = []
    for idx in index_random:
        x = input_data[idx:idx + 1]  # input
        t = correct_data[idx:idx + 1]  # correct answer
        # Forward propagation
        middle_layer.forward(x.reshape(1, 1))  # Convert the input to a matrix
        output_layer.forward(middle_layer.y)
        # Backpropagation
        output_layer.backward(t.reshape(1, 1))  # Convert the correct answer to a matrix
        middle_layer.backward(output_layer.grad_x)
        # Update of weights and biases
        middle_layer.update(eta)
        output_layer.update(eta)
        if i % interval == 0:
            y = output_layer.y.reshape(-1)  # Restore the matrix to a vector
            # Error calculation
            total_error += 1.0 / 2.0 * np.sum(np.square(y - t))  # Square sum error
            # Output record
            plot_x.append(x)
            plot_y.append(y)
    if i % interval == 0:
        # Display the number of epochs and errors
        print("Epoch:" + str(i) + "/" + str(epoch), "Error:" + str(total_error / n_data))
        # Display the output with a graph
        plt.plot(input_data, correct_data, linestyle="dashed")
        plt.scatter(plot_x, plot_y, marker="+")
        plt.show()
If increasing the number of epochs worked, the model needed more training.
But you may be overfitting... Notice that the cosine function is a periodic function, yet you are using only monotonic functions (sigmoid, and identity) to approximate it.
So while on the bounded interval of your data it may work:
It does not generalize well:
Code for the above plots:
import math as m
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
from tensorflow import keras
from tensorflow.keras import layers
t, _ = datasets.make_blobs(n_samples=7500, centers=[[0, 0]], cluster_std=1, random_state=0)
X = np.array(list(filter(lambda x : m.cos(4*x[0]) - x[1] < -.5 or m.cos(4*x[0]) - x[1] > .5, t)))
Y = np.array([1 if m.cos(4*x[0]) - x[1] >= 0 else 0 for x in X])
model = keras.models.Sequential()
model.add(layers.Dense(8, input_dim=2, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss="binary_crossentropy")
model.fit(X, Y, batch_size=500, epochs=3000)
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
meshData = np.c_[xx.ravel(), yy.ravel()]
fig, ax = plt.subplots()
Z = model.predict(meshData)
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=.3, cmap=plt.cm.Paired)
ax.axis('off')
# Plot also the training points
T = model.predict(X)
T = T.reshape(X[:,0].shape)
ax.scatter(X[:, 0], X[:, 1], color=colors[T].tolist(), s=10, alpha=0.9)
plt.show()
# add duplicate plotting code here to generate second plot
# predicting on data generated from a blob
# with a larger standard deviation
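Following the periodic-activation suggestion mentioned at the top of this question, a minimal sketch of a middle layer that uses np.sin instead of the sigmoid (it subclasses the MiddleLayer class from the question's code; this is an illustration, not code from the thread):

import numpy as np

class PeriodicMiddleLayer(MiddleLayer):
    def forward(self, x):  # forward propagation with a periodic activation
        self.x = x
        self.u = np.dot(x, self.w) + self.b
        self.y = np.sin(self.u)

    def backward(self, grad_y):  # d/du sin(u) = cos(u)
        delta = grad_y * np.cos(self.u)
        self.grad_w = np.dot(self.x.T, delta)
        self.grad_b = np.sum(delta, axis=0)
        self.grad_x = np.dot(delta, self.w.T)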

RuntimeWarning: overflow encountered in reduce

I am testing this code using real data or a generated dataset from sklearn. In both cases, the code works without errors if the number of factors in the model is less than 6. With 7 factors, I get an error:
RuntimeWarning: overflow encountered in square
return np.mean((y_true - y_pred)**2)
With more than 9 factors, I get several errors and the predicted values become NaN:
RuntimeWarning: overflow encountered in reduce
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
RuntimeWarning: overflow encountered in square
return np.mean((y_true - y_pred)**2)
It is obvious to me that the problem is one of dimension, since the error occurs when the number of variables increases, but I do not know how to fix it. I'm already using np.float64 (as recommended for a similar question), but it doesn't help. I enclose the entire code (yes, it is far from perfect, because I am relatively new to programming):
import numpy as np
import copy
from itertools import *
def MSE(y_true, y_pred):
    return np.mean((y_true - y_pred)**2)

def weight_mult(c, weights):
    r = copy.deepcopy(c)
    for idx, num in enumerate(weights):
        r[idx] = r[idx] * num
    return r

def power_factors(X, deg):
    X_power = []
    for x in X:
        x_power = x**deg
        X_power.append(x_power)
    return np.array(X_power)

def normalise_x(X):
    X_norm = []
    for x in X.T:
        min_idx = np.argmin(x)
        x -= x[min_idx]
        max_idx = np.argmax(x)
        x = x/x[max_idx]
        X_norm.append(x)
    return np.array(X_norm).T

def normalise_y(X):
    min_idx = np.argmin(X)
    X -= X[min_idx]
    max_idx = np.argmax(X)
    X = X/X[max_idx]
    return X
class TakagiSugeno:
    def __init__(self, cluster_n=2, lr=0.01, n_iters=1500):
        self.lr = lr
        self.n_iters = n_iters
        self.weights = None
        self.bias = None
        self.weights_best = None
        self.bias_best = None
        self.combination_best = None
        self.cluster_n = cluster_n

    def fit(self, X, y, cluster_w):
        power_degree = np.arange(self.cluster_n)
        power_degree += 1
        models_list = [[], [], [], []]
        for combination in permutations(power_degree):
            X_polynom = []
            for c in combination:
                X_power = power_factors(X, c)
                X_polynom.append(X_power)
            self.model_estimation(X_polynom, y, cluster_w)
            y_pred = self.y_estimation(X_polynom, cluster_w)
            mse = MSE(y, y_pred)
            models_list[0].append(copy.deepcopy(self.weights))
            models_list[1].append(copy.deepcopy(self.bias))
            models_list[2].append(mse)
            models_list[3].append(combination)
        best_model = np.argmin(models_list[2])
        self.weights_best = models_list[0][best_model]
        self.bias_best = models_list[1][best_model]
        self.combination_best = models_list[3][best_model]

    def model_estimation(self, X_polynom, y, cluster_w):
        n_samples, n_features = X_polynom[0].shape
        self.weights = np.zeros((self.cluster_n, n_features))
        self.bias = np.zeros(self.cluster_n)
        for _ in range(self.n_iters):
            y_predicted = np.zeros(n_samples)
            for c in range(self.cluster_n):
                # evaluate y
                y_pred_cluster = np.dot(X_polynom[c], self.weights[c]) + self.bias[c]
                weighted_y_pred = weight_mult(y_pred_cluster, cluster_w[c])
                y_predicted += weighted_y_pred
            for c in range(self.cluster_n):
                # multiple grad count
                dw = (2 / n_samples) * np.dot(weight_mult(X_polynom[c], cluster_w[c]).T, (y_predicted - y))
                db = (2 / n_samples) * np.sum(weight_mult((y_predicted - y), cluster_w[c]))
                # weights update
                self.weights[c] -= self.lr * dw
                self.bias[c] -= self.lr * db

    def y_estimation(self, X_polynom, cluster_w):
        y_predicted = np.zeros(len(X_polynom[0]))
        for c in range(self.cluster_n):
            # evaluate y
            y_pred_cluster = np.dot(X_polynom[c], self.weights[c]) + self.bias[c]
            weighted_y_pred = weight_mult(y_pred_cluster, cluster_w[c])
            y_predicted += weighted_y_pred
        return y_predicted

    def predict(self, X, cluster_w):
        y_predicted = np.zeros(len(X))
        X_polynom = []
        for c in self.combination_best:
            X_power = power_factors(X, c)
            X_polynom.append(X_power)
        for c in range(self.cluster_n):
            # evaluate y
            y_pred_cluster = np.dot(X_polynom[c], self.weights_best[c]) + self.bias_best[c]
            weighted_y_pred = weight_mult(y_pred_cluster, cluster_w[c])
            y_predicted += weighted_y_pred
        return y_predicted
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from sklearn import datasets
    # Prepare data
    X_numpy, y_numpy = datasets.make_regression(n_samples=100, n_features=10, noise=20, random_state=1)
    # Normalisation
    X_norm = np.array(normalise_x(X_numpy), dtype=np.float64)
    y_norm = np.array(normalise_y(y_numpy), dtype=np.float64)
    # Create y
    y_sq = power_factors(y_norm, 2)
    y = y_norm * 0.6 + y_sq * 0.4
    # Create membership matrix
    membership = np.zeros((len(X_norm), 2))
    membership[:, 0] = 0.6
    membership[:, 1] = 0.4
    membership = np.array(membership, dtype=np.float64)
    membership = membership.T
    # training loop
    model = TakagiSugeno(lr=1, n_iters=1000)
    model.fit(X_norm, y, membership)
    y_pred = model.predict(X_norm, membership)
I found the problem on my own. The code itself has no errors, but it uses gradient descent for optimization. I set the learning rate to 1 with 2000 iterations; this, of course, is too much, and the training diverged: the gradient continued to grow uncontrollably and I was getting huge values. The best solution is to set up a stop criterion.
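To make the "stop criterion" idea concrete, here is a minimal sketch on a plain linear model (names and tolerances are illustrative, not taken from the code above): the loop exits as soon as the loss stops improving or becomes non-finite, which is what the overflow warnings signal.

import numpy as np

def gradient_descent_with_stop(X, y, lr=0.01, max_iters=2000, tol=1e-9):
    w = np.zeros(X.shape[1])
    prev_mse = np.inf
    for i in range(max_iters):
        y_pred = X @ w
        w -= lr * (2 / len(y)) * (X.T @ (y_pred - y))
        mse = np.mean((y - y_pred) ** 2)
        # stop when progress stalls or the loss diverges (overflow -> inf/nan)
        if not np.isfinite(mse) or abs(prev_mse - mse) < tol:
            break
        prev_mse = mse
    return w, i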

Curve fitting with gradient descent

I wrote some code that performs gradient descent on a couple of data points.
For some reason the curve is not converging correctly, but I have no idea why. I always end up with an exploding tail.
Am I doing one of the computations wrong? Am I actually getting stuck in a local minimum, or is it something else?
Here is my code:
import numpy as np
import matplotlib.pyplot as plt
def estimate(weights, x, order):
    est = 0
    for i in range(order):
        est += weights[i] * x ** i
    return est

def cost_function(x, y, weights, m):
    cost = 0
    for i in range(m-1):
        cost += (((weights[i] * x ** i) - y) ** 2)
    return (np.sum(cost ** 2) / (2 * m))
def descent(A, b, iterations, descent_rate, order):
    x = A.T[0]
    y = b.reshape(4)
    # features
    ones = np.vstack(np.ones(len(A)))
    x = np.vstack(A.T[0])
    x2 = np.vstack(A.T[0] ** 2)
    # Our feature matrix
    features = np.concatenate((ones, x, x2), axis=1).T
    # Initialize our coefficients to zero
    weights = np.zeros(order + 1)
    m = len(y)
    # gradient descent
    for i in range(iterations):
        est = estimate(weights, x, order).T
        difference = est - y
        weights = weights + (-descent_rate * (1/m) * np.matmul(difference, features.T)[0])
        cost = cost_function(x, y, weights, m)
        print(cost)
    plt.scatter(x, y)
    u = np.linspace(0, 3, 100)
    plt.plot(u, (u ** 2) * weights[2] + u * weights[1] + weights[0], '-')
    plt.show()

A = np.array(((0, 1),
              (1, 1),
              (2, 1),
              (3, 1)))
b = np.array((1, 2, 0, 3), ndmin=2).T
iterations = 150
descent_rate = 0.01
order = 2
descent(A, b, iterations, descent_rate, order)
I would like to avoid getting stuck in such a minimum. I have attempted setting the initial weights to random values, but to no avail; sometimes it dips a bit more but then gives me the same behaviour again.
Here is one of the plots that I am getting:
And here is the expected result obtained by a least squares solution:
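(For reference, that least-squares fit can be reproduced directly with np.linalg.lstsq on the same quadratic feature matrix; a short sketch reusing the A and b defined above:)

import numpy as np

A = np.array(((0, 1), (1, 1), (2, 1), (3, 1)))
b = np.array((1, 2, 0, 3), ndmin=2).T
x = A.T[0]
features = np.column_stack((np.ones(len(x)), x, x ** 2))
coeffs, *_ = np.linalg.lstsq(features, b.ravel(), rcond=None)
print(coeffs)  # [w0, w1, w2] for w0 + w1*x + w2*x^2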
Your estimate function should be
def estimate(weights, x, order):
    est = 0
    for i in range(order+1):
        est += weights[i] * x ** i
    return est
Better yet, since the order information is already present in the size of the weights vector, remove the redundancy with:
def estimate(weights, x):
    est = 0
    for i in range(len(weights)):
        est += weights[i] * x ** i
    return est
This is what I got when using your code and running 2000 iterations:
