Python: Passing a function with arguments as an argument of another function

In the code below, inside WKB, I'm trying to pass the function integrand as an argument to the function int_gauss (like this: int_gauss(integrand(E,x0,m1,x), xp, wp)), and it raises an error:
TypeError: 'numpy.ndarray' object is not callable
Thanks in advance for your help.
x0 = 1
m1 = 1
h = 1
e = 100

import numpy as np
import matplotlib.pyplot as plt

def gaussxw(N):
    a = np.linspace(3, 4*N-1, N)/(4*N+2)
    x = np.cos(np.pi*a + 1/(8*N*N*np.tan(a)))
    # Find roots using Newton's method
    epsilon = 1e-15
    delta = 1.0
    while delta > epsilon:
        p0 = np.ones(N, float)
        p1 = np.copy(x)
        for k in range(1, N):
            p0, p1 = p1, ((2*k+1)*x*p1 - k*p0)/(k+1)
        dp = (N+1)*(p0 - x*p1)/(1 - x*x)
        dx = p1/dp
        x -= dx
        delta = max(abs(dx))
    # Calculate the weights
    w = 2*(N+1)*(N+1)/(N*N*(1 - x*x)*dp*dp)
    return x, w

def gaussxwab(N, a, b):
    x, w = gaussxw(N)
    return 0.5*(b - a)*x + 0.5*(b + a), 0.5*(b - a)*w

N = 50
x, w = gaussxw(N)

def integrand(E, x0, m1, x):
    return np.sqrt(2*m1*(E - e*((x0/x)**12 - 2*(x0/x)**6)))

def WKB(E, x0, e):
    a, b = x0*(1 + np.sqrt(1 + (E/e)))**(-1/6), x0*(1 - np.sqrt(1 + (E/e)))**(-1/6)
    xp = 0.5*(b - a)*x + 0.5*(b + a)
    wp = 0.5*(b - a)*w
    return int_gauss(integrand(E, x0, m1, x), xp, wp)

def int_gauss(f, nodes, weights):
    result = 0.0
    for x, w in zip(nodes, weights):
        result += w*f(x)
    return result

plt.close()
En = np.linspace(-100, -1, 496)
W = [WKB(i, x0, e) for i in En]
plt.figure()
plt.plot(En, W)
plt.grid()
plt.axis()
plt.show()
I get the desired result when I explicitly compute the integrand inside int_gauss, as shown below, but I'd like to be able to call int_gauss with the integrand function as a parameter, i.e. int_gauss(integrand(E,x0,m1,x), xp, wp), inside WKB:
def WKB(E, x0, e):
    a, b = x0*(1 + np.sqrt(1 + (E/e)))**(-1/6), x0*(1 - np.sqrt(1 + (E/e)))**(-1/6)
    xp = 0.5*(b - a)*x + 0.5*(b + a)
    wp = 0.5*(b - a)*w
    return int_gauss(E, x0, m1, xp, wp)

def int_gauss(E, x0, m1, nodes, weights):
    result = 0.0
    for x, w in zip(nodes, weights):
        result += w*np.sqrt(2*m1*(E - e*((x0/x)**12 - 2*(x0/x)**6)))
    return result
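For reference, one way to keep int_gauss generic (a minimal sketch, not from the original post) is to bind the extra arguments with a lambda or functools.partial, so that int_gauss receives a callable of a single variable:

from functools import partial

def WKB(E, x0, e):
    a, b = x0*(1 + np.sqrt(1 + (E/e)))**(-1/6), x0*(1 - np.sqrt(1 + (E/e)))**(-1/6)
    xp = 0.5*(b - a)*x + 0.5*(b + a)
    wp = 0.5*(b - a)*w
    # E, x0 and m1 are fixed here; int_gauss supplies the node as the last argument
    f = lambda xi: integrand(E, x0, m1, xi)
    # equivalent alternative: f = partial(integrand, E, x0, m1)
    return int_gauss(f, xp, wp)

With this, the original int_gauss(f, nodes, weights) can be used unchanged.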

Related

Error in Newton-Raphson method finding root

I was trying to use the Newton-Raphson method (which needs the derivative of the function) to find a root, and I got the following error:
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym

acc = 10**-4
x = sym.Symbol('x')

def p(x):  # define the function
    return 924*x**6 - 2772*x**5 + 3150*x**4 - 1680*x**3 + 420*x**2 - 42*x + 1

p_prime = sym.diff(p(x))

def newton(p, p_prime, acc, start_val):
    x = start_val
    delta = p(x)/p_prime(x)
    while abs(delta) > acc:
        delta = p(x)/p_prime(x)
        print(abs(delta))
        x = x - delta
    return round(x, acc)

a = newton(p, p_prime, -4, 0)
The error was:
delta = p(x)/p_prime(x)
TypeError: 'Add' object is not callable
There are a few mistakes in your code. Correcting them as pointed out in the following modified code snippet, it works fine:
def p_prime(xval):  # define as a function and substitute the value of x
    return sym.diff(p(x)).subs(x, xval)

def newton(p, p_prime, prec, start_val):  # rename the output precision variable,
    x = start_val                         # otherwise it's shadowing the acc tolerance
    delta = p(x) / p_prime(x)             # call the function p_prime()
    while abs(delta) > acc:
        delta = p(x) / p_prime(x)
        x = x - delta
    return round(x, prec)                 # return when the while loop terminates

a = newton(p, p_prime, 4, 0.1)
a
# 0.0338
The following animation shows how Newton-Raphson converges to a root of the polynomial p(x).
Your main problem is that you are calling something which is not callable. Functions are callable; sympy expressions are not:
import sympy as sym

def f(x):
    return x**2

x = sym.Symbol("x")
g = 2*x

print(callable(f))  # True
print(callable(g))  # False
print(f(0))         # prints 0
print(g(0))         # error
So, in your case:

def p(x):
    return 924*x**6 - 2772*x**5 + 3150*x**4 - 1680*x**3 + 420*x**2 - 42*x + 1

print(p(0))         # p is callable, gives the result 1
print(p(1))         # p is callable, gives the result 1
print(p(2))         # p is callable, gives the result 8989
print(callable(p))  # True
And now, if you use a symbolic variable from sympy you get:
x = sym.Symbol("x")
myp = p(x)
print(callable(myp)) # False
print(myp(0)) # gives an error, because myp is an expression, which is not callable
So, if you use diff to get the derivative of the function, you will get a sympy expression. You must transform this expression into a callable function. To do this, you can use a lambda:
def p(x):
    return 924*x**6 - 2772*x**5 + 3150*x**4 - 1680*x**3 + 420*x**2 - 42*x + 1

x = sym.Symbol("x")
myp = p(x)
mydpdx = sym.diff(myp, x)             # get the derivative, once myp is an expression
dpdx = lambda x0: mydpdx.subs(x, x0)  # dpdx is callable
print(dpdx(0))                        # gives '-42'
Another way to make a sympy expression callable is to use lambdify from sympy:
dpdx = sym.lambdify(x, mydpdx)
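For example (a quick check, assuming the same x and mydpdx as above):

dpdx = sym.lambdify(x, mydpdx)
print(dpdx(0))    # -42, now returned as a plain number
print(dpdx(0.5))  # also accepts ordinary floats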
In sympy, functions are usually represented as expressions. Therefore, sym.diff() returns an expression and not a callable function. It makes sense to also represent p as an expression involving x.
To "call" a function p on x, p.subs(x, value) is used. To get a numeric answer, use p.subs(x, value).evalf().
import sympy as sym

acc = 10 ** -4
x = sym.Symbol('x')
p = 924 * x ** 6 - 2772 * x ** 5 + 3150 * x ** 4 - 1680 * x ** 3 + 420 * x ** 2 - 42 * x + 1
p_prime = sym.diff(p)

def newton(p, p_prime, acc, start_val):
    xi = start_val
    delta = sym.oo
    while abs(delta) > acc:
        delta = (p / p_prime).subs(x, xi).evalf()
        print(xi, delta)
        xi -= delta
    return xi

a = newton(p, p_prime, 10**-4, 0)
Intermediate values:
0 -0.0238095238095238
0.0238095238095238 -0.00876459050881560
0.0325741143183394 -0.00117130331903862
0.0337454176373780 -1.98196463560775e-5
0.0337652372837341
PS: To plot a function represented as an expression:
sym.plot(p, (x, -0.03, 1.03), ylim=(-0.5, 1.5))

Function fitting where one of the fitting parameters should be chosen from a list of fixed values

Let us assume that we have a closed-form analytical function y = a * x + b. We also have experimental data to which this function should be fitted, so error minimization by fitting the right a and b can be done. The attached Python 3.8 code does this easily.
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize

class Model:
    def __init__(self, x_exp, y_exp, params_init):
        self.x_exp = x_exp
        self.y_exp = y_exp
        self.params_init = params_init
        self.possible_b = [-1, 0, 1, 2, 3, 4]
        self.method = "brute"
        self.tolerance = 1 * 10**(-10)
        self.res = None

    def _fun_linear(self, x, a, b):
        y = a * x + b
        return y

    def _model(self, params):
        a = params[0]
        b = params[1]
        y_fit = self._fun_linear(self.x_exp, a, b)
        err = np.linalg.norm(y_fit - self.y_exp)
        return err

    def fit(self):
        time_beg = time.time()
        self.res = minimize(fun=self._model,
                            x0=self.params_init)
        time_end = time.time()
        print(time_end - time_beg)
        print(self.res.x)
        print(self.res)

def f(x, a, b):
    return a * x + b

x_exp = np.linspace(0, 10, 1000)
y_exp = f(x_exp, a=1, b=2) + np.random.normal(0, 0.5, 1000)

model = Model(x_exp, y_exp, [0, 0])
model.fit()
y_fit = f(x_exp, a=model.res.x[0], b=model.res.x[1])

fig, ax = plt.subplots()
ax.plot(x_exp, y_exp)
ax.plot(x_exp, y_fit)
fig.show()
Now, I would like to choose the b value from my own list defined in self.possible_b instead of approximating it with the minimization method. It is some kind of fixed-values constraint on b.
Is this possible? What are such algorithms called, if they exist? Are there any libraries able to do this?
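One straightforward option (a minimal sketch, not from the original post; fit_with_fixed_b_choices is a hypothetical helper) is to treat it as a small mixed discrete/continuous problem: minimize over a separately for each candidate b from the list and keep the best pair. For a short fixed list, this exhaustive search over the discrete values is usually the simplest approach.

import numpy as np
from scipy.optimize import minimize_scalar

def fit_with_fixed_b_choices(x_exp, y_exp, possible_b):
    # For each candidate b, solve the remaining 1-D problem in a,
    # then keep the (a, b) pair with the smallest residual norm.
    best = None
    for b in possible_b:
        res = minimize_scalar(lambda a: np.linalg.norm(a * x_exp + b - y_exp))
        if best is None or res.fun < best[2]:
            best = (res.x, b, res.fun)
    return best  # (a_opt, b_opt, error)

a_opt, b_opt, err = fit_with_fixed_b_choices(x_exp, y_exp, [-1, 0, 1, 2, 3, 4])
print(a_opt, b_opt, err)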

Reason for "Maximum recursion error" in Python

I wrote this program describing an algorithm for the simulation of a partial differential equation. The basic functions I use are defined by
import numpy as np
import math
from scipy import integrate, stats

def shift(func, x, a=0):
    return func(x - a)

def scale(func, a=1):
    return a*func

def trunc(func, x):
    if x <= 0:
        return 0
    else:
        return func(x)

def quad(func, a, b):
    return integrate.quad(func, a, b)

def gauss(func, t, x):
    def pregau(z):
        k = (-t ** (1 / 2)) * z
        return shift(func, x, k)*math.exp(-(z**2)/2)
    fa = (1 / ((2 * math.pi) ** (1 / 2)) * integrate.quad(pregau, -np.inf, np.inf)[0])
    return fa
The program then simulates the solution to the partial differential equation by
def vundl(x, u, l0=0.0, a=a, b=b, c=c):
    v = [u(x)]
    l = [l0]
    f_temp_rec = u
    for i in range(10):
        def f_temp(x):
            y = x - c * dt + B[i + 1] * 2 * a
            z = b * dt
            return gauss(f_temp_rec, z, y)
        li = l[i] + quad(f_temp, 0, np.inf)[0]
        l = np.append(l, li)
        if x <= 0:
            v = np.append(v, 0)
            f_temp_rec = 0
        else:
            f_temp_rec = f_temp
            v = np.append(v, f_temp(x))
    return [v, l]

def u0(x):
    return stats.beta.pdf(x, 2.7, 3.05)

print(vundl(x=0.5, u=u0))
If I run this program for N=0 it produces a vector. Running the program for N>0 gives me the following error:
"RecursionError: maximum recursion depth exceeded"
but it actually should give me a vector v and a vector l.
I'm not sure exactly what you're trying to do in vundl with f_temp and f_temp_rec, but in the else: block you assign:
f_temp_rec = f_temp
and then call f_temp, which calls gauss(f_temp_rec, z, y). Since at this point f_temp_rec is f_temp, the function f_temp calls itself in an infinite recursion.
You should be able to see in the traceback that f_temp is calling itself repeatedly.
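A minimal sketch (not the original code) of the same pattern and one hypothetical way around it, by freezing the predecessor through a default argument so each new function calls the previous one instead of itself:

def identity(x):
    return x

# Problematic pattern (shown commented out) -- the free variable f_prev is
# looked up when f_new is called, and by then it points at f_new itself:
# f_prev = identity
# for i in range(3):
#     def f_new(x):
#         return f_prev(x) + 1
#     f_prev = f_new
# f_prev(0)   # RecursionError

# One way out: bind the predecessor at definition time via a default argument.
f_prev = identity
for i in range(3):
    def f_new(x, prev=f_prev):   # prev is fixed now, not at call time
        return prev(x) + 1
    f_prev = f_new

print(f_prev(0))   # 3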

Regularized Logistic Regression in Python (Andrew Ng Course)

I'm starting the ML journey and I'm having trouble with this coding exercise.
Here is my code:
import numpy as np
import pandas as pd
import scipy.optimize as op

# Read the data and give it labels
data = pd.read_csv('ex2data2.txt', header=None, names=['Test1', 'Test2', 'Accepted'])

# Separate the features to make it fit into the mapFeature function
X1 = data['Test1'].values.T
X2 = data['Test2'].values.T

# This function makes more features (degree)
def mapFeature(x1, x2):
    degree = 6
    out = np.ones((x1.shape[0], sum(range(degree + 2))))
    curr_column = 1
    for i in range(1, degree + 1):
        for j in range(i + 1):
            out[:, curr_column] = np.power(x1, i - j) * np.power(x2, j)
            curr_column += 1
    return out

# Separate the data into training and target, also initialize theta
X = mapFeature(X1, X2)
y = np.matrix(data['Accepted'].values).T
m, n = X.shape
cols = X.shape[1]
theta = np.matrix(np.zeros(cols))

# Initialize the learningRate (sigma)
learningRate = 1

# Define the Sigmoid Function (Output between 0 and 1)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def cost(theta, X, y, learningRate):
    # This is required to make the optimize function work
    theta = theta.reshape(-1, 1)
    error = sigmoid(X @ theta)
    first = np.multiply(-y, np.log(error))
    second = np.multiply(1 - y, np.log(1 - error))
    j = np.sum((first - second)) / m + (learningRate * np.sum(np.power(theta, 2)) / 2 * m)
    return j

# Define the gradient of the cost function
def gradient(theta, X, y, learningRate):
    # This is required to make the optimize function work
    theta = theta.reshape(-1, 1)
    error = sigmoid(X @ theta)
    grad = (X.T @ (error - y)) / m + ((learningRate * theta) / m)
    grad_no = (X.T @ (error - y)) / m
    grad[0] = grad_no[0]
    return grad

Result = op.minimize(fun=cost, x0=theta, args=(X, y, learningRate), method='TNC', jac=gradient)
opt_theta = np.matrix(Result.x)

def predict(theta, X):
    sigValue = sigmoid(X @ theta.T)
    p = sigValue >= 0.5
    return p

p = predict(opt_theta, X)
print('Train Accuracy: {:f}'.format(np.mean(p == y) * 100))
So, when learningRate = 1 the accuracy should be around 83.05% but I'm getting 80.5%, and when learningRate = 0 the accuracy should be 91.52% but I'm getting 87.28%.
So the question is: what am I doing wrong? Why is my accuracy below the expected answer for the problem?
I hope someone can guide me in the right direction. Thanks!
P.S.: Here is the dataset, maybe it can help:
https://raw.githubusercontent.com/TheGirlWhiteWithBandages/Machine-Learning-Algorithms/master/Logistic%20Regression/ex2data2.txt
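One thing worth comparing against the answer code below (an observation, not from the original answers): the usual regularized cost divides the penalty by 2*m and leaves theta[0] out of it, whereas the cost above uses / 2 * m over all of theta. A hedged sketch of that line in the standard form:

# hypothetical variant of the regularization term, matching the convention
# used in costFunctionReg further down (theta[0] excluded, divided by 2*m)
reg_term = learningRate * np.sum(np.power(theta[1:], 2)) / (2 * m)
j = np.sum(first - second) / m + reg_term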
Hey guys, I found a way to make it even better!
Here is the code:
import numpy as np
import pandas as pd
import scipy.optimize as op
from sklearn.preprocessing import PolynomialFeatures

# Read the data and give it labels
data = pd.read_csv('ex2data2.txt', header=None, names=['Test1', 'Test2', 'Accepted'])

# Separate the data into training and target
X = (data.iloc[:, 0:2]).values
y = (data.iloc[:, 2:3]).values

# Modify the features to a certain degree (Polynomial)
poly = PolynomialFeatures(6)
m = y.size
XX = poly.fit_transform(data.iloc[:, 0:2].values)

# Initialize Theta
theta = np.zeros(XX.shape[1])

# Define the Sigmoid Function (Output between 0 and 1)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Define the Regularized cost function
def costFunctionReg(theta, reg, *args):
    # This is required to make the optimize function work
    h = sigmoid(XX @ theta)
    first = np.log(h).T @ -y
    second = np.log(1 - h).T @ (1 - y)
    J = (1 / m) * (first - second) + (reg / (2 * m)) * np.sum(np.square(theta[1:]))
    return J

# Define the Regularized gradient function
def gradientReg(theta, reg, *args):
    theta = theta.reshape(-1, 1)
    h = sigmoid(XX @ theta)
    grad = (1 / m) * (XX.T @ (h - y)) + (reg / m) * np.r_[[[0]], theta[1:]]
    return grad.flatten()

# Define the predict Function
def predict(theta, X):
    sigValue = sigmoid(X @ theta.T)
    p = sigValue >= 0.5
    return p

# A loop to test different values for sigma (reg parameter)
for i, Sigma in enumerate([0, 1, 100]):
    # Optimize costFunctionReg
    res2 = op.minimize(costFunctionReg, theta, args=(Sigma, XX, y), method=None, jac=gradientReg)
    # Get the accuracy of the model
    accuracy = 100 * sum(predict(res2.x, XX) == y.ravel()) / y.size
    # Get the Error between different weights
    error1 = costFunctionReg(res2.x, Sigma, XX, y)
    # Print the accuracy and error
    print('Train accuracy {}% with Lambda = {}'.format(np.round(accuracy, decimals=4), Sigma))
    print(error1)
Thanks for all your help!
Try this out:
# import libraries
import pandas as pd
import numpy as np

dataset = pd.read_csv('ex2data2.csv', names=['Test #1', 'Test #2', 'Accepted'])

# splitting into x and y variables for features and target variable
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
print('x[0] ={}, y[0] ={}'.format(x[0], y[0]))
m, n = x.shape
print('#{} Number of training samples, #{} features per sample'.format(m, n))

# import library for feature mapping
from sklearn.preprocessing import PolynomialFeatures
# We also add one column of ones to interpret theta 0 (x with power of 0 = 1)
# by setting include_bias to True
pf = PolynomialFeatures(degree=6, include_bias=True)
x_poly = pf.fit_transform(x)
pd.DataFrame(x_poly).head(5)

m, n = x_poly.shape
# define theta as zero
theta = np.zeros(n)
# define hyperparameter λ
lambda_ = 1
# reshape (-1,1) because we just have one feature in the y column
y = y.reshape(-1, 1)

def sigmoid(z):
    return 1/(1 + np.exp(-z))

def lr_hypothesis(x, theta):
    return np.dot(x, theta)

def compute_cost(theta, x, y, lambda_):
    theta = theta.reshape(n, 1)
    infunc1 = -y*(np.log(sigmoid(lr_hypothesis(x, theta)))) - ((1 - y)*(np.log(1 - sigmoid(lr_hypothesis(x, theta)))))
    infunc2 = (lambda_*np.sum(theta[1:]**2))/(2*m)
    j = np.sum(infunc1)/m + infunc2
    return j

# gradient[0] corresponds to the gradient for theta(0)
# gradient[1:] corresponds to the gradient for theta(j), j > 0
def compute_gradient(theta, x, y, lambda_):
    gradient = np.zeros(n).reshape(n,)
    theta = theta.reshape(n, 1)
    infunc1 = sigmoid(lr_hypothesis(x, theta)) - y
    gradient_in = np.dot(x.transpose(), infunc1)/m
    gradient[0] = gradient_in[0, 0]  # theta(0)
    gradient[1:] = gradient_in[1:, 0] + (lambda_*theta[1:, ]/m).reshape(n - 1,)  # theta(j); j > 0
    gradient = gradient.flatten()
    return gradient
You can now test your cost and gradient without optimization. The code below will optimize the model:
# hyperparameters
m, n = x_poly.shape
# define theta as zero
theta = np.zeros(n)
# define hyperparameter λ
lambda_array = [0, 1, 10, 100]

import scipy.optimize as opt
from sklearn.metrics import confusion_matrix  # assumed import for confusion_matrix used below

for i in range(0, len(lambda_array)):
    # Train
    print('======================================== Iteration {} ===================================='.format(i))
    optimized = opt.minimize(fun=compute_cost, x0=theta, args=(x_poly, y, lambda_array[i]),
                             method='TNC', jac=compute_gradient)
    new_theta = optimized.x
    # Prediction (predictor and acc are helper functions not shown in the original answer)
    y_pred_train = predictor(x_poly, new_theta)
    cm_train = confusion_matrix(y, y_pred_train)
    t_train, f_train, acc_train = acc(cm_train)
    print('With lambda = {}, {} correct, {} wrong ==========> accuracy = {}%'
          .format(lambda_array[i], t_train, f_train, acc_train*100))

Now you should see output like this:
=== Iteration 0 === With lambda = 0, 104 correct, 14 wrong ==========> accuracy = 88.13559322033898%
=== Iteration 1 === With lambda = 1, 98 correct, 20 wrong ==========> accuracy = 83.05084745762711%
=== Iteration 2 === With lambda = 10, 88 correct, 30 wrong ==========> accuracy = 74.57627118644068%
=== Iteration 3 === With lambda = 100, 72 correct, 46 wrong ==========> accuracy = 61.016949152542374%

How can I fit an equation which is not a function?

Given data points in the xy plane, I would like to use scipy.optimize.leastsq to find fit parameters for an ellipse (which cannot be written as a function of x and y). I tried setting the entire equation equal to zero and then fitting that function, but the fit fails to converge, with output
"The relative error between two consecutive iterates is at most 0.000000."
The code is shown below, as well as the output. The fitter clearly does not find any reasonable parameters. My question is whether this is a problem with scipy.optimize.leastsq, or whether the "trick" of setting the equation equal to zero and fitting that instead is not valid.
from scipy.optimize import leastsq, curve_fit
import numpy as np
import matplotlib.pyplot as plt

def function(x, y, theta, smaj, smin):
    xp = np.cos(theta)*x - np.sin(theta)*y
    yp = np.sin(theta)*x + np.cos(theta)*y
    z = ((xp)**2)/smaj**2 + ((yp)**2)/smin**2
    return z

def g(x, y, smaj, smin):
    return x*x/smaj**2 + y*y/smin**2

def window(array, alt, arange):
    arr = [array[i] for i, a in enumerate(alt) if a > arange[0] and a < arange[1]]
    return np.asarray(arr)

def fitter(p0, x, y, func, errfunc, err):
    # the fitter function
    out = leastsq(errfunc, p0, args=(x, y, func, err), full_output=1)
    pfinal = out[0]
    covar = out[1]
    mydict = out[2]
    mesg = out[3]
    ier = out[4]
    resids = mydict['fvec']
    chisq = np.sum(resids**2)
    degs_frdm = len(x) - len(pfinal)
    reduced_chisq = chisq/degs_frdm
    ls = [pfinal, covar, mydict, mesg, ier, resids, chisq, degs_frdm, reduced_chisq]
    print('fitter status: ', ier, '-- aka -- ', mesg)
    i = 0
    if covar is not None:
        if (ier == 1 or ier == 2 or ier == 3 or ier == 4):
            for u in pfinal:
                print('Param', i + 1, ': ', u, ' +/- ', np.sqrt(covar[i, i]))
                i = i + 1
        print('reduced chisq', reduced_chisq)
    else:
        print('fitter failed')
    return ls

def func(x, y, p):
    x = x - p[3]
    y = y - p[4]
    xp = np.cos(p[0])*(x) - np.sin(p[0])*(y)
    yp = np.sin(p[0])*(x) + np.cos(p[0])*(y)
    z = ((xp)**2)/p[1]**2 + ((yp)**2)/p[2]**2 - 1
    return z

def errfunc(p, x, y, func, err):
    return (y - func(x, y, p))/err

t = np.linspace(0, 2*np.pi, 100)
xx = 5*np.cos(t); yy = np.sin(t)
p0 = [0, 5, 1, 0, 0]
sigma = np.ones(len(xx))

fit = fitter(p0, xx, yy, func, errfunc, sigma)
params = fit[0]
covariance = fit[1]
residuals = fit[5]

t = np.linspace(0, 2*np.pi, 100)
xx = 5*np.cos(t); yy = np.sin(t)
plt.plot(xx, yy, 'bx', ms=4)

xx = np.linspace(-10, 10, 1000)
yy = np.linspace(-5, 5, 1000)
newx = []
newy = []
for x in xx:
    for y in yy:
        if 0.99 < func(x, y, params) < 1.01:
            # if g(x, y, 5, 1) == 1:
            newx.append(x)
            newy.append(y)
plt.plot(newx, newy, 'kx', ms=1)
plt.show()
The blue crosses are the actual data, and the black line is the fitter's guess at the parameters.
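For reference, a minimal sketch (not from the original post, and only one possible reading of the "set it to zero" trick) in which the implicit ellipse equation itself is used as the residual, so leastsq drives func(x, y, p) toward zero instead of comparing it against y:

import numpy as np
from scipy.optimize import leastsq

def ellipse_residual(p, x, y):
    # p = [theta, smaj, smin, x0, y0]; the expression is zero for points on the ellipse
    xs, ys = x - p[3], y - p[4]
    xp = np.cos(p[0])*xs - np.sin(p[0])*ys
    yp = np.sin(p[0])*xs + np.cos(p[0])*ys
    return xp**2/p[1]**2 + yp**2/p[2]**2 - 1

t = np.linspace(0, 2*np.pi, 100)
xx, yy = 5*np.cos(t), np.sin(t)
p0 = [0.1, 4.0, 2.0, 0.0, 0.0]           # deliberately off the true values
pfit, ier = leastsq(ellipse_residual, p0, args=(xx, yy))
print(pfit)  # expected to approach theta=0, smaj=5, smin=1 (up to sign), x0=y0=0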
