How would you write Python code to find a root of the nonlinear equation fnon(x) = 0 using the secant method? The function should be callable as secant(fnon, x0, x1, tol), returning x, the computed root, and f, the function value at that root. My method isn't working.
Here is a solution from the tutorial "Solving nonlinear algebraic equations", which also presents several other ways to do this. The secant method replaces the derivative in Newton's method with a finite difference through the last two iterates, giving the update x_new = x1 - f(x1)*(x1 - x0)/(f(x1) - f(x0)):
import sys

def secant(f, x0, x1, eps):
    f_x0 = f(x0)
    f_x1 = f(x1)
    iteration_counter = 0
    while abs(f_x1) > eps and iteration_counter < 100:
        try:
            denominator = float(f_x1 - f_x0)/(x1 - x0)
            x = x1 - float(f_x1)/denominator
        except ZeroDivisionError:
            print("Error! - denominator zero for x =", x1)
            sys.exit(1)  # Abort with error
        x0 = x1
        x1 = x
        f_x0 = f_x1
        f_x1 = f(x1)
        iteration_counter += 1
    # Here, either a solution is found, or too many iterations
    if abs(f_x1) > eps:
        iteration_counter = -1
    return x, iteration_counter

def f(x):
    return x**2 - 9

x0 = 1000; x1 = x0 - 1
solution, no_iterations = secant(f, x0, x1, eps=1.0e-6)

if no_iterations > 0:  # Solution found
    print("Number of function calls: %d" % (2 + no_iterations))
    print("A solution is: %f" % (solution))
else:
    print("Solution not found!")
# example function
import numpy as np
from scipy.optimize import root_scalar

fnon = lambda x: np.sin(x - np.pi)

x0 = 0
x1 = 5
tol = 1e-8
x = root_scalar(fnon, method='secant', x0=x0, x1=x1, rtol=tol)
print(x)
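Note that root_scalar returns a RootResults object rather than a bare number, so print(x) shows the root together with convergence diagnostics (converged flag, iteration count, and so on). To get the (root, function value) pair the question asks for, read the root attribute and evaluate fnon at it:

root = x.root
print(root, fnon(root))  # the computed root and the residual there
                         # (x = 0, pi, 2*pi, ... are all roots of sin(x - pi))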
Assume the following function:
f(x) = x * cos(x-4)
On the interval [-2.5, 2.5] this function crosses zero twice: f(0) = 0 and f(-0.71238898) = 0.
This was determined with the following code:
import math
from scipy.optimize import fsolve

def func(x):
    return x*math.cos(x-4)

x0 = fsolve(func, 0.0)
# returns [0.]
x0 = fsolve(func, -0.75)
# returns [-0.71238898]
What is the proper way to use fzero (or any other Python root finder) to find both roots in one call? Is there a different scipy function that does this?
fzero reference
I once wrote a module for this task. It's based on chapter 4.3 from the book Numerical Methods in Engineering with Python by Jaan Kiusalaas:
import math

def rootsearch(f, a, b, dx):
    x1 = a; f1 = f(a)
    x2 = a + dx; f2 = f(x2)
    while f1*f2 > 0.0:
        if x1 >= b:
            return None, None
        x1 = x2; f1 = f2
        x2 = x1 + dx; f2 = f(x2)
    return x1, x2

def bisect(f, x1, x2, switch=0, epsilon=1.0e-9):
    f1 = f(x1)
    if f1 == 0.0:
        return x1
    f2 = f(x2)
    if f2 == 0.0:
        return x2
    if f1*f2 > 0.0:
        print('Root is not bracketed')
        return None
    n = int(math.ceil(math.log(abs(x2 - x1)/epsilon)/math.log(2.0)))
    for i in range(n):
        x3 = 0.5*(x1 + x2); f3 = f(x3)
        if (switch == 1) and (abs(f3) > abs(f1)) and (abs(f3) > abs(f2)):
            return None
        if f3 == 0.0:
            return x3
        if f2*f3 < 0.0:
            x1 = x3
            f1 = f3
        else:
            x2 = x3
            f2 = f3
    return (x1 + x2)/2.0

def roots(f, a, b, eps=1e-6):
    print('The roots on the interval [%f, %f] are:' % (a, b))
    while 1:
        x1, x2 = rootsearch(f, a, b, eps)
        if x1 is not None:
            a = x2
            root = bisect(f, x1, x2, 1)
            if root is not None:
                print(round(root, -int(math.log(eps, 10))))
        else:
            print('\nDone')
            break

f = lambda x: x*math.cos(x-4)
roots(f, -3, 3)
roots finds all roots of f in the interval [a, b].
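As a quick sanity check: solving x*cos(x-4) = 0 by hand on [-3, 3] gives x = 0 together with the zeros of cos(x-4), namely 4 - 3*pi/2 ≈ -0.712389 and 4 - pi/2 ≈ 2.429204. So the call above should print, up to rounding, approximately:

The roots on the interval [-3.000000, 3.000000] are:
-0.712389
0.0
2.429204
Done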
Define your function so that it can take either a scalar or a numpy array as an argument:
>>> import numpy as np
>>> f = lambda x : x * np.cos(x-4)
Then pass a vector of arguments to fsolve.
>>> x = np.array([0.0, -0.75])
>>> fsolve(f,x)
array([ 0. , -0.71238898])
In general (i.e. unless your function belongs to some specific class) you can't find all the global solutions - these methods usually do local optimization from given starting points.
However, you can replace math.cos() with numpy.cos(); that vectorizes your function so it can solve for many starting values at once, e.g. fsolve(func, np.arange(-10,10,0.5)).
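One caveat with that multi-start approach: fsolve returns one result per starting point, so nearby starts converge to duplicate copies of the same root, and some starts may not converge at all. A minimal sketch of filtering and deduplicating the results (the residual bound 1e-9 and the rounding to 6 decimals are arbitrary tolerances):

import numpy as np
from scipy.optimize import fsolve

func = lambda x: x * np.cos(x - 4)
candidates = fsolve(func, np.arange(-10, 10, 0.5))
good = candidates[np.abs(func(candidates)) < 1e-9]  # drop non-converged starts
roots = np.unique(np.round(good, 6))                # merge duplicate roots
print(roots)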
In the following code I have implemented Newton's method in Python.
import math
import sys

def Newton(f, dfdx, x, eps):
    f_value = f(x)
    iteration_counter = 0
    while abs(f_value) > eps and iteration_counter < 100:
        try:
            x = x - float(f_value)/dfdx(x)
        except ZeroDivisionError:
            print("Error! - derivative zero for x = ", x)
            sys.exit(1)  # Abort with error
        f_value = f(x)
        iteration_counter += 1
    # Here, either a solution is found, or too many iterations
    if abs(f_value) > eps:
        iteration_counter = -1
    return x, iteration_counter

def f(x):
    return (math.cos(x) - math.sin(x))

def dfdx(x):
    return (-math.sin(x) - math.cos(x))

solution, no_iterations = Newton(f, dfdx, x=1, eps=1.0e-14)

if no_iterations > 0:  # Solution found
    print("Number of function calls: %d" % (1 + 2*no_iterations))
    print("A solution is: %f" % (solution))
else:
    print("Solution not found!")
However, now I would like to plot the convergence diagram on that same interval: the absolute error as a function of the number of iterations, i.e. the iteration count on the x-axis and the corresponding absolute error on the y-axis.
I attempted to turn the function into an iterable that yields a 2-tuple (iteration, absolute error) on each step. Here is my code, with the output and graph. Is my output correct? Should the graph look like this? All help is greatly appreciated! The number of iterations from my code is 3.
import math

def Newton(f, dfdx, x, eps):
    f_value = f(x)
    iteration_counter = 0
    while abs(f_value) > eps and iteration_counter < 100:
        try:
            x = x - float(f_value)/dfdx(x)
            yield iteration_counter, abs(f(x))
        except ZeroDivisionError:
            print("Error! - derivative zero for x = ", x)
            sys.exit(1)  # Abort with error
        f_value = f(x)
        iteration_counter += 1
    # Here, either a solution is found, or too many iterations
    if abs(f_value) > eps:
        iteration_counter = -1
    return x, iteration_counter

def f(x):
    return (math.cos(x) - math.sin(x))

def dfdx(x):
    return (-math.sin(x) - math.cos(x))

import numpy as np
np.array(list(Newton(f, dfdx, 1, 10e-4)))
which produces the following output:
array([[0.00000000e+00, 4.74646213e-03],
       [1.00000000e+00, 1.78222779e-08]])
and finally:
import numpy as np
import matplotlib.pyplot as plt
data = np.array(list(Newton(f,dfdx, 1, 10e-14)))
plt.plot(data[:,0], data[:,1])
plt.yscale('log')
plt.show()
which produces the graph (image not reproduced here).
Your Newton function shouldn't both yield and return values: in a generator, the return value never reaches list(Newton(...)); it merely ends the iteration, so drop the return statement.
Use a more slowly converging function to test your results; Newton on cos(x) - sin(x) from x = 1 converges in about three iterations, which gives too few points to show a trend.
This is what I would do:
import math
import sys
import numpy as np
import matplotlib.pyplot as plt

def newton(f, dfdx, x, eps):
    f_value = f(x)
    iteration_counter = 0
    while abs(f_value) > eps and iteration_counter < 100:
        try:
            x = x - float(f_value)/dfdx(x)
            yield iteration_counter, x, abs(f(x))
        except ZeroDivisionError:
            print("Error! - derivative zero for x = ", x)
            sys.exit(1)  # Abort with error
        f_value = f(x)
        iteration_counter += 1

def f(x):
    return x ** 2 - 1.34

def dfdx(x):
    return 2 * x

data = np.array(list(newton(f, dfdx, 10, 10e-14)))

# plt.plot(data[:, 0], data[:, 1])  # x-axis: iteration, y-axis: x values
plt.plot(data[:, 0], data[:, 2])  # x-axis: iteration, y-axis: f(x) values
plt.yscale('log')
plt.show()
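If you want the absolute error in x rather than the residual |f(x)|, and the exact root is known (sqrt(1.34) for the test function above), a minimal sketch continuing from the data array computed above:

import math

exact = math.sqrt(1.34)              # exact positive root of x**2 - 1.34
errors = np.abs(data[:, 1] - exact)  # |x_n - x*| at each iteration
plt.plot(data[:, 0], errors)
plt.yscale('log')                    # quadratic convergence: digits roughly double per step
plt.show()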
I want to define a function that returns the values of the roots of a quadratic. It is supposed to always return something: if b**2 - 4*a*c < 0, it should return [], but instead it raises an error.
Here is my code so far:
from math import *

def solve(a, b, c):
    x = sqrt(b**2 - 4*a*c)
    if x > 0:
        x1 = (-b + x)/(2*a)
        x2 = (-b - x)/(2*a)
        return [x1, x2]
    elif x == 0:
        x1 = x2 = -b/(2*a)
        return [x1]
    else:
        return []
math.sqrt is undefined for negative numbers and therefore raises a ValueError.
If you wish to return the complex square root for negatives, use x**0.5:
x = (b**2 - 4*a*c)**0.5
Alternatively use the cmath.sqrt implementation:
from cmath import sqrt
x = sqrt(b**2 - 4*a*c)
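As a quick interactive check of the two approaches (note that ** goes through complex exponentiation, so it picks up a tiny rounding error in the real part, while cmath.sqrt is exact here):

>>> from cmath import sqrt
>>> sqrt(-4)
2j
>>> (-4)**0.5
(1.2246467991473532e-16+2j)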
sqrt will not accept negative values. To avoid this, you can check for a negative discriminant before computing the square root:
from math import sqrt

def solve(a, b, c):
    formula = b**2 - 4*a*c
    if formula < 0:
        return []
    x = sqrt(formula)
    if x > 0:
        x1 = (-b + x)/(2*a)
        x2 = (-b - x)/(2*a)
        return [x1, x2]
    elif x == 0:
        x1 = x2 = -b/(2*a)
        return [x1]
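A few spot checks (roots computed by hand):

print(solve(1, -3, 2))  # x**2 - 3x + 2: expect [2.0, 1.0]
print(solve(1, 2, 1))   # (x + 1)**2: expect [-1.0]
print(solve(1, 0, 1))   # x**2 + 1: no real roots, expect []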
I am using scipy.optimize.minimize with the default method ('Nelder-Mead').
The function I am trying to minimize is not strictly convex; it stays at the same value over some significant regions.
The issue I have is that the steps taken by the algorithm are too small. For example, my starting point has a first coordinate x0 = 0.2. I know that the function only takes a different value for a significant step, say moving by 0.05. Unfortunately, I can see that the algorithm makes very small steps (moving by around 0.000001). As a result, my function keeps returning the same value and the algorithm does not converge. Can I change that behaviour?
For convenience, here's the scipy code:
def _minimize_neldermead(func, x0, args=(), callback=None,
                         xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                         disp=False, return_all=False,
                         **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.

    Options for the Nelder-Mead algorithm are:
        disp : bool
            Set to True to print convergence messages.
        xtol : float
            Relative error in solution `xopt` acceptable for convergence.
        ftol : float
            Relative error in ``fun(xopt)`` acceptable for convergence.
        maxiter : int
            Maximum number of iterations to perform.
        maxfev : int
            Maximum number of function evaluations to make.

    This function is called by the `minimize` function with
    `method=Nelder-Mead`. It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all

    fcalls, func = wrap_function(func, args)
    x0 = asfarray(x0).flatten()
    N = len(x0)
    rank = len(x0.shape)
    if not -1 < rank < 2:
        raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
    if maxiter is None:
        maxiter = N * 200
    if maxfun is None:
        maxfun = N * 200

    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    one2np1 = list(range(1, N + 1))

    if rank == 0:
        sim = numpy.zeros((N + 1,), dtype=x0.dtype)
    else:
        sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
    fsim = numpy.zeros((N + 1,), float)
    sim[0] = x0
    if retall:
        allvecs = [sim[0]]
    fsim[0] = func(x0)
    nonzdelt = 0.05
    zdelt = 0.00025
    for k in range(0, N):
        y = numpy.array(x0, copy=True)
        if y[k] != 0:
            y[k] = (1 + nonzdelt)*y[k]
        else:
            y[k] = zdelt
        sim[k + 1] = y
        f = func(y)
        fsim[k + 1] = f

    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim, ind, 0)

    iterations = 1

    while (fcalls[0] < maxfun and iterations < maxiter):
        if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xtol and
                numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= ftol):
            break

        xbar = numpy.add.reduce(sim[:-1], 0) / N
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0

        if fxr < fsim[0]:
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1
                if doshrink:
                    for j in one2np1:
                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                        fsim[j] = func(sim[j])

        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1
        if retall:
            allvecs.append(sim[0])

    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0

    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print('Warning: ' + msg)
    elif iterations >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print('Warning: ' + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % iterations)
            print("         Function evaluations: %d" % fcalls[0])

    result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
I used Nelder-Mead a long time ago, but as I remember it, you will find different local minima if you start from different starting points. You didn't give us your function, so we can only guess what the best strategy for you would be. You should also read this:
http://www.webpages.uidaho.edu/~fuchang/res/ANMS.pdf
Then you can try this pure-Python implementation:
https://github.com/fchollet/nelder-mead/blob/master/nelder_mead.py
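Regarding the step size specifically: in the source you pasted, the initial simplex perturbs each nonzero coordinate of x0 by only 5% (nonzdelt = 0.05), i.e. 0.01 for x0 = 0.2, and the simplex only shrinks from there. Recent SciPy versions let you supply your own simplex through the initial_simplex option, so you can start with steps at the scale your function actually responds to. A minimal 1-D sketch (func here is a stand-in for your objective, and the 0.05 offset is your problem's length scale):

import numpy as np
from scipy.optimize import minimize

def func(x):                   # stand-in for your objective
    return (x[0] - 1.0)**2

x0 = np.array([0.2])
# vertices: x0 plus x0 shifted by the scale the function responds to
sim = np.array([[0.2],
                [0.2 + 0.05]])  # shape (N + 1, N), here N = 1

res = minimize(func, x0, method='Nelder-Mead',
               options={'initial_simplex': sim})
print(res.x)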