Graphing diagram in Python

In the following code I have implemented Newton's method in Python.
import math
import sys

def Newton(f, dfdx, x, eps):
    f_value = f(x)
    iteration_counter = 0
    while abs(f_value) > eps and iteration_counter < 100:
        try:
            x = x - float(f_value)/dfdx(x)
        except ZeroDivisionError:
            print("Error! - derivative zero for x = ", x)
            sys.exit(1)  # Abort with error
        f_value = f(x)
        iteration_counter += 1
    # Here, either a solution is found, or too many iterations
    if abs(f_value) > eps:
        iteration_counter = -1
    return x, iteration_counter

def f(x):
    return math.cos(x) - math.sin(x)

def dfdx(x):
    return -math.sin(x) - math.cos(x)

solution, no_iterations = Newton(f, dfdx, x=1, eps=1.0e-14)

if no_iterations > 0:  # Solution found
    print("Number of function calls: %d" % (1 + 2*no_iterations))
    print("A solution is: %f" % solution)
else:
    print("Solution not found!")
However, now I am looking to plot the convergence diagram for that same problem: the absolute error as a function of the number of iterations on the interval [0, 1], meaning the number of iterations on the x axis with the corresponding absolute error on the y axis.
I attempted to make an iterable that yields a 2-tuple with the iteration and the absolute error at each step. Here is my code below, with the output and graph. Is my output correct? Should the graph look like this? All help is greatly appreciated! The number of iterations from my code is 3.
import math
import sys

def Newton(f, dfdx, x, eps):
    f_value = f(x)
    iteration_counter = 0
    while abs(f_value) > eps and iteration_counter < 100:
        try:
            x = x - float(f_value)/dfdx(x)
            yield iteration_counter, abs(f(x))
        except ZeroDivisionError:
            print("Error! - derivative zero for x = ", x)
            sys.exit(1)  # Abort with error
        f_value = f(x)
        iteration_counter += 1
    # Here, either a solution is found, or too many iterations
    if abs(f_value) > eps:
        iteration_counter = -1
    return x, iteration_counter

def f(x):
    return math.cos(x) - math.sin(x)

def dfdx(x):
    return -math.sin(x) - math.cos(x)

import numpy as np
np.array(list(Newton(f, dfdx, 1, 10e-4)))
which produces the following output:
array([[0.00000000e+00, 4.74646213e-03],
[1.00000000e+00, 1.78222779e-08]])
and finally:
import numpy as np
import matplotlib.pyplot as plt
data = np.array(list(Newton(f,dfdx, 1, 10e-14)))
plt.plot(data[:,0], data[:,1])
plt.yscale('log')
plt.show()
which produces the graph:

Your Newton function shouldn't yield and return at the same time
Use a more slowly converging function to test your results
This is what I would do:
import math
import sys
import numpy as np
import matplotlib.pyplot as plt

def newton(f, dfdx, x, eps):
    f_value = f(x)
    iteration_counter = 0
    while abs(f_value) > eps and iteration_counter < 100:
        try:
            x = x - float(f_value)/dfdx(x)
            yield iteration_counter, x, abs(f(x))
        except ZeroDivisionError:
            print("Error! - derivative zero for x = ", x)
            sys.exit(1)  # Abort with error
        f_value = f(x)
        iteration_counter += 1

def f(x):
    return x ** 2 - 1.34

def dfdx(x):
    return 2 * x

data = np.array(list(newton(f, dfdx, 10, 10e-14)))
# plt.plot(data[:, 0], data[:, 1])  # x-axis: iteration, y-axis: x values
plt.plot(data[:, 0], data[:, 2])  # x-axis: iteration, y-axis: f(x) values
plt.yscale('log')
plt.show()
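If you'd rather keep the original return values, here is a minimal sketch of an alternative (my own variant, not from the question): collect the per-iteration errors in a plain list instead of yielding, so the function can still return the root and the iteration count.

import math

def newton_with_history(f, dfdx, x, eps, max_iter=100):
    # Variant of the question's Newton: append (iteration, |f(x)|) to a list
    # instead of yielding, so the root and iteration count can be returned.
    errors = []
    f_value = f(x)
    iteration_counter = 0
    while abs(f_value) > eps and iteration_counter < max_iter:
        x = x - f_value / dfdx(x)
        f_value = f(x)
        iteration_counter += 1
        errors.append((iteration_counter, abs(f_value)))
    return x, iteration_counter, errors

root, n_iter, errors = newton_with_history(
    lambda x: math.cos(x) - math.sin(x),
    lambda x: -math.sin(x) - math.cos(x),
    1, 1.0e-14)
print(root, n_iter, errors)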

Related

NIST Suite Test for Nonlinear dynamical system

In the following code I am running the Lorenz chaotic system, from which I will get (supposedly) random numbers in terms of xs, ys and zs.
import numpy as np

def lorenz(x, y, z, a=10, b=8/3, c=28):
    x_dot = a*(y - x)
    y_dot = -y + c*x - x*z
    z_dot = -b*z + x*y
    return x_dot, y_dot, z_dot

dt = 0.01
num_steps = 10000

# Need one more for the initial values
xs = np.empty(num_steps + 1)
ys = np.empty(num_steps + 1)
zs = np.empty(num_steps + 1)

# Set initial values
xs[0], ys[0], zs[0] = (1, 1, 1)

# Step through "time", calculating the partial derivatives at the current point
# and using them to estimate the next point
for i in range(num_steps):
    x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
    xs[i + 1] = xs[i] + (x_dot * dt)
    ys[i + 1] = ys[i] + (y_dot * dt)
    zs[i + 1] = zs[i] + (z_dot * dt)
I am actually trying to test the xs, ys and zs values with the NIST 800 randomness tests, using the code below:
from __future__ import print_function

import math
from fractions import Fraction
from scipy.special import gamma, gammainc, gammaincc
# from gamma_functions import *
import numpy
import cmath
import random

#ones_table = [bin(i)[2:].count('1') for i in range(256)]

def count_ones_zeroes(bits):
    ones = 0
    zeroes = 0
    for bit in bits:
        if bit == 1:
            ones += 1
        else:
            zeroes += 1
    return (zeroes, ones)

def runs_test(bits):
    n = len(bits)
    zeroes, ones = count_ones_zeroes(bits)
    prop = float(ones)/float(n)
    print("  prop ", prop)
    tau = 2.0/math.sqrt(n)
    print("  tau ", tau)
    if abs(prop - 0.5) > tau:
        return (False, 0.0, None)
    vobs = 1.0
    for i in range(n - 1):
        if bits[i] != bits[i + 1]:
            vobs += 1.0
    print("  vobs ", vobs)
    p = math.erfc(abs(vobs - (2.0*n*prop*(1.0 - prop)))/(2.0*math.sqrt(2.0*n)*prop*(1 - prop)))
    success = (p >= 0.01)
    return (success, p, None)

print(runs_test(xs))
#%%
from __future__ import print_function
import math

def count_ones_zeroes(bits):
    ones = 0
    zeroes = 0
    for bit in bits:
        if bit == 1:
            ones += 1
        else:
            zeroes += 1
    return (zeroes, ones)

def monobit_test(bits):
    n = len(bits)
    zeroes, ones = count_ones_zeroes(bits)
    s = abs(ones - zeroes)
    print("  Ones count   = %d" % ones)
    print("  Zeroes count = %d" % zeroes)
    p = math.erfc(float(s)/(math.sqrt(float(n)) * math.sqrt(2.0)))
    success = (p >= 0.01)
    return (success, p, None)

print(runs_test(xs))
The output which I am getting is False, i.e.:
prop 0.00019998000199980003
tau 0.01999900007499375
(False, 0.0, None)
What should I do now?
The Lorenz system is chaotic, not random. You implemented the differential equation solver well, but it seems that count_ones_zeroes doesn't do what its name implies, at least not on the data you provide: on xs, it returns (zeroes, ones) = (9999, 2), which is not what you want. The code checks each value in the xs array, i.e. an x value (e.g. 8.2), against 1, but x is a float roughly between -20 and 20, so it will usually not equal 1 and will be counted as a zero; only x == 1 is counted as a one.
Also, in Python 3, int/int results in a float, so there is no need to cast to float, in contrast to e.g. C or C++: instead of prop = float(ones)/float(n) you can write prop = ones/n. Similar statements hold for +, - and *.
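If you still want to run these tests on the Lorenz output, you first need an actual 0/1 sequence. A minimal sketch, assuming a simple median threshold as the binarization rule (my own choice here, not the binarization prescribed by NIST SP 800-22):

import numpy as np

# Hypothetical binarization: 1 where the sample is above the median, else 0,
# so runs_test and monobit_test see bits instead of raw floats.
bits = (xs > np.median(xs)).astype(int)
print(runs_test(bits))
print(monobit_test(bits))

Even then, expect the tests to fail often: consecutive Lorenz samples at dt = 0.01 are strongly correlated, which is exactly the kind of structure these tests detect.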

Secant method using Python

How would you write Python code to find the root of the nonlinear equation fnon(x) = 0 using the secant method, with secant(fnon, x0, x1, tol) returning x as the computed root and f as the function value at that root? My method isn't working.
A solution provided by the website "Solving nonlinear algebraic equations", which has additional ways to calculate it:
import sys

def secant(f, x0, x1, eps):
    f_x0 = f(x0)
    f_x1 = f(x1)
    iteration_counter = 0
    while abs(f_x1) > eps and iteration_counter < 100:
        try:
            denominator = float(f_x1 - f_x0)/(x1 - x0)
            x = x1 - float(f_x1)/denominator
        except ZeroDivisionError:
            print("Error! - denominator zero for x = ", x1)
            sys.exit(1)  # Abort with error
        x0 = x1
        x1 = x
        f_x0 = f_x1
        f_x1 = f(x1)
        iteration_counter += 1
    # Here, either a solution is found, or too many iterations
    if abs(f_x1) > eps:
        iteration_counter = -1
    return x, iteration_counter

def f(x):
    return x**2 - 9

x0 = 1000; x1 = x0 - 1
solution, no_iterations = secant(f, x0, x1, eps=1.0e-6)

if no_iterations > 0:  # Solution found
    print("Number of function calls: %d" % (2 + no_iterations))
    print("A solution is: %f" % solution)
else:
    print("Solution not found!")
# example function
import numpy as np
from scipy.optimize import root_scalar

fnon = lambda x: np.sin(x - np.pi)
x0 = 0
x1 = 5
tol = 1e-8
x = root_scalar(fnon, method='secant', x0=x0, x1=x1, rtol=tol)
print(x)
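Note that root_scalar returns a RootResults object rather than a bare number; the computed root and the convergence information are attributes on it:

res = root_scalar(fnon, method='secant', x0=x0, x1=x1, rtol=tol)
print(res.root)  # the computed root (a multiple of pi for this fnon)
print(res.iterations, res.function_calls, res.converged)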

How to print all polynomials up to Pn+1(x)

This is an exercise where you use the polynomial routines in numpy to generate the Legendre polynomials recursively.
I have finished every part except for printing all the polynomials up to Pn+1(x). How can I add a line to achieve this goal? Someone, please help!
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate

def P(n, x):
    if n == 0:
        return 1  # P0 = 1
    elif n == 1:
        return x  # P1 = x
    else:
        return N_f(n) * x * P(n - 1, x) + alpha_f(n) * P(n - 2, x)

def norm_f(x, n):
    return P(n - 1, x) * P(n - 1, x)

def overlap_f(x, n):
    return x * P(n, x) * P(n - 1, x)

def alpha_f(n):
    return -(n - 1)/float(n)

def N_f(n):
    return ((2 * n) - 1)/float(n)

def norm_n(n):
    return integrate.quad(norm_f, -1, 1, args=(n,))

def overlap_n(n):
    return integrate.quad(overlap_f, -1, 1, args=(n,))

max_n = int(input('input the max n: '))

# Creating an array of x values
X = np.linspace(-1, 1, 200)
x = np.linspace(-1, 1, 200)

for i in range(1, 7):
    plt.plot(x, P(i, x), label="P" + str(i))

plt.legend(loc="best")
plt.xlabel("X")
plt.ylabel("Pn")
plt.savefig('plot_legend.png')
plt.show()
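To actually print the polynomials up to Pn+1(x), one option is to build coefficient objects instead of evaluating at sample points. A sketch, using numpy.polynomial.Polynomial and the same recurrence as alpha_f/N_f above (the loop bound max_n + 2, so that Pn+1 is included, is my reading of the exercise):

from numpy.polynomial import Polynomial

max_n = 5  # example value; use the max_n read from input
x_poly = Polynomial([0, 1])            # the polynomial "x"
legendre = [Polynomial([1]), x_poly]   # P0 = 1, P1 = x

# Bonnet's recurrence: P_n = ((2n - 1)/n) * x * P_{n-1} - ((n - 1)/n) * P_{n-2}
for n in range(2, max_n + 2):
    legendre.append(((2*n - 1) * x_poly * legendre[n - 1]
                     - (n - 1) * legendre[n - 2]) / n)

for n, p in enumerate(legendre):
    print("P%d coefficients (low to high): %s" % (n, p.coef))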

Python Newton-Raphson; precision in calculations

I've written this function in Python:
def f2(x):
    return (5.0*x + log1p(x) - 10000.0)

def dfdx2(x):
    return (5.0 - (1.0/x))

def newtonRaphson2(f, dfdx, x, tol):
    x0 = x
    for i in range(1, 2000):
        if f(x) == 0.0:
            return x
        if dfdx(x) == 0.0:
            print(dfdx(x))
            break
        x = x - (f(x) / dfdx(x))
        #print(x)
        Er = abs(x0 - x)/abs(x0)
        if Er <= tol:
            return x
        print(Er)
        x0 = x
    return x
Then I execute it like this:
task2 = newtonRaphson2(f2, dfdx2, 1, 0.000001);
print(task2)
For the output, checking Er shows a final accuracy of 4.245665128208564e-05 before it returns x.
x is returned as 1998.479871524306, which is a pretty good estimate, but I'm preferably looking to get it down to at least 1.0e-06. Changing the tol variable to 1.0e-08 seems to do nothing.
I'm guessing maybe putting every variable into a double is a better idea, but I still have no idea why my code stops where it does. I'm not that stable with Python either, which is why I'm asking. I've already written one of these that works, but it's for a far simpler equation.
Your code works fine, once you indent it properly and add from math import log1p . Just put the print(Er) line right after it's calculated to see its final value. Er gets to ~10^-9. This worked for me:
from math import log1p

def f2(x):
    return (5.0*x + log1p(x) - 10000.0)

def dfdx2(x):
    return (5.0 - (1.0/x))

def newtonRaphson2(f, dfdx, x, tol):
    x0 = x
    for i in range(1, 2000):
        if f(x) == 0.0:
            return x
        if dfdx(x) == 0.0:
            print(dfdx(x))
            break
        x = x - (f(x) / dfdx(x))
        #print(x)
        Er = abs(x0 - x)/abs(x0)
        print('Er = {}'.format(Er))
        if Er <= tol:
            return x
        x0 = x
    return x

x = newtonRaphson2(f2, dfdx2, 1, 0.0000001)
print('X = {}'.format(x))
The output was:
Er = 2498.5767132
Er = 0.200506616666
Er = 4.24566512821e-05
Er = 8.49642413214e-09
X = 1998.47987152
Consider using while here. The Newton-Raphson algorithm typically converges very fast, so you won't need many iterations in most cases.
This gives the same result:
from math import log1p
def f2(x):
return (5.0*x + log1p(x) - 10000.0)
def dfdx2(x):
return (5.0-(1.0/x))
def newtonRaphson2(f, dfdx, x, tol):
x0 = x
Er = 1
while Er >= tol:
if f(x) == 0.0:
return x
if dfdx(x) == 0.0:
print(dfdx(x))
break
x = x - (f(x) / dfdx(x))
#print(x)
Er = abs(x0-x)/abs(x0)
print('Er = {}'.format(Er))
x0 = x
return x
x = newtonRaphson2(f2, dfdx2, 1, 0.0000001)
print 'X = {}'.format(x)
As @alex-dubrovsky mentioned, calculations that imply convergence should be implemented with conditional loops, i.e.:
while True:
    if f(x) == 0.0:
        return x
    if dfdx(x) == 0.0:
        print(dfdx(x))
        break
    x = x - (f(x) / dfdx(x))
    #print(x)
    Er = abs(x0 - x)/abs(x0)
    if Er <= tol:
        return x
    print(Er)
    x0 = x
With this approach you're always at risk of an infinite loop, but this is more or less fine, as the algorithm implies "running until converged".
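If the risk of an infinite loop bothers you, here is a small sketch combining both styles: a while-like convergence test inside a capped for loop (the max_iter cap is my own addition):

def newtonRaphson2_capped(f, dfdx, x, tol, max_iter=2000):
    # Stop on convergence, on a zero derivative, or after max_iter steps,
    # whichever comes first.
    x0 = x
    for _ in range(max_iter):
        if dfdx(x) == 0.0:
            break
        x = x - f(x) / dfdx(x)
        if abs(x0 - x) / abs(x0) <= tol:
            break
        x0 = x
    return x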

How can I fit an equation which is not a function

Given data points in the xy plane, I would like to use scipy.optimize.leastsq to find fit parameters for an ellipse (which cannot be written as a function of x and y). I tried setting the entire equation equal to zero and then fitting this function, but the fit fails to converge, with the error output
"The relative error between two consecutive iterates is at most 0.000000."
The code is shown below, as well as the output. The fitter clearly does not find any reasonable parameters. My question is whether this is a problem with scipy.optimize.leastsq, or whether the "trick" of setting the function equal to zero and fitting that instead is not valid.
from scipy.optimize import leastsq, curve_fit
import numpy as np
import matplotlib.pyplot as plt

def function(x, y, theta, smaj, smin):
    xp = np.cos(theta)*x - np.sin(theta)*y
    yp = np.sin(theta)*x + np.cos(theta)*y
    z = (xp**2)/smaj**2 + (yp**2)/smin**2
    return z

def g(x, y, smaj, smin):
    return x*x/smaj**2 + y*y/smin**2

def window(array, alt, arange):
    arr = [array[i] for i, a in enumerate(alt) if a > arange[0] and a < arange[1]]
    return np.asarray(arr)

def fitter(p0, x, y, func, errfunc, err):
    # the fitter function
    out = leastsq(errfunc, p0, args=(x, y, func, err), full_output=1)
    pfinal = out[0]
    covar = out[1]
    mydict = out[2]
    mesg = out[3]
    ier = out[4]
    resids = mydict['fvec']
    chisq = np.sum(resids**2)
    degs_frdm = len(x) - len(pfinal)
    reduced_chisq = chisq/degs_frdm
    ls = [pfinal, covar, mydict, mesg, ier, resids, chisq, degs_frdm, reduced_chisq]
    print('fitter status: ', ier, '-- aka -- ', mesg)
    i = 0
    if covar is not None:
        if ier == 1 or ier == 2 or ier == 3 or ier == 4:
            for u in pfinal:
                print('Param', i + 1, ': ', u, ' +/- ', np.sqrt(covar[i, i]))
                i = i + 1
        print('reduced chisq', reduced_chisq)
    else:
        print('fitter failed')
    return ls

def func(x, y, p):
    x = x - p[3]
    y = y - p[4]
    xp = np.cos(p[0])*x - np.sin(p[0])*y
    yp = np.sin(p[0])*x + np.cos(p[0])*y
    z = (xp**2)/p[1]**2 + (yp**2)/p[2]**2 - 1
    return z

def errfunc(p, x, y, func, err):
    return (y - func(x, y, p))/err

t = np.linspace(0, 2*np.pi, 100)
xx = 5*np.cos(t); yy = np.sin(t)
p0 = [0, 5, 1, 0, 0]
sigma = np.ones(len(xx))

fit = fitter(p0, xx, yy, func, errfunc, sigma)
params = fit[0]
covariance = fit[1]
residuals = fit[5]

t = np.linspace(0, 2*np.pi, 100)
xx = 5*np.cos(t); yy = np.sin(t)
plt.plot(xx, yy, 'bx', ms=4)

xx = np.linspace(-10, 10, 1000)
yy = np.linspace(-5, 5, 1000)
newx = []
newy = []
for x in xx:
    for y in yy:
        if 0.99 < func(x, y, params) < 1.01:
            #if g(x,y,5,1) == 1:
            newx.append(x)
            newy.append(y)
plt.plot(newx, newy, 'kx', ms=1)
plt.show()
The blue crosses are the actual data, and the black line is the fitter's guess at the parameters.
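One thing worth checking, sketched below: errfunc computes (y - func(x, y, p))/err, which treats the implicit value as if it were a prediction of y. Since func(x, y, p) is exactly zero on the ellipse, a common alternative (my suggestion, not something the post confirms) is to minimize that algebraic distance directly:

from scipy.optimize import leastsq
import numpy as np

# Hypothetical residual: func(x, y, p) is zero on the ellipse, so use the
# implicit (algebraic) distance itself instead of (y - func(x, y, p)).
def errfunc_implicit(p, x, y, func, err):
    return func(x, y, p)/err

t = np.linspace(0, 2*np.pi, 100)
xdata = 5*np.cos(t)
ydata = np.sin(t)
sigma = np.ones(len(xdata))
p0 = [0.1, 4.0, 2.0, 0.5, -0.5]  # deliberately offset starting guess
pfit, ier = leastsq(errfunc_implicit, p0, args=(xdata, ydata, func, sigma))
print(pfit)  # should land near theta=0, smaj=5, smin=1, centre (0, 0) for this clean synthetic data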
