Finding the intersection of a curve from polyfit - python

This seems simple but I can't quite figure it out. I have a curve calculated from x,y data. Then I have a line. I want to find the x, y values where the two intersect.
Here is what I've got so far. It's super confusing and doesn't give the correct result. I can look at the graph and find the intersection x value and calculate the correct y value. I'd like to remove this human step.
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from scipy import linalg
import sys
import scipy.interpolate as interpolate
import scipy.optimize as optimize
w = np.array([0.0, 11.11111111111111, 22.22222222222222, 33.333333333333336, 44.44444444444444, 55.55555555555556, 66.66666666666667, 77.77777777777777, 88.88888888888889, 100.0])
v = np.array([0.0, 8.333333333333332, 16.666666666666664, 25.0, 36.11111111111111, 47.22222222222222, 58.333333333333336, 72.22222222222221, 86.11111111111111, 100.0])
z = np.polyfit(w, v, 2)
print (z)
p=np.poly1d(z)
g = np.polyval(z,w)
print (g)
N=100
a=arange(N)
b=(w,v)
b=np.array(b)
c=(w,g)
c=np.array(c)
print(c)
d=-a+99
e=(a,d)
print (e)
p1=interpolate.PiecewisePolynomial(w,v[:,np.newaxis])
p2=interpolate.PiecewisePolynomial(w,d[:,np.newaxis])
def pdiff(x):
    return p1(x)-p2(x)
xs=np.r_[w,w]
xs.sort()
x_min=xs.min()
x_max=xs.max()
x_mid=xs[:-1]+np.diff(xs)/2
roots=set()
for val in x_mid:
    root,infodict,ier,mesg = optimize.fsolve(pdiff,val,full_output=True)
    # ier==1 indicates a root has been found
    if ier==1 and x_min<root<x_max:
        roots.add(root[0])
roots=list(roots)
print(np.column_stack((roots,p1(roots),p2(roots))))
plt.plot(w,v, 'r', a, -a+99, 'b-')
plt.show()
q=input("what is the intersection value? ")
print (p(q))
Any ideas to get this to work?
Thanks

I don't think I fully understand what you are trying to do in your code, but what you described in English can be done with:
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
w = np.array([0.0, 11.11111111111111, 22.22222222222222, 33.333333333333336,
44.44444444444444, 55.55555555555556, 66.66666666666667,
77.77777777777777, 88.88888888888889, 100.0])
v = np.array([0.0, 8.333333333333332, 16.666666666666664, 25.0,
36.11111111111111, 47.22222222222222, 58.333333333333336,
72.22222222222221, 86.11111111111111, 100.0])
poly_coeff = np.polynomial.polynomial.polyfit(w, v, 2)
poly = np.polynomial.polynomial.Polynomial(poly_coeff)
roots = np.polynomial.polynomial.polyroots(poly_coeff - [99, -1, 0])
x = np.linspace(np.min(roots) - 50, np.max(roots) + 50, num=1000)
plt.plot(x, poly(x), 'r-')
plt.plot(x, 99 - x, 'b-')
for root in roots:
    plt.plot(root, 99 - root, 'ro')
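If you want the intersection coordinates printed rather than read off the plot, each root is an x where the fitted polynomial meets the line, so a small addition to the snippet above gives them:
# Each root satisfies poly(x) == 99 - x, so these are the intersections.
for root in roots:
    print(root, 99 - root)
plt.show()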

Related

interpolate, differentiate and integrate a function -- some math fun

I have a problem. I have three lists: list_umf holds the x values, and list_kf and list_kfm both hold y values. kfm is the integral of kf. The values are the output of my code.
To show that kfm is the integral of kf, I want to calculate the derivative of kfm, which should be the same as kf. But the recalculated kf (list_kf_re) is just 101.0 every time.
What's wrong with my code?
import numpy as np
from scipy import integrate, interpolate
from scipy.misc import derivative as deriv
import matplotlib.pyplot as plt
list_kfm = [15.348748494618041, 26.240336614039776, 37.76846357985518, 49.80068952374503, 62.25356792292074, 75.0692188764684, 88.20491343740369, 101.6276911997135,
115.31128207665246, 129.2342114999071, 143.37856687640036, 157.72915825067278, 172.27292637703843, 186.9985127198004, 201.89593919604192, 216.95636451973587]
list_kf = [168.08871431597626, 179.78615963605742, 188.728883379148, 196.0371678709251, 202.25334207341422, 207.68364358717665, 212.51893919883966, 216.88670040685466,
220.87653440371076, 224.55397301446894, 227.96847485999652, 231.15833919688876, 234.1538643061246, 236.97945558527186, 239.65507793294745, 242.19728380107006]
list_umf = [0.1, 0.15000000000000002, 0.20000000000000004, 0.25000000000000006, 0.30000000000000004, 0.3500000000000001, 0.40000000000000013, 0.45000000000000007,
0.5000000000000001, 0.5500000000000002, 0.6000000000000002, 0.6500000000000001, 0.7000000000000002, 0.7500000000000002, 0.8000000000000002, 0.8500000000000002]
f = interpolate.interp1d(
    list_umf, list_kfm, bounds_error=False, fill_value=(15, 217))
list_kf_re = [deriv(f, x) for x in list_umf]
plt.plot(list_umf, list_kfm, label='kfm')
plt.plot(list_umf, list_kf, label='kf')
plt.plot(list_umf, list_kf_re, label='kfre')
print(list_kf_re)
print(list_kf)
Use UnivariateSpline to create an interpolator to which you can later apply derivative or integral operators (see this post). As for the constant 101.0: scipy.misc.derivative uses dx=1.0 by default, so for every x in [0.1, 0.85] it evaluates f at x - 1.0 and x + 1.0, both far outside your data range; with bounds_error=False those calls return the fill values 15 and 217, and the central difference (217 - 15) / 2 = 101.0 at every point.
Sample:
import numpy as np
#from scipy import integrate, interpolate
from scipy.interpolate import UnivariateSpline as US, InterpolatedUnivariateSpline as IUS
#from scipy.misc import derivative as deriv
import matplotlib.pyplot as plt
list_kfm = [15.348748494618041, 26.240336614039776, 37.76846357985518, 49.80068952374503, 62.25356792292074, 75.0692188764684, 88.20491343740369, 101.6276911997135,
115.31128207665246, 129.2342114999071, 143.37856687640036, 157.72915825067278, 172.27292637703843, 186.9985127198004, 201.89593919604192, 216.95636451973587]
list_kf = [168.08871431597626, 179.78615963605742, 188.728883379148, 196.0371678709251, 202.25334207341422, 207.68364358717665, 212.51893919883966, 216.88670040685466,
220.87653440371076, 224.55397301446894, 227.96847485999652, 231.15833919688876, 234.1538643061246, 236.97945558527186, 239.65507793294745, 242.19728380107006]
list_umf = [0.1, 0.15000000000000002, 0.20000000000000004, 0.25000000000000006, 0.30000000000000004, 0.3500000000000001, 0.40000000000000013, 0.45000000000000007,
0.5000000000000001, 0.5500000000000002, 0.6000000000000002, 0.6500000000000001, 0.7000000000000002, 0.7500000000000002, 0.8000000000000002, 0.8500000000000002]
f = US(list_umf, list_kfm)
list_kf_re = [f.derivative()(x) for x in list_umf]
plt.plot(list_umf, list_kfm, label='kfm')
plt.plot(list_umf, list_kf, label='kf')
plt.plot(list_umf, list_kf_re, label='kfre')
plt.plot(list_umf, list_kf, 'o', label='kfre_2')
print(list_kf_re)
print(list_kf)
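If you would rather keep the interp1d approach, a minimal sketch of a fix (using scipy.misc.derivative, which is deprecated and removed in newer scipy releases) is to pass a step small enough to stay inside the data range; note that a linear interp1d only yields a piecewise-constant derivative, and the endpoints still touch the fill values:
# Sketch: same interpolator as in the question, but with a small finite-
# difference step so x +/- dx stays inside [0.1, 0.85] at interior points.
from scipy.interpolate import interp1d
from scipy.misc import derivative as deriv

f_lin = interp1d(list_umf, list_kfm, bounds_error=False, fill_value=(15, 217))
list_kf_re = [deriv(f_lin, x, dx=1e-3) for x in list_umf]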

Exponential fit on a histogram

I'm trying to fit an exponential curve to a histogram created from the variable y1_pt and then get the exponential's parameters. The problem is that it gives me the following warning:
OptimizeWarning: Covariance of the parameters could not be estimated
with
pcov_exponential =
array([[inf, inf, inf],
       [inf, inf, inf],
       [inf, inf, inf]])
and the resulting fit looks rather random to me (see plot).
Does anyone have a clue as to what's wrong?
import pandas as pd
import numpy
from pylab import *
import scipy.stats as ss
from scipy.optimize import curve_fit
df=pd.read_hdf('data.h5','dataset')
pty1 = df['y1_pt']
bins1 = numpy.linspace(35, 1235, 100)
counts, bins = numpy.histogram(pty1, bins = bins1, range = [35, 1235], density = False)
binscenters = numpy.array([0.5 * (bins1[i] + bins1[i+1]) for i in range(len(bins1)-1)])
def exponential(x, a, k, b):
    return a*np.exp(-x*k) + b
popt_exponential, pcov_exponential = curve_fit(exponential, xdata=binscenters, ydata=counts)
print(popt_exponential)
xspace = numpy.linspace(0, 6, 100000)
plt.bar(binscenters, counts, color='navy', label=r'Histogram entries')
plt.plot(xspace, exponential(xspace, *popt_exponential), color='darkorange', linewidth=2.5, label=r'Fitted function')
plt.show()
I think you are missing a minus sign in the exponential formula, hence the overflow. It should be a * np.exp( - x * k) + b
See the example at https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
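Two more things worth checking, both guesses on my part: with no p0, curve_fit starts from a = k = b = 1, and exp(-k*x) underflows to zero for x between 35 and 1235, which can also produce an all-inf covariance; and your xspace runs from 0 to 6 while the histogram spans 35 to 1235, so the fitted curve is plotted far away from the bars. A rough starting point (the scales below are assumptions, not fitted values) usually helps:
# Rough initial guesses: amplitude ~ tallest bin, decay ~ 1/mean(x), offset 0.
p0 = (counts.max(), 1.0 / binscenters.mean(), 0.0)
popt_exponential, pcov_exponential = curve_fit(
    exponential, xdata=binscenters, ydata=counts, p0=p0)
xspace = numpy.linspace(35, 1235, 1000)  # match the histogram range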

why does acceptance-rejection sampling perform so badly for computing an integral

I want to compute $\int_1^2 x^2 dx$ using the acceptance-rejection sampling method, but it does not perform as well as I expected.
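For reference, the target value is $\int_1^2 x^2\,dx = \left[\tfrac{x^3}{3}\right]_1^2 = \tfrac{8-1}{3} = \tfrac{7}{3} \approx 2.333$, which is the S = 7/3.0 appearing in the code below.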
import random
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = (12,8)
def acceptRejectSampling(number, p, c):
    sample = []
    while len(sample) < number:
        y = random.uniform(2, 4)
        u = random.uniform(0, c)
        if u <= p(y):
            sample.append(y)
    return sample

def f(x):  # the domain of x is [1,2]
    return x**2

def p(y):  # pdf of y, the domain of y is [2,4]
    return 0.5/(y**(0.5))
S=7/3.0
X=np.random.uniform(1,2,1000)
print("area:",S)
print("monte calro estimator: ",np.mean([f(i) for i in X]))
#acceptance rejection sampling
y=np.linspace(2,4,200)
c=0.5/np.power(2,0.5)
cc=[c for i in range(200)]
samples=acceptRejectSampling(10000,p,c)
print("acceptance-rejection-sampling estimator: ", np.mean(samples))
plt.plot(y,cc, '--',label='c*g(x)')
py=[p(i) for i in y]
plt.plot(y, py,label='p(y)')
plt.hist(samples, bins=50, density=True,label='sampling')
plt.legend()
plt.show()
The output of the code is:
area: 2.3333333333333335
Monte Carlo estimator:  2.391469143847661
acceptance-rejection-sampling estimator:  2.938527759943371
I am confused about why the acceptance-rejection sampling performs so badly, and I would like your advice on this problem. Please help me, thank you very much!

morse potential fit using python and curve fit from scipy

I am trying to fit a Morse potential using Python and scipy.
The Morse potential is defined as:
V = D*(exp(-2*m*(x-u)) - 2*exp(-m*(x-u)))
where D, m and u are the parameters I need to extract.
Unfortunately the fit is not satisfactory, as you can see below (sorry, I do not have 10 reputation, so the image has to be clicked). Could anyone help me, please? I must say I am not the best programmer with Python.
Here is my code:
from scipy.optimize import curve_fit
import numpy as np
import matplotlib.pyplot as plt
xdata2=np.array([1.0 ,1.1 ,1.2 ,1.3 ,1.4 ,1.5 ,1.6 ,1.7 ,1.8 ,1.9 ,2.0 ,2.1 ,2.2 ,2.3 ,2.4 ,2.5 ,2.6 ,2.7 ,2.8 ,2.9 ,3.0 ,3.1 ,3.2 ,3.3 ,3.4 ,3.5 ,3.6 ,3.7 ,3.8 ,3.9 ,4.0 ,4.1 ,4.2 ,4.3 ,4.4 ,4.5 ,4.6 ,4.7 ,4.8 ,4.9 ,5.0 ,5.1 ,5.2 ,5.3 ,5.4 ,5.5 ,5.6 ,5.7 ,5.8 ,5.9])
ydata2=[-1360.121815,-1368.532641,-1374.215047,-1378.090480,-1380.648178,-1382.223113,-1383.091562,-1383.479384,-1383.558087,-1383.445803,-1383.220380,-1382.931531,-1382.609269,-1382.273574,-1381.940879,-1381.621299,-1381.319042,-1381.036231,-1380.772039,-1380.527051,-1380.301961,-1380.096257,-1379.907700,-1379.734621,-1379.575837,-1379.430693,-1379.299282,-1379.181303,-1379.077272,-1378.985220,-1378.903626,-1378.831588,-1378.768880,-1378.715015,-1378.668910,-1378.629996,-1378.597943,-1378.572742,-1378.554547,-1378.543296,-1378.539843,-1378.543593,-1378.554519,-1378.572747,-1378.597945,-1378.630024,-1378.668911,-1378.715015,-1378.768915,-1378.831593]
t=np.linspace(0.1,7)
def morse(q, m, u, x):
    return q * (np.exp(-2*m*(x-u)) - 2*np.exp(-m*(x-u)))
popt, pcov = curve_fit(morse, xdata2, ydata2, maxfev=40000000)
yfit = morse(t,popt[0], popt[1], popt[2])
print(popt)
plt.plot(xdata2, ydata2,"ro")
plt.plot(t, yfit)
plt.show()
[Plot: old fit, before gboffi's comment]
I am guessing the exact depth of the Morse potential does not interest you overly much, so I added an additional parameter v that shifts the potential up and down, incorporating @gboffi's comment. Furthermore, the first argument of your function must be the independent variable, not the parameters you want to fit (see http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.optimize.curve_fit.html).
In addition, such fits are dependent on your starting position. The following should give you what you want.
from scipy.optimize import curve_fit
import numpy as np
import matplotlib.pyplot as plt
xdata2=np.array([1.0 ,1.1 ,1.2 ,1.3 ,1.4 ,1.5 ,1.6 ,1.7 ,1.8 ,1.9 ,2.0 ,2.1 ,2.2 ,2.3 ,2.4 ,2.5 ,2.6 ,2.7 ,2.8 ,2.9 ,3.0 ,3.1 ,3.2 ,3.3 ,3.4 ,3.5 ,3.6 ,3.7 ,3.8 ,3.9 ,4.0 ,4.1 ,4.2 ,4.3 ,4.4 ,4.5 ,4.6 ,4.7 ,4.8 ,4.9 ,5.0 ,5.1 ,5.2 ,5.3 ,5.4 ,5.5 ,5.6 ,5.7 ,5.8 ,5.9])
ydata2=[-1360.121815,-1368.532641,-1374.215047,-1378.090480,-1380.648178,-1382.223113,-1383.091562,-1383.479384,-1383.558087,-1383.445803,-1383.220380,-1382.931531,-1382.609269,-1382.273574,-1381.940879,-1381.621299,-1381.319042,-1381.036231,-1380.772039,-1380.527051,-1380.301961,-1380.096257,-1379.907700,-1379.734621,-1379.575837,-1379.430693,-1379.299282,-1379.181303,-1379.077272,-1378.985220,-1378.903626,-1378.831588,-1378.768880,-1378.715015,-1378.668910,-1378.629996,-1378.597943,-1378.572742,-1378.554547,-1378.543296,-1378.539843,-1378.543593,-1378.554519,-1378.572747,-1378.597945,-1378.630024,-1378.668911,-1378.715015,-1378.768915,-1378.831593]
t=np.linspace(0.1,7)
tstart = [1.e+3, 1, 3, 0]
def morse(x, q, m, u, v):
    return q * (np.exp(-2*m*(x-u)) - 2*np.exp(-m*(x-u))) + v
popt, pcov = curve_fit(morse, xdata2, ydata2, p0 = tstart, maxfev=40000000)
print(popt)  # [ 5.10155662  1.43329962  1.7991549  -1378.53461345]
yfit = morse(t,popt[0], popt[1], popt[2], popt[3])
plt.plot(xdata2, ydata2,"ro")
plt.plot(t, yfit)
plt.show()
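If you would rather not hand-pick tstart, the start values can be estimated from the data; a minimal sketch (these estimates are my own, derived from the Morse form above, not part of the fit):
# v: vertical offset, read off the tail of the curve at large x;
# q: well depth D, i.e. offset minus the minimum;
# u: location of the minimum; m = 1.0 is a blind guess for the width.
v0 = ydata2[-1]
q0 = v0 - min(ydata2)
u0 = xdata2[np.argmin(ydata2)]
tstart = [q0, 1.0, u0, v0]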

'numpy.ndarray' object is not callable when using a CALLABLE function in minimization

I keep getting the 'numpy.ndarray' object is not callable error. I know this error happens when one uses an np.array instead of a function. The problem is that, in my code, I am indeed using a function when calling Python's minimize.
Could someone please let me know what is happening?
The code is here:
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 06:27:54 2015
"""
# Midterm Macroeconometrics
import numpy as np
from numpy import log
import numpy.linalg as linalg
from scipy import *
from scipy.optimize import fminbound, broyden1, brentq, bisect, minimize
from scipy import interp
import pylab as pl
#from numdifftools import Gradient, Jacobian, Derivative
import matplotlib.pyplot as plt
import pandas as pd
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import scipy.io as sio
import os
"""
IMPORTING DATA FROM PANDAS
"""
#Importing data from text file- using Pandas.
os.chdir(r'/Users/camilahenao/Dropbox/UIUC Phd Econ/Year 3/Fall/macroeconometrics shin/Homework/ps3-MIDTERM')
os.path.abspath(os.path.curdir)
data=pd.read_csv(r'midterm2015.csv', header= None)
data.columns = ['GDP_I', 'GDP_E']
GDP_I=np.array(data.GDP_I)
GDP_E=np.array(data.GDP_E)
y= np.vstack((GDP_I,GDP_E))
def kalman2(a_old, p_old, Z, gamma, theta, y):
    # NOTE: w, T, R and Q are used below but never defined in the posted code.
    mu, rho, h_I, h_E, h_G = theta[0], theta[1], np.log(theta[2]), np.log(theta[3]), np.log(theta[4])
    sigma_I = np.exp(h_I)
    sigma_E = np.exp(h_E)
    sigma_G = np.exp(h_G)
    H = np.array([[sigma_I, 0], [0, sigma_E]])
    H = np.matrix(H)
    list_a = np.array([a_old])
    list_p = np.array([p_old])
    list_f = np.array([])
    list_v = np.array([])
    log_likelihood_Y = np.array([])
    list_log_like_sum = np.array([])
    for i in range(y[0].size):
        N = y.shape[0]
        Time = y[0].size
        inv = np.matrix(linalg.inv(Z*p_old*Z.T + H))
        cosa = Z.T*inv
        temp = p_old*cosa
        a_new = np.array(a_old + temp*(np.array([[y[0][i]], [y[1][i]]]) - Z*a_old - gamma*w))[0]
        list_a = np.hstack((list_a, a_new))
        p_new = np.array(p_old - temp*Z*p_old)[0]
        list_p = np.hstack((list_p, p_new))
        # Transform the previous posterior into a prior
        a_old = T*a_new
        a_old = a_old[0]
        p_old = T*p_new*T + R*Q*R  # 25
        # Moments for the log-likelihood:
        f = np.linalg.det(Z*p_old*Z.T + H)
        list_f = np.hstack((list_f, f))
        v = np.array([[y[0][i]], [y[1][i]]]) - Z*a_old - gamma*w
        v_element = np.array((v.T * np.matrix(np.linalg.inv(Z*p_old*Z.T + H)) * v))[0]
        list_v = np.hstack((list_v, v_element))
        # Log-likelihood for each period:
        log_like = (-N*(Time-1)/2*np.log(2*pi) - (1/2)*sum(log(list_f)) - (1/2)*sum(list_v))
        log_likelihood_Y = np.hstack((log_likelihood_Y, log_like))
        # Running sum over time of the log-likelihood
        log_like_sum = np.sum(log_likelihood_Y)
        list_log_like_sum = np.hstack((list_log_like_sum, log_like_sum))
    return list_a, list_p, log_likelihood_Y, list_log_like_sum
#Define the "callable function"
def mle(a_old, p_old, Z, gamma, theta, y, bds):
    a, P, py, py_sum = kalman2(a_old, p_old, Z, gamma, theta, y)
    mle = -1*py_sum
    return mle
#Run the minimization algorithm
theta2=(.8, 3.0, 5.0, 5.0, 5.0)
a_old=0.0
p_old= sigmaG/(1-rho**2)
Z=np.array([[1.0],[1.0]])
gamma=np.array([[1.0],[1.0]])
bds = [[-10e100, 10e100], [-10e100, 10e100], [1e-6, 10e100], [1e-6, 10e100], [1e-6, 10e100]]
theta_guess = [3, 0.8, np.sqrt(5), np.sqrt(5), np.sqrt(5)]
result = minimize(mle(a_old, p_old, Z, gamma, theta, y, bds), theta_guess, bounds = bds)
As Warren Weckesser mentioned in a comment, you're passing the result of calling mle(a_old, p_old, Z, gamma, theta, y, bds), which is a single number, as the first argument to the minimize() function. According to the scipy documentation, the first argument to minimize() must be a callable, the second the initial guess for the parameters being optimized, and any fixed arguments go in the args keyword. So for starters you're going to need to change the call to something like this:
result = minimize(mle, theta_guess,
                  args=(a_old, p_old, Z, gamma, y, bds),
                  bounds=bds)
However, you're going to run into new problems, because minimize() requires the vector of parameters to be the first argument of the function it is given, and your mle() takes a_old first, so you're also going to need to modify its definition accordingly.
Unfortunately I don't understand enough of what you're actually trying to accomplish to suggest how you should do that.
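That said, the mechanical part of that change might look like the following sketch (my rearrangement of the code above, untested):
# Sketch: theta, the vector being optimized, moves to the front; everything
# else is passed through args. bds is unused inside mle but kept to mirror
# the original signature.
def mle(theta, a_old, p_old, Z, gamma, y, bds):
    a, P, py, py_sum = kalman2(a_old, p_old, Z, gamma, theta, y)
    return -1 * py_sum

result = minimize(mle, theta_guess,
                  args=(a_old, p_old, Z, gamma, y, bds),
                  bounds=bds)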
