I am working on fitting a 3d distribution function in scipy. I have a numpy array with counts in x- and y-bins, and I am trying to fit that to a rather complicated 3-d distribution function. The data is fit to 26 (!) parameters, which describe the shape of its two constituent populations.
I learned here that I have to pass my x- and y-coordinates as 'args' when I call leastsq. The code presented by unutbu works as written for me, but when I try to apply it to my specific case, I am given the error "TypeError: leastsq() got multiple values for keyword argument 'args' "
Here's my code (sorry for the length):
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as spopt
from textwrap import wrap
import collections
# Histogram domain and binning for the colour-magnitude data.
cl = 0.5    # colour axis lower bound (presumably colour range -- TODO confirm)
ch = 3.5    # colour axis upper bound
rl = -23.5  # magnitude axis lower bound (matches the x bin centres noted below)
rh = -18.5  # magnitude axis upper bound
mbins = 10  # number of magnitude bins
cbins = 10  # number of colour bins
def hist_data(mixed_data, mbins, cbins):
    """Bin the weighted samples into a 2-D histogram.

    Columns 1 and 2 of ``mixed_data`` hold the two coordinates and
    column 3 the per-sample weights.  Returns the transposed count
    array together with the bin-centre coordinates of each axis.
    """
    import numpy as np

    counts, m_edges, c_edges = np.histogram2d(
        mixed_data[:, 1], mixed_data[:, 2],
        bins=(mbins, cbins), weights=mixed_data[:, 3])
    # Midpoints of consecutive edges give the bin centres.
    m_centers = 0.5 * (m_edges[:-1] + m_edges[1:])
    c_centers = 0.5 * (c_edges[:-1] + c_edges[1:])
    return counts.T, m_centers, c_centers
def gauss(x, s, mu, a):
    """Unnormalised Gaussian: a * exp(-(x - mu)^2 / (2 s^2))."""
    import numpy as np
    z = (x - mu) / s
    return a * np.exp(-0.5 * z ** 2.)
def tanhlin(x, p0, p1, q0, q1, q2):
    """Linear trend (anchored at x = -20) plus a tanh step of scale q0."""
    import numpy as np
    linear_part = p0 + p1 * (x + 20.)
    step_part = q0 * np.tanh((x - q1) / q2)
    return linear_part + step_part
def func3d(p, x, y):
    """Evaluate the two-population (red + blue) model on the (x, y) grid.

    ``p`` is the 26-element parameter vector; ``x`` and ``y`` are the 1-D
    bin-centre coordinate arrays.  Returns a 2-D array of model counts.

    Fixes relative to the original:
    - used the module-level ``coords`` instead of its own x/y arguments,
      so it could not be driven by the optimiser;
    - called an undefined ``schechter`` with undefined arguments
      (rap/raa/ram, bap/baa/bam), which would raise NameError -- those
      two lines (marked "# unused") are removed;
    - dropped the unused ``from sys import exit``.
    """
    import numpy as np

    (rsp0, rsp1, rsq0, rsq1, rsq2,
     rmp0, rmp1, rmq0, rmq1, rmq2, rs, rm, ra,
     bsp0, bsp1, bsq0, bsq1, bsq2,
     bmp0, bmp1, bmq0, bmq1, bmq2, bs, bm, ba) = p
    # Build the 2-D evaluation grid from the 1-D bin centres.
    x, y = np.meshgrid(x, y)
    # Width and mean of each population vary with x via tanhlin; note these
    # deliberately shadow the scalar rs/rm/bs/bm unpacked from p, exactly as
    # in the original code.
    rs = tanhlin(x, rsp0, rsp1, rsq0, rsq1, rsq2)
    rm = tanhlin(x, rmp0, rmp1, rmq0, rmq1, rmq2)
    bs = tanhlin(x, bsp0, bsp1, bsq0, bsq1, bsq2)
    bm = tanhlin(x, bmp0, bmp1, bmq0, bmq1, bmq2)
    red_dist = ra / (rs * np.sqrt(2 * np.pi)) * gauss(y, rs, rm, ra)
    blue_dist = ba / (bs * np.sqrt(2 * np.pi)) * gauss(y, bs, bm, ba)
    return red_dist + blue_dist
def residual(p, coords, data):
    """Flattened difference between the model on ``coords`` and ``data``.

    ``coords`` is the (x, y) pair of bin-centre arrays.  Fix: the original
    called ``func3d(p, coords)``, but func3d takes x and y as separate
    arguments, so the pair must be unpacked.
    """
    import numpy as np
    model = func3d(p, coords[0], coords[1])
    res = (model.flatten() - data.flatten())
    # can put parameter restrictions in here
    return res
def poiss_err(data):
    """Poisson (sqrt-N) uncertainty per bin, floored at 2 for empty bins.

    Fix: the original computed ``np.sqrt(H)`` from the module-level ``H``
    instead of its own ``data`` argument, making the parameter dead.
    """
    import numpy as np
    err = np.sqrt(data)
    return np.where(err > 0., err, 2.)
# =====
# NOTE(review): ``mixed_data`` must be defined before this point (it is
# loaded elsewhere in the full script).
H, x, y = hist_data(mixed_data, mbins, cbins)
data = H
coords = x, y
# x and y will be the projected coordinates of the data H onto the plane z = 0
# x has bins of width 0.5, with centers at -23.25, -22.75, ... , -19.25, -18.75
# y has bins of width 0.3, with centers at 0.65, 0.95, ... , 3.05, 3.35
Param = collections.namedtuple('Param', 'rsp0 rsp1 rsq0 rsq1 rsq2 rmp0 rmp1 rmq0 rmq1 rmq2 rs rm ra bsp0 bsp1 bsq0 bsq1 bsq2 bmp0 bmp1 bmq0 bmq1 bmq2 bs bm ba')
p_guess = Param(rsp0 = 0.152, rsp1 = 0.008, rsq0 = 0.044, rsq1 = -19.91, rsq2 = 0.94, rmp0 = 2.279, rmp1 = -0.037, rmq0 = -0.108, rmq1 = -19.81, rmq2 = 0.96, rs = 1., rm = -20.5, ra = 10000., bsp0 = 0.298, bsp1 = 0.014, bsq0 = -0.067, bsq1 = -19.90, bsq2 = 0.58, bmp0 = 1.790, bmp1 = -0.053, bmq0 = -0.363, bmq1 = -20.75, bmq2 = 1.12, bs = 1., bm = -20., ba = 2000.)
# Fix for the reported TypeError: the original call passed ``poiss_err(H)``
# as the third *positional* argument -- which leastsq interprets as ``args``
# -- while also giving ``args = coords`` as a keyword, so ``args`` received
# two values.  Everything residual() needs beyond p must travel inside the
# single ``args`` tuple, matching residual(p, coords, data).  (To weight by
# poiss_err, divide the residual by it inside residual() instead.)
opt, cov, infodict, mesg, ier = spopt.leastsq(residual, p_guess, args = (coords, data), maxfev = 100000, full_output = True)
Here's my data, just with fewer bins:
[[ 1.00000000e+01 1.10000000e+01 2.10000000e+01 1.90000000e+01
1.70000000e+01 2.10000000e+01 2.40000000e+01 1.90000000e+01
2.80000000e+01 1.90000000e+01]
[ 1.40000000e+01 4.50000000e+01 6.00000000e+01 6.80000000e+01
1.34000000e+02 1.97000000e+02 2.23000000e+02 2.90000000e+02
3.23000000e+02 3.03000000e+02]
[ 3.00000000e+01 1.17000000e+02 3.78000000e+02 9.74000000e+02
1.71900000e+03 2.27700000e+03 2.39000000e+03 2.25500000e+03
1.85600000e+03 1.31000000e+03]
[ 1.52000000e+02 9.32000000e+02 2.89000000e+03 5.23800000e+03
6.66200000e+03 6.19100000e+03 4.54900000e+03 3.14600000e+03
2.09000000e+03 1.33800000e+03]
[ 5.39000000e+02 2.58100000e+03 6.51300000e+03 8.89900000e+03
8.52900000e+03 6.22900000e+03 3.55000000e+03 2.14300000e+03
1.19000000e+03 6.92000000e+02]
[ 1.49600000e+03 4.49200000e+03 8.77200000e+03 1.07610000e+04
9.76700000e+03 7.04900000e+03 4.23200000e+03 2.47200000e+03
1.41500000e+03 7.02000000e+02]
[ 2.31800000e+03 7.01500000e+03 1.28870000e+04 1.50840000e+04
1.35590000e+04 8.55600000e+03 4.15600000e+03 1.77100000e+03
6.57000000e+02 2.55000000e+02]
[ 1.57500000e+03 3.79300000e+03 5.20900000e+03 4.77800000e+03
3.26600000e+03 1.44700000e+03 5.31000000e+02 1.85000000e+02
9.30000000e+01 4.90000000e+01]
[ 7.01000000e+02 1.21600000e+03 1.17600000e+03 7.93000000e+02
4.79000000e+02 2.02000000e+02 8.80000000e+01 3.90000000e+01
2.30000000e+01 1.90000000e+01]
[ 2.93000000e+02 3.93000000e+02 2.90000000e+02 1.97000000e+02
1.18000000e+02 6.40000000e+01 4.10000000e+01 1.20000000e+01
1.10000000e+01 4.00000000e+00]]
Thanks very much!
So what leastsq does is try to:
"Minimize the sum of squares of a set of equations"
-scipy docs
As the docs say, leastsq minimizes a set of equations, so it does not take x or y data inputs directly; instead you pass a residual function and forward your data through `args`. However, it is significantly easier to just use curve_fit, which builds the necessary residual equations for you.
For fitting you should use: curve_fit if you are ok with the generic residual they use which is actually the function you pass itself res = leastsq(func, p0, args=args, full_output=1, **kw) if you look in the code here.
e.g. If I fit the rosenbrock function in 2d and guess the y-parameter:
from scipy.optimize import curve_fit
from itertools import imap
import numpy as np
# use only an even number of arguments
def rosen2d(x, a):
    """2-D Rosenbrock function with the y-coordinate ``a`` as a free parameter."""
    return (1 - x) ** 2 + 100 * (a - (x ** 2)) ** 2

# generate some synthetic data
datax = np.array([.01 * x for x in range(-10, 10)])
datay = 2.3
# Fix: on Python 3 ``map`` returns an iterator, which np.array wraps as a
# 0-d object array; materialise the values with a list comprehension.
dataz = np.array([rosen2d(xi, datay) for xi in datax])
optimalparams, covmatrix = curve_fit(rosen2d, datax, dataz)
print('opt:', optimalparams)  # fixed: print is a function on Python 3
fitting the colville function in 4d:
from scipy.optimize import curve_fit
import numpy as np
# 4 dimensional colville function
# definition from http://www.sfu.ca/~ssurjano/colville.html
def colville(x, x3, x4):
    """Colville test function; ``x`` supplies (x1, x2) as columns, x3/x4 are scalars."""
    x1 = x[:, 0]
    x2 = x[:, 1]
    banana_terms = 100 * (x1 ** 2 - x2) ** 2 + (x1 - 1) ** 2
    shifted_terms = (x3 - 1) ** 2 + 90 * (x3 ** 2 - x4) ** 2
    quadratic_terms = 10.1 * ((x2 - 1) ** 2 + (x4 - 1) ** 2)
    coupling_term = 19.8 * (x2 - 1) * (x4 - 1)
    return banana_terms + shifted_terms + quadratic_terms + coupling_term
#generate some random data slightly off
# Fix: build a float array up front -- the in-place ``+=`` of float noise
# into an integer array raises a casting error with modern NumPy.
datax = np.array([[x, x] for x in range(-10, 10)], dtype=float)
#add noise (uniform, from np.random.rand)
datax += np.random.rand(*datax.shape)
#set 2 of the 4 parameters to constants
x3 = 3.5
x4 = 4.5
#calculate the function
dataz = colville(datax, x3, x4)
#fit the function
optimalparams, covmatrix = curve_fit(colville, datax, dataz)
print('opt:', optimalparams)  # fixed: print is a function on Python 3
Using a custom residual function:
from scipy.optimize import leastsq
import numpy as np
# 4 dimensional colville function
# definition from http://www.sfu.ca/~ssurjano/colville.html
def colville(x, x3, x4):
    """Colville function of the (x1, x2) columns of ``x`` plus scalars x3, x4."""
    first, second = x[:, 0], x[:, 1]
    return (100 * (first ** 2 - second) ** 2
            + (first - 1) ** 2
            + (x3 - 1) ** 2
            + 90 * (x3 ** 2 - x4) ** 2
            + 10.1 * ((second - 1) ** 2 + (x4 - 1) ** 2)
            + 19.8 * (second - 1) * (x4 - 1))
#generate some random data slightly off
# Fix: float dtype so the in-place addition of float noise below is legal
# with modern NumPy (int += float raises a casting error).
datax = np.array([[x, x] for x in range(-10, 10)], dtype=float)
#add noise (uniform, from np.random.rand)
datax += np.random.rand(*datax.shape)
#set 2 of the 4 parameters to constants
x3 = 3.5
x4 = 4.5

def residual(p, x, y):
    """Signed residual between the data y and the colville model at parameters p."""
    return y - colville(x, *p)

#calculate the function
dataz = colville(datax, x3, x4)
#guess some initial parameter values
p0 = [0, 0]
#calculate a minimization of the residual
optimalparams = leastsq(residual, p0, args=(datax, dataz))[0]
print('opt:', optimalparams)  # fixed: print is a function on Python 3
Edit: you passed `args` both positionally and as a keyword argument. If you look at the docs you'll see `args` is the third positional parameter, but it can also be given as a keyword. You supplied both, so the function received two conflicting values for `args` — hence the TypeError.
Related
I'm trying to plot the graphs of the following equation in Python.
Solution of the radial differential equation of a 2d quantum ring
The beta parameter is
This was my attempt
import numpy as np
from scipy.special import gamma, genlaguerre
import matplotlib.pyplot as plt
from scipy import exp, sqrt
m = 0.067*9.1*10E-31   # effective electron mass (note: 10E-31 == 1e-30)
R = 5E-9               # ring radius, metres
r = np.linspace(0, 20E-9)  # radial grid (50 points by default)

# Definition of the beta parameter (comment translated from Portuguese)
def beta(gama):
    """Return beta over a grid of 50 flux values for confinement strength gama."""
    flux = np.linspace(0, 1.0)
    # Fix: use np.sqrt -- ``from scipy import sqrt`` relies on a NumPy
    # re-export that modern SciPy no longer provides.
    return np.sqrt((m - flux) ** 2 + (gama ** 4) / 4)
def Rn(n,gama):
    """Radial wavefunction attempt, exactly as posted (see review notes).

    NOTE(review): ``genlaguerre((n, beta(gama)), ...)`` passes a *tuple* as
    the polynomial degree and an array where a scalar alpha is expected;
    genlaguerre expects scalar (n, alpha) and returns a polynomial object
    that must then be *called* on the argument.  This misuse is what raises
    the "truth value of an array ... is ambiguous" ValueError reported below.
    """
    # Normalisation factor; gamma(...) is scipy.special.gamma here.
    raiz = sqrt((gamma(n+1)/((2**beta(gama)) * gamma(n+beta(gama)+1))))
    eval_g = genlaguerre((n,beta(gama)),((gama * r/R)**2/2))
    exp_g = exp(-((gama * r/R)**2)/4)
    return (1/R) * raiz * (gama * r/R)**beta(gama) * exp_g * eval_g
# Evaluate the (broken -- see Rn above) radial solutions for four gamma values.
sol1 = Rn(0,1.5)
sol2 = Rn(0,2.0)
sol3 = Rn(0,2.5)
sol4 = Rn(0,3.0)
fig, ax = plt.subplots()
# NOTE(review): '\g' in these labels is an invalid escape sequence; raw
# strings (r'$\gamma$') would be cleaner, though matplotlib renders these.
ax.plot(r/R, sol1, color = 'red', label = '$\gamma$ = 1.5')
ax.plot(r/R, sol2, color = 'green', label = '$\gamma$ = 2.0')
ax.plot(r/R, sol3, color = 'blue', label = '$\gamma$ = 2.5')
ax.plot(r/R, sol4, color = 'black', label = '$\gamma$ = 3.0')
ax.legend()
ax.set_xlabel('R/r')  # NOTE(review): label says R/r but the data plotted is r/R
ax.set_ylabel('$R_0(r)$')
Error when using genlaguerre:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
Here the link to the article
I'm not deeply familiar with this topic, but as far as I can see there are at least the following mistakes (I assume silently setting n = 0 was intentional):
In opposite to the posted image of the beta function, you added a division by 4 into your function definition.
Correct version:
def beta(gama):
    """Corrected beta parameter: no division by 4 under the square root."""
    squared_sum = (m - flux) ** 2 + gama ** 4
    return np.sqrt(squared_sum)
That does not explain the shifted rather than fixed peak positions, but I am reporting what I see here.
A product appears instead of a division because of missing parentheses in the definition of the amplitude function:
Correct version:
def amplitude(gama):
    """Normalisation amplitude built from the gamma function and beta(gama)."""
    b = beta(gama)
    denominator = 2 ** b * gamma(b + 1)
    return np.sqrt(gamma(1) / denominator)
In the definition of Rn function there's the introducing 1/R missing.
However, all this unfortunately does not change the peaks to happen at equal x positions...
Sorry! The radial wave function of the article is wrong! I made the calculations and found the correct answer. The following code is correct
import numpy as np
from scipy.special import genlaguerre, gamma
import numpy as np
import matplotlib.pyplot as plt
m = 0     # magnetic number
flux = 0  # Phi in eqn 8
R = 5     # nm
r = np.linspace(0, 6 * R)  # radial grid, 50 points
rho = r / R                # dimensionless radius

def R0(n, gama):
    """Corrected radial wavefunction R_0 evaluated on the module-level rho grid."""
    beta = np.sqrt((m - flux) ** 2 + gama ** 4 / 4)
    norm = np.sqrt(gamma(n + 1) / (2 ** beta * gamma(n + beta + 1)))
    envelope = (gama * rho) ** beta * np.exp(-gama ** 2 * rho ** 2 / 4)
    laguerre_part = genlaguerre(n, beta)(gama ** 2 * rho ** 2 / 2)
    return gama / R * norm * envelope * laguerre_part
# Evaluate the corrected solution for four confinement strengths gamma.
sol1 = R0(0, 1.5)
sol2 = R0(0, 2.0)
sol3 = R0(0, 2.5)
sol4 = R0(0, 3.0)
# Plot all four curves in one call; colours follow the default cycle.
plt.plot(rho, sol1, rho, sol2, rho, sol3, rho, sol4)
plt.legend(['$\gamma = 1.5$', '$\gamma = 2$', '$\gamma = 2.5$', '$\gamma = 3$'])
plt.ylabel('$R_{0}(r)$')
plt.xlabel('$r/R$')
plt.show()
The problem is to fit a Gaussian to each of my wavelength peaks in order to make the overall adjustment as accurate as possible.
My question is how to make the Gaussian adjustment on all my peaks automatically without having to manually specify the coordinates of the peaks
For that, I realized the Gaussian adjustment of the brightest peaks, but I would like to generalize it to the following peaks. Subsequently, the Gaussian adjustment will allow me to obtain a polynomial adjustment fine enough to stagger pixels in wavelength
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy import interpolate
from tqdm import tqdm
from scipy import ndimage
import peakutils
from scipy.optimize import curve_fit
def gauss(x, x0, amp, wid):
    """Single Gaussian peak centred at x0 with amplitude amp and width wid."""
    return amp * np.exp(-((x - x0) / wid) ** 2)

def multi_gauss(x, *params):
    """Sum of Gaussians; ``params`` holds consecutive (x0, amp, wid) triples."""
    triples = [params[i:i + 3] for i in range(0, len(params), 3)]
    total = np.zeros_like(x)
    for center, amplitude, width in triples:
        total = total + gauss(x, center, amplitude, width)
    return total
# --- Load the neon calibration spectrum ---------------------------------
# NOTE(review): data_directory and wave_filename must be defined earlier in
# the full script (not shown here).
neon = fits.getdata(data_directory + wave_filename + '.fits')
# Collapse the 2-D frame to a 1-D spectrum by averaging over rows.
neon_sp = np.mean(neon, axis= 0)
n_pix = len(neon_sp)
# Detect emission peaks above 5% of the maximum, at least 2 px apart.
peaks_index = peakutils.peak.indexes(neon_sp, thres=0.05, min_dist=2)
### peaks around the brightest peak
bright_index = peaks_index[np.argmax(neon_sp[peaks_index])]
delta_pix = 20  # initial half-width of the selection window, pixels
ind_min = bright_index - delta_pix
ind_max = bright_index + delta_pix
peak_select = peaks_index[np.where((peaks_index > ind_min) & (peaks_index < ind_max))]
# Order the selected peaks from brightest to faintest.
peak_select_sort = peak_select[np.argsort(-neon_sp[peak_select])]
# Widen the window towards the side where the second-brightest peak lies.
if peak_select_sort[1] > peak_select_sort[0] :
    ind_max = bright_index + 40
else :
    ind_min = bright_index - 40
peak_select = peaks_index[np.where((peaks_index > ind_min) & (peaks_index < ind_max))]
peak_select_sort = peak_select[np.argsort(-neon_sp[peak_select])]
# Diagnostic plot: full spectrum, all detected peaks (+), selected peaks (o).
plt.figure(num=0)
plt.clf()
plt.plot(neon_sp)
plt.plot(peaks_index,neon_sp[peaks_index], 'r+')
plt.plot(peak_select,neon_sp[peak_select], 'ro')
### Gaussian fit
x = np.arange(n_pix)
xx = np.arange(0, n_pix, .1)  # finer grid for drawing the fitted curves
n_peak = 4  # number of peaks to fit; generalising this is the open question
bright_index_fit = np.zeros(n_peak)
# Fit one Gaussian per selected peak, seeded at the detected position/height.
for i in range(n_peak):
    p = peak_select_sort[i]
    guess = [p, neon_sp[p], .5]
    popt, pcov = curve_fit(gauss, x, neon_sp, p0=guess)
    fit = gauss(xx, *popt)
    bright_index_fit[i] = popt[0]  # sub-pixel fitted peak centre
    plt.plot(xx,fit, '--')
# Reference neon wavelengths (nm) for the brightest lines.
bright_wave = [703.2, 724.5, 693.0, 743.9]
I am using Scipy's odrpack to fit a linear function to some data that has uncertainties in both the x and y dimensions. Each data point has its own uncertainty, which is asymmetric.
I can fit a function using symmetric uncertainties, but this is not a true representation of my data.
How can I perform the fit with this in mind?
This is my code so far. It receives input data as a command line argument, and the uncertainties i'm using are just random numbers at the moment. (also, two fits are happening, one for positive data points another for the negative. The reasons are unrelated to this question)
import sys
import numpy as np
import scipy.odr.odrpack as odrpack
def f(B, x):
    """Straight line y = B[0]*x + B[1], in scipy.odr's parameter convention."""
    slope, intercept = B
    return slope * x + intercept
# Parse comma-separated x values from the first command-line argument.
xdata = sys.argv[1].split(',')
xdata = [float(i) for i in xdata]
xdata = np.array(xdata)
#find indices of +/- data
# NOTE(review): assumes xdata is sorted ascending so the first non-negative
# entry splits the positive and negative branches -- TODO confirm.
zero_ind = np.where(xdata >= 0)[0][0]
x_p = xdata[zero_ind:]
x_m = xdata[:zero_ind+1]
# Parse the matching y values from the second argument.
ydata = sys.argv[2].split(',')
ydata = [float(i) for i in ydata]
ydata = np.array(ydata)
y_p = ydata[zero_ind:]
y_m = ydata[:zero_ind+1]
# Placeholder symmetric uncertainties (random for now, as stated above).
sx_m = np.random.random(len(x_m))
sx_p = np.random.random(len(x_p))
sy_m = np.random.random(len(y_m))
sy_p = np.random.random(len(y_p))
# Orthogonal distance regression of the linear model, one fit per branch.
linear = odrpack.Model(f)
data_p = odrpack.RealData(x_p, y_p, sx=sx_p, sy=sy_p)
odr_p = odrpack.ODR(data_p, linear, beta0=[1.,2.])
out_p = odr_p.run()
data_m = odrpack.RealData(x_m, y_m, sx=sx_m, sy=sy_m)
odr_m = odrpack.ODR(data_m, linear, beta0=[1.,2.])
out_m = odr_m.run()
Thanks!
I will just give you a solution with random data, since I did not import your data.
import numpy as np
import scipy.odr.odrpack as odrpack
# Seed so the synthetic data (and hence the printed fit below) is reproducible.
np.random.seed(1)
N = 10
x = np.linspace(0,5,N)*(-1)
# True relation y = 2x - 1, plus uniform noise in [0, 1).
y = 2*x - 1 + np.random.random(N)
# Random per-point standard deviations for x and y.
sx = np.random.random(N)
sy = np.random.random(N)
def f(B, x):
    # Linear model in ODR's convention: B holds (slope, intercept).
    return B[0]*x + B[1]
linear = odrpack.Model(f)
# mydata = odrpack.Data(x, y, wd=1./np.power(sx,2), we=1./np.power(sy,2))
# RealData takes standard deviations directly and builds the weights itself.
mydata = odrpack.RealData(x, y, sx=sx, sy=sy)
myodr = odrpack.ODR(mydata, linear, beta0=[1., 2.])
myoutput = myodr.run()
myoutput.pprint()
Then we get:
Beta: [ 1.92743947 -0.94409236]
Beta Std Error: [ 0.03117086 0.11273067]
Beta Covariance: [[ 0.02047196 0.06690713]
[ 0.06690713 0.26776027]]
Residual Variance: 0.04746112419196648
Inverse Condition #: 0.10277763521624257
Reason(s) for Halting:
Sum of squares convergence
I have been trying to make a function that solves a set of ordinary differential equations and then fit it to experimental data using the the scipy.optimize.curve_fit function, but I get an error message containing:
"ValueError: object too deep for desired array
odepack.error: Result from function call is not a proper array of floats."
Any help would be much appreciated.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from pylab import *
from scipy.optimize import curve_fit
kB = 8.6173324e-5  # Boltzmann constant, eV/K
P_H2O = 0.05       # water partial pressure, bar
H_H2O = -0.8       # adsorption enthalpy, eV
S_H2O = -0.0016    # adsorption entropy, eV/K

def K_H2O(T):
    """Water adsorption equilibrium constant at temperature T (K)."""
    enthalpy_term = np.exp(-H_H2O / (kB * T))
    entropy_term = np.exp(S_H2O / (kB))
    return enthalpy_term * entropy_term

def FracA(T):
    """Langmuir coverage of the water-covered (A) sites."""
    equilibrium = K_H2O(T) * P_H2O
    return equilibrium / (1 + equilibrium)

def FracB(T):
    """Fraction of bare (B) sites: the complement of FracA."""
    return 1 - FracA(T)

EA1_A = 0.725  # eV (from experimental data)

def k1_A(A1_A, T):
    """Arrhenius rate for channel 1 on A sites; prefactor A1_A is the fit parameter."""
    return A1_A * np.exp(-EA1_A / (kB * T))

A1_B = 5.e3   # prefactor
EA1_B = 0.8   # eV

def k1_B(T):
    """Arrhenius rate for channel 1 on B sites."""
    return A1_B * np.exp(-EA1_B / (kB * T))

A2_B = 8.e3   # prefactor
EA2_B = 1.0   # eV

def k2_B(T):
    """Arrhenius rate for channel 2 on B sites."""
    return A2_B * np.exp(-EA2_B / (kB * T))
# initial conditions
P_NO0 = 500.e-6   # initial NO
P_NH30 = 530.e-6  # initial NH3
y0 = [P_NO0, P_NH30]  # initial condition vector
t = np.linspace(0, 30., 1000)  # time grid

def conv(T, A1_A):
    """NO conversion at the end of the time grid for temperature(s) T.

    Accepts a scalar temperature or an array of temperatures (the latter is
    what curve_fit passes -- the original scalar-only version is what caused
    the "object too deep for desired array" failure), returning a matching
    scalar or array.

    Fix: the original guarded the rate law with ``if y[0] and y[1] > 0``,
    which only compares y[1] with 0 (y[0] is merely truthiness-tested);
    both partial pressures must be positive before evaluating
    P_NH3**(-0.25).
    """
    def _single(temp):
        # ODE right-hand side for [P_NO, P_NH3] at one fixed temperature.
        def f(y, t):
            P_NOi, P_NH3i = y
            if P_NOi > 0 and P_NH3i > 0:
                rate_NO = (-k1_A(A1_A, temp) * FracA(temp) * P_NOi
                           - k1_B(temp) * FracB(temp) * P_NOi * P_NH3i ** (-0.25))
                rate_NH3 = rate_NO - k2_B(temp) * P_NH3i
            else:
                rate_NO = 0.
                rate_NH3 = 0.
            return [rate_NO, rate_NH3]
        # solve the DEs
        soln = odeint(f, y0, t)
        P_NO = soln[:, 0]
        return 1 - P_NO[-1] / P_NO0

    T = np.asarray(T, dtype=float)
    if T.ndim == 0:
        return _single(float(T))
    # curve_fit hands in the whole temperature array at once; integrate each
    # temperature separately and return the array of conversions.
    return np.array([_single(Ti) for Ti in T])

x_real = np.array([433.1,443.1,453.2,463.1,473.1,483.7,494.2,503.5,523.9,553.7,573.6,623.4,673.4,723.3,773.4,823.2])
y_real =np.array([0.064305859, 0.098333053, 0.151494329, 0.217225336, 0.296164608, 0.397472394, 0.508515308, 0.612339428, 0.793549257, 0.892454094, 0.895511489, 0.861625527, 0.949118344, 0.940025727, 0.852439418, 0.727332885])
popt, pcov = curve_fit(conv, x_real, y_real)
I am trying to fit some data that are distributed in the time following a rising gaussian curve, and then decaying exponentially.
I have found this example on the web, that is very similar to my case, but I just started to fit with python, and the example seems quite confusing to me.
Nonetheless, I have tryied to adapt the example to my script and data, and in the following is my progress:
#!/usr/bin/env python
import pyfits, os, re, glob, sys
from scipy.optimize import leastsq
from numpy import *
from pylab import *
from scipy import *
from scipy import optimize
import numpy as N
import pylab as P
# Download the Swift light curve FITS file and convert times to MJD using
# the reference keywords in the header.
data=pyfits.open('http://heasarc.gsfc.nasa.gov/docs/swift/results/transients/weak/GX304-1.orbit.lc.fits')
time = data[1].data.field(0)/86400. + data[1].header['MJDREFF'] + data[1].header['MJDREFI']
rate = data[1].data.field(1)
error = data[1].data.field(2)
data.close()
# Restrict everything to the outburst window of interest (MJD 56200-56220).
cond = ((time > 56200) & (time < 56220))
time=time[cond]
rate=rate[cond]
error=error[cond]
def expGauss(x, pos, wid, tConst, expMod = 0.5, amp = 1):
    """Gaussian at (pos, wid, amp), exponentially broadened via expBroaden.

    Returns the broadened profile together with the raw Gaussian.
    """
    expMod = expMod * 1.0  # force float, as the original did with *=
    pure_gaussian = amp * N.exp(-0.5 * ((x - pos) / (wid)) ** 2)
    broadened = expBroaden(pure_gaussian, tConst, expMod)
    return broadened, pure_gaussian
def expBroaden(y, t, expMod):
    """Convolve y with a decaying exponential of time constant t (FFT based).

    Fix: the original called F.fft / F.ifft, but ``F`` is never imported
    anywhere in the script (a NameError at first call); use numpy's FFT
    routines through the existing ``N`` alias instead.

    NOTE(review): the exponential kernel is built from the *global* ``time``
    array, so y must have the same length as time -- TODO confirm intended.
    """
    fy = N.fft.fft(y)
    a = N.exp(-1 * expMod * time / t)
    fa = N.fft.fft(a)
    fy1 = fy * fa
    # Normalise by the kernel sum so the broadening conserves total flux.
    yb = (N.fft.ifft(fy1).real) / N.sum(a)
    return yb
if __name__ == '__main__':
    # Fit the first set
    #p[0] -- amplitude, p[1] -- position, p[2] -- width
    fitfuncG = lambda p, x: p[0]*N.exp(-0.5*(x-p[1])**2/p[2]**2) # Target function
    errfuncG = lambda p, x, y: fitfuncG(p, x) - y # Distance to the target function
    p0 = [0.20, 56210, 2.0] # Initial guess for the parameters
    p1, success = optimize.leastsq(errfuncG, p0[:], args=(time, rate))
    p1G = fitfuncG(p1, time)
    # P.plot(rate, 'ro', alpha = 0.4, label = "Gaussian")
    # P.plot(p1G, label = 'G-Fit')
    # NOTE(review): this def shadows the module-level expGauss and -- given
    # the flat indentation of the paste -- appears to swallow all of the
    # following statements into its body.  Since it is never called, nothing
    # below ever runs, which would explain why no plot is produced.
    def expGauss(x, pos, wid, tConst, expMod = 0.5, amp = 1):
        #p[0] -- amplitude, p[1] -- position, p[2] -- width, p[3]--tConst, p[4] -- expMod
        fitfuncExpG = lambda p, x: expGauss(x, p[1], p[2], p[3], p[4], p[0])[0]
        errfuncExpG = lambda p, x, y: fitfuncExpG(p, x) - y # Distance to the target function
        p0a = [0.20, 56210, 2.0] # Initial guess for the parameters
        p1a, success = optimize.leastsq(errfuncExpG, p0a[:], args=(time, rate))
        p1aG = fitfuncExpG(p1a, time)
        # NOTE(review): Python 2 print statement kept as posted.
        print type(rate), type(time), len(rate), len(time)
        P.plot(rate, 'go', alpha = 0.4, label = "ExpGaussian")
        P.plot(p1aG, label = 'ExpG-Fit')
        P.legend()
        P.show()
I am sure to have confused the whole thing, so sorry in advance for that, but at this point I don't know how to go further...
The code take the data from the web, so it is directly executable.
At the moment the code runs without any error, but it doesn't produce any plot.
Again, my goal is to fit the data with those two functions, how can I improve my code to do that?
Any suggestion is really appreciated.
As with your other question, here too I would use a trigonometric function to fit this peak:
The following code works if pasted after your code:
import numpy as np
from scipy.optimize import curve_fit
x = time
den = x.max() - x.min()  # span of the time window; sets the sine period
x -= x.min()  # NOTE(review): mutates ``time`` in place too (same array object)
y_points = rate

def func(x, a1, a2, a3):
    # First three terms of a half-range Fourier sine series on [0, den].
    # ``sin`` and ``pi`` come from the earlier ``from pylab import *``.
    return a1*sin(1*pi*x/den)+\
        a2*sin(2*pi*x/den)+\
        a3*sin(3*pi*x/den)

popt, pcov = curve_fit(func, x, y_points)
y = func(x, *popt)
# Overlay the fitted series on the observed rate.
plot(time,rate)
plot(x,y, color='r', linewidth=2.)
show()