Solving an ODE Numerically with SciPy - python

I'm trying to find a numerical solution to, and eventually graph, the Gyllenberg-Webb model (a cancer cell growth model). The model is

P'(t) = (β − µp − r0(N))P + ri(N)Q
Q'(t) = r0(N)P − (ri(N) + µq)Q

where β is the reproduction rate of proliferating cells, µp is the death rate of proliferating cells, µq is the death rate of quiescent cells, and r0 and ri are functions (transition rates) of N(t), with N(t) = P(t) + Q(t).
For my purposes here I defined r0(N) = bN and ri(N) = aN to keep things simple.
My problem is that when I try to plot my solution with pyplot I get
ValueError: x and y must have same first dimension
which I guess is self-explanatory, but I'm not sure how to go about fixing it without breaking everything else.
My code, which I've done only for the first equation so far, is:
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate

def fun(P, t, params):
    beta, mp, b, N, Q = params
    return (beta - mp - (b*N))*P + (a*N)*Q

params = (0.5, 0.6, 0.7, 0.8, 0.9)
tvec = np.arange(0, 6, 0.1)
s1 = scipy.integrate.odeint(
    fun,
    y0=1,
    t=tvec,
    args=(params,))
#print(s1)
plt.plot(fun, tvec)

In the end you will want to solve the coupled system. This is not complicated: just make the state a vector and return the derivatives in the same order.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint

def fun(state, t, params):
    P, Q = state
    beta, mp, mq, a, b = params
    N = P + Q
    r0N, riN = b*N, a*N
    return [(beta - mp - r0N)*P + riN*Q, r0N*P - (riN + mq)*Q]

params = (0.5, 0.6, 0.7, 0.8, 0.9)
tsol = np.arange(0, 6, 0.1)
sol = odeint(fun, y0=[1, 0], t=tsol, args=(params,))
Psol, Qsol = sol.T
plt.plot(tsol, Psol, tsol, Qsol)
plt.show()

You are currently plotting fun vs. tvec. What you actually want is to plot tvec vs s1. You will also have to define the parameter a in fun; I set it to 1 in the code below:
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate

def fun(P, t, params):
    beta, mp, b, N, Q = params
    return (beta - mp - (b*N))*P + (1.0*N)*Q

params = (0.5, 0.6, 0.7, 0.8, 0.9)
tvec = np.arange(0, 6, 0.1)
s1 = scipy.integrate.odeint(
    fun,
    y0=1.,
    t=tvec,
    args=(params,))
plt.plot(tvec, s1)
plt.show()
This will plot s1 against tvec. [figure omitted]

Related

python does not solve an ode system properly

I have an ODE which I need to solve and plot. This is my code so far:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

##############################
# Constants
# α = 1, β = 12, μ = 0.338, Vb = 0.01, ω = 0.5
alpha = 1      # p1
beta = 12      # p2
mu = 0.338     # p3
omega = 0.5    # p5
gamma = 0.25   # p6
Acoef = 0.5
tmax = 1000
t = np.arange(0.0, tmax, 0.001)

################################
# Initial conditions
X0 = 0
I0 = 0
Z0 = 0.06
H0 = 0.04
E0 = 0
O0 = 0.02

# The model differential equations.
def deriv(y, t):
    X, I, Z, H, E, O = y
    dXdt = I
    dIdt = -alpha*X - beta*X**3 - mu*I + gamma/(1 - X)**2 - gamma/(1 + X)**2 + Acoef*np.sin(omega*t)/(1 - X)**2
    dZdt = H
    dHdt = -alpha*Z - beta*Z**3 - mu*I + gamma/(1 - Z)**2 - gamma/(1 + Z)**2 + Acoef*np.sin(omega*t)/(1 - Z)**2
    dEdt = O
    dOdt = dIdt - dHdt  # the original expression is literally dIdt minus dHdt
    return dXdt, dIdt, dZdt, dHdt, dEdt, dOdt

# Initial conditions vector
y0 = X0, I0, Z0, H0, E0, O0

# Integrate the equations over the time grid, t.
ret = odeint(deriv, y0, t)
X, I, Z, H, E, O = ret.T

# Plot E(t) against O(t)
plt.xlim([-10, 10])
plt.plot(ret[:, 4], ret[:, 5])
plt.show()
e1 = ret[:, 4]
e2 = ret[:, 5]
Unfortunately, it produces the following warning:
File "C:\Users\user\AppData\Local\Programs\Python\Python37\lib\site-packages\scipy\integrate\odepack.py", line 247
warnings.warn(warning_msg, ODEintWarning)
ODEintWarning: Excess work done on this call (perhaps wrong Dfun type). Run with full_output = 1 to get quantitative information.
And the result is not as expected; I can't post the graphical result here.
Does anyone know how I can fix this?
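As the warning itself suggests, running odeint with full_output=1 returns a diagnostics dictionary alongside the solution. A minimal sketch of how one might inspect it, assuming the deriv, y0, and t defined above (raising mxstep is an assumption worth testing, not a guaranteed fix):

import numpy as np
from scipy.integrate import odeint

ret, info = odeint(deriv, y0, t, full_output=1, mxstep=5000)
print(info['message'])   # solver status message
print(info['nst'][-1])   # cumulative number of internal steps taken
print(info['hu'][-1])    # step size used on the last internal step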

How can I control odeint to stop integration when the result reaches a threshold?

Here is my code.
import numpy as np
from scipy.integrate import odeint

# Constants
R0 = 1.475
gamma = 2.
ScaleMeVfm3toEskm3 = 8.92*np.power(10., -7.)

def EOSe(p):
    return np.power(p/450.785, 1./gamma)

def M(r, p):
    return (4./3.)*np.pi*np.power(r, 3.)*p

# function that returns dz/dr
def model(z, r):
    p, m = z
    dpdr = -((R0*EOSe(p)*m)/np.power(r, 2.))*(1 + p/EOSe(p))*(1 + (4*np.pi*np.power(r, 3.)*p)/m)*(1 - (2*R0*m)/r)**(-1.)
    dmdr = 4.*np.pi*(r**2.)*EOSe(p)
    return [dpdr, dmdr]

# initial conditions
r0 = 10.**-12.
p0 = 10.**-6.
z0 = [p0, M(r0, p0)]

# radius grid
r = np.linspace(r0, 15, 100000)

# solve ODE
z = odeint(model, z0, r)
The result of z[:,0] keeps decreasing, as I expected, but I only want the positive values. If you run the code and print(z[69306]) it shows [2.89636405e-11 5.46983202e-01]; that is the last point at which I want odeint to stop integrating.
Of course, the provided code shows
RuntimeWarning: invalid value encountered in power
  return np.power((p/450.785),(1./gamma))
because the result of p starts being negative. For any further points, odeint yields [nan nan].
I could use np.nanmin() to find the minimum of z[:,0] that is not nan, but I have a set of p0 values for my work, so I will need to call odeint in a loop like
P = np.linspace(10**-8., 10**-2., 10000)
for p0 in P:
    # the code for solving the ODE provided above
which takes more time. I think it would reduce the execution time if I could just stop before z[:,0] becomes negative.
Here is the modified code using solve_ivp:
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pylab as plt

# Constants
R0 = 1.475
gamma = 2.

def EOSe(p):
    return np.power(np.abs(p)/450.785, 1./gamma)

def M(r, p):
    return (4./3.)*np.pi*np.power(r, 3.)*p

# function that returns dz/dr
# note: the argument order is reversed compared to `odeint`
def model(r, z):
    p, m = z
    dpdr = -R0*EOSe(p)*m/r**2*(1 + p/EOSe(p))*(1 + 4*np.pi*r**3*p/m)*(1 - 2*R0*m/r)**(-1)
    dmdr = 4*np.pi*r**2*EOSe(p)
    return [dpdr, dmdr]

# initial conditions
r0 = 1e-3
r_max = 50
p0 = 1e-6
z0 = [p0, M(r0, p0)]

# Define the event function
# from the doc: "The solver will find an accurate value
# of t at which event(t, y(t)) = 0 using a root-finding algorithm."
def stop_condition(r, z):
    return z[0]
stop_condition.terminal = True

# solve ODE
r_span = (r0, r_max)
sol = solve_ivp(model, r_span, z0, events=stop_condition)
print(sol.message)
print('last p, m =', sol.y[:, -1], 'for r_event =', sol.t_events[0][0])

r_sol = sol.t
p_sol = sol.y[0, :]
m_sol = sol.y[1, :]

# Graph
plt.subplot(2, 1, 1)
plt.plot(r_sol, p_sol, '.-b')
plt.xlabel('r'); plt.ylabel('p')
plt.subplot(2, 1, 2)
plt.plot(r_sol, m_sol, '.-r')
plt.xlabel('r'); plt.ylabel('m')
plt.show()
Actually, using events in this case does not prevent the warning caused by negative p, because the solver will evaluate the model for p < 0 anyway. A solution is to take the absolute value of p inside the root (as in the code above). Using np.sign(p)*np.power(np.abs(p)/450.785, 1./gamma) gives interesting results too.
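To cover the set of p0 values mentioned in the question, a minimal sketch of the loop, assuming the model, M, and stop_condition defined above (and a coarser grid than the question's 10000 points, purely for illustration):

import numpy as np
from scipy.integrate import solve_ivp

P_grid = np.linspace(1e-8, 1e-2, 100)
r_events = []
for p0 in P_grid:
    z0 = [p0, M(1e-3, p0)]
    sol = solve_ivp(model, (1e-3, 50), z0, events=stop_condition)
    # record where p first crosses zero, if the terminal event fired
    r_events.append(sol.t_events[0][0] if sol.t_events[0].size else np.nan)

Because each integration terminates at the zero crossing instead of running to r_max with nan values, the loop should also run faster.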

Solving a system of ODEs using python and graphing (Chua's circuit)

I'm trying to model Chua's circuit in Python using matplotlib and scipy, which involves solving a system of ordinary differential equations.
This has been done in MATLAB, and I simply wanted to attempt the problem in Python. The MATLAB code linked is a little confusing; the code on the left doesn't appear to have much relevance to solving the system of ODEs that describe Chua's circuit (page 3, equations (2), (3) and (4)), whilst the code on the right goes beyond that to modelling the circuit component by component.
I'm not familiar with scipy's odeint function, so I used some of the examples from the scipy cookbook for guidance.
Can anyone help me troubleshoot my system? My graph does not look like the expected attractor. [figures omitted]
My code is attached below:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint

def fV_1(V_1, G_a, G_b, V_b):
    if V_1 < -V_b:
        fV_1 = G_b*V_1 + (G_b - G_a)*V_b
    elif -V_b <= V_1 <= V_b:
        fV_1 = G_a*V_1
    elif V_1 > V_b:
        fV_1 = G_b*V_1 + (G_a - G_b)*V_b
    else:
        print("Error!")
    return fV_1

def ChuaDerivatives(state, t):
    # unpack the state vector
    V_1, V_2, I_3 = state
    # definition of constant parameters
    L = 0.018            # H, or 18 mH
    C_1 = 0.00000001     # F, or 10 nF
    C_2 = 0.0000001      # F, or 100 nF
    G_a = -0.000757576   # S, or -757.576 uS
    G_b = -0.000409091   # S, or -409.091 uS
    V_b = 1              # V (E)
    G = 0.000550         # S, or 550 uS (variable)
    # compute state derivatives
    dV_1dt = (G/C_1)*(V_2 - V_1) - (1/C_1)*fV_1(V_1, G_a, G_b, V_b)
    dV_2dt = -(G/C_2)*(V_2 - V_1) + (1/C_2)*I_3
    dI_3dt = -(1/L)*V_2
    # return state derivatives
    return dV_1dt, dV_2dt, dI_3dt

# set up time series
state0 = [0.1, 0.1, 0.0001]
t = np.arange(0.0, 53.0, 0.1)

# populate state information
state = odeint(ChuaDerivatives, state0, t)

# do some fancy 3D plotting
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(state[:, 0], state[:, 1], state[:, 2])
ax.set_xlabel('V_1')
ax.set_ylabel('V_2')
ax.set_zlabel('I_3')
plt.show()
So I managed to work it out for myself after some fiddling; I was misinterpreting the odeint function. Reading the docstring more carefully and starting from scratch, rather than following a difficult method, solved it. Code below:
import numpy as np
import scipy.integrate as integrate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# define universal variables
c0 = 15.6
c1 = 1.0
c2 = 28.0
m0 = -1.143
m1 = -0.714

# Chua's piecewise-linear function in its compact absolute-value form
def f(x):
    return m1*x + (m0 - m1)/2.0*(abs(x + 1.0) - abs(x - 1.0))

# the function calculating the derivatives
def dH_dt(H, t=0):
    return np.array([c0*(H[1] - H[0] - f(H[0])),
                     c1*(H[0] - H[1] + H[2]),
                     -c2*H[1]])

# computational time steps
t = np.linspace(0, 30, 1000)

# x, y, and z initial conditions
H0 = [0.7, 0.0, 0.0]

H, infodict = integrate.odeint(dH_dt, H0, t, full_output=True)
print(infodict['message'])

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(H[:, 0], H[:, 1], H[:, 2])
plt.show()
Which gives me the plot I was after. [figure omitted]

Python: fit data with gaussian rising and exponential decay

I am trying to fit some data that are distributed in time following a rising Gaussian curve and then an exponential decay.
I have found this example on the web that is very similar to my case, but I have only just started fitting with Python, and the example seems quite confusing to me.
Nonetheless, I have tried to adapt the example to my script and data, and below is my progress:
#!/usr/bin/env python
import pyfits
from scipy import optimize
import numpy as N
import numpy.fft as F   # needed by expBroaden below
import pylab as P

data = pyfits.open('http://heasarc.gsfc.nasa.gov/docs/swift/results/transients/weak/GX304-1.orbit.lc.fits')
time = data[1].data.field(0)/86400. + data[1].header['MJDREFF'] + data[1].header['MJDREFI']
rate = data[1].data.field(1)
error = data[1].data.field(2)
data.close()

cond = ((time > 56200) & (time < 56220))
time = time[cond]
rate = rate[cond]
error = error[cond]

def expGauss(x, pos, wid, tConst, expMod=0.5, amp=1):
    expMod *= 1.0
    gNorm = amp * N.exp(-0.5*((x - pos)/wid)**2)
    g = expBroaden(gNorm, tConst, expMod)
    return g, gNorm

def expBroaden(y, t, expMod):
    fy = F.fft(y)
    a = N.exp(-1*expMod*time/t)
    fa = F.fft(a)
    fy1 = fy*fa
    yb = (F.ifft(fy1).real)/N.sum(a)
    return yb

if __name__ == '__main__':
    # Fit the first set
    # p[0] -- amplitude, p[1] -- position, p[2] -- width
    fitfuncG = lambda p, x: p[0]*N.exp(-0.5*(x - p[1])**2/p[2]**2)  # Target function
    errfuncG = lambda p, x, y: fitfuncG(p, x) - y  # Distance to the target function
    p0 = [0.20, 56210, 2.0]  # Initial guess for the parameters
    p1, success = optimize.leastsq(errfuncG, p0[:], args=(time, rate))
    p1G = fitfuncG(p1, time)
    # P.plot(rate, 'ro', alpha=0.4, label="Gaussian")
    # P.plot(p1G, label='G-Fit')

    # p[0] -- amplitude, p[1] -- position, p[2] -- width, p[3] -- tConst, p[4] -- expMod
    fitfuncExpG = lambda p, x: expGauss(x, p[1], p[2], p[3], p[4], p[0])[0]
    errfuncExpG = lambda p, x, y: fitfuncExpG(p, x) - y  # Distance to the target function
    p0a = [0.20, 56210, 2.0]  # Initial guess for the parameters
    p1a, success = optimize.leastsq(errfuncExpG, p0a[:], args=(time, rate))
    p1aG = fitfuncExpG(p1a, time)
    print(type(rate), type(time), len(rate), len(time))
    P.plot(rate, 'go', alpha=0.4, label="ExpGaussian")
    P.plot(p1aG, label='ExpG-Fit')
    P.legend()
    P.show()
I am sure I have confused the whole thing, so sorry in advance for that, but at this point I don't know how to go further...
The code takes its data from the web, so it is directly executable.
At the moment the code runs without any error, but it doesn't produce any plot.
Again, my goal is to fit the data with those two functions; how can I improve my code to do that?
Any suggestion is really appreciated.
Similarly to your other question, here also I would use a trigonometric function to fit this peak:
The following code works if pasted after your code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

x = time
den = x.max() - x.min()
x -= x.min()
y_points = rate

def func(x, a1, a2, a3):
    return (a1*np.sin(1*np.pi*x/den) +
            a2*np.sin(2*np.pi*x/den) +
            a3*np.sin(3*np.pi*x/den))

popt, pcov = curve_fit(func, x, y_points)
y = func(x, *popt)

plt.plot(time, rate)
plt.plot(x, y, color='r', linewidth=2.)
plt.show()
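Alternatively, to fit the shape the question literally describes, here is a minimal sketch using a hypothetical piecewise model (a Gaussian rise up to a peak time t0, then an exponential decay, with both branches matched at the peak); synthetic data stand in for the light curve so the sketch is self-contained:

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

def rise_decay(t, amp, t0, wid, tau):
    # Gaussian rise for t < t0, exponential decay for t >= t0;
    # both branches equal amp at t = t0, so the curve is continuous
    return np.where(t < t0,
                    amp*np.exp(-0.5*((t - t0)/wid)**2),
                    amp*np.exp(-(t - t0)/tau))

# synthetic data standing in for the measured rate
t = np.linspace(0, 20, 200)
y = rise_decay(t, 1.0, 8.0, 2.0, 4.0) + np.random.normal(0, 0.05, t.size)

popt, pcov = curve_fit(rise_decay, t, y, p0=[1., 8., 2., 4.])
plt.plot(t, y, '.', t, rise_decay(t, *popt), '-')
plt.show()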

Fitting data to system of ODEs using Python via Scipy & Numpy

I am having some trouble translating my MATLAB code into Python via SciPy and NumPy. I am stuck on how to find optimal parameter values (k0 and k1) for my system of ODEs to fit my ten observed data points; I currently have an initial guess for both. In MATLAB I can use fminsearch, a function that takes the system of ODEs, the observed data points, and the initial values of the system, and calculates a new pair of parameters k0 and k1 that fit the observed data. Can you help me implement some kind of fminsearch to find the optimal parameter values? I want to add whatever code is needed to my lsqtest.py file.
I have three .py files - ode.py, lsq.py, and lsqtest.py
ode.py:
def f(y, t, k):
    return (-k[0]*y[0],
            k[0]*y[0] - k[1]*y[1],
            k[1]*y[1])
lsq.py:
import numpy as np
from scipy import integrate
import ode

def lsq(teta, y0, data):
    # INPUT:  teta, the unknowns k0, k1
    #         data, the observed values
    #         y0, initial values needed by the ODE
    # OUTPUT: the least-squares value
    t = np.linspace(0, 9, 10)
    y_obs = data  # data points
    k = [teta[0], teta[1]]
    # call the ODE solver to get the states
    # (the ODE system is in ode.py); at each row (time point),
    # r has the values of the components [A, B, C]
    r = integrate.odeint(ode.f, y0, t, args=(k,))
    y_cal = r[:, 1]  # separate the measured B
    # compute the expression to be minimized
    return sum((y_obs - y_cal)**2)
lsqtest.py:
import numpy as np
import lsq

if __name__ == '__main__':
    teta = [0.2, 0.3]  # guess for parameter values k0 and k1
    y0 = [1, 0, 0]     # initial conditions for the system
    # observed data points
    data = [0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309]
    resid = lsq.lsq(teta, y0, data)
    print(resid)
For this kind of fitting task you could use the package lmfit. The outcome of the fit looks as follows; as you can see, the data are reproduced very well. [figure omitted]
For now, I fixed the initial concentrations, you could also set them as variables if you like (just remove the vary=False in the code below). The parameters you obtain are:
[[Variables]]
x10: 5 (fixed)
x20: 0 (fixed)
x30: 0 (fixed)
k0: 0.12183301 +/- 0.005909 (4.85%) (init= 0.2)
k1: 0.77583946 +/- 0.026639 (3.43%) (init= 0.3)
[[Correlations]] (unreported correlations are < 0.100)
C(k0, k1) = 0.809
The code that reproduces the plot looks like this (some explanation can be found in the inline comments):
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from lmfit import minimize, Parameters, report_fit

def f(y, t, paras):
    """Your system of differential equations."""
    x1, x2, x3 = y
    try:
        k0 = paras['k0'].value
        k1 = paras['k1'].value
    except KeyError:
        k0, k1 = paras
    # the model equations
    f0 = -k0 * x1
    f1 = k0 * x1 - k1 * x2
    f2 = k1 * x2
    return [f0, f1, f2]

def g(t, x0, paras):
    """Solution of the ODE x'(t) = f(t, x, k) with initial condition x(0) = x0."""
    x = odeint(f, x0, t, args=(paras,))
    return x

def residual(paras, t, data):
    """Compute the residual between actual data and fitted data."""
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)
    # you only have data for one of your variables
    x2_model = model[:, 1]
    return (x2_model - data).ravel()

# initial conditions
x10 = 5.
x20 = 0
x30 = 0
y0 = [x10, x20, x30]

# measured data
t_measured = np.linspace(0, 9, 10)
x2_measured = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])

plt.figure()
plt.scatter(t_measured, x2_measured, marker='o', color='b', label='measured data', s=75)

# set parameters including bounds; you can also fix parameters (use vary=False)
params = Parameters()
params.add('x10', value=x10, vary=False)
params.add('x20', value=x20, vary=False)
params.add('x30', value=x30, vary=False)
params.add('k0', value=0.2, min=0.0001, max=2.)
params.add('k1', value=0.3, min=0.0001, max=2.)

# fit model
result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq')  # leastsq or nelder

# check results of the fit
data_fitted = g(np.linspace(0., 9., 100), y0, result.params)

# plot fitted data
plt.plot(np.linspace(0., 9., 100), data_fitted[:, 1], '-', linewidth=2, color='red', label='fitted data')
plt.legend()
plt.xlim([0, max(t_measured)])
plt.ylim([0, 1.1 * max(data_fitted[:, 1])])

# display fit statistics
report_fit(result)
plt.show()
If you have data for additional variables, you can simply update the function residual, as sketched below.
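For instance, a hypothetical sketch of residual for the case where x1 is also measured (data_x1 and data_x2 are assumed names; the residuals of both variables are simply stacked into one flat array):

import numpy as np

def residual(paras, t, data_x1, data_x2):
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)  # g as defined above
    # stack the residuals of both measured variables
    return np.concatenate([model[:, 0] - data_x1,
                           model[:, 1] - data_x2]).ravel()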
The following worked for me:
import pylab as pp
import numpy as np
from scipy import integrate, interpolate
from scipy import optimize

## initialize the data
x_data = np.linspace(0, 9, 10)
y_data = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])

def f(y, t, k):
    """define the ODE system in terms of
    dependent variable y,
    independent variable t, and
    optional parameters, in this case a single variable k"""
    return (-k[0]*y[0],
            k[0]*y[0] - k[1]*y[1],
            k[1]*y[1])

def my_ls_func(x, teta):
    """definition of function for LS fit
    x gives evaluation points,
    teta is an array of parameters to be varied for fit"""
    # create an alias to f which passes the optional params
    f2 = lambda y, t: f(y, t, teta)
    # calculate ode solution, return values for each entry of "x"
    r = integrate.odeint(f2, y0, x)
    # in this case, we only need one of the dependent variable values
    return r[:, 1]

def f_resid(p):
    """function to pass to optimize.leastsq
    The routine will square and sum the values returned by this function"""
    return y_data - my_ls_func(x_data, p)

# solve the system - the solution is in variable c
guess = [0.2, 0.3]  # initial guess for params
y0 = [1, 0, 0]      # initial conditions for ODEs
(c, kvg) = optimize.leastsq(f_resid, guess)  # get params
print("parameter values are ", c)

# fit ODE results to an interpolating spline just for fun
xeval = np.linspace(min(x_data), max(x_data), 30)
gls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval, c), k=3, s=0)

# pick a few more points for a very smooth curve, then plot
# data and curve fit
xeval = np.linspace(min(x_data), max(x_data), 200)
# Plot of the data as red dots and fit as blue line
pp.plot(x_data, y_data, '.r', xeval, gls(xeval), '-b')
pp.xlabel('xlabel', {"fontsize": 16})
pp.ylabel("ylabel", {"fontsize": 16})
pp.legend(('data', 'fit'), loc=0)
pp.show()
Look at the scipy.optimize module. The minimize function with method='Nelder-Mead' is the closest analogue of fminsearch; both use the Nelder-Mead simplex algorithm.
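A minimal sketch of that route, reusing lsq.lsq and the data from the question above (lsq.lsq already returns the scalar sum of squares, which is exactly what minimize expects):

from scipy import optimize
import lsq

teta0 = [0.2, 0.3]  # initial guess for k0 and k1
y0 = [1, 0, 0]      # initial conditions for the system
data = [0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309]
res = optimize.minimize(lsq.lsq, teta0, args=(y0, data), method='Nelder-Mead')
print(res.x)        # fitted k0, k1, analogous to fminsearch's output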
# cleaned up a bit to get my head around it - thanks for sharing
import pylab as pp
import numpy as np
from scipy import integrate, optimize

class Parameterize_ODE():
    def __init__(self):
        self.X = np.linspace(0, 9, 10)
        self.y = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
        self.y0 = [1, 0, 0]  # initial conditions for the ODEs

    def ode(self, y, X, p):
        return (-p[0]*y[0],
                p[0]*y[0] - p[1]*y[1],
                p[1]*y[1])

    def model(self, X, p):
        return integrate.odeint(self.ode, self.y0, X, args=(p,))

    def f_resid(self, p):
        return self.y - self.model(self.X, p)[:, 1]

    def optim(self, p_guess):
        return optimize.leastsq(self.f_resid, p_guess)  # fit params

po = Parameterize_ODE()
p_guess = [0.2, 0.3]
c, kvg = po.optim(p_guess)

# --- show ---
print("parameter values are ", c, kvg)
x = np.linspace(min(po.X), max(po.X), 2000)
pp.plot(po.X, po.y, '.r', x, po.model(x, c)[:, 1], '-b')
pp.xlabel('X', {"fontsize": 16})
pp.ylabel("y", {"fontsize": 16})
pp.legend(('data', 'fit'), loc=0)
pp.show()
