How to plot the solution of a differential equation? - Python

I need to plot the solution of a differential equation, but I have no idea how to do that. First, I solve the equation:
from sympy import *
import numpy as np
import matplotlib.pyplot as plt

# Solve the DE: 3y'' + 2y' + y = 0
x = symbols('x')
y = symbols('y', cls=Function)
diffeq1 = Eq(3*y(x).diff(x, x) + 2*y(x).diff(x) + y(x), 0)
ans1 = dsolve(diffeq1, y(x))
print(ans1)
Then the plotting part.
# plotting
# Let x : [0, 2*pi], c1 = 1 and c2 = 0
b = []
a = np.linspace(0, 2*np.pi)
for k in a:
    b.append(ans1.subs(x, k))
plt.plot(a, b, label="curve")
I get the correct ans1 in my code, but I can't plot it successfully.

When you plot, you say in the comments that c1 = 1 and c2 = 0, but you never substitute those values into your solution ans1. I added the substitutions to your code, and also selected the right-hand side of the solution as the value to evaluate for plotting.
from sympy import *
import numpy as np
import matplotlib.pyplot as plt

# Solve the DE: 3y'' + 2y' + y = 0
x = symbols('x')
y = symbols('y', cls=Function)
diffeq1 = Eq(3*y(x).diff(x, x) + 2*y(x).diff(x) + y(x), 0)
ans1 = dsolve(diffeq1, y(x))
print(ans1)
# Eq(y(x), (C1*sin(sqrt(2)*x/3) + C2*cos(sqrt(2)*x/3))/exp(x)**(1/3))

# plotting
# Let x : [0, 2*pi], c1 = 1 and c2 = 0
c1 = symbols('C1')
c2 = symbols('C2')
ans1 = ans1.subs(c1, 1)  # substitute for c1
ans1 = ans1.subs(c2, 0)  # substitute for c2
b = []
a = np.linspace(0, 2*np.pi)
for k in a:
    sol = ans1.subs(x, k)
    b.append(sol.rhs)  # select the right-hand side value for plotting
plt.plot(a, b, label="curve")
plt.show()
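As a side note, substituting point by point in a Python loop can be slow for larger grids. A minimal alternative sketch (assuming the same ans1 as above) lambdifies the right-hand side once and evaluates it on the whole NumPy array:
from sympy import symbols, Function, Eq, dsolve, lambdify
import numpy as np
import matplotlib.pyplot as plt

x = symbols('x')
y = symbols('y', cls=Function)
ans1 = dsolve(Eq(3*y(x).diff(x, x) + 2*y(x).diff(x) + y(x), 0), y(x))

# Fix the integration constants C1 = 1, C2 = 0 as in the comments above
c1, c2 = symbols('C1 C2')
expr = ans1.rhs.subs({c1: 1, c2: 0})

# Turn the symbolic expression into a fast NumPy-aware function
f = lambdify(x, expr, modules='numpy')

a = np.linspace(0, 2*np.pi, 200)
plt.plot(a, f(a), label="curve")
plt.legend()
plt.show()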


Runge Kutta 4th order Python

I am trying to solve this equation using 4th-order Runge-Kutta:
applying d2Q/dt2 = F(y, x, v) and dQ/dt = u, Q = y in my program.
When I try to run the code, I get this error:
Traceback (most recent call last):
File "C:\Users\Egw\Desktop\Analysh\Askhsh1\asdasda.py", line 28, in <module>
k1 = F(y, u, x) #(x, v, t)
File "C:\Users\Egw\Desktop\Analysh\Askhsh1\asdasda.py", line 13, in F
return ((Vo/L -(R0/L)*u -(R1/L)*u**3 - y*(1/L*C)))
OverflowError: (34, 'Result too large')
I tried using the decimal library, but I still couldn't make it work properly. I might not have used it correctly, though.
My code is this one:
import numpy as np
from math import pi
from numpy import arange
from matplotlib.pyplot import plot, show

# parameters
R0 = 200
R1 = 250
L = 15
h = 0.002
Vo = 1000
C = 4.2*10**(-6)
t = 0.93

def F(y, u, x):
    return ((Vo/L - (R0/L)*u - (R1/L)*u**3 - y*(1/L*C)))

xpoints = arange(0, t, h)
ypoints = []
upoints = []
y = 0.0
u = Vo/L
for x in xpoints:
    ypoints.append(y)
    upoints.append(u)
    m1 = u
    k1 = F(y, u, x)  # (x, v, t)
    m2 = h*(u + 0.5*k1)
    k2 = (h*F(y+0.5*m1, u+0.5*k1, x+0.5*h))
    m3 = h*(u + 0.5*k2)
    k3 = h*F(y+0.5*m2, u+0.5*k2, x+0.5*h)
    m4 = h*(u + k3)
    k4 = h*F(y+m3, u+k3, x+h)
    y += (m1 + 2*m2 + 2*m3 + m4)/6
    u += (k1 + 2*k2 + 2*k3 + k4)/6
plot(xpoints, upoints)
show()
plot(xpoints, ypoints)
show()
I expected to get the plots of u and y against t.
It turns out I messed up the equations I was using for Runge-Kutta.
The correct code is the following:
import numpy as np
from math import pi
from numpy import arange
from matplotlib.pyplot import plot, show

# parameters
R0 = 200
R1 = 250
L = 15
h = 0.002
Vo = 1000
C = 4.2*10**(-6)
t0 = 0

# dz/dx
def G(x, y, z):
    return Vo/L - (R0/L)*z - (R1/L)*z**3 - y/(L*C)

# dy/dx
def F(x, y, z):
    return z

t = np.arange(t0, 0.93, h)
x = np.zeros(len(t))
y = np.zeros(len(t))
z = np.zeros(len(t))
y[0] = 0.0
z[0] = 0
for i in range(1, len(t)):
    k0 = h*F(x[i-1], y[i-1], z[i-1])
    l0 = h*G(x[i-1], y[i-1], z[i-1])
    k1 = h*F(x[i-1] + h*0.5, y[i-1] + k0*0.5, z[i-1] + l0*0.5)
    l1 = h*G(x[i-1] + h*0.5, y[i-1] + k0*0.5, z[i-1] + l0*0.5)
    k2 = h*F(x[i-1] + h*0.5, y[i-1] + k1*0.5, z[i-1] + l1*0.5)
    l2 = h*G(x[i-1] + h*0.5, y[i-1] + k1*0.5, z[i-1] + l1*0.5)
    k3 = h*F(x[i-1] + h, y[i-1] + k2, z[i-1] + l2)
    l3 = h*G(x[i-1] + h, y[i-1] + k2, z[i-1] + l2)
    y[i] = y[i-1] + (k0 + 2*k1 + 2*k2 + k3)/6
    z[i] = z[i-1] + (l0 + 2*l1 + 2*l2 + l3)/6

Q = y
I = z
plot(t, Q)
show()
plot(t, I)
show()
If I may draw your attention to these 4 lines
m1 = u
k1 = F(y, u, x) #(x, v, t)
m2 = h*(u + 0.5*k1)
k2 = (h*F(y+0.5*m1, u+0.5*k1, x+0.5*h))
You should note a fundamental structural difference between the first two lines and the second pair of lines.
You need to multiply by the step size h in the first pair as well.
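In the notation of the original loop, a minimal sketch of the corrected stage computations (assuming the remaining stages are changed in the same way) would be:
# both slope estimates must be scaled by the step size h
m1 = h*u
k1 = h*F(y, u, x)
m2 = h*(u + 0.5*k1)
k2 = h*F(y + 0.5*m1, u + 0.5*k1, x + 0.5*h)
# ... and likewise for m3/k3 and m4/k4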
The next problem is the step size and the cubic term. That term contributes about 3*(R1/L)*u^2 ~ 50*u^2 to the Lipschitz constant. In the original IVP from the question, with u = Vo/L ~ 70, this contribution is of size 2.5e+5. To keep even this term alone inside the stability region of the method, the step size would have to be smaller than about 1e-5.
With the corrected initial condition u = 0 at the start, the velocity u stays below 0.001, so the cubic term no longer determines stability; it is now governed by the last term, which contributes a Lipschitz term of 1/sqrt(L*C) ~ 125. The step size required for stability is then about 0.02, and with 0.002 one can expect quantitatively useful results.
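The numbers above can be checked with a quick back-of-the-envelope sketch (using the parameters from the question and the rough RK4 stability bound h*Lipschitz < 2.8):
import numpy as np

R1, L, C, Vo = 250, 15, 4.2e-6, 1000

u0 = Vo / L                         # ~ 66.7, initial velocity in the original code
cubic_term = 3 * (R1 / L) * u0**2   # ~ 2.2e5, Lipschitz contribution of the cubic term
linear_term = 1 / np.sqrt(L * C)    # ~ 126, Lipschitz contribution of the 1/(L*C) term

print(2.8 / cubic_term)   # ~ 1.3e-5 -> step size needed when starting at u = Vo/L
print(2.8 / linear_term)  # ~ 0.022  -> step size needed when starting at u = 0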
You can use the decimal library for more precision (it handles more digits), but it is a bit annoying because every value has to be of the same class (decimal.Decimal).
For example:
import numpy as np
from math import pi
from numpy import arange
from matplotlib.pyplot import plot, show
# Import decimal.Decimal as D
import decimal
from decimal import Decimal as D

# Precision
decimal.getcontext().prec = 10_000_000

# parameters
# Every value should be of class D (decimal.Decimal)
R0 = D(200)
R1 = D(250)
L = D(15)
h = D(0.002)
Vo = D(1000)
C = D(4.2*10**(-6))
t = D(0.93)

def F(y, u, x):
    # Decomposed to use D
    a = D(Vo/L)
    b = D(-(R0/L)*u)
    c = D(-(R1/L)*u**D(3))
    d = D(-y*(D(1)/L*C))
    return ((a + b + c + d))

xpoints = arange(0, t, h)
ypoints = []
upoints = []
y = D(0.0)
u = D(Vo/L)
for x in xpoints:
    ypoints.append(y)
    upoints.append(u)
    m1 = u
    k1 = F(y, u, x)  # (x, v, t)
    m2 = (h*(u + D(0.5)*k1))
    k2 = (h*F(y+D(0.5)*m1, u+D(0.5)*k1, x+D(0.5)*h))
    m3 = h*(u + D(0.5)*k2)
    k3 = h*F(y+D(0.5)*m2, u+D(0.5)*k2, x+D(0.5)*h)
    m4 = h*(u + k3)
    k4 = h*F(y+m3, u+k3, x+h)
    y += (m1 + D(2)*m2 + D(2)*m3 + m4)/D(6)
    u += (k1 + D(2)*k2 + D(2)*k3 + k4)/D(6)
plot(xpoints, upoints)
show()
plot(xpoints, ypoints)
show()
But even with a precision of ten million digits I still get an overflow error. Check the components of the formula: their values are far too large. You can increase the precision to handle them, but you will notice that the calculation then takes a very long time.
Problem implementation using scipy.integrate.odeint and scipy.integrate.solve_ivp.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint, solve_ivp
# Input data initial conditions
ti = 0.0
tf = 0.5
N = 100000
h = (tf-ti)/N
# Initial conditions
u0 = 0.0
Q0 = 0.0
t_span = np.linspace(ti,tf,N)
r0 = np.array([Q0,u0])
# Parameters
R0 = 200
R1 = 250
L = 15
C = 4.2*10**(-6)
V0 = 1000
# Systems of First Order Equations
# This function is used with odeint, as specified in the documentation for scipy.integrate.odeint
def f(r, t, R0, R1, L, C, V0):
    Q, u = r
    ode1 = u
    ode2 = -((R0/L)*u) - ((R1/L)*u**3) - ((1/(L*C))*Q) + (V0/L)
    return np.array([ode1, ode2])
# This function is used in our 4Order Runge-Kutta implementation and in scipy.integrate.solve_ivp
def F(t, r, R0, R1, L, C, V0):
    Q, u = r
    ode1 = u
    ode2 = -((R0/L)*u) - ((R1/L)*u**3) - ((1/(L*C))*Q) + (V0/L)
    return np.array([ode1, ode2])
# Solve with odeint and solve_ivp
sol_1 = odeint(f,r0,t_span,args=(R0,R1,L,C,V0))
sol_2 = solve_ivp(fun=F,t_span=(ti,tf), y0=r0, method='LSODA',args=(R0,R1,L,C,V0))
Q_odeint, u_odeint = sol_1[:,0], sol_1[:,1]
Q_solve_ivp, u_solve_ivp = sol_2.y[0,:], sol_2.y[1,:]
# Figures
plt.figure(figsize=[30.0,10.0])
plt.subplot(3,1,1)
plt.grid(color = 'red',linestyle='--',linewidth=0.4)
plt.plot(t_span,Q_odeint,'r',t_span,u_odeint,'b')
plt.xlabel('t(s)')
plt.ylabel('Q(t), u(t)')
plt.subplot(3,1,2)
plt.plot(sol_2.t,Q_solve_ivp,'g',sol_2.t,u_solve_ivp,'y')
plt.grid(color = 'yellow',linestyle='--',linewidth=0.4)
plt.xlabel('t(s)')
plt.ylabel('Q(t), u(t)')
plt.subplot(3,1,3)
plt.plot(Q_solve_ivp,u_solve_ivp,'green')
plt.grid(color = 'yellow',linestyle='--',linewidth=0.4)
plt.xlabel('Q(t)')
plt.ylabel('u(t)')
plt.show()
Runge-Kutta 4th
# Code development of 4th-order Runge-Kutta
# Parameters
R0 = 200
R1 = 250
L = 15
C = 4.2*10**(-6)
V0 = 1000
# Input data initial conditions #
ti = 0.0
tf = 0.5
N = 100000
h = (tf - ti)/N
# Initial conditions
u0 = 0.0
Q0 = 0.0

# First order ordinary differential equations
def f1(t, Q, u):
    return u

def f2(t, Q, u):
    return -((R0/L)*u) - ((R1/L)*u**3) - ((1/(L*C))*Q) + (V0/L)

t = np.zeros(N); Q = np.zeros(N); u = np.zeros(N)
t[0] = ti
Q[0] = Q0
u[0] = u0
for i in range(0, N-1, 1):
    k1 = h*f1(t[i], Q[i], u[i])
    l1 = h*f2(t[i], Q[i], u[i])
    k2 = h*f1(t[i] + (h/2), Q[i] + (k1/2), u[i] + (l1/2))
    l2 = h*f2(t[i] + (h/2), Q[i] + (k1/2), u[i] + (l1/2))
    k3 = h*f1(t[i] + (h/2), Q[i] + (k2/2), u[i] + (l2/2))
    l3 = h*f2(t[i] + (h/2), Q[i] + (k2/2), u[i] + (l2/2))
    k4 = h*f1(t[i] + h, Q[i] + k3, u[i] + l3)
    l4 = h*f2(t[i] + h, Q[i] + k3, u[i] + l3)
    Q[i+1] = Q[i] + ((k1 + 2*k2 + 2*k3 + k4)/6)
    u[i+1] = u[i] + ((l1 + 2*l2 + 2*l3 + l4)/6)
    t[i+1] = t[i] + h
plt.figure(figsize=[20.0, 10.0])
plt.subplot(1, 2, 1)
# solve_ivp returns its own adaptive time grid, so plot Q_solve_ivp against sol_2.t
plt.plot(sol_2.t, Q_solve_ivp, 'r', t, Q_odeint, 'y', t, Q, 'b')
plt.grid(color='yellow', linestyle='--', linewidth=0.4)
plt.xlabel('t(s)')
plt.ylabel(r'$Q(t)_{Odeint}$, $Q(t)_{RK4}$')
plt.subplot(1, 2, 2)
plt.plot(sol_2.t, Q_solve_ivp, 'g', t, Q_odeint, 'y', t, Q, 'b')
plt.grid(color='yellow', linestyle='--', linewidth=0.4)
plt.xlabel('t(s)')
plt.ylabel(r'$Q(t)_{solve\_ivp}$, $Q(t)_{RK4}$')
plt.show()

How to animate this optimization model correctly

I have implemented a simple randomized, population-based optimization method - Grey Wolf optimizer. I am having some trouble with properly capturing the Matplotlib plots at each iteration using the camera package.
I am running GWO for the objective function f(x,y) = x^2 + y^2. I can see the candidate solutions converging to the minimum, but the contour plot doesn't show up.
Do you have any suggestions, how can I display the contour plot in the background?
GWO Algorithm implementation
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import PIL  # pillow is imported as PIL

# X : Position vector of the initial population
# n : Initial population size
def gwo(f, max_iterations, LB, UB):
    fig = plt.figure()
    camera = Camera(fig)

    def random_population_uniform(m, a, b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims), b - a)) for i in range(m)]
        return np.array(x)

    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1
        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha, beta, delta = 2, alpha, beta
        else:
            delta = 2
        for i in range(3, len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta, delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i] <= fitness[beta]:
                beta, delta = i, beta
            elif fitness[i] > fitness[beta] and fitness[i] <= fitness[delta]:
                delta = i
        return alpha, beta, delta

    def plot_search_agent_positions(f, X, alpha, beta, delta, a, b):
        # Plot the positions of search agents
        x = X[:, 0]
        y = X[:, 1]
        s = plt.scatter(x, y, c='gray', zorder=1)
        s = plt.scatter(x[alpha], y[alpha], c='red', zorder=1)
        s = plt.scatter(x[beta], y[beta], c='blue', zorder=1)
        s = plt.scatter(x[delta], y[delta], c='green', zorder=1)
        camera.snap()

    # Initialize the position of the search agents
    X = random_population_uniform(50, np.array(LB), np.array(UB))
    n = len(X)
    l = 1

    # Plot the first image on screen
    x = np.linspace(LB[0], LB[1], 1000)
    y = np.linspace(LB[0], UB[1], 1000)
    X1, X2 = np.meshgrid(x, y)
    Z = f(X1, X2)
    cont = plt.contour(X1, X2, Z, 20, linewidths=0.75)

    while (l < max_iterations):
        # Take the x,y coordinates of the current population
        x = X[:, 0]
        y = X[:, 1]
        # Calculate the objective function for each search agent
        fitness = list(map(f, x, y))
        # Update alpha, beta and delta
        alpha, beta, delta = search_agent_fitness(fitness)
        # Plot search agent positions
        plot_search_agent_positions(f, X, alpha, beta, delta, LB, UB)
        # a decreases linearly from 2 to 0
        a = 2 - l*(2 / max_iterations)
        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2)  # r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2)  # r2 is a random vector in [0,1] x [0,1]
            A1 = 2*a*r1 - a
            C1 = 2*r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1*D_alpha

            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2*a*r1 - a
            C2 = 2*r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2*D_beta

            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2*a*r1 - a
            C3 = 2*r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3*D_delta

            X[i] = (X_1 + X_2 + X_3)/3
        l = l + 1
    return X[alpha], camera
Function call
# define the objective function
def f(x, y):
    return x**2 + y**2

minimizer, camera = gwo(f, 7, [-10, -10], [10, 10])
animation = camera.animate(interval=1000, repeat=True, repeat_delay=500)
Is it possible that the line x = np.linspace(LB[0],LB[1],1000) should be x = np.linspace(LB[0],UB[1],1000) instead? With your current definition, x is an array filled only with the value -10, which means you are unlikely to get a contour.
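As a quick illustration of that point (a hypothetical snippet, not from the original post):
import numpy as np

np.linspace(-10, -10, 5)  # array([-10., -10., -10., -10., -10.]) -- a constant array
np.linspace(-10, 10, 5)   # array([-10., -5., 0., 5., 10.])       -- an actual range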
Another thing that you might want to do is to move the cont = plt.contour(X1,X2,Z,20,linewidths=0.75) line inside of your plot_search_agent_positions function to ensure that the contour is plotted at each iteration of the animation.
Once you make those changes, the code looks like this:
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import PIL
from matplotlib import animation, rc
from IPython.display import HTML, Image  # For GIF
from scipy.interpolate import griddata
rc('animation', html='html5')

# X : Position vector of the initial population
# n : Initial population size
def gwo(f, max_iterations, LB, UB):
    fig = plt.figure()
    fig.gca().set_aspect('equal')
    camera = Camera(fig)

    def random_population_uniform(m, a, b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims), b - a)) for i in range(m)]
        return np.array(x)

    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1
        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha, beta, delta = 2, alpha, beta
        else:
            delta = 2
        for i in range(3, len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta, delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i] <= fitness[beta]:
                beta, delta = i, beta
            elif fitness[i] > fitness[beta] and fitness[i] <= fitness[delta]:
                delta = i
        return alpha, beta, delta

    def plot_search_agent_positions(f, X, alpha, beta, delta, a, b, X1, X2, Z):
        # Plot the positions of search agents
        x = X[:, 0]
        y = X[:, 1]
        s = plt.scatter(x, y, c='gray', zorder=1)
        s = plt.scatter(x[alpha], y[alpha], c='red', zorder=1)
        s = plt.scatter(x[beta], y[beta], c='blue', zorder=1)
        s = plt.scatter(x[delta], y[delta], c='green', zorder=1)
        # Redraw the contour on every frame so it shows up in the animation
        Z = f(X1, X2)
        cont = plt.contour(X1, X2, Z, levels=20, colors='k')
        plt.clabel(cont, cont.levels, inline=True, fontsize=10)
        camera.snap()

    # Initialize the position of the search agents
    X = random_population_uniform(50, np.array(LB), np.array(UB))
    n = len(X)
    l = 1

    # Build the grid for the contour plot
    x = np.linspace(LB[0], UB[1], 1000)
    y = np.linspace(LB[0], UB[1], 1000)
    X1, X2 = np.meshgrid(x, y)
    Z = f(X1, X2)

    while (l < max_iterations):
        # Take the x,y coordinates of the current population
        x = X[:, 0]
        y = X[:, 1]
        # Calculate the objective function for each search agent
        fitness = list(map(f, x, y))
        # Update alpha, beta and delta
        alpha, beta, delta = search_agent_fitness(fitness)
        # Plot search agent positions
        plot_search_agent_positions(f, X, alpha, beta, delta, LB, UB, X1, X2, Z)
        # a decreases linearly from 2 to 0
        a = 2 - l*(2 / max_iterations)
        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2)  # r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2)  # r2 is a random vector in [0,1] x [0,1]
            A1 = 2*a*r1 - a
            C1 = 2*r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1*D_alpha

            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2*a*r1 - a
            C2 = 2*r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2*D_beta

            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2*a*r1 - a
            C3 = 2*r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3*D_delta

            X[i] = (X_1 + X_2 + X_3)/3
        l = l + 1
    return X[alpha], camera

# define the objective function
def f(x, y):
    return x**2 + y**2

minimizer, camera = gwo(f, 7, [-10, -10], [10, 10])
animation = camera.animate(interval=1000, repeat=True, repeat_delay=500)
And the output now shows the contour lines in the background at every frame of the animation.

Transport equation in 1D (python)

I'm trying to write a Python program to solve the 1D convection (transport) equation using the finite difference method (upwind scheme). The problem is as follows:
Here's what I've attempted:
from numpy import *
from numpy.linalg import *
from matplotlib.pyplot import *

def u0(x):
    if (0.4 <= x <= 0.5):
        y = 10*(x - 0.4)
    elif (0.5 <= x <= 0.6):
        y = 10*(0.6 - x)
    else:
        y = 0
    return y

print('Choose the transport velocity c: ')
c = float(input('c = '))

def solex(x, t):
    return u0(x - c*t)

print('Choose the step size h: ')
h = float(input('h = '))
print('Choose the time step dt and the final time T: ')
dt = float(input('dt = '))
T = float(input('T = '))

# Mesh
N = int((1/h) - 1)
x = linspace(0, 1, N + 2)
M = int((T/dt) - 1)
t = linspace(0, T, M + 2)

# Iteration
U1 = zeros(N)
U2 = zeros(N)
sol = zeros((N, M + 2))
for i in range(1, N + 1):
    U1[i - 1] = u0(x[i])
sol[:, 0] = U1
for j in range(1, size(t)):
    for i in range(1, N-1):
        U2[i] = U1[i] - c*(dt/h)*(U1[i] - U1[i - 1])
    sol[:, j] = U2
    U1 = U2
It doesn't seem to work, and I don't know why.
Though you said you already solved your problem, I would still like to suggest some general improvements:
Wildcard imports like from numpy import * are considered bad practice; better to use import numpy as np and refer to the needed functions as np.linspace etc.
The power of NumPy comes from vectorization, so try to replace as many for-loops as possible with vectorized operations.
At least from what you showed us, the variables U1 and U2 are not really necessary.
Using input for every single parameter might be overkill.
The following code includes my suggested improvements. Note how I replaced your u0 with a vectorized version using np.piecewise and replaced several for-loops. I also added a visualisation.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

def u0(x):
    y = np.piecewise(
        x,
        [(0.4 <= x) & (x <= 0.5), (0.5 <= x) & (x <= 0.6)],
        [lambda x: 10*(x - 0.4), lambda x: 10*(0.6 - x), 0])
    return y

c = 0.9
h = 0.01
dt = 0.01
T = 2

N = int(np.ceil(1/h))
x = np.linspace(0, 1, N)
M = int(np.ceil(T/dt))
t = np.linspace(0, T, M)

# solve with upwind scheme
sol = np.zeros((N, M))
sol[:, 0] = u0(x)
# you could add boundary values here by setting
# sol[0, :] = <your_boundary_data>
for i in range(1, len(t)):
    sol[1:, i] = sol[1:, i-1] - c*(dt/h)*(sol[1:, i-1] - sol[:-1, i-1])

# Visualization
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('t')
T, X = np.meshgrid(t, x)
surf = ax.plot_surface(X, T, sol)
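One caveat worth adding (not part of the original answer): the explicit upwind scheme is only stable when the CFL condition c*dt/h <= 1 holds, so it can be useful to check this before running the time loop. A minimal sketch:
# CFL (Courant) number of the upwind scheme; the parameters above give 0.9*0.01/0.01 = 0.9
cfl = c * dt / h
assert cfl <= 1, f"Upwind scheme likely unstable: CFL number {cfl} exceeds 1"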

How can I fit an equation which is not a function

Given data points in the xy plane, I would like to use scipy.optimize.leastsq to find fit parameters for an ellipse (which cannot be written as a function y(x)). I tried setting the entire equation equal to zero and then fitting this function, but the fit fails to converge with the error output
"The relative error between two consecutive iterates is at most 0.000000."
The code is shown below, as well as the output. The fitter clearly does not find any reasonable parameters. My question is whether or not this is a problem with scipy.optimize.leastsq, or whether the "trick" of setting the function equal to zero and instead fitting that is not valid.
from scipy.optimize import leastsq, curve_fit
import numpy as np
import matplotlib.pyplot as plt

def function(x, y, theta, smaj, smin):
    xp = np.cos(theta)*x - np.sin(theta)*y
    yp = np.sin(theta)*x + np.cos(theta)*y
    z = ((xp)**2)/smaj**2 + ((yp)**2)/smin**2
    return z

def g(x, y, smaj, smin):
    return x*x/smaj**2 + y*y/smin**2

def window(array, alt, arange):
    arr = [array[i] for i, a in enumerate(alt) if a > arange[0] and a < arange[1]]
    return np.asarray(arr)

def fitter(p0, x, y, func, errfunc, err):
    # the fitter function
    out = leastsq(errfunc, p0, args=(x, y, func, err), full_output=1)
    pfinal = out[0]
    covar = out[1]
    mydict = out[2]
    mesg = out[3]
    ier = out[4]
    resids = mydict['fvec']
    chisq = np.sum(resids**2)
    degs_frdm = len(x) - len(pfinal)
    reduced_chisq = chisq/degs_frdm
    ls = [pfinal, covar, mydict, mesg, ier, resids, chisq, degs_frdm, reduced_chisq]
    print('fitter status: ', ier, '-- aka -- ', mesg)
    i = 0
    if covar is not None:
        if (ier == 1 or ier == 2 or ier == 3 or ier == 4):
            for u in pfinal:
                print('Param', i+1, ': ', u, ' +/- ', np.sqrt(covar[i, i]))
                i = i + 1
            print('reduced chisq', reduced_chisq)
    else:
        print('fitter failed')
    return ls

def func(x, y, p):
    x = x - p[3]
    y = y - p[4]
    xp = np.cos(p[0])*(x) - np.sin(p[0])*(y)
    yp = np.sin(p[0])*(x) + np.cos(p[0])*(y)
    z = ((xp)**2)/p[1]**2 + ((yp)**2)/p[2]**2 - 1
    return z

def errfunc(p, x, y, func, err):
    return (y - func(x, y, p))/err

t = np.linspace(0, 2*np.pi, 100)
xx = 5*np.cos(t); yy = np.sin(t)
p0 = [0, 5, 1, 0, 0]
sigma = np.ones(len(xx))
fit = fitter(p0, xx, yy, func, errfunc, sigma)
params = fit[0]
covariance = fit[1]
residuals = fit[5]

t = np.linspace(0, 2*np.pi, 100)
xx = 5*np.cos(t); yy = np.sin(t)
plt.plot(xx, yy, 'bx', ms=4)

xx = np.linspace(-10, 10, 1000)
yy = np.linspace(-5, 5, 1000)
newx = []
newy = []
for x in xx:
    for y in yy:
        if 0.99 < func(x, y, params) < 1.01:
            # if g(x, y, 5, 1) == 1:
            newx.append(x)
            newy.append(y)
plt.plot(newx, newy, 'kx', ms=1)
plt.show()
The blue crosses are the actual data, and the black line is the fitter's guess at the parameters.
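For reference, a minimal sketch of the implicit-fit idea described in the question (treating func(x, y, p) = 0 itself as the residual, rather than comparing y against func), assuming the same func defined above is available:
from scipy.optimize import leastsq
import numpy as np

# Residuals of the implicit ellipse equation: func(x, y, p) is 0 for points on the curve
def implicit_residuals(p, x, y):
    return func(x, y, p)

t = np.linspace(0, 2*np.pi, 100)
xx, yy = 5*np.cos(t), np.sin(t)

# Start slightly away from the true parameters (theta, smaj, smin, x0, y0)
p_start = [0.1, 4.0, 2.0, 0.5, 0.5]
p_fit, ier = leastsq(implicit_residuals, p_start, args=(xx, yy))
print(p_fit)  # should land near theta ~ 0, smaj ~ 5, smin ~ 1, x0 ~ 0, y0 ~ 0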

Plot basic example of neural network

I am studying a neural network tutorial and made the simple perceptron code below.
The purpose is splitting 20 points into two groups.
perceptron.py
import numpy as np
from pprint import pprint
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt

plt.style.use('ggplot')
font = {'family': 'meiryo'}
matplotlib.rc('font', **font)

rng = np.random.RandomState(123)
d = 2   # dimension
N = 10  # items per group
mean = 5
x1 = rng.randn(N, d) + np.array([0, 0])        # group 0
x2 = rng.randn(N, d) + np.array([mean, mean])  # group 1
x = np.concatenate((x1, x2), axis=0)

##### Plot points
allDf = pd.DataFrame(columns=['x', 'y'])
k = 0
for i in x:
    print(i[0])
    temp = pd.DataFrame({'x': i[0],
                         'y': i[1]}, index=[k])
    k = k + 1
    allDf = pd.concat([allDf, temp])
pprint(allDf)
allDf.plot(kind='scatter', x='x', y='y')
#########

# initialize w and b
w = np.zeros(d)
b = 0

def y(x):
    return step(np.dot(w, x) + b)

def step(x):
    return 1 * (x > 0)

def t(i):
    if i < N:
        return 0
    else:
        return 1

while True:
    classified = True
    for i in range(N * 2):
        delta_w = (t(i) - y(x[i])) * x[i]
        delta_b = (t(i) - y(x[i]))
        w += delta_w
        b += delta_b
        classified *= all(delta_w == 0) * (delta_b == 0)
    if classified:
        print("Final answer")
        pprint(w)
        pprint(b)  # I get the answer here but how can I plot this w and b?
        X = np.linspace(-2, 6, 100)    # it's wrong!!
        Y = (w[0] * X + w[1] * X) - b  # it's wrong!!
        plt.plot(X, Y)
        plt.show()
        break
This source code gives me the final answer:
w = array([ 2.14037745, 1.2763927 ])
b = -9
But how can I plot this? I want to draw the line between the two groups.
The final graph (line) is supposed to look like this:
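For reference, the decision boundary of a perceptron with weights w and bias b is the set of points where w[0]*x + w[1]*y + b = 0, so one way to draw it (a sketch using the w and b printed above, not code from the original post) is to solve that equation for y:
import numpy as np
import matplotlib.pyplot as plt

w = np.array([2.14037745, 1.2763927])
b = -9

# Boundary: w[0]*x + w[1]*y + b = 0  =>  y = -(w[0]*x + b) / w[1]
X = np.linspace(-2, 6, 100)
Y = -(w[0] * X + b) / w[1]

plt.plot(X, Y)
plt.show()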
