Complex-valued ODE from PDE in Python

I have a complex-valued system from a PDE problem, and odeint() in Python cannot deal with it. I wrote an RK4 module to solve my system. It seems to run, but the computed values are obviously incorrect: at the second time step, all computed values are zero. Here is my code:
import matplotlib.pyplot as plt
import numpy as np
import drawnow
import time
import math
### Parameters ###
L = 20
n = 64
delta_t = 1.
tmax = 10
miu = 1e-6
x2 = np.linspace(-L/2,L/2, n+1)
x = x2[:n] # periodic B.C. #0 = #n
kx1 = np.linspace(0,n/2-1,n/2)
kx2 = np.linspace(1,n/2, n/2)
kx2 = -1*kx2[::-1]
kx = (2.*math.pi/L)*np.concatenate((kx1,kx2)); kx[0] = 1e-6
ky = kx; y = x
X, Y = np.meshgrid(x, y)
KX,KY = np.meshgrid(kx,ky)
K = KX**2 + KY**2
K2 = np.reshape(K, n**2,1)
### Initial Condition ###
vorticity = np.exp(-0.25*X**2 - 2.*Y**2)
wt = np.fft.fft2(vorticity)
wt2 = np.reshape(wt, n**2, 1) # wt2 is initial condition
### Define ODE ###
def yprime(t,rhs):
    global miu, K, K2, n, KX, KY, wt2, wt
    psit = -wt/ K
    psix = np.real(np.fft.ifft2(1j*KX*psit))
    psiy = np.real(np.fft.ifft2(1j*KY*psit))
    wx = np.real(np.fft.ifft2(1j*KX*wt))
    wy = np.real(np.fft.ifft2(1j*KY*wt))
    rhs = -miu * K2 * wt2 + np.reshape(np.fft.fft2(wx*psiy - wy*psix), n**2,1)
    return rhs
def RK4(domain,wt2,tmax):
    w = np.empty((tmax+1,n**2))
    w = w + 0j
    t = np.empty(tmax+1) # length
    w[0,:] = wt2 # enter initial conditions in y
    t[0] = domain[0]
    for i in range(1,tmax):
        t[i+1] = t[i]+delta_t
        w[i+1,:] = RK4Step(t[i], w[i,:],delta_t)
    return w
def RK4Step(t,w,delta_t):
    k1 = yprime(t,w)
    k2 = yprime(t+0.5*delta_t, w+0.5*k1*delta_t)
    k3 = yprime(t+0.5*delta_t, w+0.5*k2*delta_t)
    k4 = yprime(t+delta_t, w+k3*delta_t)
    return w + (k1+2*k2+2*k3+k4)*delta_t/6.
### Prediction ###
TimeStart = 0.
TimeEnd = tmax+1
TimeSpan = np.arange(TimeStart, TimeEnd, delta_t)
wt2_sol = RK4(TimeSpan, wt2, tmax)
for i in TimeSpan:
    w = np.real(np.fft.ifft2(np.reshape(wt2_sol[i,:], (n, n))))
    plt.pcolor(X,Y,w,shading = 'interp',cmap='jet')
    drawnow
    time.sleep(0.2)
plt.show()
Any idea why it doesn't work? In addition, I'd like to make a short video from the solution, but the functions drawnow and time.sleep() do not seem to work here.
Thank you!

My cleaned-up version is below. Changing the number of inner steps does not change the quality of the result.
Make the Runge-Kutta solver (more) universal: the input is a time array, with (times[0], y0) being the initial point of the IVP.
Replace def yprime(t,rhs): with def yprime(t,wt):, since wt is your state variable and rhs is the result, so wt becomes a local variable in yprime. Eliminate rhs by assembling the result directly in the return statement.
Remove all reshape operations and act on the vector space of 2D arrays; numpy is good at treating matrices as just another kind of vector.
Add matplotlib.animation for the pre-generated image sequence; the tutorial for that seemed easier than the function-based animation.
Played with arange to replace linspace in the generation of kx. A better option is probably to use fftshift to swap the halves of the frequency array; see the sketch after this list.
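As a minimal sketch (assuming the same n = 64 and L = 20 as in the code below), the same kx ordering can also be produced directly with np.fft.fftfreq, which already returns the [0, positive, negative] layout that np.fft.fft2 expects:
import numpy as np
n, L = 64, 20
# fftfreq returns frequencies in cycles per unit length; multiply by 2*pi for angular wavenumbers
kx = 2*np.pi*np.fft.fftfreq(n, d=L/n)
kx[0] = 1e-6  # avoid division by zero when forming psit = -wt/K
ky = kx
The full cleaned-up version: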
import numpy as np
import math
from matplotlib import pyplot as plt
from matplotlib import animation
#----- Numerical integration of ODE via fixed-step classical Runge-Kutta -----
def RK4Step(yprime,t,y,dt):
    k1 = yprime(t       , y         )
    k2 = yprime(t+0.5*dt, y+0.5*k1*dt)
    k3 = yprime(t+0.5*dt, y+0.5*k2*dt)
    k4 = yprime(t+    dt, y+    k3*dt)
    return y + (k1+2*k2+2*k3+k4)*dt/6.
def RK4(yprime,times,y0):
    y = np.empty(times.shape+y0.shape,dtype=y0.dtype)
    y[0,:] = y0 # enter initial conditions in y
    steps = 4
    for i in range(times.size-1):
        dt = (times[i+1]-times[i])/steps
        y[i+1,:] = y[i,:]
        for k in range(steps):
            y[i+1,:] = RK4Step(yprime, times[i]+k*dt, y[i+1,:], dt)
    return y
#====================================================================
#----- Parameters for PDE -----
L = 20
n = 64
delta_t = 1.
tmax = 10
miu = 1e-6
#----- Constructing the grid and kernel functions
x2 = np.linspace(-L/2,L/2, n+1)
x = x2[:n] # periodic B.C. #0 = #n
y = x
kx = np.linspace( -n/2 , n/2-1, n)
kx = (2.*math.pi/L)*np.concatenate((np.arange(0,n/2),np.arange(-n/2,0)));
kx[0] = 1e-6
ky = kx;
X, Y = np.meshgrid(x, y)
KX,KY = np.meshgrid(kx,ky)
K = KX**2 + KY**2
#----- Initial Condition -----
vorticity = np.exp(-0.25*X**2 - 2.*Y**2)
wt0 = np.fft.fft2(vorticity)
#----- Define ODE -----
def wprime(t,wt):
    global miu, K, K2, n, KX, KY
    psit = -wt / K
    psix = np.real(np.fft.ifft2(1j*KX*psit))
    psiy = np.real(np.fft.ifft2(1j*KY*psit))
    wx = np.real(np.fft.ifft2(1j*KX*wt))
    wy = np.real(np.fft.ifft2(1j*KY*wt))
    return -miu * K * wt + np.fft.fft2(wx*psiy - wy*psix)
#====================================================================
#----- Compute the numerical solution -----
TimeStart = 0.
TimeEnd = tmax+delta_t
TimeSpan = np.arange(TimeStart, TimeEnd, delta_t)
wt_sol = RK4(wprime,TimeSpan, wt0)
#----- Animate the numerical solution -----
fig = plt.figure()
ims = []
for i in range(TimeSpan.size):
    w = np.real(np.fft.ifft2(wt_sol[i,:]))
    im = plt.pcolor(X,Y,w,edgecolors='none',cmap='jet')
    ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
repeat_delay=1000)
#ani.save('PDE-animation.mp4')
plt.show()


Runge Kutta 4th order Python

I am trying to solve this equation using 4th-order Runge-Kutta, applying d2Q/dt2 = F(y, u, x) and dQ/dt = u, with Q = y, in my program.
When I try to run the code I get this error:
Traceback (most recent call last):
File "C:\Users\Egw\Desktop\Analysh\Askhsh1\asdasda.py", line 28, in <module>
k1 = F(y, u, x) #(x, v, t)
File "C:\Users\Egw\Desktop\Analysh\Askhsh1\asdasda.py", line 13, in F
return ((Vo/L -(R0/L)*u -(R1/L)*u**3 - y*(1/L*C)))
OverflowError: (34, 'Result too large')
I tried using the decimal library but I still couldn't make it work properly; I might not have used it correctly, though.
My code is this one:
import numpy as np
from math import pi
from numpy import arange
from matplotlib.pyplot import plot, show
#parameters
R0 = 200
R1 = 250
L = 15
h = 0.002
Vo=1000
C=4.2*10**(-6)
t=0.93
def F(y, u, x):
    return ((Vo/L -(R0/L)*u -(R1/L)*u**3 - y*(1/L*C)))
xpoints = arange(0,t,h)
ypoints = []
upoints = []
y = 0.0
u = Vo/L
for x in xpoints:
    ypoints.append(y)
    upoints.append(u)
    m1 = u
    k1 = F(y, u, x) #(x, v, t)
    m2 = h*(u + 0.5*k1)
    k2 = (h*F(y+0.5*m1, u+0.5*k1, x+0.5*h))
    m3 = h*(u + 0.5*k2)
    k3 = h*F(y+0.5*m2, u+0.5*k2, x+0.5*h)
    m4 = h*(u + k3)
    k4 = h*F(y+m3, u+k3, x+h)
    y += (m1 + 2*m2 + 2*m3 + m4)/6
    u += (k1 + 2*k2 + 2*k3 + k4)/6
plot(xpoints, upoints)
show()
plot(xpoints, ypoints)
show()
I expected to get the plots of u and y against t.
It turns out I had messed up the equations I was using for Runge-Kutta.
The correct code is the following:
import numpy as np
from math import pi
from numpy import arange
from matplotlib.pyplot import plot, show
#parameters
R0 = 200
R1 = 250
L = 15
h = 0.002
Vo=1000
C=4.2*10**(-6)
t0=0
#dz/dx
def G(x,y,z):
    return Vo/L -(R0/L)*z -(R1/L)*z**3 - y/(L*C)
#dy/dx
def F(x,y,z):
    return z
t = np.arange(t0, 0.93, h)
x = np.zeros(len(t))
y = np.zeros(len(t))
z = np.zeros(len(t))
y[0] = 0.0
z[0] = 0
for i in range(1, len(t)):
    k0 = h*F(x[i-1], y[i-1], z[i-1])
    l0 = h*G(x[i-1], y[i-1], z[i-1])
    k1 = h*F(x[i-1]+h*0.5, y[i-1]+k0*0.5, z[i-1]+l0*0.5)
    l1 = h*G(x[i-1]+h*0.5, y[i-1]+k0*0.5, z[i-1]+l0*0.5)
    k2 = h*F(x[i-1]+h*0.5, y[i-1]+k1*0.5, z[i-1]+l1*0.5)
    l2 = h*G(x[i-1]+h*0.5, y[i-1]+k1*0.5, z[i-1]+l1*0.5)
    k3 = h*F(x[i-1]+h, y[i-1]+k2, z[i-1]+l2)
    l3 = h*G(x[i-1]+h, y[i-1]+k2, z[i-1]+l2)
    y[i] = y[i-1] + (k0 + 2*k1 + 2*k2 + k3)/6
    z[i] = z[i-1] + (l0 + 2*l1 + 2*l2 + l3)/6
Q=y
I=z
plot(t, Q)
show()
plot(t, I)
show()
If I may draw your attention to these 4 lines
m1 = u
k1 = F(y, u, x) #(x, v, t)
m2 = h*(u + 0.5*k1)
k2 = (h*F(y+0.5*m1, u+0.5*k1, x+0.5*h))
You should note a fundamental structural difference between the first two lines and the second pair of lines: you need to multiply by the step size h in the first pair as well.
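As a minimal sketch of the corrected pair (only the missing h factors are added; the rest of the loop stays as posted):
m1 = h*u
k1 = h*F(y, u, x)
m2 = h*(u + 0.5*k1)
k2 = h*F(y+0.5*m1, u+0.5*k1, x+0.5*h)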
The next problem is the step size and the cubic term. It contributes a term of size 3*(R1/L)*u^2 ~ 50*u^2 to the Lipschitz constant. In the original IVP from the question, with u = Vo/L ~ 70, this term is of size 2.5e+5. Just to keep that term inside the stability region of the method, the step size has to be smaller than about 1e-5.
With the corrected initial condition u=0 at the start, the velocity u remains below 0.001, so the cubic term no longer determines stability; it is now governed by the last term, which contributes a Lipschitz constant of about 1/sqrt(L*C) ~ 125. The step size for stability is then about 0.02, and with 0.002 one can expect quantitatively useful results.
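A quick back-of-the-envelope check of these magnitudes (using roughly 2.8 for the extent of the RK4 stability interval on the negative real axis):
import numpy as np
R0, R1, L, C, Vo = 200, 250, 15, 4.2e-6, 1000
u = Vo/L                          # ~ 66.7, the original initial value of u
print(3*(R1/L)*u**2)              # ~ 2.2e5, contribution of the cubic term
print(2.8/(3*(R1/L)*u**2))        # ~ 1.3e-5, step size it would force
print(1/np.sqrt(L*C))             # ~ 126, contribution of the 1/(L*C) term
print(2.8/(1/np.sqrt(L*C)))       # ~ 0.022, stability limit when u stays near 0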
You can use the decimal library for more precision (it handles more digits), but it's kind of annoying: every value has to be of the same class (decimal.Decimal).
For example:
import numpy as np
from math import pi
from numpy import arange
from matplotlib.pyplot import plot, show
# Import decimal.Decimal as D
import decimal
from decimal import Decimal as D
# Precision
decimal.getcontext().prec = 10_000_000
#parameters
# Every value should be D class (decimal.Decimal class)
R0 = D(200)
R1 = D(250)
L = D(15)
h = D(0.002)
Vo = D(1000)
C = D(4.2*10**(-6))
t = D(0.93)
def F(y, u, x):
    # Decomposed for use D
    a = D(Vo/L)
    b = D(-(R0/L)*u)
    c = D(-(R1/L)*u**D(3))
    d = D(-y*(D(1)/L*C))
    return ((a + b + c + d))
xpoints = arange(0,t,h)
ypoints = []
upoints = []
y = D(0.0)
u = D(Vo/L)
for x in xpoints:
    ypoints.append(y)
    upoints.append(u)
    m1 = u
    k1 = F(y, u, x) #(x, v, t)
    m2 = (h*(u + D(0.5)*k1))
    k2 = (h*F(y+D(0.5)*m1, u+D(0.5)*k1, x+D(0.5)*h))
    m3 = h*(u + D(0.5)*k2)
    k3 = h*F(y+D(0.5)*m2, u+D(0.5)*k2, x+D(0.5)*h)
    m4 = h*(u + k3)
    k4 = h*F(y+m3, u+k3, x+h)
    y += (m1 + D(2)*m2 + D(2)*m3 + m4)/D(6)
    u += (k1 + D(2)*k2 + D(2)*k3 + k4)/D(6)
plot(xpoints, upoints)
show()
plot(xpoints, ypoints)
show()
But even with ten million digits of precision I still get an overflow error. Check the components of the formula: their values are way too high. You can increase the precision to handle them, but you'll notice it takes a long time to compute them.
Problem implementation using scipy.integrate.odeint and scipy.integrate.solve_ivp.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint, solve_ivp
# Input data initial conditions
ti = 0.0
tf = 0.5
N = 100000
h = (tf-ti)/N
# Initial conditions
u0 = 0.0
Q0 = 0.0
t_span = np.linspace(ti,tf,N)
r0 = np.array([Q0,u0])
# Parameters
R0 = 200
R1 = 250
L = 15
C = 4.2*10**(-6)
V0 = 1000
# Systems of First Order Equations
# This function is used with odeint, as specified in the documentation for scipy.integrate.odeint
def f(r,t,R0,R1,L,C,V0):
    Q,u = r
    ode1 = u
    ode2 = -((R0/L)*u)-((R1/L)*u**3)-((1/(L*C))*Q)+(V0/L)
    return np.array([ode1,ode2])
# This function is used in our 4Order Runge-Kutta implementation and in scipy.integrate.solve_ivp
def F(t,r,R0,R1,L,C,V0):
    Q,u = r
    ode1 = u
    ode2 = -((R0/L)*u)-((R1/L)*u**3)-((1/(L*C))*Q)+(V0/L)
    return np.array([ode1,ode2])
# Resolution with odeint and solve_ivp
sol_1 = odeint(f,r0,t_span,args=(R0,R1,L,C,V0))
sol_2 = solve_ivp(fun=F,t_span=(ti,tf), y0=r0, method='LSODA',args=(R0,R1,L,C,V0))
Q_odeint, u_odeint = sol_1[:,0], sol_1[:,1]
Q_solve_ivp, u_solve_ivp = sol_2.y[0,:], sol_2.y[1,:]
# Figures
plt.figure(figsize=[30.0,10.0])
plt.subplot(3,1,1)
plt.grid(color = 'red',linestyle='--',linewidth=0.4)
plt.plot(t_span,Q_odeint,'r',t_span,u_odeint,'b')
plt.xlabel('t(s)')
plt.ylabel('Q(t), u(t)')
plt.subplot(3,1,2)
plt.plot(sol_2.t,Q_solve_ivp,'g',sol_2.t,u_solve_ivp,'y')
plt.grid(color = 'yellow',linestyle='--',linewidth=0.4)
plt.xlabel('t(s)')
plt.ylabel('Q(t), u(t)')
plt.subplot(3,1,3)
plt.plot(Q_solve_ivp,u_solve_ivp,'green')
plt.grid(color = 'yellow',linestyle='--',linewidth=0.4)
plt.xlabel('Q(t)')
plt.ylabel('u(t)')
plt.show()
Runge-Kutta 4th
# Code development of Runge-Kutta 4 Order
# Parameters
R0 = 200
R1 = 250
L = 15
C = 4.2*10**(-6)
V0 = 1000
# Input data initial conditions #
ti = 0.0
tf = 0.5
N = 100000
h = (tf-ti)/N
# Initial conditions
u0 = 0.0
Q0 = 0.0
# First order ordinary differential equations
def f1(t,Q,u):
    return u
def f2(t,Q,u):
    return -((R0/L)*u)-((R1/L)*u**3)-((1/(L*C))*Q)+(V0/L)
t = np.zeros(N); Q = np.zeros(N); u = np.zeros(N)
t[0] = ti
Q[0] = Q0
u[0] = u0
for i in range(0,N-1,1):
    k1 = h*f1(t[i],Q[i],u[i])
    l1 = h*f2(t[i],Q[i],u[i])
    k2 = h*f1(t[i]+(h/2),Q[i]+(k1/2),u[i]+(l1/2))
    l2 = h*f2(t[i]+(h/2),Q[i]+(k1/2),u[i]+(l1/2))
    k3 = h*f1(t[i]+(h/2),Q[i]+(k2/2),u[i]+(l2/2))
    l3 = h*f2(t[i]+(h/2),Q[i]+(k2/2),u[i]+(l2/2))
    k4 = h*f1(t[i]+h,Q[i]+k3,u[i]+l3)
    l4 = h*f2(t[i]+h,Q[i]+k3,u[i]+l3)
    Q[i+1] = Q[i] + ((k1+2*k2+2*k3+k4)/6)
    u[i+1] = u[i] + ((l1+2*l2+2*l3+l4)/6)
    t[i+1] = t[i] + h
plt.figure(figsize=[20.0,10.0])
plt.subplot(1,2,1)
plt.plot(t,Q_solve_ivp,'r',t,Q_odeint,'y',t,Q,'b')
plt.grid(color = 'yellow',linestyle='--',linewidth=0.4)
plt.xlabel('t(s)')
plt.ylabel(r'$Q(t)_{Odeint}$, $Q(t)_{RK4}$')
plt.subplot(1,2,2)
plt.plot(t,Q_solve_ivp,'g',t,Q_odeint,'y',t,Q,'b')
plt.grid(color = 'yellow',linestyle='--',linewidth=0.4)
plt.xlabel('t(s)')
plt.ylabel(r'$Q(t)_{solve_ivp}$, $Q(t)_{RK4}$')

Drawing Poincare Section using Python

I was trying to plot a Poincaré section of the following DE, for which it is quite meaningful to have a periodic potential function V(x) = -cos(x) in the equation.
After computing the solution using RK4 with time step dt = 0.001, the section Python drew was as shown below.
But according to the textbook (the 2nd edition by J.M.T. Thompson and H.B. Stewart), the section should look quite different.
In my opinion, since the Poincaré section does not appear as the authors draw it, there must be some error in my code. However, I have done the same for other forced-oscillation DEs, including Duffing's equation, and obtained plots identical to those in the textbook. So I was wondering whether there is a typo in the equation given by the textbook, or a mistake somewhere else. I posted my code below; it might be quite messy to read, so I appreciate you dealing with it.
import numpy as np
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import time
state = [1]
def print_percent_done(index, total, state, title='Please wait'):
    percent_done2 = (index+1)/total*100
    percent_done = round(percent_done2, 1)
    print(f'\t⏳{title}: {percent_done}% done', end='\r')
    if percent_done2 > 99.9 and state[0]:
        print('\t✅'); state = [0]
####
no = 1
####
def multiple(n, q):
    m = n; i = 0
    while m >= 0:
        m -= q
        i += 1
    return min(abs(n - (i - 1)*q), abs(i*q - n))
# system(2)
#Basic info.
filename = 'sinPotentialWell'
# a = 1
# alpha = 0.01
# w = 4
w0 = .5
n = 1000000
h = .01
t_0 = 0
x_0 = 0.1
y_0 = 0
A = [(t_0, x_0, y_0)]
def f(t, x, y):
    return y
def g(t, x, y):
    return -0.5*y - np.sin(x) + 1.1*np.sin(0.5*t)
for i in range(n):
    t0 = A[i][0]; x0 = A[i][1]; y0 = A[i][2]
    k1 = f(t0, x0, y0)
    u1 = g(t0, x0, y0)
    k2 = f(t0 + h/2, x0 + h*k1/2, y0 + h*u1/2)
    u2 = g(t0 + h/2, x0 + h*k1/2, y0 + h*u1/2)
    k3 = f(t0 + h/2, x0 + h*k2/2, y0 + h*u2/2)
    u3 = g(t0 + h/2, x0 + h*k2/2, y0 + h*u2/2)
    k4 = f(t0 + h, x0 + h*k3, y0 + h*u3)
    u4 = g(t0 + h, x0 + h*k3, y0 + h*u3)
    t = t0 + h
    x = x0 + (k1 + 2*k2 + 2*k3 + k4)*h/6
    y = y0 + (u1 + 2*u2 + 2*u3 + u4)*h/6
    A.append([t, x, y])
    if i%1000 == 0: print_percent_done(i, n, state, 'Solving given DE')
#phase diagram
print('showing 3d_(x, y, phi) graph')
PHI=[[]]; X=[[]]; Y=[[]]
PHI_period1 = []; X_period1 = []; Y_period1 = []
for i in range(n):
    if w0*A[i][0]%(2*np.pi) < 1 and w0*A[i-1][0]%(2*np.pi) > 6:
        PHI.append([]); X.append([]); Y.append([])
        PHI_period1.append((w0*A[i][0])%(2*np.pi)); X_period1.append(A[i][1]); Y_period1.append(A[i][2])
phi_period1 = np.array(PHI_period1); x_period1 = np.array(X_period1); y_period1 = np.array(Y_period1)
print('showing Poincare Section at phi=0')
plt.plot(x_period1, y_period1, 'gs', markersize = 2)
plt.plot()
plt.title('phi=0 Poincare Section')
plt.xlabel('x'); plt.ylabel('y')
plt.show()
If you factor out some of the computation blocks, you can make the code more flexible and computations more direct. No need to reconstruct something if you can construct it in the first place. You want to catch the points where w0*t is a multiple of 2*pi, so just construct the time loops so you integrate in chunks of 2*pi/w0 and only remember the interesting points.
num_plot_points = 2000
h = .01
t,x,y = t_0,x_0,y_0
x_section,y_section = [],[]
T = 2*np.pi/w0
for k in range(num_plot_points):
    t = 0
    while t < T-1.2*h:
        x,y = RK4step(t,x,y,h)
        t += h
    x,y = RK4step(t,x,y,T-t)
    if k%100 == 0: print_percent_done(k, num_plot_points, state, 'Solving given DE')
    x_section.append(x); y_section.append(y)
with RK4step just containing the code of the RK4 step; a sketch is given below.
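For completeness, a minimal sketch of what such an RK4step could look like, reusing the f and g defined in the question:
def RK4step(t, x, y, h):
    # one classical RK4 step for the system x' = f(t,x,y), y' = g(t,x,y)
    k1 = f(t, x, y)
    u1 = g(t, x, y)
    k2 = f(t + h/2, x + h*k1/2, y + h*u1/2)
    u2 = g(t + h/2, x + h*k1/2, y + h*u1/2)
    k3 = f(t + h/2, x + h*k2/2, y + h*u2/2)
    u3 = g(t + h/2, x + h*k2/2, y + h*u2/2)
    k4 = f(t + h, x + h*k3, y + h*u3)
    u4 = g(t + h, x + h*k3, y + h*u3)
    return (x + (k1 + 2*k2 + 2*k3 + k4)*h/6,
            y + (u1 + 2*u2 + 2*u3 + u4)*h/6)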
This will not solve the mystery. The veil gets lifted if you consider that x is the angle theta (of a forced pendulum with friction) on a circle. Thus, to get points with the same spatial location, it needs to be reduced by multiples of 2*pi. Doing that,
plt.plot([x%(2*np.pi) for x in x_section], y_section, 'gs', markersize = 2)
results in the expected plot

How to animate this optimization model correctly

I have implemented a simple randomized, population-based optimization method - Grey Wolf optimizer. I am having some trouble with properly capturing the Matplotlib plots at each iteration using the camera package.
I am running GWO for the objective function f(x,y) = x^2 + y^2. I can only see the candidate solutions converging to the minimum; the contour plot doesn't show up.
Do you have any suggestions, how can I display the contour plot in the background?
GWO Algorithm implementation
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import pillow
# X : Position vector of the initial population
# n : Initial population size
def gwo(f,max_iterations,LB,UB):
    fig = plt.figure()
    camera = Camera(fig)
    def random_population_uniform(m,a,b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims),b - a)) for i in range(m)]
        return np.array(x)
    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1
        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha,beta,delta = 2,alpha,beta
        else:
            delta = 2
        for i in range(3,len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta,delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i]<= fitness[beta]:
                beta,delta = i,beta
            elif fitness[i] > fitness[beta] and fitness[i]<= fitness[delta]:
                delta = i
        return alpha, beta, delta
    def plot_search_agent_positions(f,X,alpha,beta,delta,a,b):
        # Plot the positions of search agents
        x = X[:,0]
        y = X[:,1]
        s = plt.scatter(x,y,c='gray',zorder=1)
        s = plt.scatter(x[alpha],y[alpha],c='red',zorder=1)
        s = plt.scatter(x[beta],y[beta],c='blue',zorder=1)
        s = plt.scatter(x[delta],y[delta],c='green',zorder=1)
        camera.snap()
    # Initialize the position of the search agents
    X = random_population_uniform(50,np.array(LB),np.array(UB))
    n = len(X)
    l = 1
    # Plot the first image on screen
    x = np.linspace(LB[0],LB[1],1000)
    y = np.linspace(LB[0],UB[1],1000)
    X1,X2 = np.meshgrid(x,y)
    Z = f(X1,X2)
    cont = plt.contour(X1,X2,Z,20,linewidths=0.75)
    while (l < max_iterations):
        # Take the x,y coordinates of the initial population
        x = X[:,0]
        y = X[:,1]
        # Calculate the objective function for each search agent
        fitness = list(map(f,x,y))
        # Update alpha, beta and delta
        alpha,beta,delta = search_agent_fitness(fitness)
        # Plot search agent positions
        plot_search_agent_positions(f,X,alpha,beta,delta,LB,UB)
        # a decreases linearly from 2 to 0
        a = 2 - l *(2 / max_iterations)
        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2) #r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2) #r2 is a random vector in [0,1] x [0,1]
            A1 = 2*a*r1 - a
            C1 = 2*r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1*D_alpha
            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2*a*r1 - a
            C2 = 2*r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2*D_beta
            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2*a*r1 - a
            C3 = 2*r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3*D_delta
            X[i] = (X_1 + X_2 + X_3)/3
        l = l + 1
    return X[alpha],camera
Function call
# define the objective function
def f(x,y):
    return x**2 + y**2
minimizer,camera = gwo(f,7,[-10,-10],[10,10])
animation = camera.animate(interval = 1000, repeat = True,
repeat_delay = 500)
Is it possible that the line x = np.linspace(LB[0],LB[1],1000) should be x = np.linspace(LB[0],UB[1],1000) instead? With your current definition of x, x is an array only filled with the value -10 which means that you are unlikely to find a contour.
Another thing that you might want to do is to move the cont = plt.contour(X1,X2,Z,20,linewidths=0.75) line inside of your plot_search_agent_positions function to ensure that the contour is plotted at each iteration of the animation.
Once you make those changes, the code looks like this:
import matplotlib.pyplot as plt
import numpy as np
from celluloid import Camera
import ffmpeg
import PIL
from matplotlib import animation, rc
from IPython.display import HTML, Image # For GIF
from scipy.interpolate import griddata
rc('animation', html='html5')
# X : Position vector of the initial population
# n : Initial population size
def gwo(f,max_iterations,LB,UB):
    fig = plt.figure()
    fig.gca(aspect='equal')
    camera = Camera(fig)
    def random_population_uniform(m,a,b):
        dims = len(a)
        x = [list(a + np.multiply(np.random.rand(dims),b - a)) for i in range(m)]
        return np.array(x)
    def search_agent_fitness(fitness):
        alpha = 0
        if fitness[1] < fitness[alpha]:
            alpha, beta = 1, alpha
        else:
            beta = 1
        if fitness[2] > fitness[alpha] and fitness[2] < fitness[beta]:
            beta, delta = 2, beta
        elif fitness[2] < fitness[alpha]:
            alpha,beta,delta = 2,alpha,beta
        else:
            delta = 2
        for i in range(3,len(fitness)):
            if fitness[i] <= fitness[alpha]:
                alpha, beta,delta = i, alpha, beta
            elif fitness[i] > fitness[alpha] and fitness[i]<= fitness[beta]:
                beta,delta = i,beta
            elif fitness[i] > fitness[beta] and fitness[i]<= fitness[delta]:
                delta = i
        return alpha, beta, delta
    def plot_search_agent_positions(f,X,alpha,beta,delta,a,b,X1,X2,Z):
        # Plot the positions of search agents
        x = X[:,0]
        y = X[:,1]
        s = plt.scatter(x,y,c='gray',zorder=1)
        s = plt.scatter(x[alpha],y[alpha],c='red',zorder=1)
        s = plt.scatter(x[beta],y[beta],c='blue',zorder=1)
        s = plt.scatter(x[delta],y[delta],c='green',zorder=1)
        Z = f(X1,X2)
        cont = plt.contour(X1,X2,Z,levels=20,colors='k',norm=True)
        plt.clabel(cont, cont.levels, inline=True, fontsize=10)
        camera.snap()
    # Initialize the position of the search agents
    X = random_population_uniform(50,np.array(LB),np.array(UB))
    n = len(X)
    l = 1
    # Plot the first image on screen
    x = np.linspace(LB[0],UB[1],1000)
    y = np.linspace(LB[0],UB[1],1000)
    X1,X2 = np.meshgrid(x,y)
    Z = f(X1,X2)
    while (l < max_iterations):
        # Take the x,y coordinates of the initial population
        x = X[:,0]
        y = X[:,1]
        # Calculate the objective function for each search agent
        fitness = list(map(f,x,y))
        # Update alpha, beta and delta
        alpha,beta,delta = search_agent_fitness(fitness)
        # Plot search agent positions
        plot_search_agent_positions(f,X,alpha,beta,delta,LB,UB,X1,X2,Z)
        # a decreases linearly from 2 to 0
        a = 2 - l *(2 / max_iterations)
        # Update the position of search agents including the Omegas
        for i in range(n):
            x_prey = X[alpha]
            r1 = np.random.rand(2) #r1 is a random vector in [0,1] x [0,1]
            r2 = np.random.rand(2) #r2 is a random vector in [0,1] x [0,1]
            A1 = 2*a*r1 - a
            C1 = 2*r2
            D_alpha = np.abs(C1 * x_prey - X[i])
            X_1 = x_prey - A1*D_alpha
            x_prey = X[beta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A2 = 2*a*r1 - a
            C2 = 2*r2
            D_beta = np.abs(C2 * x_prey - X[i])
            X_2 = x_prey - A2*D_beta
            x_prey = X[delta]
            r1 = np.random.rand(2)
            r2 = np.random.rand(2)
            A3 = 2*a*r1 - a
            C3 = 2*r2
            D_delta = np.abs(C3 * x_prey - X[i])
            X_3 = x_prey - A3*D_delta
            X[i] = (X_1 + X_2 + X_3)/3
        l = l + 1
    return X[alpha],camera
# define the objective function
def f(x,y):
    return x**2 + y**2
minimizer,camera = gwo(f,7,[-10,-10],[10,10])
animation = camera.animate(interval = 1000, repeat = True,repeat_delay = 500)
And the output gives:

Is there any way to optimize time and path simultaneously?

I have a problem with time and path optimization: I couldn't define two objectives, one for time and one for path, simultaneously. Python only reads the last objective and gives the result according to that.
Could you please help me solve this optimization problem? Thanks.
import matplotlib.pyplot as plt
import numpy as np
from gekko import GEKKO
# Gekko model
m = GEKKO(remote=False)
# Time points
nt = 501 # nt=101
tm = np.linspace(0, 1, nt) # tm = np.linspace(0, 100, nt)
m.time = tm
# Variables
g = m.Const(value=9.80665)
V = m.Const(value=200) # velocity
Xi = m.Var(value=0, lb=-2*np.pi, ub=2*np.pi) # Heading angle value=-np.pi dene
x = m.Var(value=0, lb=-100000, ub=100000) # x position
y = m.Var(value=0, lb=-100000, ub=100000) # y position
pathx = m.Const(value=70000) # intended distance in x direction
pathy = m.Const(value=20000) # intended distance in y direction
p = np.zeros(nt) # final time=1
p[-1] = 1.0
final = m.Param(value=p)
m.options.MAX_ITER = 1000000 # iteration number
# Optimize Final Time
tf = m.FV(value=1.0, lb=0.0001, ub=1000.0)
tf.STATUS = 1
# Controlled parameters
Mu = m.MV(value=0, lb=-1, ub=1) # solver controls bank angle
Mu.STATUS = 1
Mu.DCOST = 1e-3
# Equations
m.Equation(x.dt() == tf * (V * (m.cos(Xi))))
m.Equation(y.dt() == tf * (V * (m.sin(Xi))))
m.Equation(Xi.dt() == tf * (g * m.tan(Mu)) / V )
# Objective Function
w = 1e4
m.Minimize(w * (x * final - pathx) ** 2) # 1D part (x)
m.Minimize(w * (pathy - y * final) ** 2) # 2D part (y)
m.Obj(tf)
Multiple objectives are combined into a single objective by summation in Gekko. If there are multiple separate objectives that should be considered separately, then a Pareto front is a possible approach to evaluate the tradeoff (see Section 6.5 of the Optimization book), but this isn't implemented in Gekko.
One correction is that m.Equation(y * final <= pathy) should be m.Equation((y-pathy) * final <= 0) to avoid a potentially infeasible solution if pathy<0.
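As a minimal, self-contained sketch of the summation behavior mentioned above (a toy problem, not the path model): the two Minimize calls below are combined internally, which is equivalent to minimizing their sum.
from gekko import GEKKO
m = GEKKO(remote=False)
x = m.Var(value=1)
m.Minimize((x - 2)**2)    # first objective term
m.Minimize(0.1 * x**2)    # second objective term, added to the first
m.solve(disp=False)
print(x.value[0])         # minimizer of (x-2)**2 + 0.1*x**2, about 1.82
With that in mind, here is the modified path model: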
import matplotlib.pyplot as plt
from gekko import GEKKO
import numpy as np
# Gekko model
m = GEKKO(remote=False)
# Time points
nt = 51 # nt=101
tm = np.linspace(0, 1, nt) # tm = np.linspace(0, 100, nt)
m.time = tm
# Variables
g = m.Const(value=9.80665)
V = m.Const(value=200) # velocity
Xi = m.Var(value=0, lb=-2*np.pi, ub=2*np.pi) # Heading angle value=-np.pi dene
x = m.Var(value=0, lb=-100000, ub=100000) # x position
y = m.Var(value=0, lb=-100000, ub=100000) # y position
pathx = m.Const(value=70000) # intended distance in x direction
pathy = m.Const(value=20000) # intended distance in y direction
p = np.zeros(nt) # final time=1
p[-1] = 1.0
final = m.Param(value=p)
m.options.MAX_ITER = 1000000 # iteration number
# Optimize Final Time
tf = m.FV(value=1.0, lb=0.0001, ub=1000.0)
tf.STATUS = 1
# Controlled parameters
Mu = m.MV(value=0, lb=-1, ub=1) # solver controls bank angle
Mu.STATUS = 1
Mu.DCOST = 1e-3
# Equations
m.Equation(x.dt() == tf * (V * (m.cos(Xi))))
m.Equation(y.dt() == tf * (V * (m.sin(Xi))))
m.Equation(Xi.dt() == tf * (g * m.tan(Mu)) / V )
m.Equation((x-pathx) * final <= 0)
m.Equation((y-pathy) * final <= 0)
# Objective Function
w = 1e4
m.Minimize(w * (x * final - pathx) ** 2) # 1D part (x)
m.Minimize(w * (y * final - pathy) ** 2) # 2D part (y)
#in here python reads the last objective how can i run this two objectives simultaneously.
#m.Obj(Xi * final)
m.Minimize(tf)
#m.Maximize(0.2 * mass * final) # objective function
m.options.IMODE = 6
#m.options.NODES = 2
#m.options.MV_TYPE = 1
#m.options.SOLVER = 3
# m.open_folder() # to search for infeasibilities
m.solve(disp=False)
print('Final Time: ' + str(tf.value[0]))
tm = np.linspace(0,tf.value[0],nt)
#tm = tm * tf.value[0]
fig, axs = plt.subplots(3)
fig.suptitle('Results')
axs[0].plot(tm, Mu.value, 'b-', lw=2, label=r'$Bank$')
axs[0].legend(loc='best')
axs[1].plot(tm, x.value, 'r--', lw=2, label=r'$x$')
axs[1].legend(loc='best')
axs[2].plot(tm, y.value, 'p-', lw=2, label=r'$y$')
axs[2].legend(loc='best')
plt.xlabel('Time')
#plt.ylabel('Value')
plt.show()
plt.figure()
plt.plot(x.value, y.value)
plt.show()

Implement pseudo-spectral method with RK4 in Python

I am using a pseudo-spectral method to solve the KdV equation u_t + u*u_x + u_xxx = 0. After simplification by Fourier transform, I got two equations in two variables:
uhat = vhat * exp(-i*k^3*t)
d(vhat)/dt =-0.5i * k * exp(-i*k^3*t)*F[u^2]
where F represents the Fourier transform, uhat = F[u], vhat = F[v].
I'd like to use RK4 to solve for u via ifft(uhat) eventually. Now that I have the two variables uhat and vhat, I have 2 ideas for solving for them:
1. Treat them as a system of ODEs and implement RK4 for the system.
2. Treat equation 2 above as the ODE to be solved, advance vhat with RK4, and calculate uhat = vhat * exp(-i*k^3*delta_t) after each time step delta_t.
I have problems implementing both ideas. Here is the code for the 2nd idea above.
import numpy as np
import math
from matplotlib import pyplot as plt
from matplotlib import animation
#----- Numerical integration of ODE via fixed-step classical Runge-Kutta -----
def RK4Step(yprime,t,y,dt):
    k1 = yprime(t       , y         )
    k2 = yprime(t+0.5*dt, y+0.5*k1*dt)
    k3 = yprime(t+0.5*dt, y+0.5*k2*dt)
    k4 = yprime(t+    dt, y+    k3*dt)
    return y + (k1+2*k2+2*k3+k4)*dt/6.
def RK4(yprime,times,y0):
    y = np.empty(times.shape+y0.shape,dtype=y0.dtype)
    y[0,:] = y0 # enter initial conditions in y
    steps = 4
    for i in range(times.size-1):
        dt = (times[i+1]-times[i])/steps
        y[i+1,:] = y[i,:]
        for k in range(steps):
            y[i+1,:] = RK4Step(yprime, times[i]+k*dt, y[i+1,:], dt)
        y[i+1,:] = y[i+1,:] * np.exp(delta_t * 1j * kx**3)
    return y
#====================================================================
#----- Parameters for PDE -----
L = 100
n = 512
delta_t = 0.1
tmax = 20
c1 = 1.5 # amplitude of 1st wave
c2 = 0.5 # amplitude of 2nd wave
#----- Constructing the grid and kernel functions
x2 = np.linspace(-L/2,L/2, n+1)
x = x2[:n] # periodic B.C. #0 = #n
kx1 = np.linspace(0,n/2-1,n/2)
kx2 = np.linspace(1,n/2, n/2)
kx2 = -1*kx2[::-1]
kx = (2.* np.pi/L)*np.concatenate((kx1,kx2)); kx[0] = 1e-6
#----- Initial Condition -----
z1 = np.sqrt(c1)/2. * (x-0.1*L)
z2 = np.sqrt(c2)/2. * (x-0.4*L)
soliton = 6*(0.5 * c1 / (np.cosh(z1))**2 + 0.5 * c2/ (np.cosh(z2))**2)
uhat0 = np.fft.fft(soliton)
vhat0 = uhat0
#----- Define ODE -----
def wprime(t,vhat):
    g = -0.5 * 1j* kx * np.exp(-1j * kx**3 * t)
    return g * np.fft.fft(np.real(np.fft.ifft(np.exp(1j*kx**3*t)*vhat)**2))
#====================================================================
#----- Compute the numerical solution -----
TimeStart = 0.
TimeEnd = tmax+delta_t
TimeSpan = np.arange(TimeStart, TimeEnd, delta_t)
w_sol = RK4(wprime,TimeSpan, vhat0)
#----- Animate the numerical solution -----
fig = plt.figure()
ims = []
for i in TimeSpan:
    w = np.real(np.fft.ifft(w_sol[i,:]))
    im = plt.plot(x,w)
    ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=100, blit=False)
plt.show()
The RK4 part was from #LutzL.
In general it looks good. Better to use a FuncAnimation; it reduces the memory footprint and works:
#----- Animate the numerical solution -----
fig = plt.figure()
ax = plt.axes(ylim=(-5,5))
line, = ax.plot(x, soliton, '-')
NT = TimeSpan.size;
def animate(i):
    i = i % (NT + 10)
    if i<NT:
        w = np.real(np.fft.ifft(w_sol[i,:]))
        line.set_ydata(w)
    return line,
anim = animation.FuncAnimation(fig, animate, interval=50, blit=True)
plt.show()
However, the first graph remains standing, I do not know why.
