Generate a graph for the diffusion equation - python

I have code that solves the diffusion equation (concentration as a function of time and space):
∂²C/∂x² - ∂C/∂t = 0
I discretized it to the following form:
C[n+1,j] = C[n,j] + (dt/dx²)(C[n,j+1] - 2(C[n,j]) + C[n,j-1])
I am trying to generate the following graph; however, I haven't had much success. Is there anyone who could help me with this? Many thanks!
The graph that I obtain:
The code that I have to reproduce the diffusion equation:
import numpy as np
import matplotlib.pyplot as plt
dt = 0.001 # grid size for time (s)
dx = 0.05 # grid size for space (m)
x_max = 1 # in m
t_max = 1 # total time in s
C0 = 1 # concentration
# function to calculate concentration profiles based on a
# finite difference approximation to the 1D diffusion
# equation:
def diffusion(dt,dx,t_max,x_max,C0):
    # diffusion number:
    s = dt/dx**2
    x = np.arange(0,x_max+dx,dx)
    t = np.arange(0,t_max+dt,dt)
    r = len(t)
    a = len(x)
    C = np.zeros([r,a])   # initial condition
    C[:,0] = C0           # boundary condition on left side
    C[:,-1] = 0           # boundary condition on right side
    for n in range(0,r-1):       # time
        for j in range(1,a-1):   # space
            C[n+1,j] = C[n,j] + s*(C[n,j-1] - 2*C[n,j] + C[n,j+1])
    return x,C,r,a

# note that this can be written without the for-loop
# in space, but it is easier to read it this way
x,C,r,a = diffusion(dt,dx,t_max,x_max,C0)

# plotting:
plt.figure()
plt.xlim([0,1])
plt.ylim([0,1])
plot_times = np.arange(0,1,0.02)
for t in plot_times:
    plt.plot(x,C[int(t/dt),:],'Gray',label='numerical')
plt.xlabel('Membrane position x',fontsize=12)
plt.ylabel('Concentration',fontsize=12)
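As the comment in the code notes, the inner space loop can be vectorized. A minimal sketch of the equivalent update (same variable names as inside diffusion, shown only for illustration):
for n in range(0,r-1):   # time
    C[n+1,1:-1] = C[n,1:-1] + s*(C[n,:-2] - 2*C[n,1:-1] + C[n,2:])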

Related

Weird results obtained while solving a set of coupled differential equations (using a sparse array) in python

I have tried to no avail for a week to solve a system of coupled differential equations and reproduce the results shown in the attached image. I seem to be getting weird results, as also shown, and I don't know what I might be doing wrong. The set of coupled differential equations was originally solved using Newman's BAND. Here's a link to the python implementation: python solution using BAND. And another link to the original image of the problem in case the attached is not clear enough: here you find a clearer image of the problem. What I am trying to do now is to solve the same problem by creating a sparse array directly from the discretized equations, using a combination of sympy and numpy, and then solving with scipy's spsolve. My code is below. I need some help to figure out what I am doing wrong.
I have represented the variables as c1 = cA, c2 = cB, c3 = cC, c4 = cD in my code. Equation 2 has been linearized, and phi10 and phi20 are the trial values of the variables cC and cD.
# import modules
import numpy as np
import sympy
from sympy.core.function import _mexpand
import scipy as sp
import scipy.sparse as ss
import scipy.sparse.linalg as ssl
import matplotlib.pyplot as plt
# define functions
def flatten(t):
    """
    function to flatten lists
    """
    return [item for sublist in t for item in sublist]

def get_coeffs(coeff_dict, func_vars):
    """
    function to extract coefficients from variables
    and form the sparse symbolic array
    """
    c = coeff_dict
    for i in list(c.keys()):
        b, _ = i.as_base_exp()
        if b == i:
            continue
        if b in c:
            c[i] = 0
        if any(k.has(b) for k in c):
            c[i] = 0
    return [coeff_dict[val] for val in func_vars]
# Constants for the problem
I = 0.1 # A/cm2
L = 1.0 # distance (x) in cm
m = 100 # grid spacing
h = L / (m-1)
a = 23300 # 1/cm
io = 2e-7 # A/cm2
n = 1
F = 96500 # C/mol
R = 8.314 # J/mol-K
T = 298 # K
sigma = 20 # S/cm
kappa = 0.06 # S/cm
alpha = 0.5
beta = -(1-alpha)*n*F/R/T
phi10 , phi20 = 5, 0.5 # these are just guesses
P = a*io*np.exp(beta*(phi10-phi20))
j = sympy.symbols('j',integer = True)
cA = sympy.IndexedBase('cA')
cB = sympy.IndexedBase('cB')
cC = sympy.IndexedBase('cC')
cD = sympy.IndexedBase('cD')
# write the boundary conditions at x = 0
bc = [cA[1], cB[1],
      (4/3)*cC[2] - (1/3)*cC[3],   # use a three point approximation for cC_prime
      cD[1]
      ]
# form a list of expressions from the boundary conditions and equations
expr = flatten([bc, flatten([[
    -cA[j-1] - cB[j-1] + cA[j+1] + cB[j+1],
    cB[j-1] - 2*h*P*beta*cC[j] + 2*h*P*beta*cD[j] - cB[j+1],
    -sigma*cC[j-1] + 2*h*cA[j] + sigma*cC[j+1],
    -kappa*cD[j-1] + 2*h*cB[j] + kappa*cD[j+1]] for j in range(2, m)])])
vars = [cA[j], cB[j], cC[j], cD[j]]
# flatten the list of variables
unknowns = flatten([[cA[j], cB[j], cC[j], cD[j]] for j in range(1,m)])
var_len = len(unknowns)
# substitute in the boundary conditions at x = L while getting the coefficients
A = sympy.SparseMatrix([get_coeffs(_mexpand(i.subs({cA[m]: I})).as_coefficients_dict(),
                                   unknowns) for i in expr])
# convert to a numpy array
mat_temp = np.array(A).astype(np.float64)
# you can view the sparse array with this
fig = plt.figure(figsize=(6,6))
ax = fig.add_axes([0,0, 1,1])
cmap = plt.cm.binary
plt.spy(mat_temp, cmap = cmap, alpha = 0.8)
def solve_sparse(b0, error):
    # create the b column vector
    b = np.copy(b0)
    b[0:4] = np.array([0.0, I, 0.0, 0.0])
    b[var_len-4] = I
    b[var_len-3] = 0
    b[var_len-2] = 0
    b[var_len-1] = 0
    print(b.shape)
    old = np.copy(b0)
    mat = np.copy(mat_temp)
    b_2 = np.copy(b)
    resid = 10
    lss = 0
    while lss < 100:
        mat_2 = np.copy(mat)
        for j in range(3, var_len - 3, 4):
            # update the forcing term of equation 2
            b_2[j+2] = 2*h*(1 - beta*old[j+3] + beta*old[j+4])*a*io*np.exp(beta*(old[j+3] - old[j+4]))
            # update the sparse array at every iteration for variables cC and cD in equation 2
            mat_2[j+2, j+3] += 2*h*beta*a*io*np.exp(beta*(old[j+3] - old[j+4]))
            mat_2[j+2, j+4] += 2*h*beta*a*io*np.exp(beta*(old[j+3] - old[j+4]))
        # form the column sparse matrix
        A_s = ss.csc_matrix(mat_2)
        new = ssl.spsolve(A_s, b_2).flatten()
        resid = np.sum((new - old)**2)/var_len
        lss += 1
        old = np.copy(new)
    return new
val0 = np.array([[0.0, 0.0, 0.0, 0.0] for _ in range(m-1)]).flatten() # form an array of initial values
error = 1e-7
## Run the code
conc = solve_sparse(val0, error).reshape(m-1, len(vars))
conc.shape # gives (99, 4)
# Plot result for cA:
plt.plot(conc[:,0], marker = 'o', linestyle = '')
What happens seems pretty clear now, after having seen that the plotted solution indeed oscillates between the upper and lower values. You are using the central Euler (leapfrog) method as discretization; for u'=F(u) this reads as
u[j+1] - u[j-1] = 2*h*F(u[j])
This method is only weakly stable and allows the sub-sequences of odd and even indices to evolve rather independently. As an equation, this means the computed solution might approximate the system ue'=F(uo), uo'=F(ue), with independent functions ue, uo that follow the path of the even or odd sub-sequence.
These even and odd parts are only tied together by the treatment of the boundary points, two or three points deep. So avoiding or reducing the oscillation requires very careful handling of the boundary conditions and also of the differential equations for the boundary points.
But one can avoid all this unpleasantness by using the trapezoidal method
u[j+1]-u[j] = 0.5*h*(F(u[j+1])+F(u[j]))
This also reduces the band-width of the system matrix.
To implement the implied Newton method correctly (linearizing via Taylor expansion and solving the linearized equation is what the Newton-Kantorovich method does), you need to replace F(u[j]) with F(u_old[j]) + F'(u_old[j])*(u[j] - u_old[j]). This then gives a linear system of equations in u for the iteration step.
For the trapezoidal method this gives
(I-0.5*h*F'(u_old[j+1]))*u[j+1] - (I+0.5*h*F'(u_old[j]))*u[j]
= 0.5*h*(F(u_old[j+1])-F'(u_old[j+1])*u_old[j+1] + F(u_old[j])-F'(u_old[j])*u_old[j])
In general, the derivative values, and thus the system matrix, need not be updated in every iteration; only the function values must be (otherwise the iteration does not move forward).
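As an illustration only (not the poster's boundary-value system), here is a minimal sketch of the trapezoidal rule combined with this Newton-Kantorovich linearization for a scalar problem u' = F(u); F and its derivative are placeholder examples:
import numpy as np

def F(u):     # placeholder right-hand side (assumption)
    return -u**2

def dF(u):    # its derivative
    return -2*u

def trapezoidal_newton(u0, h, steps, newton_iters=5):
    u = np.empty(steps + 1)
    u[0] = u0
    for j in range(steps):
        u_new = u[j]                     # initial guess for u[j+1]
        for _ in range(newton_iters):
            # linearized step equation:
            # (1 - 0.5*h*F'(u_new)) * u[j+1]
            #     = u[j] + 0.5*h*(F(u[j]) + F(u_new) - F'(u_new)*u_new)
            lhs = 1.0 - 0.5*h*dF(u_new)
            rhs = u[j] + 0.5*h*(F(u[j]) + F(u_new) - dF(u_new)*u_new)
            u_new = rhs/lhs
        u[j+1] = u_new
    return u

u = trapezoidal_newton(u0=1.0, h=0.1, steps=50)   # exact solution is 1/(1+t)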

FTCS Solution of the Wave Equation - Issues with Vpython

I am attempting to make an animation of the motion of a piano string
using the facilities provided by the vpython package. There are
various ways you could do this, but my goal is to do it using
the curve object within the vpython package. Below is my code for the
initial problem of solving the complete set of simultaneous
first-order equations. Thanks in advance; I am really
uncertain as to where to start with the vpython animation.
# Key Module and Function Import(s):
import numpy as np
import math as m
import pylab as py
import matplotlib
from time import time
import scipy
# Variable(s) and Constant(s):
L = 1.0 # Length of string in m
C = 1.0 # velocity of the hammer strike in ms^-1
d = 0.1 # Hammer distance from 0 to point of impact with string
N = 100 # Number of divisions in grid
sigma = 0.3 # sigma value in meters
a = L/N # Grid spacing
v = 100.0 # Initial velocity of wave on the string
h = 1e-6 # Time-step
epsilon = h/1000
# Computation(s):
def initialpsi(x):
    return (C*x*(L-x)/(L**2))*m.exp((-(x-d)**2)/(2*sigma**2)) # Definition of the function
phibeg = 0.0 # Beginning - fixed point
phimiddle = 0.0 # Initial x
phiend = 0.0 # End fixed point
psibeg = 0.0 # Initial v at beg
psiend = 0.0 # Initial v at end
t2 = 2e-3 # string at 2ms
t50 = 50e-3 # string at 50ms
t100 = 100e-3 # string at 100ms
tend = t100 + epsilon
# Creation of empty array(s)
phi = np.empty(N+1,float)
phi[0] = phibeg
phi[N] = phiend
phi[1:N] = phimiddle
phip = np.empty(N+1,float)
phip[0] = phibeg
phip[N] = phiend
psi = np.empty(N+1,float)
psi[0] = psibeg
psi[N] = psiend
for i in range(1,N):
    psi[i] = initialpsi(i*a)
psip = np.empty(N+1,float)
psip[0] = psibeg
psip[N] = psiend
# Main loop
t = 0.0
D = h*v**2 / (a*a)
timestart = time()
while t < tend:
    # Calculate the new values of phi and psi
    for i in range(1,N):
        phip[i] = phi[i] + h*psi[i]
        psip[i] = psi[i] + D*(phi[i+1]+phi[i-1]-2*phi[i])
    phip[1:N] = phi[1:N] + h*psi[1:N]
    psip[1:N] = psi[1:N] + D*(phi[0:N-1] + phi[2:N+1] - 2*phi[1:N])
    phi = np.copy(phip)
    psi = np.copy(psip)
    #phi,phip = phip,phi
    #psi,psip = psip,psi
    t += h
    # Plot creation in step(s)
    if abs(t-t2) < epsilon:
        t2array = np.copy(phi)
        py.plot(phi, label = "2 ms")
    if abs(t-t50) < epsilon:
        t50array = np.copy(phi)
        py.plot(phi, label = "50 ms")
    if abs(t-t100) < epsilon:
        t100array = np.copy(phi)
        py.plot(phi, label = "100 ms")
See the curve documentation at
https://www.glowscript.org/docs/VPythonDocs/curve.html
Use the "modify" method to change the individual points along the curve object, inside a loop that contains a rate statement:
https://www.glowscript.org/docs/VPythonDocs/rate.html
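A hedged sketch of what that could look like, following the curve and rate documentation (the snapshot array U, the scale factor, and the frame rate are assumptions for illustration; the string profile phi would need to be saved at every step, e.g. into U, inside the while-loop above):
from vpython import canvas, curve, rate, vector, color

scene = canvas(title='Piano string')          # create a display window
string = curve(color=color.cyan)              # the curve object representing the string
for i in range(N+1):
    string.append(vector(i*a, 0, 0))          # start from the undisplaced string

for frame in range(len(U)):                   # U: saved phi snapshots (assumption)
    rate(30)                                  # at most 30 frames per second
    for i in range(N+1):
        string.modify(i, y=1000*U[frame][i])  # displacements scaled up for visibility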

Solve a second order ode using numpy [duplicate]

I am solving an ODE for a harmonic oscillator numerically with Python. When I add a driving force it makes no difference, so I'm guessing something is wrong with the code. Can anyone see the problem? The (h/m)*f0*np.cos(wd*i) part is the driving force.
import numpy as np
import matplotlib.pyplot as plt
# This code solves the ODE mx'' + bx' + kx = F0*cos(Wd*t)
# m is the mass of the object in kg, b is the damping constant in Ns/m
# k is the spring constant in N/m, F0 is the driving force in N,
# Wd is the frequency of the driving force and x is the position
# Setting up
timeFinal= 16.0 # This is how far the graph will go in seconds
steps = 10000 # Number of steps
dT = timeFinal/steps # Step length
time = np.linspace(0, timeFinal, steps+1)
# Creates an array with steps+1 values from 0 to timeFinal
# Allocating arrays for velocity and position
vel = np.zeros(steps+1)
pos = np.zeros(steps+1)
# Setting constants and initial values for vel. and pos.
k = 0.1
m = 0.01
vel0 = 0.05
pos0 = 0.01
freqNatural = 10.0**0.5
b = 0.0
F0 = 0.01
Wd = 7.0
vel[0] = vel0 #Sets the initial velocity
pos[0] = pos0 #Sets the initial position
# Numerical solution using Euler's
# Splitting the ODE into two first order ones
# v'(t) = -(k/m)*x(t) - (b/m)*v(t) + (F0/m)*cos(Wd*t)
# x'(t) = v(t)
# Using the definition of the derivative we get
# (v(t+dT) - v(t))/dT on the left side of the first equation
# (x(t+dT) - x(t))/dT on the left side of the second
# In the for loop t and dT will be replaced by i and 1
for i in range(0, steps):
    vel[i+1] = (-k/m)*dT*pos[i] + vel[i]*(1-dT*b/m) + (dT/m)*F0*np.cos(Wd*i)
    pos[i+1] = dT*vel[i] + pos[i]
# Plotting
#----------------
# With no damping
plt.plot(time, pos, 'g-', label='Undampened')
# Damping set to 10% of critical damping
b = (freqNatural/50)*0.1
# Using Euler's again to compute new values for new damping
for i in range(0, steps):
    vel[i+1] = (-k/m)*dT*pos[i] + vel[i]*(1-(dT*(b/m))) + (F0*dT/m)*np.cos(Wd*i)
    pos[i+1] = dT*vel[i] + pos[i]
plt.plot(time, pos, 'b-', label = '10% of crit. damping')
plt.plot(time, 0*time, 'k-') # This plots the x-axis
plt.legend(loc = 'upper right')
#---------------
plt.show()
The problem here is with the term np.cos(Wd*i). It should be np.cos(Wd*i*dT); note that dT has to appear in the argument, since t = i*dT.
If this correction is made, the simulation looks reasonable. Here's a version with F0=0.001. Note that the driving force is clear in the continued oscillations in the damped condition.
The problem with the original equation is that np.cos(Wd*i) just jumps randomly around the circle, rather than smoothly moving around the circle, causing no net effect in the end. This can be best seen by plotting it directly, but the easiest thing to do is run the original form with F0 very large. Below is F0 = 10 (ie, 10000x the value used in the correct equation), but using the incorrect form of the equation, and it's clear that the driving force here just adds noise as it randomly moves around the circle.
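For reference, the corrected loop is simply (same variables as in the question; only the cosine argument changes):
for i in range(0, steps):
    t_i = i*dT   # the actual time at step i
    vel[i+1] = (-k/m)*dT*pos[i] + vel[i]*(1-dT*b/m) + (dT/m)*F0*np.cos(Wd*t_i)
    pos[i+1] = dT*vel[i] + pos[i]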
Note that your ODE is well behaved and has an analytical solution. So you could utilize sympy for an alternate approach:
import sympy as sy
sy.init_printing() # Pretty printer for IPython
t,k,m,b,F0,Wd = sy.symbols('t,k,m,b,F0,Wd', real=True) # constants
consts = {k: 0.1,   # values
          m: 0.01,
          b: 0.0,
          F0: 0.01,
          Wd: 7.0}
x = sy.Function('x')(t) # declare variables
dx = sy.Derivative(x, t)
d2x = sy.Derivative(x, t, 2)
# the ODE:
ode1 = sy.Eq(m*d2x + b*dx + k*x, F0*sy.cos(Wd*t))
sl1 = sy.dsolve(ode1, x) # solve ODE
xs1 = sy.simplify(sl1.subs(consts)).rhs # substitute constants
# Examining the solution, we note C3 and C4 are superfluous
xs2 = xs1.subs({'C3':0, 'C4':0})
dxs2 = xs2.diff(t)
print("Solution x(t) = ")
print(xs2)
print("Solution x'(t) = ")
print(dxs2)
gives
Solution x(t) =
C1*sin(3.16227766016838*t) + C2*cos(3.16227766016838*t) - 0.0256410256410256*cos(7.0*t)
Solution x'(t) =
3.16227766016838*C1*cos(3.16227766016838*t) - 3.16227766016838*C2*sin(3.16227766016838*t) + 0.179487179487179*sin(7.0*t)
The constants C1,C2 can be determined by evaluating x(0),x'(0) for the initial conditions.
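That last step could be carried out in the same sympy session; a possible sketch (assuming the free constants come out named C1 and C2, as in the printed solution, and using the initial values from the numerical code):
C1, C2 = sy.symbols('C1 C2')
constants = sy.solve([sy.Eq(xs2.subs(t, 0), 0.01),    # x(0) = pos0
                      sy.Eq(dxs2.subs(t, 0), 0.05)],  # x'(0) = vel0
                     [C1, C2])
x_exact = xs2.subs(constants)
print(x_exact)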

Programming of 4th order Runge-Kutta in advection equation in python

%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import pi
# wave speed
c = 1
# spatial domain
xmin = 0
xmax = 1
#time domain
m=500; # num of time steps
tmin=0
T = tmin + np.arange(m+1);
tmax=500
n = 50 # num of grid points
# x grid of n points
X, dx = np.linspace(xmin, xmax, n+1, retstep=True);
X = X[:-1] # remove last point, as u(x=1,t)=u(x=0,t)
# for CFL of 0.1
CFL = 0.3
dt = CFL*dx/c
# initial conditions
def initial_u(x):
    return np.sin(2*pi*x)
# each value of the U array contains the solution for all x values at each timestep
U = np.zeros((m+1,n),dtype=float)
U[0] = u = initial_u(X);
def derivatives(t,u,c,dx):
    uvals = []   # u values for this time step
    for j in range(len(X)):
        if j == 0:        # left boundary
            uvals.append((-c/(2*dx))*(u[j+1]-u[n-1]))
        elif j == n-1:    # right boundary
            uvals.append((-c/(2*dx))*(u[0]-u[j-1]))
        else:
            uvals.append((-c/(2*dx))*(u[j+1]-u[j-1]))
    return np.asarray(uvals)

# solve for 500 time steps
for k in range(m):
    t = T[k];
    k1 = derivatives(t,u,c,dx)*dt;
    k2 = derivatives(t+0.5*dt,u+0.5*k1,c,dx)*dt;
    k3 = derivatives(t+0.5*dt,u+0.5*k2,c,dx)*dt;
    k4 = derivatives(t+dt,u+k3,c,dx)*dt;
    U[k+1] = u = u + (k1+2*k2+2*k3+k4)/6;
# plot solution
plt.style.use('dark_background')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
line, = ax1.plot(X,U[0],color='cyan')
ax1.grid(True)
ax1.set_ylim([-2,2])
ax1.set_xlim([0,1])
def animate(i):
    line.set_ydata(U[i])
    return line,
I want to program in Python an advection equation, (∂u/∂t) + c(∂u/∂x) = 0. Time should be discretized with fourth-order Runge-Kutta; the spatial discretization is a second-order finite difference. When I run my code, I get a straight line which transforms into a sine wave, even though I gave a sine wave as the initial condition. Why does it start as a straight line? Also, I want the sine wave to move forward. Do you have any idea how to get the sine wave moving forward? I appreciate your help. Thanks in advance!
While your computation steps are superficially related to the RK4 method, they deviate from the RK4 method and from a correct space discretization in too many places to list them all.
The traditional way to apply ODE integration methods is to have a function derivatives(t, state, params) and then apply that to compute the Euler step or the RK4 step. In your case it would be
def derivatives(t,u,c,dx):
    du = np.zeros(len(u));
    p = -c/(2*dx);   # minus sign so that du approximates -c*u_x, as in the question's derivatives
    du[0] = p*(u[1]-u[-1]);
    du[1:-1] = p*(u[2:]-u[:-2]);
    du[-1] = p*(u[0]-u[-2]);
    return du;
Then you can do
X, dx = np.linspace(xmin, xmax, n+1, retstep=True);
X = X[:-1] # remove last point, as u(x=1,t)=u(x=0,t)
m=500; # number of time steps
T = tmin + dt*np.arange(m+1);   # time grid with step dt
U = np.zeros((m+1,n),dtype=float)
U[0] = u = initial_u(X);
for k in range(m):
    t = T[k];
    k1 = derivatives(t,u,c,dx)*dt;
    k2 = derivatives(t+0.5*dt,u+0.5*k1,c,dx)*dt;
    k3 = derivatives(t+0.5*dt,u+0.5*k2,c,dx)*dt;
    k4 = derivatives(t+dt,u+k3,c,dx)*dt;
    U[k+1] = u = u + (k1+2*k2+2*k3+k4)/6;
This uses the dt computed from the CFL condition as the primary variable in the time stepping and builds the time grid as the arithmetic sequence starting at tmin with step dt. Other ways are possible, but then one has to make tmax and the number of time steps compatible.
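For example, one way to keep them compatible (a sketch, assuming tmax is meant as the final time rather than the number of steps):
dt = CFL*dx/c                      # time step from the CFL condition
m = int(round((tmax - tmin)/dt))   # number of steps that fits the interval
T = tmin + dt*np.arange(m+1)       # time grid with step dt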
The computation up to this point should now be successful and can be used in the animation. In my understanding, you do not produce a new plot in each frame; you only draw the graph once and after that just change the line data:
# animate the time data
line, = ax1.plot(X,U[0],color='cyan')
ax1.grid(True)
ax1.set_ylim([-2,2])
ax1.set_xlim([0,1])
def animate(i):
    line.set_ydata(U[i])
    return line,
etc.
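To actually drive animate, one still needs an animation object; for example (a sketch using matplotlib.animation.FuncAnimation, which the question already imports as animation):
ani = animation.FuncAnimation(fig, animate, frames=m+1, interval=30, blit=True)
plt.show()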

Find time shift of two signals using cross correlation

I have two signals which are related to each other and have been captured by two different measurement devices simultaneously.
Since the two measurements are not time synchronized there is a small time delay between them which I want to calculate. Additionally, I need to know which signal is the leading one.
The following can be assumed:
little or no noise is present
speed of the algorithm is not an issue, only accuracy and robustness
signals are captured with a high sampling rate (>10 kHz) for several seconds
expected time delay is < 0.5s
I thought of using cross-correlation for that purpose.
Any suggestions on how to implement that in Python are very much appreciated.
Please let me know if I should provide more information in order to find the most suitable algorithm.
A popular approach: timeshift is the lag corresponding to the maximum cross-correlation coefficient. Here is how it works with an example:
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
def lag_finder(y1, y2, sr):
    n = len(y1)
    corr = signal.correlate(y2, y1, mode='same') / np.sqrt(
        signal.correlate(y1, y1, mode='same')[int(n/2)] * signal.correlate(y2, y2, mode='same')[int(n/2)])
    delay_arr = np.linspace(-0.5*n/sr, 0.5*n/sr, n)
    delay = delay_arr[np.argmax(corr)]
    print('y2 is ' + str(delay) + ' behind y1')

    plt.figure()
    plt.plot(delay_arr, corr)
    plt.title('Lag: ' + str(np.round(delay, 3)) + ' s')
    plt.xlabel('Lag')
    plt.ylabel('Correlation coeff')
    plt.show()
# Sine sample with some noise and copy to y1 and y2 with a 1-second lag
sr = 1024
y = np.linspace(0, 2*np.pi, sr)
y = np.tile(np.sin(y), 5)
y += np.random.normal(0, 5, y.shape)
y1 = y[sr:4*sr]
y2 = y[:3*sr]
lag_finder(y1, y2, sr)
In the case of noisy signals, it is common to apply band-pass filters first. In the case of harmonic noise, they can be removed by identifying and removing frequency spikes present in the frequency spectrum.
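For instance, a band-pass pre-filter could look like this (a sketch; the Butterworth order and cut-off frequencies are placeholders to be chosen around the band that actually carries your signal):
sos = signal.butter(4, [0.5, 20.0], btype='bandpass', fs=sr, output='sos')
y1_filtered = signal.sosfiltfilt(sos, y1)
y2_filtered = signal.sosfiltfilt(sos, y2)
lag_finder(y1_filtered, y2_filtered, sr)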
NumPy has a function, numpy.correlate, which suits your needs: https://docs.scipy.org/doc/numpy/reference/generated/numpy.correlate.html
To complement Reveille's answer above (I reproduce his algorithm), I would like to point out some ideas for preprocessing the input signals.
Since there seems to be no preprocessing that fits all cases (it depends on duration in periods, resolution, offset, noise, signal type, ...), you may have to experiment with it.
In my example the application of a window function improves the detected phase shift (within resolution of the discretization).
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
r2d = 180.0/np.pi # conversion factor RAD-to-DEG
delta_phi_true = 50.0/r2d
def detect_phase_shift(t, x, y):
    '''detect phase shift between two signals from cross correlation maximum'''
    N = len(t)
    L = t[-1] - t[0]

    cc = signal.correlate(x, y, mode="same")
    i_max = np.argmax(cc)
    phi_shift = np.linspace(-0.5*L, 0.5*L, N)
    delta_phi = phi_shift[i_max]

    print("true delta phi = {} DEG".format(delta_phi_true*r2d))
    print("detected delta phi = {} DEG".format(delta_phi*r2d))
    print("error = {} DEG   resolution for comparison dphi = {} DEG".format((delta_phi-delta_phi_true)*r2d, dphi*r2d))
    print("ratio = {}".format(delta_phi/delta_phi_true))
    return delta_phi
L = np.pi*10+2 # interval length [RAD], for generality not multiple period
N = 1001 # interval division, odd number is better (center is integer)
noise_intensity = 0.0
X = 0.5 # amplitude of first signal..
Y = 2.0 # ..and second signal
phi = np.linspace(0, L, N)
dphi = phi[1] - phi[0]
'''generate signals'''
nx = noise_intensity*np.random.randn(N)*np.sqrt(dphi)
ny = noise_intensity*np.random.randn(N)*np.sqrt(dphi)
x_raw = X*np.sin(phi) + nx
y_raw = Y*np.sin(phi+delta_phi_true) + ny
'''preprocessing signals'''
x = x_raw.copy()
y = y_raw.copy()
window = signal.windows.hann(N) # Hanning window
#x -= np.mean(x) # zero mean
#y -= np.mean(y) # zero mean
#x /= np.std(x) # scale
#y /= np.std(y) # scale
x *= window # reduce effect of finite length
y *= window # reduce effect of finite length
print(" -- using raw data -- ")
delta_phi_raw = detect_phase_shift(phi, x_raw, y_raw)
print(" -- using preprocessed data -- ")
delta_phi_preprocessed = detect_phase_shift(phi, x, y)
Without noise (to be deterministic) the output is
-- using raw data --
true delta phi = 50.0 DEG
detected delta phi = 47.864788975654 DEG
...
-- using preprocessed data --
true delta phi = 50.0 DEG
detected delta phi = 49.77938053468019 DEG
...
SciPy has a useful function for this, scipy.signal.correlation_lags, which uses the underlying correlate function mentioned in other answers to find the time lag. The example displayed at the bottom of its documentation page is useful:
import numpy as np
from scipy import signal
from numpy.random import default_rng
rng = default_rng()
x = rng.standard_normal(1000)
y = np.concatenate([rng.standard_normal(100), x])
correlation = signal.correlate(x, y, mode="full")
lags = signal.correlation_lags(x.size, y.size, mode="full")
lag = lags[np.argmax(correlation)]
Then lag would be -100
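To express that lag as a time delay, divide by the sampling rate (a sketch; fs is a placeholder for your actual rate):
fs = 10_000             # hypothetical sampling rate in Hz
time_shift = lag / fs   # in seconds; here lag = -100 because y is x delayed by 100 samples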
