The model I'm working on has a neuron (modeled by the Hodgkin-Huxley equations), and the neuron receives a bunch of synaptic inputs from other neurons because it sits in a network. The standard way to model the inputs is with a spike train made up of a bunch of delta-function pulses that arrive at a specified rate, as a Poisson process. Some of the pulses excite the neuron and some inhibit it. So the synaptic current should look like this:

$$I_{syn}(t) = C_m G_{ex}\left[\sum_{k=1}^{N_e}\sum_{l} h_k^l\,\delta(t - t_k^l) \;-\; K\sum_{m=1}^{N_i}\sum_{n} h_m^n\,\delta(t - t_m^n)\right]$$
Here, $N_e$ is the number of excitatory neurons, $N_i$ the number of inhibitory ones, the $h$'s are either 0 or 1 (1 with probability $p$), representing whether or not a spike was successfully transmitted, and $t_k^l$ in the delta function is the discharge time of the $l$th spike of the $k$th neuron (likewise for $t_m^n$). The basic idea behind how we tried coding this: suppose first that I have 100 neurons providing pulses into my HH neuron (80 of which are excitatory, 20 of which are inhibitory). We form an array where one column enumerates the neurons (so that neurons #0-79 are excitatory and #80-99 are inhibitory). We then check whether there is a spike in each time interval; if there is, we draw a random number between 0 and 1, and if it's below my specified probability $p$ we assign it the number 1, otherwise 0. We then plot the voltage as a function of time to see when the neuron spikes.
I think the code works, BUT the problem is that as soon as I add more neurons to the network (one paper claimed they used 5000 total neurons), it takes forever to run, which is infeasible for numerical simulations. My question is: is there a better way to simulate a spike train pulsing into a neuron, so that the computation is substantially faster for a large number of neurons in the network? Here is the code we tried (it's a little long because the HH equations are quite detailed):
import numpy as np
import matplotlib.pyplot as plt

# Constants
C_m  = 1.0     # membrane capacitance, in uF/cm^2
g_Na = 120.0   # sodium (Na) maximum conductance, in mS/cm^2
g_K  = 36.0    # potassium (K) maximum conductance, in mS/cm^2
g_L  = 0.3    # leak maximum conductance, in mS/cm^2
E_Na = 50.0    # sodium (Na) Nernst reversal potential, in mV
E_K  = -77.0   # potassium (K) Nernst reversal potential, in mV
E_L  = -54.387 # leak Nernst reversal potential, in mV
def poisson_spikes(t, N=100, rate=1.0):
    spks = []
    dt = t[1] - t[0]
    for n in range(N):
        spkt = t[np.random.rand(len(t)) < rate*dt/1000.]  # determine list of spike times
        idx = [n]*len(spkt)                               # neuron ID vector, same length as spkt
        spkn = np.concatenate([[idx], [spkt]], axis=0).T  # combine the two lists
        if len(spkn) > 0:
            spks.append(spkn)
    spks = np.concatenate(spks, axis=0)
    return spks
N = 100
N_ex = 80   # excitatory neurons (0..79)
N_in = 20   # inhibitory neurons (80..99)
G_ex = 1.0
K = 4
dt = 0.01
t = np.arange(0.0, 300.0, dt)  # the time to integrate over
ic = [-65, 0.05, 0.6, 0.32]    # initial conditions [V, m, h, n]
spks = poisson_spikes(t, N, rate=10.)
def alpha_m(V):
    return 0.1*(V+40.0)/(1.0 - np.exp(-(V+40.0) / 10.0))

def beta_m(V):
    return 4.0*np.exp(-(V+65.0) / 18.0)

def alpha_h(V):
    return 0.07*np.exp(-(V+65.0) / 20.0)

def beta_h(V):
    return 1.0/(1.0 + np.exp(-(V+35.0) / 10.0))

def alpha_n(V):
    return 0.01*(V+55.0)/(1.0 - np.exp(-(V+55.0) / 10.0))

def beta_n(V):
    return 0.125*np.exp(-(V+65) / 80.0)

def I_Na(V, m, h):
    return g_Na * m**3 * h * (V - E_Na)

def I_K(V, n):
    return g_K * n**4 * (V - E_K)

def I_L(V):
    return g_L * (V - E_L)

def I_app(t):
    return 3
def I_syn(spks, t):
    """
    Synaptic current
    spks = [[synid, t],]
    """
    exspk = spks[spks[:,0] < N_ex]   # Check for all excitatory spikes
    delta_k = exspk[:,1] == t        # Delta function
    if sum(delta_k) > 0:
        h_k = np.random.rand(len(delta_k)) < 0.5  # p = 0.5
    else:
        h_k = 0
    inspk = spks[spks[:,0] >= N_ex]  # Check remaining neurons for inhibitory spikes
    delta_m = inspk[:,1] == t        # Delta function for inhibitory neurons
    if sum(delta_m) > 0:
        h_m = np.random.rand(len(delta_m)) < 0.5  # p = 0.5
    else:
        h_m = 0
    isyn = C_m*G_ex*(np.sum(h_k*delta_k) - K*np.sum(h_m*delta_m))
    return isyn
def dALLdt(X, t):
    V, m, h, n = X
    dVdt = (I_app(t) + I_syn(spks, t) - I_Na(V, m, h) - I_K(V, n) - I_L(V)) / C_m
    dmdt = alpha_m(V)*(1.0-m) - beta_m(V)*m
    dhdt = alpha_h(V)*(1.0-h) - beta_h(V)*h
    dndt = alpha_n(V)*(1.0-n) - beta_n(V)*n
    return np.array([dVdt, dmdt, dhdt, dndt])
# Forward Euler integration
X = [ic]
for i in t[1:]:
    dx = dALLdt(X[-1], i)
    x = X[-1] + dt*dx
    X.append(x)
X = np.array(X)
V = X[:,0]
m = X[:,1]
h = X[:,2]
n = X[:,3]
ina = I_Na(V, m, h)
ik = I_K(V, n)
il = I_L(V)
plt.figure()
plt.subplot(3,1,1)
plt.title('Hodgkin-Huxley Neuron')
plt.plot(t, V, 'k')
plt.ylabel('V (mV)')
plt.subplot(3,1,2)
plt.plot(t, ina, 'c', label='$I_{Na}$')
plt.plot(t, ik, 'y', label='$I_{K}$')
plt.plot(t, il, 'm', label='$I_{L}$')
plt.ylabel('Current')
plt.legend()
plt.subplot(3,1,3)
plt.plot(t, m, 'r', label='m')
plt.plot(t, h, 'g', label='h')
plt.plot(t, n, 'b', label='n')
plt.ylabel('Gating Value')
plt.legend()
plt.show()
I'm not familiar with other packages designed specifically for neural networks, but I wanted to write my own, mainly because I plan to do stochastic analysis, which requires quite a bit of mathematical detail, and I don't know whether those packages provide that level of detail.
Profiling shows that most of your time is being spent in these two lines:
if sum(delta_k) > 0:
and
if sum(delta_m) > 0:
Changing each of these to:
if np.any(...)
speeds everything up by a factor of 10. Take a look at kernprof if you'd like to do more line-by-line profiling:
https://github.com/rkern/line_profiler
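For concreteness, a minimal sketch of the change, using the same variables as in the question's I_syn:

if np.any(delta_k):  # C-level reduction instead of Python's built-in sum iterating element by element
    h_k = np.random.rand(len(delta_k)) < 0.5  # p = 0.5
else:
    h_k = 0

(and likewise for delta_m).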
To complement welch's answer, you can use scipy.integrate.odeint to accelerate the integration: replacing
X = [ic]
for i in t[1:]:
    dx = dALLdt(X[-1], i)
    x = X[-1] + dt*dx
    X.append(x)
by
from scipy.integrate import odeint
X = odeint(dALLdt, ic, t)
speeds the calculation up by more than a factor of 10 on my computer.
If you have an NVIDIA graphics card, you can use Numba (or the former NumbaPro) to accelerate your Python code and reach real-time simulation of 4,000 neurons with 128 presynaptic neurons each.
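As a hedged starting point, a minimal CPU-only sketch (it assumes Numba is installed and simply JIT-compiles one of the rate functions; the CUDA target is a further step beyond this):

from numba import njit
import numpy as np

@njit
def alpha_m(V):
    # same expression as in the question, compiled to machine code on first call
    return 0.1*(V + 40.0)/(1.0 - np.exp(-(V + 40.0)/10.0))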
I'm attempting to implement a Gaussian smoothing/flattening function in my Python 3.10 script to flatten a set of XY-points. For each data point, I'm creating a Y buffer and a Gaussian kernel, which I use to flatten each one of the Y-points based on its neighbours.
Here are some sources on the Gaussian-smoothing method:
Source 1
Source 2
I'm using the NumPy module for my data arrays, and Matplotlib to plot the data.
I wrote a minimal reproducible example, with some randomly-generated data, and each one of the arguments needed for the Gaussian function listed at the top of the main function:
import numpy as np
import matplotlib.pyplot as plt
import time
def main():
    dataSize = 1000
    yDataRange = [-4, 4]
    reachPercentage = 0.1
    sigma = 10
    phi = 0
    amplitude = 1
    testXData = np.arange(stop = dataSize)
    testYData = np.random.uniform(low = yDataRange[0], high = yDataRange[1], size = dataSize)
    print("Flattening...")
    startTime = time.time()
    flattenedYData = GaussianFlattenData(testXData, testYData, reachPercentage, sigma, phi, amplitude)
    totalTime = round(time.time() - startTime, 2)
    print("Flattened! (" + str(totalTime) + " sec)")
    plt.title(str(totalTime) + " sec")
    plt.plot(testXData, testYData, label = "Original Data")
    plt.plot(testXData, flattenedYData, label = "Flattened Data")
    plt.legend()
    plt.show()
    plt.close()
def GaussianFlattenData(xData, yData, reachPercentage, sigma, phi, amplitude):
    flattenedYData = np.empty(shape = len(xData), dtype = float)
    # For each data point, create a Y buffer and a Gaussian kernel, and flatten it based on its neighbours
    for i in range(len(xData)):
        gaussianCenter = xData[i]
        baseReachEdges = GetGaussianValueX((GetGaussianValueY(0, 0, sigma, phi, amplitude) * reachPercentage), 0, sigma, phi, amplitude)
        reachEdgeIndices = [FindInArray(xData, GetClosestNum((gaussianCenter + baseReachEdges[0]), xData)),
                            FindInArray(xData, GetClosestNum((gaussianCenter + baseReachEdges[1]), xData))]
        currDataScanNum = reachEdgeIndices[0] - reachEdgeIndices[1]
        # Creating Y buffer and Gaussian kernel...
        currYPoints = np.empty(shape = currDataScanNum, dtype = float)
        kernel = np.empty(shape = currDataScanNum, dtype = float)
        for j in range(currDataScanNum):
            currYPoints[j] = yData[j + reachEdgeIndices[1]]
            kernel[j] = GetGaussianValueY(j, (i - reachEdgeIndices[1]), sigma, phi, amplitude)
        # Dividing kernel by its sum...
        kernelSum = np.sum(kernel)
        for j in range(len(kernel)):
            kernel[j] = (kernel[j] / kernelSum)
        # Acquiring the current flattened Y point...
        newCurrYPoints = np.empty(shape = len(currYPoints), dtype = float)
        for j in range(len(currYPoints)):
            newCurrYPoints[j] = currYPoints[j] * kernel[j]
        flattenedYData[i] = np.sum(newCurrYPoints)
    return flattenedYData
def GetGaussianValueX(y, mu, sigma, phi, amplitude):
    x = ((sigma * np.sqrt(-2 * np.log(y / (amplitude * np.cos(phi))))) + mu)
    return [x, (mu - (x - mu))]

def GetGaussianValueY(x, mu, sigma, phi, amplitude):
    y = ((amplitude * np.cos(phi)) * np.exp(-np.power(((x - mu) / sigma), 2) / 2))
    return y

def GetClosestNum(base, nums):
    closestIdx = 0
    closestDiff = np.abs(base - nums[0])
    idx = 1
    while (idx < len(nums)):
        currDiff = np.abs(base - nums[idx])
        if (currDiff < closestDiff):
            closestDiff = currDiff
            closestIdx = idx
        idx += 1
    return nums[closestIdx]

def FindInArray(arr, value):
    for i in range(len(arr)):
        if (arr[i] == value):
            return i
    return -1
if (__name__ == "__main__"):
    main()
In the example above, I generate 1,000 random data points in the range -4 to 4. The reachPercentage variable is the percentage of the Gaussian amplitude above which the Gaussian values will be inserted into the kernel. The sigma, phi and amplitude variables are all inputs to the Gaussian function that generates the Gaussians for each Y-data point to be smoothed.
I wrote some additional utility functions which I needed as well.
The script above works to smooth the generated data, and I get the following plot:
Blue being the original data, and Orange being the flattened data.
However, it takes a surprisingly long time to smooth even small amounts of data. In the example above I generated 1,000 data points, and it takes ~8 seconds to flatten them. With datasets exceeding 10,000 points, it can easily take over 10 minutes.
Since this is a very popular and well-known way of smoothing data, I was wondering why this script runs so slowly. I originally implemented this with standard Python lists and append, but it was extremely slow. I hoped that using NumPy arrays instead, without calling append, would make it faster, but that is not really the case.
Is there a way to speed up this process? Is there an existing Gaussian-smoothing function out there that takes the same arguments and could do the job faster?
Thanks for reading my post, any guidance is appreciated.
You have a number of loops - those tend to slow you down.
Here are two examples. Refactoring GetClosestNum to this:
def GetClosestNum(base, nums):
    nums = np.array(nums)
    diffs = np.abs(nums - base)
    return nums[np.argmin(diffs)]
and refactoring FindInArray to this:
def FindInArray(arr, value):
    res = np.where(np.array(arr) - value == 0)[0]
    if res.size > 0:
        return res[0]
    else:
        return -1
lets me process 5000 datapoints in 1.5s instead of the 54s it took with your original code.
NumPy lets you do a lot of powerful stuff without looping - Jake Vanderplas has a few really good (oldie but goodie) videos on using NumPy constructs in place of loops to massively increase speed - https://www.youtube.com/watch?v=EEUXKG97YRw.
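As an aside, for uniformly spaced x-data like yours, SciPy already ships a Gaussian smoother; a minimal sketch (note this uses a standard truncated-Gaussian kernel, so it won't reproduce your reachPercentage cutoff exactly):

from scipy.ndimage import gaussian_filter1d

# smooths the whole array in one vectorized call
flattenedYData = gaussian_filter1d(testYData, sigma = 10)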
After asking people on the Python forums, as well as doing some more searching online, I managed to find much faster alternatives to most of the functions I had in my loop.
In order to get a better picture of which parts of the smoothing function took the most time, I subdivided the code into 4 parts and timed each one to see how much it contributed to the total runtime. To my surprise, the part that took up over 90% of the time was the first part of the loop:
gaussianCenter = xData[i]
baseReachEdges = GetGaussianValueX((GetGaussianValueY(0, 0, sigma, phi, amplitude) * reachPercentage), 0, sigma, phi, amplitude)
reachEdgeIndices = [FindInArray(xData, GetClosestNum((gaussianCenter + baseReachEdges[0]), xData)),
                    FindInArray(xData, GetClosestNum((gaussianCenter + baseReachEdges[1]), xData))]
currDataScanNum = reachEdgeIndices[0] - reachEdgeIndices[1]
Luckily, the people on the Python forums and here were able to assist me: I found a much faster alternative GetClosestNum function (thanks Vin) and removed the FindInArray function entirely. There are also replacements in the latter parts of the loop, where the three for loops were all replaced by NumPy vectorized expressions.
The whole script now looks like this:
import numpy as np
import matplotlib.pyplot as plt
import time
def main():
    dataSize = 3073
    yDataRange = [-4, 4]
    reachPercentage = 0.001
    sigma = 100
    phi = 0
    amplitude = 1
    testXData = np.arange(stop = dataSize)
    testYData = np.random.uniform(low = yDataRange[0], high = yDataRange[1], size = dataSize)
    print("Flattening...")
    startTime = time.time()
    flattenedYData = GaussianFlattenData(testXData, testYData, reachPercentage, sigma, phi, amplitude)
    totalTime = round(time.time() - startTime, 2)
    print("Flattened! (" + str(totalTime) + " sec)")
    plt.title(str(totalTime) + " sec")
    plt.plot(testXData, testYData, label = "Original Data")
    plt.plot(testXData, flattenedYData, label = "Flattened Data")
    plt.legend()
    plt.show()
    plt.close()
def GaussianFlattenData(xData, yData, reachPercentage, sigma, phi, amplitude):
    flattenedYData = np.empty(shape = len(xData), dtype = float)
    # For each data point, create a Y buffer and a Gaussian kernel, and flatten it based on its neighbours
    for i in range(len(xData)):
        gaussianCenter = xData[i]
        baseReachEdges = GetGaussianValueX((GetGaussianValueY(0, 0, sigma, phi, amplitude) * reachPercentage), 0, sigma, phi, amplitude)
        reachEdgeIndices = [np.where(xData == GetClosestNum((gaussianCenter + baseReachEdges[0]), xData))[0][0],
                            np.where(xData == GetClosestNum((gaussianCenter + baseReachEdges[1]), xData))[0][0]]
        currDataScanNum = reachEdgeIndices[0] - reachEdgeIndices[1]
        # Creating Y buffer and Gaussian kernel...
        currYPoints = yData[reachEdgeIndices[1] : reachEdgeIndices[1] + currDataScanNum]
        kernel = GetGaussianValueY(np.arange(currDataScanNum), (i - reachEdgeIndices[1]), sigma, phi, amplitude)
        # Acquiring the current flattened Y point...
        flattenedYData[i] = np.sum(currYPoints * (kernel / np.sum(kernel)))
    return flattenedYData
def GetGaussianValueX(y, mu, sigma, phi, amplitude):
    x = ((sigma * np.sqrt(-2 * np.log(y / (amplitude * np.cos(phi))))) + mu)
    return [x, (mu - (x - mu))]

def GetGaussianValueY(x, mu, sigma, phi, amplitude):
    y = ((amplitude * np.cos(phi)) * np.exp(-np.power(((x - mu) / sigma), 2) / 2))
    return y

def GetClosestNum(base, nums):
    nums = np.asarray(nums)
    return nums[(np.abs(nums - base)).argmin()]
if (__name__ == "__main__"):
    main()
Instead of taking ~8 seconds to process the 1,000 data points, it now takes merely ~0.15 seconds!
It also takes ~1.75 seconds to process the 10,000 points.
Thanks for the feedback everyone, cheers!
I am trying to use SciPy's curve_fit function to fit a Gaussian function to my data to estimate a theoretical power spectral density. While doing so, the curve_fit function always returns the initial parameters (p0=[1,1,1]), telling me that the fitting didn't work.
I don't know where the issue is. I am using Python 3.9 (Spyder 5.1.5) from the Anaconda distribution on Windows 11.
Here is a WeTransfer link to the data file:
https://wetransfer.com/downloads/6097ebe81ee0c29ee95a497128c1c2e420220704110130/86bf2d
Here is my code below. Can someone tell me what the issue is and how I can solve it?
In the plot, the blue curve is my experimental PSD and the orange one is the result of the fit.
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import scipy.constants as cst
File = np.loadtxt('test5.dat')
X = File[:, 1]
Y = File[:, 2]
f_sample = 50000
time = []
for i in range(1, len(X)+1):
    t = i*(1/f_sample)
    time = np.append(time, t)
N = X.shape[0] # number of observation
N1=int(N/2)
delta_t = time[2] - time[1]
T_mes = N * delta_t
freq = np.arange(1/T_mes, (N+1)/T_mes, 1/T_mes)
freq=freq[0:N1]
fNyq = f_sample/2 # Nyquist frequency
nb = 350
freq_block = []
# discrete Fourier transform
X_ft = delta_t*np.fft.fft(X, n=N)
X_ft=X_ft[0:N1]
plt.figure()
plt.plot(time, X)
plt.xlabel('t [s]')
plt.ylabel('x [micro m]')
# Experimental power spectrum on both raw and blocked data
PSD_X_exp = (np.abs(X_ft)**2/T_mes)
PSD_X_exp_b = []
STD_PSD_X_exp_b = []
for i in range(0, N1+2, nb):
    freq_b = np.array(freq[i:i+nb])  # i-nb:i
    psd_b = np.array(PSD_X_exp[i:i+nb])
    freq_block = np.append(freq_block, (1/nb)*np.sum(freq_b))
    PSD_X_exp_b = np.append(PSD_X_exp_b, (1/nb)*np.sum(psd_b))
    STD_PSD_X_exp_b = np.append(STD_PSD_X_exp_b, PSD_X_exp_b/np.sqrt(nb))
plt.figure()
plt.loglog(freq, PSD_X_exp)
plt.legend(['Raw Experimental PSD'])
plt.xlabel('f [Hz]')
plt.ylabel('PSD')
plt.figure()
plt.loglog(freq_block, PSD_X_exp_b)
plt.legend(['Experimental PSD after blocking'])
plt.xlabel('f [Hz]')
plt.ylabel('PSD')
kB = cst.k # Boltzmann constant [m^2kg/s^2K]
T = 273.15 + 25 # Temperature [K]
r = (2.8 / 2) * 1e-6 # Particle radius [m]
v = 0.00002414 * 10 ** (247.8 / (-140 + T)) # Water viscosity [Pa*s]
gamma = np.pi * 6 * r * v # [m*Pa*s]
Do = kB*T/gamma # expected value for D
f3db_o = 50000 # expected value for f3db
fc_o = 300 # expected value pour fc
n = np.arange(-10,11)
def theo_spectrum_lorentzian_filter(x, D_, fc_, f3db_):
    PSD_theo = []
    for i in range(0, len(x)):
        # print(i)
        psd_theo = np.sum((((D_*Do)/2*math.pi**2)/((fc_*fc_o)**2+(x[i]+n*f_sample)**2))
                          *(1/(1+((x[i]+n*f_sample)/(f3db_*f3db_o))**2)))
        PSD_theo = np.append(PSD_theo, psd_theo)
    return PSD_theo
popt, pcov = curve_fit(theo_spectrum_lorentzian_filter, freq_block, PSD_X_exp_b, p0=[1, 1, 1], sigma=STD_PSD_X_exp_b, absolute_sigma=True, check_finite=True,bounds=(0.1, 10), method='trf', jac=None)
D_, fc_, f3db_ = popt
D1 = D_*Do
fc1 = fc_*fc_o
f3db1 = f3db_*f3db_o
print('Diffusion constant D = ', D1, ' Corner frequency fc= ',fc1, 'f3db(diode,eff)= ', f3db1)
I believe I've successfully fitted your data. Here's the approach I took.
First, I plotted your model (with popt=[1, 1, 1]) and the data you had. I noticed your data was significantly lower than the model. Then I started fiddling with the parameters. I wanted to push the model upwards. I did that by multiplying popt[0] by increasingly large values. I ended up with 1E13 as a ballpark value. Note that I have no idea if this is physically possible for your model. Then I jury-rigged your fitting function to multiply D_ by 1E13 and ran your code. I got this fit:
So I believe it's a problem of 1) inappropriate starting values and 2) inappropriate bounds. In your position, I would revise this model and check whether there are any problems with units and so on.
Here's what I used to try to fit your model:
plt.figure()
plt.loglog(freq_block[:170], PSD_X_exp_b[:170], label='Exp')
plt.loglog(freq_block[:170],
           theo_spectrum_lorentzian_filter(
               freq_block[:170],
               1E13*popt[0], popt[1], popt[2]),
           label='model')
plt.xlabel('f [Hz]')
plt.ylabel('PSD')
plt.legend()
I limited the data to point 170 because there were some weird backwards values that made me uncomfortable. I would recheck them if I were you.
Here's the model code I used. I didn't change the curve_fit call (except to limit x to :170).
def theo_spectrum_lorentzian_filter(x, D_, fc_, f3db_):
    PSD_theo = []
    D_ = 1E13*D_  # I only changed here
    for i in range(0, len(x)):
        psd_theo = np.sum((((D_*Do)/2*math.pi**2)/((fc_*fc_o)**2+(x[i]+n*f_sample)**2))
                          *(1/(1+((x[i]+n*f_sample)/(f3db_*f3db_o))**2)))
        PSD_theo = np.append(PSD_theo, psd_theo)
    return PSD_theo
I am simulating a chemical reaction of the form A --> B --> C using a chemical batch reactor model. The corresponding ODE system is as follows:
dcA/dt = - kA * cA(t) ** nA1
dcB/dt = kA * cA(t) ** nA1 - kB * cB(t) ** nB2
dcC/dt = kB * cB(t) ** nB2
Pyomo solves the ODE system fine if the exponents nA1 and nB2 are 1 or higher. But in my case they are below 1, and as the component concentrations approach zero the ODE integration fails, returning only NaNs. The reason is that once the concentrations approach zero they numerically take values like cA(t) = -10e-20, and then the expression cA(t)**nA1 can no longer be evaluated.
I tried to implement a workaround of the form:
if cA < 0:
    R1 = 0
else:
    R1 = kA * cA(t) ** nA1
but I wasn't able to do it properly, as I had a hard time with the Pyomo syntax.
This is the minimal working example:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from pyomo.environ import *
from pyomo.dae import *
V = 40 # l
kA = 0.5 # 1/min
kB = 0.1 # 1/min
nA1 = 0.5
nB2 = 0.5
cAf = 2.0 # mol/l
def batch_plot(t, y):
    plt.plot(t, y[:, 0], label = "cA")
    plt.plot(t, y[:, 1], label = "cB")
    plt.plot(t, y[:, 2], label = "cC")
    plt.legend()
def batch():
    m = ConcreteModel()
    m.t = ContinuousSet(bounds = (0, 500))
    m.cA = Var(m.t, domain = NonNegativeReals)
    m.cB = Var(m.t, domain = NonNegativeReals)
    m.cC = Var(m.t, domain = NonNegativeReals)
    m.dcA = DerivativeVar(m.cA, wrt = m.t)
    m.dcB = DerivativeVar(m.cB, wrt = m.t)
    m.dcC = DerivativeVar(m.cC, wrt = m.t)
    m.cA[0] = cAf
    m.cB[0] = 0
    m.cC[0] = 0
    R1 = lambda m, t: kA * m.cA[t] ** nA1
    R2 = lambda m, t: kB * m.cB[t] ** nB2
    m.odeA = Constraint(m.t, rule = lambda m, t: m.dcA[t] == - R1(m, t) )
    m.odeB = Constraint(m.t,
                        rule = lambda m, t: m.dcB[t] == R1(m, t) - R2(m, t) )
    m.odeC = Constraint(m.t,
                        rule = lambda m, t: m.dcC[t] == R2(m, t) )
    return m
tsim, profiles = Simulator(batch(), package = "scipy").simulate(numpoints = 100)
batch_plot(tsim, profiles)
I expect the ODE integration to work even with reaction orders below 1.
Does anybody have an idea how to achieve this?
There are two aims in modifying the power function x^n:
extend it to negative x in a smooth way, so that the numerical method does not hiccup close to x=0, and
have a small slope for small x, so that the numerical integration for very small x has a greater chance of being stable.
The first condition is satisfied by constructs like
x*max(eps,abs(x))^(n-1) or
x*(eps+abs(x-eps))^(n-1),
x*(eps^2+abs(x-eps)^2)^(0.5*(n-1)),
which all have exactly the value x^n for x>eps and are continuous and piecewise smooth. But the slope at x=0 is of size eps^(n-1), which will require very small step sizes even after the system stabilizes.
The solution is to extract even more integer power from the rational power in the form of
x*abs(x) * max(eps,abs(x))^(n-2)
or one of the other variants for the last factor. For 0<x<eps and n=0.5 this results in the value r(x)=x^2 * eps^(-1.5), so that the equation x'=-k*r(x) has the solution x(t)=x1/(1+x1*k*eps^(-1.5)*(t-t1)) after it falls to a point 0<x1<eps at t=t1. The slope of r is smaller than 2, which is nice for numerical integrators.
This was implemented for scipy.integrate.solve_ivp, using method LSODA and rather strict tolerances, with the following ODE right-hand-side function:
# your original function, stabilizes at negative values
power0 = lambda x,n: max(0,x) ** n;
# linear at x=0, small step sizes
def power1(x,n): eps=1e-4; return x * max(eps, abs(x)) ** (n-1);
def power2(x,n): eps=1e-4; return x * (eps**2+(x-eps)**2) ** (0.5*(n-1))
# quadratic at x=0, large step sizes on the tail
eps = 1e-8
power3 = lambda x,n: x * abs(x) * max(eps,abs(x)) ** (n-2)
power4 = lambda x,n: x * abs(x) * (eps**2+(x-eps)**2) ** (0.5*n-1)
# select the power approximation used
power = power3
def model(t, u):
    cA, cB, cC = u
    R1 = kA * power(cA, nA1)
    R2 = kB * power(cB, nB2)
    return [-R1, R1-R2, R2]
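For reference, a minimal sketch of the integration call (the tolerance values here are placeholders standing in for the "rather strict" ones mentioned above):

from scipy.integrate import solve_ivp

# initial concentrations [cA, cB, cC], with cAf = 2.0 as in the question
sol = solve_ivp(model, (0, 500), [cAf, 0, 0], method="LSODA",
                rtol=1e-8, atol=1e-10)
batch_plot(sol.t, sol.y.T)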
The integration runs successfully, using step sizes of 20-30 in the tail end. The resulting plot looks qualitatively correct, and in the zoom for small values it is smooth and remains positive.
I have two signals which are related to each other and have been captured by two different measurement devices simultaneously.
Since the two measurements are not time synchronized there is a small time delay between them which I want to calculate. Additionally, I need to know which signal is the leading one.
The following can be assumed:
little to no noise present
speed of the algorithm is not an issue, only accuracy and robustness
signals are captured at a high sampling rate (>10 kHz) for several seconds
expected time delay is < 0.5s
I thought of using cross-correlation for that purpose.
Any suggestions on how to implement that in Python are very appreciated.
Please let me know if I should provide more information in order to find the most suitable algorithm.
A popular approach: the time shift is the lag corresponding to the maximum cross-correlation coefficient. Here is how it works with an example:
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
def lag_finder(y1, y2, sr):
    n = len(y1)
    corr = signal.correlate(y2, y1, mode='same') / np.sqrt(signal.correlate(y1, y1, mode='same')[int(n/2)] * signal.correlate(y2, y2, mode='same')[int(n/2)])
    delay_arr = np.linspace(-0.5*n/sr, 0.5*n/sr, n)
    delay = delay_arr[np.argmax(corr)]
    print('y2 is ' + str(delay) + ' behind y1')
    plt.figure()
    plt.plot(delay_arr, corr)
    plt.title('Lag: ' + str(np.round(delay, 3)) + ' s')
    plt.xlabel('Lag')
    plt.ylabel('Correlation coeff')
    plt.show()
# Sine sample with some noise and copy to y1 and y2 with a 1-second lag
sr = 1024
y = np.linspace(0, 2*np.pi, sr)
y = np.tile(np.sin(y), 5)
y += np.random.normal(0, 5, y.shape)
y1 = y[sr:4*sr]
y2 = y[:3*sr]
lag_finder(y1, y2, sr)
In the case of noisy signals, it is common to apply band-pass filters first. In the case of harmonic noise, they can be removed by identifying and removing frequency spikes present in the frequency spectrum.
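A hedged sketch of such a band-pass preprocessing step (the cutoffs low and high and the sampling rate fs are placeholders you'd pick for your signals):

from scipy.signal import butter, filtfilt

def bandpass(x, low, high, fs, order=4):
    b, a = butter(order, [low, high], btype='band', fs=fs)
    return filtfilt(b, a, x)  # zero-phase, so the filter itself adds no extra lag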
NumPy has the function correlate, which suits your needs: https://docs.scipy.org/doc/numpy/reference/generated/numpy.correlate.html
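A minimal sketch of how that might look (y1, y2 and the sampling rate sr are assumed to be defined as in the other answer; lag sign conventions are easy to get backwards, so verify on a signal with a known shift):

import numpy as np

corr = np.correlate(y1, y2, mode='full')
lags = np.arange(-(len(y2) - 1), len(y1))  # lag axis for mode='full', in samples
delay = lags[np.argmax(corr)] / sr         # delay in seconds (check the sign convention)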
To complement Reveille's answer above (I reproduce his algorithm), I would like to point out some ideas for preprocessing the input signals.
Since there seems to be no one-size-fits-all solution (duration in periods, resolution, offset, noise, signal type, ...), you may play with it.
In my example, the application of a window function improves the detected phase shift (within the resolution of the discretization).
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
r2d = 180.0/np.pi # conversion factor RAD-to-DEG
delta_phi_true = 50.0/r2d
def detect_phase_shift(t, x, y):
    '''detect phase shift between two signals from cross correlation maximum'''
    N = len(t)
    L = t[-1] - t[0]
    cc = signal.correlate(x, y, mode="same")
    i_max = np.argmax(cc)
    phi_shift = np.linspace(-0.5*L, 0.5*L, N)
    delta_phi = phi_shift[i_max]
    print("true delta phi = {} DEG".format(delta_phi_true*r2d))
    print("detected delta phi = {} DEG".format(delta_phi*r2d))
    print("error = {} DEG resolution for comparison dphi = {} DEG".format((delta_phi-delta_phi_true)*r2d, dphi*r2d))
    print("ratio = {}".format(delta_phi/delta_phi_true))
    return delta_phi
L = np.pi*10+2 # interval length [RAD], for generality not multiple period
N = 1001 # interval division, odd number is better (center is integer)
noise_intensity = 0.0
X = 0.5 # amplitude of first signal..
Y = 2.0 # ..and second signal
phi = np.linspace(0, L, N)
dphi = phi[1] - phi[0]
'''generate signals'''
nx = noise_intensity*np.random.randn(N)*np.sqrt(dphi)
ny = noise_intensity*np.random.randn(N)*np.sqrt(dphi)
x_raw = X*np.sin(phi) + nx
y_raw = Y*np.sin(phi+delta_phi_true) + ny
'''preprocessing signals'''
x = x_raw.copy()
y = y_raw.copy()
window = signal.windows.hann(N) # Hanning window
#x -= np.mean(x) # zero mean
#y -= np.mean(y) # zero mean
#x /= np.std(x) # scale
#y /= np.std(y) # scale
x *= window # reduce effect of finite length
y *= window # reduce effect of finite length
print(" -- using raw data -- ")
delta_phi_raw = detect_phase_shift(phi, x_raw, y_raw)
print(" -- using preprocessed data -- ")
delta_phi_preprocessed = detect_phase_shift(phi, x, y)
Without noise (to be deterministic) the output is
-- using raw data --
true delta phi = 50.0 DEG
detected delta phi = 47.864788975654 DEG
...
-- using preprocessed data --
true delta phi = 50.0 DEG
detected delta phi = 49.77938053468019 DEG
...
SciPy has a useful function for this, called correlation_lags, which works together with the correlate function mentioned in other answers to find the time lag. The example displayed at the bottom of that page is useful:
import numpy as np
from scipy import signal
from numpy.random import default_rng

rng = default_rng()
x = rng.standard_normal(1000)
y = np.concatenate([rng.standard_normal(100), x])
correlation = signal.correlate(x, y, mode="full")
lags = signal.correlation_lags(x.size, y.size, mode="full")
lag = lags[np.argmax(correlation)]
Then lag would be -100.
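Note that the lag is in samples; with a known sampling rate sr (an assumed name, since this example is unitless), the delay in seconds would be lag / sr, and its sign tells you which signal is the leading one.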
I am following Andrew Ng's Coursera course on machine learning. I am trying to build a three-layer neural net for digit recognition in Python (784 input, 25 hidden, 10 output units). However, I am unable to get the predictions (on the training data) correct (accuracy < 5% at 100 iterations, and not increasing with iteration).
J (the cost function) seems to be going down (see photo 1) and I have done gradient checking (before minimizing) and it seems to match to around 1e-11 (see photo 2).
I have compared theta1 and theta2 after 100 iterations to my working MATLAB code (see code snippet 1 for Octave and code snippet 2 for Python). theta1 is reasonably similar but theta2 is very different -- see code snippet 2. (I know they should differ because of the different optimisation routines. However, first, I placed the same initial thetas into both codes. Second, my reasoning is that they should start to converge, or at least get close, after 100 iterations.)
The only error I see is:
-c:32: RuntimeWarning: overflow encountered in exp
when running the sigmoid during optimisation. However, I was told that this is not essential, and that it is normal to encounter during optimisation. Furthermore, because it is a sigmoid, anytime the input is large it will tend towards 1 anyway.
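(As an aside, if the warning itself bothers you, a common numerically stable sigmoid for NumPy array inputs is sketched below; this is a generic alternative, not part of my original code:)

def sigmoid_stable(lz):
    # split positive/negative inputs so np.exp never sees a large positive argument
    out = np.empty_like(lz, dtype=float)
    pos = lz >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-lz[pos]))
    ez = np.exp(lz[~pos])
    out[~pos] = ez / (1.0 + ez)
    return out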
I have also attached my code in snippet 3. I have cut out all the other non-essential bits (like gradient checking) to make it as short as possible.
I would appreciate any help into this as I cannot even find where it is going wrong, let alone fix it. Thank you.
Photos:
J (cost function) decreasing to 1.8 after 12 iterations
Gradient checking before optimizing, they look very similar
Code snippet 1 (Octave):
Initializing Neural Network Parameters ...
initial1
-0.0100100
-0.0771400
-0.1113800
-0.0230100
0.0547800
-0.0505500
-0.0731200
-0.0988700
0.0128000
-0.0855400
-0.1002500
-0.1137200
-0.0669300
-0.0999900
0.0084500
-0.0363200
-0.0588600
-0.0431100
-0.1133700
-0.0326300
0.0282800
0.0052400
-0.1134600
-0.0617700
0.0267600
initial2
0.0273700
0.1026000
-0.0502100
-0.0699100
0.0190600
0.1004000
0.0784600
-0.0075900
-0.0362100
0.0286200
Doing fminunc
Training Neural Network...
Iteration 100 | Cost: 6.219605e-01
theta1
-0.0099719
-0.0768462
-0.1109559
-0.0229224
0.0545714
-0.0503575
-0.0728415
-0.0984935
0.0127513
-0.0852143
-0.0998682
-0.1132869
-0.0666751
-0.0996092
0.0084178
-0.0361817
-0.0586359
-0.0429458
-0.1129383
-0.0325057
0.0281723
0.0052200
-0.1130279
-0.0615348
0.0266581
theta2
1.124918
1.603780
-1.266390
-0.848874
0.037956
-1.360841
2.145562
-1.448657
-1.262285
-1.357635
Code snippet 2 (Python):
theta1_initial
[-0.01001 -0.07714 -0.11138 -0.02301 0.05478 -0.05055 -0.07312 -0.09887
0.0128 -0.08554 -0.10025 -0.11372 -0.06693 -0.09999 0.00845 -0.03632
-0.05886 -0.04311 -0.11337 -0.03263 0.02828 0.00524 -0.11346 -0.06177
0.02676]
theta2_initial
[ 0.02737 0.1026 -0.05021 -0.06991 0.01906 0.1004 0.07846 -0.00759
-0.03621 0.02862]
Doing fminunc
-c:32: RuntimeWarning: overflow encountered in exp
theta1
[-0.00997202 -0.07680716 -0.11086841 -0.02292044 0.05455335 -0.05034252
-0.07280686 -0.09842603 0.01275117 -0.08516515 -0.0997987 -0.11319546
-0.06664666 -0.09954009 0.00841804 -0.03617494 -0.05861458 -0.04293555
-0.1128474 -0.0325006 0.02816879 0.00522031 -0.1129369 -0.06151103
0.02665508]
theta2
[ 0.27954826 -0.08007496 -0.36449273 -0.22988024 0.06849659 -0.47803973
1.09023041 -0.25570559 -0.24537494 -0.40341995]
Code snippet 3 (Python):
#-----------------BEGIN HEADERS-----------------
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import csv
import scipy
#-----------------END HEADERS-----------------
#-----------------BEGIN FUNCTION 1-----------------
def randinitialize(L_in, L_out):
    w = np.zeros((L_out, 1 + L_in))
    epsilon_init = 0.12
    w = np.random.rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init
    return w
#-----------------END FUNCTION 1-----------------
#-----------------BEGIN FUNCTION 2-----------------
def sigmoid(lz):
    g = 1.0/(1.0+np.exp(-lz))
    return g
#-----------------END FUNCTION 2-----------------
#-----------------BEGIN FUNCTION 3-----------------
def sigmoidgradient(lz):
    g = np.multiply(sigmoid(lz), (1-sigmoid(lz)))
    return g
#-----------------END FUNCTION 3-----------------
#-----------------BEGIN FUNCTION 4-----------------
def nncostfunction(ltheta_ravel, linput_layer_size, lhidden_layer_size, lnum_labels, lx, ly, llambda_reg):
    ltheta1 = np.array(np.reshape(ltheta_ravel[:lhidden_layer_size * (linput_layer_size + 1)], (lhidden_layer_size, (linput_layer_size + 1))))
    ltheta2 = np.array(np.reshape(ltheta_ravel[lhidden_layer_size * (linput_layer_size + 1):], (lnum_labels, (lhidden_layer_size + 1))))
    ltheta1_grad = np.zeros((np.shape(ltheta1)))
    ltheta2_grad = np.zeros((np.shape(ltheta2)))
    y_matrix = []
    lm = np.shape(lx)[0]
    eye_matrix = np.eye(lnum_labels)
    for i in range(len(ly)):
        y_matrix.append(eye_matrix[int(ly[i])-1,:])  # the minus one as python is zero based
    y_matrix = np.array(y_matrix)
    a1 = np.hstack((np.ones((lm,1)), lx)).astype(float)
    z2 = sigmoid(ltheta1.dot(a1.T))
    a2 = (np.concatenate((np.ones((np.shape(z2)[1], 1)), z2.T), axis=1)).astype(float)
    a3 = sigmoid(ltheta2.dot(a2.T))
    h = a3
    J_unreg = 0
    J = 0
    J_unreg = (1/float(lm))*np.sum(
        -np.multiply(y_matrix, np.log(h.T))
        - np.multiply((1-y_matrix), np.log(1-h.T)),
        axis=None)
    J = J_unreg + (llambda_reg/(2*float(lm)))*(
        np.sum(np.multiply(ltheta1[:,1:], ltheta1[:,1:]), axis=None)
        + np.sum(np.multiply(ltheta2[:,1:], ltheta2[:,1:]), axis=None))
    delta3 = a3.T - y_matrix
    delta2 = np.multiply((delta3.dot(ltheta2[:,1:])), (sigmoidgradient(ltheta1.dot(a1.T))).T)
    cdelta2 = ((a2.T).dot(delta3)).T
    cdelta1 = ((a1.T).dot(delta2)).T
    ltheta1_grad = (1/float(lm))*cdelta1
    ltheta2_grad = (1/float(lm))*cdelta2
    theta1_hold = ltheta1
    theta2_hold = ltheta2
    theta1_hold[:,0] = 0
    theta2_hold[:,0] = 0
    ltheta1_grad = ltheta1_grad + (llambda_reg/float(lm))*theta1_hold
    ltheta2_grad = ltheta2_grad + (llambda_reg/float(lm))*theta2_hold
    thetagrad_ravel = np.concatenate((np.ravel(ltheta1_grad), np.ravel(ltheta2_grad)))
    return (J, thetagrad_ravel)
#-----------------END FUNCTION 4-----------------
#-----------------BEGIN FUNCTION 5-----------------
def predict(ltheta1, ltheta2, x):
    m, n = np.shape(x)
    p = np.zeros(m)
    h1 = sigmoid((np.hstack((np.ones((m,1)), x.astype(float)))).dot(ltheta1.T))
    h2 = sigmoid((np.hstack((np.ones((m,1)), h1))).dot(ltheta2.T))
    for i in range(0, np.shape(h2)[0]):
        p[i] = np.argmax(h2[i,:])
    return p
#-----------------END FUNCTION 5-----------------
## Setup the parameters you will use for this exercise
input_layer_size = 784; # 28x28 Input Images of Digits
hidden_layer_size = 25; # 25 hidden units
num_labels = 10; # 10 labels, from 0 to 9
data = []
#Reading in data, split into X and y, rewrite label 0 to 10 (for easy comparison to course)
with open('train.csv', 'rb') as csvfile:
    has_header = csv.Sniffer().has_header(csvfile.read(1024))
    csvfile.seek(0)  # rewind
    data_csv = csv.reader(csvfile, delimiter=',')
    if has_header:
        next(data_csv)
    for row in data_csv:
        data.append(row)
data = np.array(data)
x = data[:,1:]
y = data[:,0]
y = y.astype(int)
for i in range(len(y)):
    if y[i] == 0:
        y[i] = 10
#Set basic parameters
m, n = np.shape(x)
lambda_reg = 1.0
#Randomly initialize weights for Theta_initial
#theta1_initial = np.genfromtxt('tt1.csv', delimiter=',')
#theta2_initial = np.genfromtxt('tt2.csv', delimiter=',')
theta1_initial = randinitialize(input_layer_size, hidden_layer_size);
theta2_initial = randinitialize(hidden_layer_size, num_labels);
theta_initial_ravel = np.concatenate((np.ravel(theta1_initial), np.ravel(theta2_initial)))
#Do the optimization
fmin = scipy.optimize.minimize(fun=nncostfunction, x0=theta_initial_ravel, args=(input_layer_size, hidden_layer_size, num_labels, x, y, lambda_reg), method='L-BFGS-B', jac=True, options={'maxiter': 10, 'disp': True})
fmin
theta1 = np.array(np.reshape(fmin.x[:hidden_layer_size * (input_layer_size + 1)], (hidden_layer_size, (input_layer_size + 1))))
theta2 = np.array(np.reshape(fmin.x[hidden_layer_size * (input_layer_size + 1):], (num_labels, (hidden_layer_size + 1))))
p = predict(theta1, theta2, x);
for i in range(len(y)):
    if y[i] == 10:
        y[i] = 0
correct = [1 if a == b else 0 for (a, b) in zip(p,y)]
accuracy = (sum(map(int, correct)) / float(len(correct)))
print 'accuracy = {0}%'.format(accuracy * 100)
I think I have fixed the problem: it seems I messed up the index. It should be:
y_matrix.append(eye_matrix[int(ly[i]),:])
instead of:
y_matrix.append(eye_matrix[int(ly[i])-1,:])