How to fix an error when inverting image maps in OpenCV - Python

I am trying to invert a set of maps using this answer here. I used two of the methods from that answer, so there is more detail on how they work there. I also left some comments out to shorten the code.
I have my own camera matrix and distortion coefficients that I use to create an x and y map with cv2.initUndistortRectifyMap(), but when I pass them to invert_map() I get the out-of-bounds error shown below.
None of this (except the bottom part) is my code and it's pretty advanced stuff, so I have no clue how to debug it. And I don't have enough reputation to comment on the original answer. Anyone got a solution?
import numpy as np
import cv2 as cv2
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
import glob
def bilinear_inverse(p, vertices, numiter=4):
    p = np.asarray(p)
    v = np.asarray(vertices)
    sh = p.shape[1:]
    if v.ndim == 2:
        v = np.expand_dims(v, axis=tuple(range(2, 2 + len(sh))))
    # Start in the center
    s = .5 * np.ones((2,) + sh)
    s0, s1 = s
    for k in range(numiter):
        # Residual
        r = v[0] * (1 - s0) * (1 - s1) + v[1] * s0 * (1 - s1) + v[2] * s0 * s1 + v[3] * (1 - s0) * s1 - p
        # Jacobian
        J11 = -v[0, 0] * (1 - s1) + v[1, 0] * (1 - s1) + v[2, 0] * s1 - v[3, 0] * s1
        J21 = -v[0, 1] * (1 - s1) + v[1, 1] * (1 - s1) + v[2, 1] * s1 - v[3, 1] * s1
        J12 = -v[0, 0] * (1 - s0) - v[1, 0] * s0 + v[2, 0] * s0 + v[3, 0] * (1 - s0)
        J22 = -v[0, 1] * (1 - s0) - v[1, 1] * s0 + v[2, 1] * s0 + v[3, 1] * (1 - s0)
        inv_detJ = 1. / (J11 * J22 - J12 * J21)
        s0 -= inv_detJ * (J22 * r[0] - J12 * r[1])
        s1 -= inv_detJ * (-J21 * r[0] + J11 * r[1])
    return s
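As a quick sanity check of bilinear_inverse (my own test, not part of the original answer): on the unit square the bilinear map is the identity, so the recovered local coordinates should equal the query point. Note the query needs a trailing batch axis (e.g. shape (2, 1)), since the in-place Newton updates only write back through array views:

quad = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])  # corners v0..v3 of the unit square
p = np.array([[0.25], [0.75]])                             # one query point, shape (2, 1)
s = bilinear_inverse(p, quad)
print(s.ravel())                                           # expect approximately [0.25, 0.75]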
def invert_map(xmap, ymap, diagnostics=False):
    """
    Generate the inverse of deformation map defined by (xmap, ymap) using inverse bilinear interpolation.
    """
    # Generate quadrilaterals from mapped grid points.
    quads = np.array([[ymap[:-1, :-1], xmap[:-1, :-1]],
                      [ymap[1:, :-1], xmap[1:, :-1]],
                      [ymap[1:, 1:], xmap[1:, 1:]],
                      [ymap[:-1, 1:], xmap[:-1, 1:]]])
    # Range of indices possibly within each quadrilateral
    x0 = np.floor(quads[:, 1, ...].min(axis=0)).astype(int)
    x1 = np.ceil(quads[:, 1, ...].max(axis=0)).astype(int)
    y0 = np.floor(quads[:, 0, ...].min(axis=0)).astype(int)
    y1 = np.ceil(quads[:, 0, ...].max(axis=0)).astype(int)
    # Quad indices
    i0, j0 = np.indices(x0.shape)
    # Offset of destination map
    x0_offset = x0.min()
    y0_offset = y0.min()
    # Index range in x and y (per quad)
    xN = x1 - x0 + 1
    yN = y1 - y0 + 1
    # Shape of destination array
    sh_dest = (1 + x1.max() - x0_offset, 1 + y1.max() - y0_offset)
    # Coordinates of destination array
    yy_dest, xx_dest = np.indices(sh_dest)
    xmap1 = np.zeros(sh_dest)
    ymap1 = np.zeros(sh_dest)
    TN = np.zeros(sh_dest, dtype=int)
    # Smallish number to avoid missing point lying on edges
    epsilon = .01
    # Loop through indices possibly within quads
    for ix in range(xN.max()):
        for iy in range(yN.max()):
            # Work only with quads whose bounding box contain indices
            valid = (xN > ix) * (yN > iy)
            # Local points to check
            p = np.array([y0[valid] + ix, x0[valid] + iy])
            # Map the position of the point in the quad
            s = bilinear_inverse(p, quads[:, :, valid])
            # s out of unit square means p out of quad
            # Keep some epsilon around to avoid missing edges
            in_quad = np.all((s > -epsilon) * (s < (1 + epsilon)), axis=0)
            # Add found indices
            ii = p[0, in_quad] - y0_offset
            jj = p[1, in_quad] - x0_offset
            ymap1[ii, jj] += i0[valid][in_quad] + s[0][in_quad]
            xmap1[ii, jj] += j0[valid][in_quad] + s[1][in_quad]
            # Increment count
            TN[ii, jj] += 1
    ymap1 /= TN + (TN == 0)
    xmap1 /= TN + (TN == 0)
    if diagnostics:
        diag = {'x_offset': x0_offset,
                'y_offset': y0_offset,
                'mask': TN > 0}
        return xmap1, ymap1, diag
    else:
        return xmap1, ymap1
# camera matrix and distortion coefficients from my calibration
cam_matrix = np.array([[1223.07784, 0, 926.80065],
                       [0, 1231.71291, 546.10496],
                       [0, 0, 1]], dtype='float32')
distortion_profile = np.array([-0.32077, 0.15041, 0.001004, 0.00028, -0.04252], dtype='float32')
# get the current maps (m1type=5 is cv2.CV_32FC1)
mapx, mapy = cv2.initUndistortRectifyMap(cam_matrix, distortion_profile, None, cam_matrix, (1920, 1080), 5)
# invert the maps
mapx_invert, mapy_invert = invert_map(mapx, mapy)
# apply the mapping to an image (img is loaded elsewhere; remap expects float32 maps)
inversed = cv2.remap(img, mapx_invert.astype('float32'), mapy_invert.astype('float32'), cv2.INTER_LINEAR)
cv2.imwrite('inversed.png', inversed)
Error:
File "c:\Users\...\redist_image2.py", line 121, in invert_map
ymap1[ii, jj] += i0[valid][in_quad] + s[0][in_quad]
IndexError: index 1382 is out of bounds for axis 1 with size 1020
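One way to narrow an error like this down (a debugging sketch, not a fix): print the shapes and value ranges that invert_map receives. cv2.initUndistortRectifyMap takes the size as (width, height), so the maps come back with shape (1080, 1920), and invert_map indexes rows with y and columns with x; note that sh_dest above is assembled as (x-range, y-range), which is worth checking against that convention for non-square maps.

print(mapx.shape, mapy.shape)  # (1080, 1920) for a (1920, 1080) size argument
print(mapx.min(), mapx.max())  # x values, roughly [0, 1920)
print(mapy.min(), mapy.max())  # y values, roughly [0, 1080)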

Related

The output value of cv2.projectPoints differs from my implementation from scratch

I am trying to understand how cv2.projectPoints works. For this I have created the following test:
import numpy as np
import cv2
##################################################################
# Camera parameters
I = np.array([[0.11867264, 0, 0.5399652], [0, 0.37119691, 0.76215127], [0, 0, 1]])
E = np.array([[0.85939021, 0.78837968, 0.04341585, 0.99755739],[0.84512984, 0.19973536, 0.09509114, 0.47567923], [0.00813835, 0.00662538, 0.16825557, 0.00391849]])
D = np.array([0.0809947, 0.1508598, 0.69108758, 0.2972208, 0.96983757])
point_3d = np.array([0.2249427, 0.13465326, 0.02870871])
##################################################################
# METHOD 1
# project onto the image plane using OpenCV (the distortion D is applied)
[u, v] = cv2.projectPoints(point_3d, E[:, :3], E[:, 3], I, D)[0].squeeze()
print('Image coordinates are {}, {} using opencv'.format(u, v))
# METHOD 2
# project onto the image plane using extrinsics and intrinsics independently
# Convert the 3D point to homogeneous coordinates
point_h = np.array([point_3d[0], point_3d[1], point_3d[2], 1]).reshape(4, 1)
# Transform the 3D point to camera coordinates
cam_coords= np.dot(E, point_h)
# Normalize the camera coordinates
x, y, z = cam_coords/cam_coords[2]
# radial dist
r2 = x**2 + y**2
r4 = r2**2
r6 = r2**3
rdist = 1 + D[0] * r2 + D[1] * r4 + D[4]*r6
x_dist = x * rdist
y_dist = y * rdist
# tan dist
tanx = 2*D[2] * x * y + D[3]*(r2 + 2 * x**2)
tany = D[2]*(r2 + 2 * y**2) + 2*D[3] * x * y
x_dist = x_dist + tanx
y_dist = y_dist + tany
# # Back to absolute coordinates
x_dist = I[0][0] * x_dist + I[0][2]
y_dist = I[1][1] * y_dist + I[1][2]
print('Image coordinates are {}, {} using E and I independently'.format(x_dist, y_dist))
diff = np.abs((u - x_dist) + (v - y_dist))
print('Comparison against opencv implementation {}'.format(diff))
# METHOD 3
# project onto the image plane using the P matrix
#
point_h = np.array([point_3d[0], point_3d[1], point_3d[2], 1]).reshape(4, 1)
P = I @ E
coord = P @ point_h
x = coord[0] / coord[2]
y = coord[1] / coord[2]
# # To relative coordinates
x = (x - I[0, 2])/I[0, 0]
y = (y - I[1, 2])/I[1, 1]
# radial dist
r2 = x**2 + y**2
r4 = r2**2
r6 = r2**3
rdist = 1 + D[0] * r2 + D[1] * r4 + D[4]*r6
x_dist = x * rdist
y_dist = y * rdist
# tan dist
tanx = 2*D[2] * x * y + D[3]*(r2 + 2 * x**2)
tany = D[2]*(r2 + 2 * y**2) + 2*D[3] * x * y
x_dist = x_dist + tanx
y_dist = y_dist + tany
# # Back to absolute coordinates
x_dist = x_dist * I[0, 0] + I[0, 2]
y_dist = y_dist * I[1, 1] + I[1, 2]
print('Image coordinates are {}, {} using P'.format(x_dist,y_dist))
diff = np.abs((u - x_dist) + (v - y_dist))
print('Comparison against opencv implementation {}'.format(diff))
The output of this piece of code is the following one:
Image coordinates are 58328092212356.85, 97724995854418.78 using opencv
Image coordinates are [5.83280922e+13], [9.77249959e+13] using E and I independently
Comparison against opencv implementation [0.015625]
Image coordinates are [5.83280922e+11], [9.77249959e+11] using P
Comparison against opencv implementation [0.265625]
It seems that when I compute the projection matrix from the intrinsic and extrinsic matrices, I do not get the same solution as OpenCV. The distortion model applied is the one described in the documentation.
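For what it's worth: since the third row of I is [0, 0, 1], normalizing after projecting with P = I @ E is algebraically the same as normalizing the camera coordinates first, so METHOD 2 and METHOD 3 should agree up to floating-point error. A minimal check (a sketch reusing the arrays defined above):

cam = E @ point_h                      # camera coordinates, shape (3, 1)
xn, yn = (cam[:2] / cam[2]).ravel()    # normalized coordinates, as in METHOD 2
proj = (I @ E) @ point_h               # projective coordinates, as in METHOD 3
xr = ((proj[0] / proj[2] - I[0, 2]) / I[0, 0]).item()
yr = ((proj[1] / proj[2] - I[1, 2]) / I[1, 1]).item()
print(np.allclose([xn, yn], [xr, yr]))  # expect True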

Error in implementation of Crank-Nicolson method applied to 1D TDSE?

This is more of a computational physics problem, and I've asked it on Physics Stack Exchange, but got no answers there. This is, I suppose, a mix of the disciplines here and there (and maybe even Mathematics Stack Exchange), so finding the right place to post is a task in and of itself, apparently...
I'm attempting to use the Crank-Nicolson scheme to solve the TDSE in 1D. The initial wave is a real Gaussian that has been normalised with respect to its probability density. As the solution evolves, a depression grows in the central peak of the real part of the wave, and the imaginary part's central trough is perhaps a bit higher than I expect (image below).
Does this behaviour seem reasonable? I have searched around and not seen questions/figures that are similar. I've tested another person's code from GitHub and it exhibits the same behaviour, which makes me feel a bit better. But I still think the central peak should just decrease in height and increase in width. The likelihood of me getting a physics-based explanation is relatively low here, I'd assume, but a computational explanation of errors I may have made is more likely.
I'm happy to give more information, for example my code, or the matrices used in the scheme, etc. Thanks in advance!
Here's a link to a GIF of the time evolution:
And the part of my code relevant to solving the 1D TDSE:
(pretty much the entire thing except the plotting)
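For reference (this is my reading of the coefficients in the code, not something stated in the post): with ħ = m = 1 the equation being advanced is i∂ψ/∂t = -(1/2)∂²ψ/∂x², and the matrices Al and Ar assemble the Crank-Nicolson update

(1 + iΔt·H/2)ψ^{n+1} = (1 - iΔt·H/2)ψ^n,  with  Hψ_j = -(ψ_{j+1} - 2ψ_j + ψ_{j-1})/(2Δx²),

which gives off-diagonal entries α = -iΔt/(4Δx²) on the left-hand side and β = +iΔt/(4Δx²) on the right-hand side, matching alpha and beta below.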
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Define function for norm.
def normf(dxc, uc, ic):
    return sum(dxc * np.square(np.abs(uc[ic, :])))

# Define function for expectation value of position.
def xexpf(dxc, xc, uc, ic):
    return sum(dxc * xc * np.square(np.abs(uc[ic, :])))

# Define function for expectation value of squared position.
def xexpsf(dxc, xc, uc, ic):
    return sum(dxc * np.square(xc) * np.square(np.abs(uc[ic, :])))

# Define function for standard deviation.
def sdaf(xexpc, xexpsc, ic):
    return np.sqrt(xexpsc[ic] - np.square(xexpc[ic]))
# Time t: t0 <= t <= tf. Have N steps at which to evaluate the CN scheme. The
# time interval is dt. decp: variable for plotting to a certain number of
# decimal places.
t0 = 0
tf = 20
N = 200
dt = tf / N
t = np.linspace(t0, tf, num = N + 1, endpoint = True)
decp = str(dt)[::-1].find('.')
# Initialise array for filling with norm values at each time step.
norm = np.zeros(len(t))
# Initialise array for expectation value of position.
xexp = np.zeros(len(t))
# Initialise array for expectation value of squared position.
xexps = np.zeros(len(t))
# Initialise array for alternate standard deviation.
sda = np.zeros(len(t))
# Position x: -a <= x <= a. M is an even number. There are M + 1 total discrete
# positions, so that the points are symmetric and centred at x = 0.
a = 100
M = 1200
dx = (2 * a) / M
x = np.linspace(-a, a, num = M + 1, endpoint = True)
# The gaussian function u diffuses over time. sd sets the width of gaussian. u0
# is the initial gaussian at t0.
sd = 1
var = np.power(sd, 2)
mu = 0
u0 = np.sqrt(1 / np.sqrt(np.pi * var)) * np.exp(-np.power(x - mu, 2) / (2 * var))
u = np.zeros([len(t), len(x)], dtype = 'complex_')
u[0, :] = u0
# Normalise u.
u[0, :] = u[0, :] / np.sqrt(normf(dx, u, 0))
# Set coefficients of CN scheme.
alpha = dt * -1j / (4 * np.power(dx, 2))
beta = dt * 1j / (4 * np.power(dx, 2))
# Tridiagonal matrices Al and Ar. Al to be solved using the Thomas algorithm.
Al = np.zeros([len(x), len(x)], dtype = 'complex_')
for i in range(0, M):
    Al[i + 1, i] = alpha
    Al[i, i] = 1 - (2 * alpha)
    Al[i, i + 1] = alpha
# Corner elements for BC's.
Al[M, M], Al[0, 0] = 1 - alpha, 1 - alpha
Ar = np.zeros([len(x), len(x)], dtype = 'complex_')
for i in range(0, M):
    Ar[i + 1, i] = beta
    Ar[i, i] = 1 - (2 * beta)
    Ar[i, i + 1] = beta
# Corner elements for BC's.
Ar[M, M], Ar[0, 0] = 1 - 2*beta, 1 - beta
# Thomas algorithm variables. Following similar naming as in Wiki article.
a = np.diag(Al, -1)
b = np.diag(Al)
c = np.diag(Al, 1)
NT = len(b)
cp = np.zeros(NT - 1, dtype = 'complex_')
for n in range(0, NT - 1):
    if n == 0:
        cp[n] = c[n] / b[n]
    else:
        cp[n] = c[n] / (b[n] - (a[n - 1] * cp[n - 1]))
d = np.zeros(NT, dtype = 'complex_')
dp = np.zeros(NT, dtype = 'complex_')
# Iterate over each time step to solve CN method. Maintain boundary
# conditions. Keep track of standard deviation.
for i in range(0, N):
    # BC's.
    u[i, 0], u[i, M] = 0, 0
    # Find RHS.
    d = np.dot(Ar, u[i, :])
    for n in range(0, NT):
        if n == 0:
            dp[n] = d[n] / b[n]
        else:
            dp[n] = (d[n] - (a[n - 1] * dp[n - 1])) / (b[n] - (a[n - 1] * cp[n - 1]))
    nc = NT - 1
    while nc > -1:
        if nc == NT - 1:
            u[i + 1, nc] = dp[nc]
            nc -= 1
        else:
            u[i + 1, nc] = dp[nc] - (cp[nc] * u[i + 1, nc + 1])
            nc -= 1
    norm[i] = normf(dx, u, i)
    xexp[i] = xexpf(dx, x, u, i)
    xexps[i] = xexpsf(dx, x, u, i)
    sda[i] = sdaf(xexp, xexps, i)
# Fill in final norm value.
norm[N] = normf(dx, u, N)
# Fill in final position expectation value.
xexp[N] = xexpf(dx, x, u, N)
# Fill in final squared position expectation value.
xexps[N] = xexpsf(dx, x, u, N)
# Fill in final standard deviation value.
sda[N] = sdaf(xexp, xexps, N)
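One quick sanity check that may help here (my suggestion, not part of the original post, using the arrays computed above): Crank-Nicolson is unitary for Hermitian H, so the norm should stay constant to near machine precision, and plotting it over time exposes gross implementation errors such as inconsistent boundary rows in Al and Ar.

plt.plot(t, norm)
plt.xlabel('t')
plt.ylabel('norm of psi')
plt.show()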

How to find bezier coefficients without matrices?

The function get_cubic needs 4 points, and I need to find b and c by calculation (a and d are given).
Here is my code; I need help specifically with get_bezier_coef.
def get_bezier_coef(points):
    # since the formulas work given that we have n+1 points
    # then n must be this:
    n = len(points) - 1
    # build coefficients matrix
    C = 4 * np.identity(n)
    np.fill_diagonal(C[1:], 1)
    np.fill_diagonal(C[:, 1:], 1)
    C[0, 0] = 2
    C[n - 1, n - 1] = 7
    C[n - 1, n - 2] = 2
    # build points vector
    P = [2. * (2. * points[i] + points[i + 1]) for i in range(n)]
    P[0] = points[0] + 2 * points[1]
    P[n - 1] = 8 * points[n - 1] + points[n]
    # solve system, find A & B
    A = np.linalg.solve(C, P)
    B = [0] * n
    for i in range(n - 1):
        B[i] = 2. * points[i + 1] - A[i + 1]
    B[n - 1] = (A[n - 1] + points[n]) / 2.
    return A, B

# returns the general Bezier cubic formula given 4 control points
def get_cubic(a, b, c, d):
    return lambda t: np.power(1 - t, 3) * a + 3 * np.power(1 - t, 2) * t * b + 3 * (1 - t) * np.power(t, 2) * c + np.power(t, 3) * d

# return one cubic curve for each consecutive pair of points
def get_bezier_cubic(points):
    A, B = get_bezier_coef(points)
    return [
        get_cubic(points[i], A[i], B[i], points[i + 1])
        for i in range(len(points) - 1)
    ]
The function get_bezier_coef gets a list of points [(x0, y0), (x1, y1), ...] and returns the coefficients of the Bezier curve (it finds the 2 control points between each start and end point). Is there any way to calculate the coefficients without matrices? Or any other way that will reduce the time?
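One option: the matrix C built above is tridiagonal, so the dense np.linalg.solve (O(n³)) can be replaced with the Thomas algorithm (O(n)), never materialising the matrix at all. A minimal sketch (the helper name solve_tridiagonal is mine, and it assumes points is an (n+1, 2) float array):

import numpy as np

def solve_tridiagonal(lower, main, upper, rhs):
    # Thomas algorithm: lower/upper are the sub/super-diagonals (length n-1),
    # main is the diagonal (length n), rhs may be shape (n,) or (n, 2).
    n = len(main)
    main = main.astype(float)
    rhs = np.asarray(rhs, dtype=float)
    for i in range(1, n):
        w = lower[i - 1] / main[i - 1]           # eliminate the sub-diagonal
        main[i] -= w * upper[i - 1]
        rhs[i] -= w * rhs[i - 1]
    x = rhs
    x[-1] = rhs[-1] / main[-1]                   # back substitution
    for i in range(n - 2, -1, -1):
        x[i] = (rhs[i] - upper[i] * x[i + 1]) / main[i]
    return x

# the same system as in get_bezier_coef, without building C:
def get_bezier_coef_fast(points):
    n = len(points) - 1
    lower = np.ones(n - 1); lower[-1] = 2.0      # sub-diagonal of C
    main = np.full(n, 4.0); main[0] = 2.0; main[-1] = 7.0
    upper = np.ones(n - 1)                       # super-diagonal of C
    P = [2. * (2. * points[i] + points[i + 1]) for i in range(n)]
    P[0] = points[0] + 2 * points[1]
    P[n - 1] = 8 * points[n - 1] + points[n]
    A = solve_tridiagonal(lower, main, upper, P)
    B = [2. * points[i + 1] - A[i + 1] for i in range(n - 1)]
    B.append((A[n - 1] + points[n]) / 2.)
    return A, B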

Grid Search over function

The function HH_model(I, area_factor) returns the number of spikes triggered over n runs. Assuming 1000 runs, if max(v[:] - v_Rest) > 60 happens 157 times, then the return value of HH_model(I, area_factor) is 157.
Now I know value pairs from another model: the x-values are related to the stimulus I, while the y-values are the number of spikes.
I have written these values as a comment under the code. I want to choose my input parameters I and area_factor in such a way that the error with respect to the data is as small as possible. I have no idea how I should do this optimization.
import matplotlib.pyplot as py
import numpy as np
import scipy.optimize as optimize
# HH parameters
v_Rest = -65 # in mV
gNa = 1200 # in mS/cm^2
gK = 360 # in mS/cm^2
gL = 0.3*10 # in mS/cm^2
vNa = 115 # in mV
vK = -12 # in mV
vL = 10.6 # in mV
#Number of runs
runs = 1000
c = 1 # in uF/cm^2
ROOT = False
def HH_model(I, area_factor):
    count = 0
    t_end = 10 # in ms
    delay = 0.1 # in ms
    duration = 0.1 # in ms
    dt = 0.0025 # in ms
    # geometry
    d = 2 # diameter in um
    r = d/2 # Radius in um
    l = 10 # Length of the compartment in um
    A = (1*10**(-8))*area_factor # surface [cm^2]
    C = c*A # uF
    for j in range(0, runs):
        # Introduction of equations and channels
        def alphaM(v): return 12 * ((2.5 - 0.1 * (v)) / (np.exp(2.5 - 0.1 * (v)) - 1))
        def betaM(v): return 12 * (4 * np.exp(-(v) / 18))
        def betaH(v): return 12 * (1 / (np.exp(3 - 0.1 * (v)) + 1))
        def alphaH(v): return 12 * (0.07 * np.exp(-(v) / 20))
        def alphaN(v): return 12 * ((1 - 0.1 * (v)) / (10 * (np.exp(1 - 0.1 * (v)) - 1)))
        def betaN(v): return 12 * (0.125 * np.exp(-(v) / 80))
        # compute the timesteps
        t_steps = t_end/dt + 1
        # Compute the initial values
        v0 = 0
        m0 = alphaM(v0)/(alphaM(v0)+betaM(v0))
        h0 = alphaH(v0)/(alphaH(v0)+betaH(v0))
        n0 = alphaN(v0)/(alphaN(v0)+betaN(v0))
        # Allocate memory for v, m, h, n
        v = np.zeros((int(t_steps), 1))
        m = np.zeros((int(t_steps), 1))
        h = np.zeros((int(t_steps), 1))
        n = np.zeros((int(t_steps), 1))
        # Set Initial values
        v[:, 0] = v0
        m[:, 0] = m0
        h[:, 0] = h0
        n[:, 0] = n0
        ### Noise component
        knoise = 0.0005 # uA/(mS)^1/2
        ### --------- Step 3: SOLVE
        for i in range(0, int(t_steps)-1, 1):
            # Get current states
            vT = v[i]
            mT = m[i]
            hT = h[i]
            nT = n[i]
            # Stimulus current
            if delay / dt <= i <= (delay + duration) / dt:
                IStim = I # in uA
            else:
                IStim = 0
            # Compute change of m, h and n
            m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
            h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
            n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
            # Ionic currents
            iNa = gNa * m[i + 1] ** 3. * h[i + 1] * (vT - vNa)
            iK = gK * n[i + 1] ** 4. * (vT - vK)
            iL = gL * (vT - vL)
            Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
            IIon = ((iNa + iK + iL) * A) + Inoise
            # Compute change of voltage
            v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0] # in ((uA / cm^2) / (uF / cm^2)) * ms == mV
        # adjust the voltage to the resting potential
        v = v + v_Rest
        # test if there was a spike
        if max(v[:] - v_Rest) > 60:
            count += 1
    return count
# some datapoints from another model out of 1000 runs. ydata means therefore 'count' out of 1000 runs.
# xdata = np.array([0.92*I,0.925*I,0.9535*I,0.975*I,0.9789*I,I,1.02*I,1.043*I,1.06*I,1.078*I,1.09*I])
# ydata = np.array([150,170,269,360,377,500,583,690,761,827,840])
EDIT:
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
# HH parameters
v_Rest = -65 # in mV
gNa = 120 # in mS/cm^2
gK = 36 # in mS/cm^2
gL = 0.3 # in mS/cm^2
vNa = 115 # in mV
vK = -12 # in mV
vL = 10.6 # in mV
#Number of runs
runs = 1000
c = 1 # in uF/cm^2
def HH_model(x, I, area_factor):
    count = 0
    t_end = 10 # in ms
    delay = 0.1 # in ms
    duration = 0.1 # in ms
    dt = 0.0025 # in ms
    # geometry
    d = 2 # diameter in um
    r = d/2 # Radius in um
    l = 10 # Length of the compartment in um
    A = (1*10**(-8))*area_factor # surface [cm^2]
    I = I*x
    C = c*A # uF
    for j in range(0, runs):
        # Introduction of equations and channels
        def alphaM(v): return 12 * ((2.5 - 0.1 * (v)) / (np.exp(2.5 - 0.1 * (v)) - 1))
        def betaM(v): return 12 * (4 * np.exp(-(v) / 18))
        def betaH(v): return 12 * (1 / (np.exp(3 - 0.1 * (v)) + 1))
        def alphaH(v): return 12 * (0.07 * np.exp(-(v) / 20))
        def alphaN(v): return 12 * ((1 - 0.1 * (v)) / (10 * (np.exp(1 - 0.1 * (v)) - 1)))
        def betaN(v): return 12 * (0.125 * np.exp(-(v) / 80))
        # compute the timesteps
        t_steps = t_end/dt + 1
        # Compute the initial values
        v0 = 0
        m0 = alphaM(v0)/(alphaM(v0)+betaM(v0))
        h0 = alphaH(v0)/(alphaH(v0)+betaH(v0))
        n0 = alphaN(v0)/(alphaN(v0)+betaN(v0))
        # Allocate memory for v, m, h, n
        v = np.zeros((int(t_steps), 1))
        m = np.zeros((int(t_steps), 1))
        h = np.zeros((int(t_steps), 1))
        n = np.zeros((int(t_steps), 1))
        # Set Initial values
        v[:, 0] = v0
        m[:, 0] = m0
        h[:, 0] = h0
        n[:, 0] = n0
        ### Noise component
        knoise = 0.0005 # uA/(mS)^1/2
        ### --------- Step 3: SOLVE
        for i in range(0, int(t_steps)-1, 1):
            # Get current states
            vT = v[i]
            mT = m[i]
            hT = h[i]
            nT = n[i]
            # Stimulus current
            if delay / dt <= i <= (delay + duration) / dt:
                IStim = I # in uA
            else:
                IStim = 0
            # Compute change of m, h and n
            m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
            h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
            n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
            # Ionic currents
            iNa = gNa * m[i + 1] ** 3. * h[i + 1] * (vT - vNa)
            iK = gK * n[i + 1] ** 4. * (vT - vK)
            iL = gL * (vT - vL)
            Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
            IIon = ((iNa + iK + iL) * A) + Inoise
            # Compute change of voltage
            v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0] # in ((uA / cm^2) / (uF / cm^2)) * ms == mV
        # adjust the voltage to the resting potential
        v = v + v_Rest
        # test if there was a spike
        if max(v[:] - v_Rest) > 60:
            count += 1
    return count
def loss(parameters, model, x_ref, y_ref):
    # unpack multiple parameters
    I, area_factor = parameters
    # compute prediction
    y_predicted = np.array([model(x, I, area_factor) for x in x_ref])
    # compute error and use it as loss
    mse = ((y_ref - y_predicted) ** 2).mean()
    return mse
# some datapoints from another model out of 1000 runs. ydata means therefore 'count' out of 1000 runs.
xdata = np.array([0.92,0.925,0.9535, 0.975, 0.9789, 1])
ydata = np.array([150,170,269, 360, 377, 500])
y_data_scaled = ydata / runs
y_predicted = np.array([HH_model(x,I=10**(-3), area_factor=1) for x in xdata])
parameters = (10**(-3), 1)
mse0 = loss(parameters, HH_model, xdata, y_data_scaled)
# compute the parameters that minimize the loss (alias, the error between the data and the predictions of the model)
optimum = minimize(loss, x0=np.array([10**(-3), 1]), args=(HH_model, xdata, y_data_scaled))
# compute the predictions with the optimized parameters
I = optimum['x'][0]
area_factor = optimum['x'][1]
y_predicted_opt = np.array([HH_model(x, I, area_factor) for x in xdata])
# plot the raw data, the model with handcrafted guess and the model with optimized parameters
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('input')
ax.set_ylabel('output predictions')
ax.plot(xdata, y_data_scaled, marker='o')
ax.plot(xdata, y_predicted, marker='*')
ax.plot(xdata, y_predicted_opt, marker='v')
ax.legend([
    "raw data points",
    "initial guess",
    "predictions with optimized parameters"
])
I started using your function, then I noticed it was very slow to execute. Hence, I decided to show the process with a toy (linear) model; the process remains the same.
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def loss(parameters, model, x_ref, y_ref):
    # unpack multiple parameters
    m, q = parameters
    # compute prediction
    y_predicted = np.array([model(x, m, q) for x in x_ref])
    # compute error and use it as loss
    mse = ((y_ref - y_predicted) ** 2).mean()
    return mse
# load a dataset to fit a model
x_data = np.array([0.92, 0.925, 0.9535, 0.975, 0.9789, 1, 1.02, 1.043, 1.06, 1.078, 1.09])
y_data = np.array([150, 170, 269, 360, 377, 500, 583, 690, 761, 827, 840])
# normalise the data - input is already normalised
y_data_scaled = y_data / 1000
# create a model (linear, as an example) using handcrafted parameters, ex:(1,1)
linear_fun = lambda x, m, q: m * x + q
y_predicted = np.array([linear_fun(x, m=1, q=1) for x in x_data])
# create a function that given a model (linear_fun), a dataset(x,y) and the parameters, compute the error
parameters = (1, 1)
mse0 = loss(parameters, linear_fun, x_data, y_data_scaled)
# compute the parameters that minimize the loss (alias, the error between the data and the predictions of the model)
optimum = minimize(loss, x0=np.array([1, 1]), args=(linear_fun, x_data, y_data_scaled))
# compute the predictions with the optimized parameters
m = optimum['x'][0]
q = optimum['x'][1]
y_predicted_opt = np.array([linear_fun(x, m, q) for x in x_data])
# plot the raw data, the model with handcrafted guess and the model with optimized parameters
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('input')
ax.set_ylabel('output predictions')
ax.plot(x_data, y_data_scaled, marker='o')
ax.plot(x_data, y_predicted, marker='*')
ax.plot(x_data, y_predicted_opt, marker='v')
ax.legend([
    "raw data points",
    "initial guess",
    "predictions with optimized parameters"
])
# Note 1: good practice is to validate your model with a different set of data
# from the one that you used to find the parameters;
# here, however, only the optimization procedure is shown.
# Note 2: in your case you should use HH_model instead of linear_fun,
# and I and area_factor instead of m and q.
Output: a plot of the raw data, the initial guess, and the optimized predictions.
-- EDIT: To use the HH_model:
I went deeper into your code, tried a few values for the area and the stimulus, and executed a single run of HH_model without taking the threshold. Then I checked the predicted dynamic of the voltage (v): the sequence always diverges (all values become NaN after a few steps).
If you have an initial guess for the stimulus and the area that makes the code work, great. If you have no idea of the order of magnitude of these parameters, the only solution I see is a grid search over them, just to find an initial guess; however, it might take a very long time without any guarantee of success. Given that the code is based on a physical model, I would suggest to:
1 - find reasonable values with pen and paper.
2 - check that the simulation works for these values.
3 - then, run the optimizer to find the minimum.
Or, worst case scenario, reverse-engineer the code and find the values that make the equations converge.
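For completeness, a coarse grid search for such an initial guess could look like this (a sketch; the ranges are made up and would need to come from the physics, and it reuses the loss, HH_model, xdata and y_data_scaled defined above, so it is still slow):

import itertools
best = (np.inf, None, None)
for I0, af in itertools.product(np.logspace(-5, -1, 5), np.logspace(-2, 2, 5)):
    err = loss((I0, af), HH_model, xdata, y_data_scaled)
    if err < best[0]:
        best = (err, I0, af)
print('best (mse, I, area_factor):', best)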
Here is the refactored code:
import math
import numpy as np
# HH parameters
v_Rest = -65 # in mV
gNa = 1200 # in mS/cm^2
gK = 360 # in mS/cm^2
gL = 0.3 * 10 # in mS/cm^2
vNa = 115 # in mV
vK = -12 # in mV
vL = 10.6 # in mV
# Number of runs
c = 1 # in uF/cm^2
# Introduction of equations and channels
def alphaM(v):
    return 12 * ((2.5 - 0.1 * (v)) / (np.exp(2.5 - 0.1 * (v)) - 1))

def betaM(v):
    return 12 * (4 * np.exp(-(v) / 18))

def betaH(v):
    return 12 * (1 / (np.exp(3 - 0.1 * (v)) + 1))

def alphaH(v):
    return 12 * (0.07 * np.exp(-(v) / 20))

def alphaN(v):
    return 12 * ((1 - 0.1 * (v)) / (10 * (np.exp(1 - 0.1 * (v)) - 1)))

def betaN(v):
    return 12 * (0.125 * np.exp(-(v) / 80))
def predict_voltage(A, C, delay, dt, duration, stimulus, t_end):
    # compute the timesteps
    t_steps = t_end / dt + 1
    # Compute the initial values
    v0 = 0
    m0 = alphaM(v0) / (alphaM(v0) + betaM(v0))
    h0 = alphaH(v0) / (alphaH(v0) + betaH(v0))
    n0 = alphaN(v0) / (alphaN(v0) + betaN(v0))
    # Allocate memory for v, m, h, n
    v = np.zeros((int(t_steps), 1))
    m = np.zeros((int(t_steps), 1))
    h = np.zeros((int(t_steps), 1))
    n = np.zeros((int(t_steps), 1))
    # Set Initial values
    v[:, 0] = v0
    m[:, 0] = m0
    h[:, 0] = h0
    n[:, 0] = n0
    # Noise component
    knoise = 0.0005 # uA/(mS)^1/2
    for i in range(0, int(t_steps) - 1, 1):
        # Get current states
        vT = v[i]
        mT = m[i]
        hT = h[i]
        nT = n[i]
        # Stimulus current
        if delay / dt <= i <= (delay + duration) / dt:
            IStim = stimulus # in uA
        else:
            IStim = 0
        # Compute change of m, h and n
        m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
        h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
        n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
        # Ionic currents
        iNa = gNa * m[i + 1] ** 3. * h[i + 1] * (vT - vNa)
        iK = gK * n[i + 1] ** 4. * (vT - vK)
        iL = gL * (vT - vL)
        Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
        IIon = ((iNa + iK + iL) * A) + Inoise
        # Compute change of voltage
        v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0] # in ((uA / cm^2) / (uF / cm^2)) * ms == mV
        # stop simulation if it diverges
        if math.isnan(v[i + 1]):
            return [None]
    # adjust the voltage to the resting potential
    v = v + v_Rest
    return v
def HH_model(stimulus, area_factor, runs=1000):
    count = 0
    t_end = 10 # in ms
    delay = 0.1 # in ms
    duration = 0.1 # in ms
    dt = 0.0025 # in ms
    # geometry
    d = 2 # diameter in um
    r = d / 2 # Radius in um
    l = 10 # Length of the compartment in um
    A = (1 * 10 ** (-8)) * area_factor # surface [cm^2]
    C = c * A # uF
    for j in range(0, runs):
        v = predict_voltage(A, C, delay, dt, duration, stimulus, t_end)
        # skip runs whose integration diverged (predict_voltage returned [None])
        if v[0] is None:
            continue
        if max(v[:] - v_Rest) > 60:
            count += 1
    return count
And the attempt to run one simulation:
import time
from ex_21.equations import c, predict_voltage
area_factor = 0.1
stimulus = 70
# input signal
count = 0
t_end = 10 # in ms
delay = 0.1 # in ms
duration = 0.1 # in ms
dt = 0.0025 # in ms
# geometry
d = 2 # diameter in um
r = d / 2 # Radius in um
l = 10 # Length of the compartment in um
A = (1 * 10 ** (-8)) * area_factor # surface [cm^2]
C = c * A # uF
start = time.time()
voltage_dynamic = predict_voltage(A, C, delay, dt, duration, stimulus, t_end)
elapse = time.time() - start
print(voltage_dynamic)
Output:
[None]

IndexError: index 10000 is out of bounds for axis 0 with size 10000

For my physics degree, I have to take some Python lessons. I'm an absolute beginner, and as such I can't understand other answers. The code is meant to plot an object's trajectory with air resistance. I would really appreciate a quick fix; I think it has something to do with the time variable being too small, but increasing it doesn't help.
import matplotlib.pyplot as plt
import numpy as np
import math # need math module for trigonometric functions

g = 9.81 # gravitational constant
dt = 1e-3 # integration time step (delta t)
v0 = 40 # initial speed at t = 0
angle = math.pi/4 # math.pi = 3.14..., launch angle in radians
time = np.arange(0, 10, dt) # time axis
vx0 = math.cos(angle)*v0 # starting velocity along x axis
vy0 = math.sin(angle)*v0 # starting velocity along y axis
xa = vx0*time # compute x coordinates
ya = -0.5*g*time**2 + vy0*time # compute y coordinates

def traj_fric(angle, v0): # function for trajectory
    vx0 = math.cos(angle) * v0 # for some launch angle and starting velocity
    vy0 = math.sin(angle) * v0 # compute x and y component of starting velocity
    x = np.zeros(len(time)) # initialise x and y arrays
    y = np.zeros(len(time))
    x[0], y[0] = 0, 0 # projectile starts at (0, 0)
    x[1], y[1] = x[0] + vx0 * dt, y[0] + vy0 * dt # second elements of x and
                                                  # y are determined by initial
                                                  # velocity
    i = 1
    while y[i] >= 0: # conditional loop continues until
                     # projectile hits ground
        gamma = 0.005 # constant of friction
        height = 100 # height at which air friction disappears
        f = 0.5 * gamma * (height - y[i]) * dt
        x[i + 1] = (2 * x[i] - x[i - 1] + f * x[i - 1])/1 + f # numerical integration to find x[i + 1]
        y[i + 1] = (2 * y[i] - y[i - 1] + f * y[i - 1] - g * dt ** 2)/ 1 + f # and y[i + 1]
        i = i + 1 # increment i for next loop
    x = x[0:i+1] # truncate x and y arrays
    y = y[0:i+1]
    return x, y, (dt*i), x[i] # return x, y, flight time, range of projectile

x, y, duration, distance = traj_fric(angle, v0)

fig1 = plt.figure()
plt.plot(xa, ya) # plot y versus x
plt.xlabel("x")
plt.ylabel("y")
plt.ylim(0, max(ya) + max(ya)*0.2)
plt.xlim(0, distance + distance*0.1)
plt.show()

print "Distance:", distance
print "Duration:", duration

n = 5
angles = np.linspace(0, math.pi/2, n)
maxrange = np.zeros(n)
for i in range(n):
    x, y, duration, maxrange[i] = traj_fric(angles[i], v0)
angles = angles/2/math.pi*360 # convert rad to degrees
print "Optimum angle:", angles[np.where(maxrange == np.max(maxrange))]
The error is:
File "C:/Python27/Lib/site-packages/xy/projectile_fric.py", line 43, in traj_fric
x[i + 1] = (2 * x[i] - x[i - 1] + f * x[i - 1])/1 + f # numerical integration to find x[i + 1]
IndexError: index 10000 is out of bounds for axis 0 with size 10000
This is pretty straightforward. When you have a size of 10000, element index 10000 is out of bounds because indexing begins with 0, not 1. Therefore, the 10,000th element is index 9999, and anything larger than that is out of bounds.
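A minimal reproduction of that rule (my illustration, not from the original answer):

import numpy as np
a = np.zeros(10000)
print(a[9999])   # fine: the last element is index 9999
print(a[10000])  # IndexError: index 10000 is out of bounds for axis 0 with size 10000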
Mason Wheeler's answer told you what Python was telling you. The problem occurs in this loop:
while y[i] >= 0: # conditional loop continues until
                 # projectile hits ground
    gamma = 0.005 # constant of friction
    height = 100 # height at which air friction disappears
    f = 0.5 * gamma * (height - y[i]) * dt
    x[i + 1] = (2 * x[i] - x[i - 1] + f * x[i - 1])/1 + f # numerical integration to find x[i + 1]
    y[i + 1] = (2 * y[i] - y[i - 1] + f * y[i - 1] - g * dt ** 2)/ 1 + f # and y[i + 1]
    i = i + 1 # increment i for next loop
The simple fix is to change the loop to something like (I don't know Python syntax, so bear with me):
while (y[i] >= 0) and (i < len(time)):
That will stop the sim when you run out of array, but it will (potentially) also stop the sim with the projectile hanging in mid-air.
What you have here is a very simple ballistic projectile simulation, modeling atmospheric friction as a linear function of altitude. QUALITATIVELY, what is happening is that your projectile is not hitting the ground in the time you allowed, and you are attempting to overrun your tracking arrays. This is caused by failure to allow sufficient time-of-flight. Observe that the greatest possible time-of-flight occurs when atmospheric friction is zero, and it is then trivial to compute a closed-form upper bound for time-of-flight. You then use that upper bound as your time, and you will allocate sufficient array space to simulate the projectile all the way to impact.
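A sketch of that suggestion (my code, not from the answer): with zero friction, a projectile launched straight up stays aloft longest, which gives the closed-form bound t_max = 2*v0/g. Sizing the time axis from it guarantees the arrays are long enough to reach impact:

t_max = 2 * v0 / g               # upper bound on flight time (frictionless, vertical launch)
time = np.arange(0, t_max, dt)   # arrays sized from the bound cannot be overrun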
