Mpi4py: printing and plotting during execution - python

I recently started working with MPI in order to accelerate some code (an SPH/gravity simulation).
So far it seems to be working: the positions of my particles have changed between the beginning and the end of the program, and the task manager shows that several Python processes are running...
But I have two problems:
1/ I can't print text during the execution of the program. The text is only printed once it has finished.
2/ I'm unable to create a graph using matplotlib.
In both cases, neither Python nor MPI returns any error. My guess, for the text at least, is that it is only printed once the mpiexec run ends.
Thanks for any insight!
Here is the code; I run it with !mpiexec -n 8 python mpi4py_test.py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from mpi4py import MPI

class particule:
    def __init__(self, h, pos, vel = [0, 0], m = 1, P = 0, density = 0, acc = [0, 0]):
        self.x, self.y = pos
        self.u, self.v = vel
        self.acc_x, self.acc_y = acc
        self.m = m
        self.h = h # kernel length
        self.P = P # Pressure
        self.density = density

def kernel(part_a, part_b):
    " monaghan cubic spline "
    cst = 1 / (np.pi * part_a.h**3)
    dist = np.sqrt((part_b.x-part_a.x)**2 + (part_b.y-part_a.y)**2)
    r = dist / part_a.h
    tmp = 0
    if r < 1:
        tmp = cst * (1 - 3/2* r**2 + 3/4*r**3)
    elif r < 2:
        tmp = cst * (1/4*(2-r)**3)
    return tmp

def grad_kernel(part_a, part_b):
    cst = 1 / (np.pi * part_a.h**3)
    dist = np.sqrt((part_b.x-part_a.x)**2 + (part_b.y-part_a.y)**2)
    r = dist / part_a.h
    tmp = 0
    if r < 1:
        tmp = cst * (9/4 * r**2 - 3*r)
    elif r < 2:
        tmp = cst * (-3/4*(2-r)**2)
    return tmp

class hash_grid:
    def __init__(self, cell_size):
        self.cell_size = cell_size
        self.cell = {}
        self.part_list = []
    def key(self, part):
        return (part.x//self.cell_size,
                part.y//self.cell_size)
    def add(self, part):
        tmp = self.key(part)
        self.cell.setdefault(tmp, []).append(part)
        self.part_list.append(part)
    def neighbours(self, part):
        idx = self.key(part)
        cell_N = None
        cell_S = None
        cell_E = None
        cell_W = None
        cell_NE = None
        cell_NW = None
        cell_SE = None
        cell_SW = None
        if (idx[0], idx[1]+1) in self.cell: cell_N = (idx[0], idx[1]+1)
        if (idx[0], idx[1]-1) in self.cell: cell_S = (idx[0], idx[1]-1)
        if (idx[0]+1, idx[1]) in self.cell: cell_E = (idx[0]+1, idx[1])
        if (idx[0]-1, idx[1]) in self.cell: cell_W = (idx[0]-1, idx[1])
        if (idx[0]+1, idx[1]+1) in self.cell: cell_NE = (idx[0]+1, idx[1]+1)
        if (idx[0]-1, idx[1]+1) in self.cell: cell_NW = (idx[0]-1, idx[1]+1)
        if (idx[0]+1, idx[1]-1) in self.cell: cell_SE = (idx[0]+1, idx[1]-1)
        if (idx[0]-1, idx[1]-1) in self.cell: cell_SW = (idx[0]-1, idx[1]-1)
        return [value for cel in (idx, cell_N, cell_S, cell_E, cell_W, cell_NE, cell_NW, cell_SE, cell_SW)
                if cel != None for value in self.cell.get(cel)]

def split(to_split, nb_chunk):
    "take a list and split it as evenly as possible"
    result = []
    q, r = divmod(len(to_split), nb_chunk)
    curr = 0
    last = 0
    for i in range(nb_chunk):
        if r > 0:
            last = curr + q + 1
            result.append(to_split[curr: last])
            r = r-1
            curr = last
        else:
            last = curr + q
            result.append(to_split[curr: last])
            curr = last
    return result

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

if rank == 0:
    n_part = 2000
    h = 2
    grid = hash_grid(h)
    points = np.zeros((n_part, 2))
    sim_range = 20
    dt = 0.005
    n_iter = 2500
    k = 5
    for _i in range(n_part):
        pos = np.random.uniform(-sim_range, sim_range, 2)
        vel = np.random.uniform(-.2, .2, 2)
        p = particule(h, pos, vel)
        grid.add(p)
        points[_i, :] = pos
    for part in grid.part_list:
        part.density = 0
        part.P = 0
        for p in grid.neighbours(part):
            part.density = part.density + p.m * kernel(part, p)
        part.P = k * (part.density)**2 # - density_0)
    data = split(grid.part_list, 8)

for t in range(10):
    if rank == 0:
        "1 - verlet 1/2 ; serial"
        for i, part in enumerate(grid.part_list):
            part.u, part.v = part.u + part.acc_x * dt/2, part.v + part.acc_y * dt/2
            part.x, part.y = part.x + part.u * dt, part.y + part.v * dt
        "2 - update grid ; serial"
        jnk = grid.part_list
        del grid
        grid = hash_grid(h)
        for p in jnk:
            grid.add(p)
        grid_bcast = grid
        data_b = grid
        chunk_of_part_list = split(grid.part_list, 8)
    else:
        grid_bcast = None
        chunk_of_part_list = None
        data_b = None
    grid_bcast = comm.bcast(grid_bcast, root=0)
    chunk_of_part_list = comm.scatter(chunk_of_part_list, root=0)
    "3 - get acc ; parallel"
    for part in chunk_of_part_list:
        part.acc_x = 0
        part.acc_y = 0
        for p in grid_bcast.neighbours(part):
            if p != part:
                r = np.sqrt((p.x-part.x)**2 + (p.y-part.y)**2)
                if r == 0: pass
                else:
                    part.acc_x = part.acc_x - p.m * (part.P/part.density**2 + p.P/p.density**2) * grad_kernel(part, p) * (p.x - part.x)/r
                    part.acc_y = part.acc_y - p.m * (part.P/part.density**2 + p.P/p.density**2) * grad_kernel(part, p) * (p.y - part.y)/r
        dist = np.sqrt(part.x**2 + part.y**2)
        part.acc_x = part.acc_x - .5 * part.x - 1*part.u
        part.acc_y = part.acc_y - .5 * part.y - 1*part.v
    chunk_of_part_list = comm.gather(chunk_of_part_list, root=0)
    "4 - verlet 2/2 ; serial"
    if rank == 0:
        grid.part_list = list(matplotlib.cbook.flatten(chunk_of_part_list))
        for i, part in enumerate(grid.part_list):
            part.u, part.v = part.u + part.acc_x * dt/2, part.v + part.acc_y * dt/2
            points[i, 0], points[i, 1] = part.x, part.y # for the figure
    if rank == 0:
        print(t, flush=True)

if rank == 0:
    print('point 0', points[0, :])
    fig = plt.figure()
    ax = fig.add_subplot()
    sc = ax.scatter(points[:, 0], points[:, 1], s=3)
    ax.set_aspect('equal', 'box')
    ax.set_xlim(-sim_range, sim_range)
    ax.set_ylim(-sim_range, sim_range)
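For reference, a minimal sketch of the two usual workarounds, assuming the missing output and the missing window come from running under mpiexec rather than from the simulation itself: flush stdout on every print (or start Python with -u), and have rank 0 write the figure to a file with a non-interactive backend instead of expecting a window to open. The file name and the small logging helper below are illustrative, not part of the original script.
import sys
import matplotlib
matplotlib.use("Agg")  # non-interactive backend: render to files, no window needed
import matplotlib.pyplot as plt
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

def log(msg):
    # flush=True pushes the text out immediately instead of waiting for the run to end
    print(f"[rank {rank}] {msg}", flush=True)
    sys.stdout.flush()

log("starting")
if rank == 0:
    fig, ax = plt.subplots()
    ax.scatter([0.0, 1.0], [0.0, 1.0], s=3)
    fig.savefig("frame_000.png", dpi=150)  # write to disk instead of plt.show()
    log("figure written to frame_000.png")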

Related

offset a parallel line to a given line python

I want to draw a line parallel to the shape given by the X, Y coordinates below; the following code draws it:
import numpy as np
import matplotlib.pyplot as plt

x = [187, 879, 722, 322]
y = [341, 344, 112, 112]
newX = []
newY = []

def findIntesection(p1x, p1y, p2x, p2y, p3x, p3y, p4x, p4y):
    dx12 = p2x - p1x
    dy12 = p2y - p1y
    dx34 = p4x - p3x
    dy34 = p4y - p3y
    denominator = (dy12*dx34 - dx12*dy34)
    t1 = ((p1x - p3x) * dy34 + (p3y - p1y) * dx34) / denominator
    t2 = ((p3x - p1x) * dy12 + (p1y - p3y) * dx12) / -denominator
    intersectX = p1x + dx12 * t1
    intersectY = p1y + dy12 * t1
    if (t1 < 0): t1 = 0
    elif (t1 > 1): t1 = 1
    if (t2 < 0): t2 = 0
    elif (t2 > 1): t2 = 1
    return intersectX, intersectY

def normalizeVec(x, y):
    distance = np.sqrt(x*x + y*y)
    return x/distance, y/distance

def getEnlarged(oldX, oldY, offset):
    num_points = len(oldX)
    for j in range(num_points):
        i = j - 1
        if i < 0:
            i += num_points
        k = (j + 1) % num_points
        vec1X = oldX[j] - oldX[i]
        vec1Y = oldY[j] - oldY[i]
        v1normX, v1normY = normalizeVec(vec1X, vec1Y)
        v1normX *= offset
        v1normY *= offset
        n1X = -v1normY
        n1Y = v1normX
        pij1X = oldX[i] + n1X
        pij1Y = oldY[i] + n1Y
        pij2X = oldX[j] + n1X
        pij2Y = oldY[j] + n1Y
        vec2X = oldX[k] - oldX[j]
        vec2Y = oldY[k] - oldY[j]
        v2normX, v2normY = normalizeVec(vec2X, vec2Y)
        v2normX *= offset
        v2normY *= offset
        n2X = -v2normY
        n2Y = v2normX
        pjk1X = oldX[j] + n2X
        pjk1Y = oldY[j] + n2Y
        pjk2X = oldX[k] + n2X
        pjk2Y = oldY[k] + n2Y
        intersectX, intersetY = findIntesection(pij1X, pij1Y, pij2X, pij2Y, pjk1X, pjk1Y, pjk2X, pjk2Y)
        #print(intersectX, intersetY)
        newX.append(intersectX)
        newY.append(intersetY)

getEnlarged(x, y, 20)
plt.plot(x, y)
plt.plot(newX, newY)
plt.show()
This gives the result below.
It gives a good result by drawing a line parallel to each edge of our trapezoidal shape, but I want it to be a closed shape instead of an open one.
I want to join the first and last coordinates so that it forms a closed shape. Any help will be appreciated.
Using the approach from here:
The outer_ccw parameter combines the vertex order and the desired offset direction. For CCW order and an outer polygon it is 1; for an inner polygon it should be -1.
def makeOffsetPoly(oldX, oldY, offset, outer_ccw = 1):
    num_points = len(oldX)
    for curr in range(num_points):
        prev = (curr + num_points - 1) % num_points
        next = (curr + 1) % num_points
        vnX = oldX[next] - oldX[curr]
        vnY = oldY[next] - oldY[curr]
        vnnX, vnnY = normalizeVec(vnX, vnY)
        nnnX = vnnY
        nnnY = -vnnX
        vpX = oldX[curr] - oldX[prev]
        vpY = oldY[curr] - oldY[prev]
        vpnX, vpnY = normalizeVec(vpX, vpY)
        npnX = vpnY * outer_ccw
        npnY = -vpnX * outer_ccw
        bisX = (nnnX + npnX) * outer_ccw
        bisY = (nnnY + npnY) * outer_ccw
        bisnX, bisnY = normalizeVec(bisX, bisY)
        bislen = offset / np.sqrt((1 + nnnX*npnX + nnnY*npnY)/2)
        newX.append(oldX[curr] + bislen * bisnX)
        newY.append(oldY[curr] + bislen * bisnY)
x = [0, 100, 60, 40]
y = [0, 0, 50, 50]
makeOffsetPoly(x, y, 20)
print(newX, newY)
>>>[-29.424478775259594, 129.4244787752596, 66.79706177729007, 33.202938222709925]
[-14.14213562373095, -14.14213562373095, 64.14213562373095, 64.14213562373095]
Just append the first coordinates to the end of your lists.
x.append(x[0])
y.append(y[0])
newX.append(newX[0])
newY.append(newY[0])
Place this right before you plot.
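Putting the two pieces together, a small self-contained sketch (with arbitrary example coordinates, and a return-value variant of makeOffsetPoly instead of the global newX/newY lists) that offsets the polygon and closes both outlines before plotting:
import numpy as np
import matplotlib.pyplot as plt

def normalizeVec(x, y):
    d = np.sqrt(x * x + y * y)
    return x / d, y / d

def makeOffsetPoly(oldX, oldY, offset, outer_ccw=1):
    # same bisector construction as above, but returning the new vertices
    newX, newY = [], []
    n = len(oldX)
    for curr in range(n):
        prev = (curr + n - 1) % n
        nxt = (curr + 1) % n
        vnnX, vnnY = normalizeVec(oldX[nxt] - oldX[curr], oldY[nxt] - oldY[curr])
        nnnX, nnnY = vnnY, -vnnX
        vpnX, vpnY = normalizeVec(oldX[curr] - oldX[prev], oldY[curr] - oldY[prev])
        npnX, npnY = vpnY * outer_ccw, -vpnX * outer_ccw
        bisX, bisY = (nnnX + npnX) * outer_ccw, (nnnY + npnY) * outer_ccw
        bisnX, bisnY = normalizeVec(bisX, bisY)
        bislen = offset / np.sqrt((1 + nnnX * npnX + nnnY * npnY) / 2)
        newX.append(oldX[curr] + bislen * bisnX)
        newY.append(oldY[curr] + bislen * bisnY)
    return newX, newY

x = [0, 100, 60, 40]
y = [0, 0, 50, 50]
newX, newY = makeOffsetPoly(x, y, 20)
for xs, ys in ((x, y), (newX, newY)):
    # close each outline by repeating its first vertex
    xs.append(xs[0])
    ys.append(ys[0])
plt.plot(x, y)
plt.plot(newX, newY)
plt.show()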

MPC with python and Error ValueError: `f0` passed has more than 1 dimension

I wrote an MPC with Python and it worked before. After a long time I wanted to use it again, but I got this error:
f0 passed has more than 1 dimension.
But I didn't change anything in my code, which is rather strange.
Here is my code:
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def mpcAugment(Am, Bm, Cm ):
"Function for Augmented Model"
nx, nu = Bm.shape
ny = Cm.shape[0]
A = np.zeros((nx+ny,nx+ny))
A[0:nx,0:nx] = Am
A[nx:nx+ny,0:nx] = Cm @ Am
A[nx:nx+ny,nx:nx+ny] = np.eye(ny)
B = np.zeros((nx+ny,nu))
B[0:nx,:nu] = Bm
B[nx:nx+ny,:nu] = Cm @ Bm
C = np.zeros((ny,nx+ny))
C[:ny,nx:nx+ny] = np.eye(ny)
return A, B, C
'Define Parameters'
k = 0.4
AICB = 153.8
mcp = 8.8e4
vamb1 = 30
vamb2 = 45
a = -k*AICB/mcp
b = -1/mcp
Ts = 20
VICBref = -5.0
Am = np.array([[1+Ts*a]])
Bm = np.array([[Ts*b]])
Gm = np.array([[-Ts*a]])
Cm = np.array([[1]])
A, B, C = mpcAugment(Am,Bm,Cm)
A, G, C = mpcAugment(Am,Gm,Cm)
nx, nu = B.shape
ny = C.shape[0]
nd = G.shape[1]
Np = 20
Nu = 5
F = np.zeros((Np*ny,nx))
PHI = np.zeros((Np*ny,Nu*nu))
PHIw = np.zeros((Np*ny,Np*nd))
for i in range(0,Np):
Ai = npl.matrix_power(A, i+1)
F[i*ny:(i+1)*ny,:] = C @ Ai
for j in range(0, Nu):
if j <= i:
Aij = np.linalg.matrix_power(A, i-j)
PHI[i*ny:(i+1)*ny, j*nu:(j+1)*nu] = C @ Aij @ B
for j in range(0, Np):
if j <= i:
Aij = np.linalg.matrix_power(A, i-j)
PHIw[i*ny:(i+1)*ny, j*nd:(j+1)*nd] = C @ Aij @ G
umax = 3100
umin = 0
Q = np.eye(Np*ny)
R = 1e-2*np.eye(Nu*nu)
Rs = VICBref*np.ones((Np*ny,1))
Ainq = np.zeros((2*Nu*nu,Nu*nu))
binq = np.zeros((2*Nu*nu,1))
cinq = np.zeros((2*Nu*nu,1))
for i in range(0,Nu):
binq[i*nu:(i+1)*nu] = umax
binq[(i+Nu)*nu:(Nu+i+1)*nu] = 1
cinq[i*nu:(i+1)*nu] = 1
cinq[(i+Nu)*nu:(Nu+i+1)*nu] = -1
for j in range(0,i+1):
Ainq[i*nu:(i+1)*nu,j*nu:(j+1)*nu] = np.eye(nu)
Ainq[(i+Nu)*nu:(Nu+i+1)*nu,j*nu:(j+1)*nu] = np.eye(nu)
u0 = 0
def objective(du):
dU = np.array(du).reshape((len(du),1))
Y = F @ x + PHI @ dU + PHIw @ w
return np.transpose((Rs-Y)) @ (Rs-Y) + np.transpose(dU) @ R @ (dU)
def constraint1(du):
dU = np.array(du).reshape((len(du),1))
return (binq - Ainq @ dU - cinq*u0)[0]
#print(objective([1,1,1]))
ulim = (umin, umax)
bnds = np.kron(np.ones((Nu,1)),ulim)
#print(bnds)
Um = np.ones((nu*Nu,1))
Tsim = 5e4
time = np.arange(0,Tsim,Ts)
Nt = len(time)
xm = np.zeros((Nt,1))
um = np.zeros((Nt,nu))
ym = np.zeros((Nt,ny))
xm[0] = 0
ym[0] = Cm.dot(xm[0])
w = np.zeros((Np*nd,1))
print('Am = ',Am)
print('Bm = ',Bm)
print('Cm = ',Cm)
x = np.zeros((nx,1))
x[1] = xm[0]
vamb = vamb1
Vamb = np.zeros((Nt,1))
Ns = int(np.floor(Nt/2))
Vamb[0:Ns] = vamb1*np.ones((Ns,1))
Vamb[Ns:Nt] = vamb2*np.ones((Nt-Ns,1))
Vref = VICBref*np.ones((Nt,1))
con = {'type':'ineq','fun':constraint1}
for i in range(0,Nt-1):
sol = minimize(objective, Um, method = 'SLSQP',constraints = con)
if sol.success == False:
print('Error Cant solve problem')
exit()
Um = sol.x
um[i+1] = um[i] + Um[0]
u0 = um[i+1]
xm[i+1] = Am.dot(xm[i])+Bm.dot(um[i+1])+Gm.dot(Vamb[i])
ym[i+1] = Cm.dot(xm[i+1])
for j in range(0,Np):
if i+j < Nt:
Rs[j] = Vref[i+j]
w[j] = Vamb[i+j]-Vamb[i+j-1]
else:
Rs[j] = Vref[Nt-1]
w[j] = 0
x[0] = xm[i+1] - xm[i]
x[1] = xm[i+1]
print('Q = ',um[i+1],' , VICB = ',xm[i+1], ' vamb = ', Vamb[i])
hour = 60*60
plt.figure()
plt.subplot(2,1,1)
plt.plot(time/hour,ym)
plt.plot(time/hour,Vref,'--')
plt.xlabel('time(hours)')
plt.xlim([0, Tsim/hour])
plt.subplot(2,1,2)
plt.plot(time/hour,um)
plt.xlim([0, Tsim/hour])
plt.show()
It is about a controller which controls the temperature of a cool box.
Is it possible that something changed, even though my code is quite plain and simple?
I think the problem is now in the minimization part.
I reinstalled all of my libraries and it worked.
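For context on the error message itself (an observation of mine, not from the original post): recent SciPy versions estimate the gradient with a finite-difference helper that requires the objective to return a scalar, and an objective that returns a (1, 1) array, as the matrix expression (Rs-Y)^T (Rs-Y) does, triggers exactly "f0 passed has more than 1 dimension". A minimal sketch of that fix, with toy stand-in matrices in place of the real MPC ones:
import numpy as np
from scipy.optimize import minimize

# toy dimensions standing in for the real prediction matrices above
Np, Nu = 4, 2
F = np.ones((Np, 2)); PHI = np.ones((Np, Nu)); PHIw = np.eye(Np)
Rs = np.zeros((Np, 1)); R = np.eye(Nu); x = np.zeros((2, 1)); w = np.zeros((Np, 1))

def objective(du):
    dU = np.asarray(du).reshape((-1, 1))
    Y = F @ x + PHI @ dU + PHIw @ w
    cost = (Rs - Y).T @ (Rs - Y) + dU.T @ R @ dU  # shape (1, 1)
    return float(cost)                            # return a scalar, not an array

print(minimize(objective, np.ones(Nu), method='SLSQP').x)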

Spectral analysis on HRV data with LombScargle in Python

I'm working with RR peaks and want to derive the frequency-domain measures for HRV to recreate the results from the native C package by Physionet (WFDB tools). Both signal processing and spectral analysis are new fields for me, but after a long week of trial and error I've hacked together some code based on the Astropy module, after trying several other solutions.
from astropy.stats import LombScargle
import matplotlib.pyplot as plt
import numpy as np
import random
dy = 0.1 * random.randint(1,100)
t = drive01["time"].values
y = drive01["intervals"].values
frequency, power = LombScargle(t, y,dy).autopower(minimum_frequency=0.0,maximum_frequency=4)
plt.plot(frequency, power)
This creates a plot that looks quite similar to the plot from Physionet's package.
Physionet's HRV tools (the get_hrv code) produce this plot.
Then, by calculating common frequency-domain measures, I get quite different results.
Pxx = np.nan_to_num(power)
Fxx = np.nan_to_num(frequency)
ulf = 0.003
vlf = 0.04
lf = 0.15
hf = 0.4
Fs = 15.5 # the sampling rate of the drive file
# find the indexes corresponding to the VLF, LF, and HF bands
vlf_freq_band = (Fxx >= ulf) & (Fxx <= vlf)
lf_freq_band = (Fxx >= vlf) & (Fxx <= lf)
hf_freq_band = (Fxx >= lf) & (Fxx <= hf)
tp_freq_band = (Fxx >= 0) & (Fxx <= hf)
# Calculate the area under the given frequency band
dy = 1.0 / Fs
VLF = np.trapz(y=abs(Pxx[vlf_freq_band]), x=None, dx=dy)
LF = np.trapz(y=abs(Pxx[lf_freq_band]), x=None, dx=dy)
HF = np.trapz(y=abs(Pxx[hf_freq_band]), x=None, dx=dy)
TP = np.trapz(y=abs(Pxx[tp_freq_band]), x=None, dx=dy)
LF_HF = float(LF) / HF
Python
'HF': 0.10918703853414605,
'LF': 0.050074418080717789,
'LF/HF': 0.45861137689028925,
'TP': 0.20150514290250854,
'VLF': 0.025953350304821571
From the Physionet package:
TOT PWR = 0.0185973
VLF PWR = 0.00372733
LF PWR = 0.00472635
HF PWR = 0.0101436
LF/HF = 0.465944
When comparing the results it looks like this:
Python Physionet
TP 0.201505143 0.0185973 Quite similar + decimal dif
HF 0.109187039 0.0101436 Quite similar + decimal dif
LF 0.050074418 0.00472635 Quite similar + decimal dif
VLF 0.02595335 0.00372733 Not similar
LF/HF 0.458611377 0.465944 Quite similar
The calculations in Python are based on the code from another Stack Overflow post, but the fix the author got from the respondent relies on a Python module I'm not able to get working, and he is not using the Lomb periodogram. I'm very open to trying something else as well, as long as it works with unevenly sampled data.
The data I'm working with is the drivedb from Physionet, and I've used the Physionet packages to make a text file with RR peaks and times, which is read into a Pandas DataFrame. The text file can be found here.
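One hedged observation about the integration step above (an assumption on my part, not something verified against the Physionet output): np.trapz is given a fixed dx = 1/Fs, but the Lomb-Scargle frequency grid is not spaced at 1/Fs, so the absolute band powers pick up an arbitrary scale factor. A sketch of integrating over the actual frequency values instead, with a toy periodogram standing in for the LombScargle output:
import numpy as np

def band_power(freq, power, lo, hi):
    # integrate the periodogram over [lo, hi) using the real frequency spacing
    band = (freq >= lo) & (freq < hi)
    return np.trapz(power[band], x=freq[band])

freq = np.linspace(0.0, 0.5, 2000)            # Hz, toy grid
power = np.exp(-((freq - 0.25) / 0.05) ** 2)  # toy spectrum peaked in the LF/HF range
print(band_power(freq, power, 0.04, 0.15),    # LF
      band_power(freq, power, 0.15, 0.40))    # HF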
The Astropy-based LombScargle calculates power differently from the C package by Physionet (WFDB tools). I rewrote the Lomb-Scargle periodogram in Python, and the result now matches the C package from Physionet (WFDB tools).
import numpy as np
import os
import math
import csv
from itertools import zip_longest
import time
DATA_PATH = '/home/quangpc/Desktop/Data/PhysionetData/mitdb/'
class FreqDomainClass:
@staticmethod
def power(freq, mag):
lo = [0, 0.0033, 0.04, 0.15]
hi = [0.0033, 0.04, 0.15, 0.4]
pr = np.zeros(4)
nbands = 4
for index in range(0, len(freq)):
pwr = np.power(mag[index], 2)
for n in range(0, nbands):
if (freq[index] >= lo[n]) and freq[index] <= hi[n]:
pr[n] += pwr
break
return pr
@staticmethod
def avevar(y):
var = 0.0
ep = 0.0
ave = np.mean(y)
for i in range(len(y)):
s = y[i] - ave
ep += s
var += s * s
var = (var - ep * ep / len(y)) / (len(y) - 1)
return var
def lomb(self, t, h, ofac, hifac):
period = max(t) - min(t)
z = h - np.mean(h)
f = np.arange(1 / (period * ofac), hifac * len(h) / (2 * period), 1 / (period * ofac))
f = f[:int(len(f) / 2) + 1]
f = np.reshape(f, (len(f), -1))
w = 2 * np.pi * f
lenght_t = len(t)
t = np.reshape(t, (lenght_t, -1))
t = np.transpose(t)
tau = np.arctan2(np.sum(np.sin(2 * w * t), axis=1), np.sum(np.cos(2 * w * t), axis=1)) / (2 * w)
tau = np.diag(tau)
tau = np.reshape(tau, (len(tau), -1))
tau = np.tile(tau, (1, lenght_t))
cos = np.cos(w * (t - tau))
sin = np.sin(w * (t - tau))
pc = np.power(np.sum(z * cos, axis=1), 2)
ps = np.power(np.sum(z * sin, axis=1), 2)
cs = pc / np.sum(np.power(cos, 2), axis=1)
ss = ps / np.sum(np.power(sin, 2), axis=1)
p = cs + ss
pwr = self.avevar(h)
nout = len(h)
p = p / (2 * pwr)
p = p / (nout / (2.0 * pwr))
return f, np.sqrt(p)
def lomb_for(self, t, h, ofac, hifac):
period = max(t) - min(t)
f = np.arange(1 / (period * ofac), hifac * len(h) / (2 * period), 1 / (period * ofac))
f = f[:int(len(f) / 2) + 1]
z = h - np.mean(h)
p = np.zeros(len(f))
for i in range(len(f)):
w = 2 * np.pi * f[i]
if w > 0:
twt = 2 * w * t
y = sum(np.sin(twt))
x = sum(np.cos(twt))
tau = math.atan2(y, x) / (2 * w)
wtmt = w * (t - tau)
cs = np.power(sum(np.multiply(z, np.cos(wtmt))), 2) / sum(np.power((np.cos(wtmt)), 2))
ss = np.power(sum(np.multiply(z, np.sin(wtmt))), 2) / sum(np.power((np.sin(wtmt)), 2))
p[i] = cs + ss
else:
p[i] = np.power(sum(np.multiply(z, t)), 1) / sum(np.power(t), 1)
pwr = self.avevar(h)
nout = len(h)
p = p / (2 * pwr)
p = p / (nout / (2.0 * pwr))
return f, np.sqrt(p)
def freq_domain(self, time, rr_intervals):
frequency, mag0 = self.lomb(time, rr_intervals, 4.0, 2.0)
frequency = np.round(frequency, 8)
mag0 = mag0 / 2.0
mag0 = np.round(mag0, 8)
result = self.power(frequency, mag0)
return result[0], result[1], result[2], result[3], result[0] + result[1] + result[2] + result[3], \
result[2] / result[3]
def time_domain(time, rr_intervals, ann):
sum_rr = 0.0
sum_rr2 = 0.0
rmssd = 0.0
totnn = 0
totnnn = 0
nrr = 1
totrr = 1
nnx = 0
nnn = 0
lastann = ann[0]
lastrr = int(rr_intervals[0])
lenght = 300
t = float(time[0])
end = t + lenght
i = 0
ratbuf = np.zeros(2400)
avbuf = np.zeros(2400)
sdbuf = np.zeros(2400)
for x in range(1, len(ann)):
t = float(time[x])
while t > (end+lenght):
i += 1
end += lenght
if t >= end:
if nnn > 1:
ratbuf[i] = nnn/nrr
sdbuf[i] = np.sqrt(((sdbuf[i] - avbuf[i]*avbuf[i]/nnn) / (nnn-1)))
avbuf[i] /= nnn
i += 1
nnn = nrr = 0
end += lenght
nrr += 1
totrr += 1
if ann[x] == 'N' and ann[x-1] == 'N':
rr_intervals[x] = int(rr_intervals[x])
nnn += 1
avbuf[i] += rr_intervals[x]
sdbuf[i] += (rr_intervals[x] * rr_intervals[x])
sum_rr += rr_intervals[x]
sum_rr2 += (rr_intervals[x] * rr_intervals[x])
totnn += 1
if lastann == 'N':
totnnn += 1
rmssd += (rr_intervals[x] - lastrr) * (rr_intervals[x] - lastrr)
# nndif[0] = NNDIF
if abs(rr_intervals[x] - lastrr) - 0.05 > (10 ** -10):
nnx += 1
lastann = ann[x-1]
lastrr = rr_intervals[x]
if totnn == 0:
return 0, 0, 0, 0
sdnn = np.sqrt((sum_rr2 - sum_rr * sum_rr / totnn) / (totnn - 1))
rmssd = np.sqrt(rmssd/totnnn)
pnn50 = nnx / totnnn
if nnn > 1:
ratbuf[i] = nnn / nrr
sdbuf[i] = np.sqrt((sdbuf[i] - avbuf[i] * avbuf[i] / nnn) / (nnn - 1))
avbuf[i] /= nnn
nb = i + 1
sum_rr = 0.0
sum_rr2 = 0.0
k = 0
h = 0
while k < nb:
if ratbuf[k] != 0:
h += 1
sum_rr += avbuf[k]
sum_rr2 += (avbuf[k] * avbuf[k])
k += 1
sdann = np.sqrt((sum_rr2 - sum_rr * sum_rr / h) / (h - 1))
return sdnn, sdann, rmssd, pnn50
def get_result_from_get_hrv(filename):
with open(filename, 'r') as f:
csv_reader = csv.reader(f, delimiter=',')
index = 0
for row in csv_reader:
if index > 0:
output = [s.strip() for s in row[0].split('=') if s]
# print('output = ', output)
if output[0] == 'SDNN':
sdnn = output[1]
if output[0] == 'SDANN':
sdann = output[1]
if output[0] == 'rMSSD':
rmssd = output[1]
if output[0] == 'pNN50':
pnn50 = output[1]
if output[0] == 'ULF PWR':
ulf = output[1]
if output[0] == 'VLF PWR':
vlf = output[1]
if output[0] == 'LF PWR':
lf = output[1]
if output[0] == 'HF PWR':
hf = output[1]
if output[0] == 'TOT PWR':
tp = output[1]
if output[0] == 'LF/HF':
ratio_lf_hf = output[1]
index += 1
return float(sdnn), float(sdann), float(rmssd), float(pnn50), float(ulf), float(vlf), \
float(lf), float(hf), float(tp), float(ratio_lf_hf)
def save_file():
extension = "atr"
result_all = []
file_process = ['File']
sdnn_l = ['sdnn']
sdann_l = ['sdann']
rmssd_l = ['rmssd']
pnn50_l = ['pnn50']
ulf_l = ['ulf']
vlf_l = ['vlf']
lf_l = ['lf']
hf_l = ['hf']
tp_l = ['tp']
ratio_lf_hf_l = ['ratio_lf_hf']
sdnn_l_p = ['sdnn']
sdann_l_p = ['sdann']
rmssd_l_p = ['rmssd']
pnn50_l_p = ['pnn50']
ulf_l_p = ['ulf']
vlf_l_p = ['vlf']
lf_l_p = ['lf']
hf_l_p = ['hf']
tp_l_p = ['tp']
ratio_lf_hf_l_p = ['ratio_lf_hf']
test_file = ['103', '113', '117', '121', '123', '200', '202', '210', '212', '213',
'219', '221', '213', '228', '231', '233', '234',
'101', '106', '108', '112', '114', '115', '116', '119', '122', '201', '203',
'205', '208', '209', '215', '220', '223', '230',
'105', '100']
file_dis = ['109', '111', '118', '124', '207', '214', '232']
for root, dirs, files in os.walk(DATA_PATH):
files = np.sort(files)
for name in files:
if extension in name:
if os.path.basename(name[:-4]) not in test_file:
continue
print('Processing file...', os.path.basename(name))
cur_dir = os.getcwd()
os.chdir(DATA_PATH)
os.system('rrlist {} {} -M -s >{}.rr'.format(extension, name.split('.')[0], name.split('.')[0]))
time_m = []
rr_intervals = []
ann = []
with open(name.split('.')[0] + '.rr', 'r') as rr_file:
for line in rr_file:
time_m.append(line.split(' ')[0])
rr_intervals.append(line.split(' ')[1])
ann.append(line.split(' ')[2].split('\n')[0])
time_m = np.asarray(time_m, dtype=float)
rr_intervals = np.asarray(rr_intervals, dtype=float)
sdnn, sdann, rmssd, pnn50 = time_domain(time_m, rr_intervals, ann)
if sdnn == 0 and sdann == 0 and rmssd == 0 and pnn50 == 0:
print('No result hrv')
file_dis.append(os.path.basename(name[:-4]))
continue
print('sdnn', sdnn)
print('rmssd', rmssd)
print('pnn50', pnn50)
print('sdann', sdann)
time_m = time_m - time_m[0]
time_m = np.round(time_m, 3)
time_nn = []
nn_intervals = []
for i in range(1, len(ann)):
if ann[i] == 'N' and ann[i - 1] == 'N':
nn_intervals.append(rr_intervals[i])
time_nn.append(time_m[i])
time_nn = np.asarray(time_nn, dtype=float)
nn_intervals = np.asarray(nn_intervals, dtype=float)
fc = FreqDomainClass()
ulf, vlf, lf, hf, tp, ratio_lf_hf = fc.freq_domain(time_nn, nn_intervals)
sdnn_l.append(sdnn)
sdann_l.append(sdann)
rmssd_l.append(rmssd)
pnn50_l.append(pnn50)
ulf_l.append(ulf)
vlf_l.append(vlf)
lf_l.append(lf)
hf_l.append(hf)
tp_l.append(tp)
ratio_lf_hf_l.append(ratio_lf_hf)
print('ULF PWR: ', ulf)
print('VLF PWR: ', vlf)
print('LF PWR: ', lf)
print('HF PWR: ', hf)
print('TOT PWR: ', tp)
print('LF/HF: ', ratio_lf_hf)
if os.path.exists('physionet_hrv.txt'):
os.remove('physionet_hrv.txt')
os.system('get_hrv -R ' + name.split('.')[0] + '.rr >> ' + 'physionet_hrv.txt')
sdnn, sdann, rmssd, pnn50, ulf, vlf, lf, hf, tp, ratio_lf_hf = \
get_result_from_get_hrv('physionet_hrv.txt')
file_process.append(os.path.basename(name[:-4]))
sdnn_l_p.append(sdnn)
sdann_l_p.append(sdann)
rmssd_l_p.append(rmssd)
pnn50_l_p.append(pnn50)
ulf_l_p.append(ulf)
vlf_l_p.append(vlf)
lf_l_p.append(lf)
hf_l_p.append(hf)
tp_l_p.append(tp)
ratio_lf_hf_l_p.append(ratio_lf_hf)
os.chdir(cur_dir)
result_all.append(file_process)
result_all.append(sdnn_l)
result_all.append(sdnn_l_p)
result_all.append(sdann_l)
result_all.append(sdann_l_p)
result_all.append(rmssd_l)
result_all.append(rmssd_l_p)
result_all.append(pnn50_l)
result_all.append(pnn50_l_p)
result_all.append(ulf_l)
result_all.append(ulf_l_p)
result_all.append(vlf_l)
result_all.append(vlf_l_p)
result_all.append(lf_l)
result_all.append(lf_l_p)
result_all.append(hf_l)
result_all.append(hf_l_p)
result_all.append(tp_l)
result_all.append(tp_l_p)
result_all.append(ratio_lf_hf_l)
result_all.append(ratio_lf_hf_l_p)
print(file_dis)
with open('hrv2.csv', 'w+') as f:
writer = csv.writer(f)
for values in zip_longest(*result_all):
writer.writerow(values)
def main():
extension = "atr"
for root, dirs, files in os.walk(DATA_PATH):
files = np.sort(files)
for name in files:
if extension in name:
if name not in ['101.atr']:
continue
cur_dir = os.getcwd()
os.chdir(DATA_PATH)
os.system('rrlist {} {} -M -s >{}.rr'.format(extension, name.split('.')[0], name.split('.')[0]))
time_m = []
rr_intervals = []
ann = []
with open(name.split('.')[0] + '.rr', 'r') as rr_file:
for line in rr_file:
time_m.append(line.split(' ')[0])
rr_intervals.append(line.split(' ')[1])
ann.append(line.split(' ')[2].split('\n')[0])
time_m = np.asarray(time_m, dtype=float)
rr_intervals = np.asarray(rr_intervals, dtype=float)
sdnn, sdann, rmssd, pnn50 = time_domain(time_m, rr_intervals, ann)
if sdnn == 0 and sdann == 0 and rmssd == 0 and pnn50 == 0:
print('No result hrv')
return 0
print('sdnn', sdnn)
print('rmssd', rmssd)
print('pnn50', pnn50)
print('sdann', sdann)
time_m = time_m - time_m[0]
time_m = np.round(time_m, 3)
time_nn = []
nn_intervals = []
for i in range(1, len(ann)):
if ann[i] == 'N' and ann[i - 1] == 'N':
nn_intervals.append(rr_intervals[i])
time_nn.append(time_m[i])
time_nn = np.asarray(time_nn, dtype=float)
nn_intervals = np.asarray(nn_intervals, dtype=float)
start = time.time()
fc = FreqDomainClass()
ulf, vlf, lf, hf, tp, ratio_lf_hf = fc.freq_domain(time_nn, nn_intervals)
end = time.time()
print('ULF PWR: ', ulf)
print('VLF PWR: ', vlf)
print('LF PWR: ', lf)
print('HF PWR: ', hf)
print('TOT PWR: ', tp)
print('LF/HF: ', ratio_lf_hf)
print('finish', end - start)
os.chdir(cur_dir)
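A hypothetical usage sketch (not part of the original answer): calling the re-implemented periodogram on a small synthetic RR series, skipping the WFDB record handling above.
import numpy as np

rng = np.random.default_rng(0)
rr = rng.normal(0.8, 0.05, 300)   # ~300 synthetic RR intervals around 0.8 s
t = np.cumsum(rr)                 # beat times in seconds

fc = FreqDomainClass()
ulf, vlf, lf, hf, tp, lf_hf = fc.freq_domain(t, rr)
print('ULF', ulf, 'VLF', vlf, 'LF', lf, 'HF', hf, 'TOT', tp, 'LF/HF', lf_hf)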

Numerical issue in scipy.ode.integrate solver

I am using the ode solver to solve a stiff problem (since the odeint function was not able to solve it). But even this way I get some warnings, and my plot saturates at some point (see the image). What should I do? Here is the list of warnings:
DVODE-- Warning..internal T (=R1) and H (=R2) are
such that in the machine, T + H = T on the next step
(H = step size). solver will continue anyway
In above, R1 = 0.3667661010318D+00 R2 = 0.1426374862242D-16
DVODE-- Warning..internal T (=R1) and H (=R2) are
such that in the machine, T + H = T on the next step
(H = step size). solver will continue anyway
In above, R1 = 0.3667661010318D+00 R2 = 0.1426374862242D-16
DVODE-- Above warning has been issued I1 times.
it will not be issued again for this problem
In above message, I1 = 2
DVODE-- At current T (=R1), MXSTEP (=I1) steps
taken on this call before reaching TOUT
In above message, I1 = 500
In above message, R1 = 0.3667661010318D+00
My code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as si
def func():
#arguments:::
w = 1./3.
xi = 2.86
phi1 = 1.645
phi2 = 2.* 1.202
gt = 10.**(-60)
Lt = (1.202*gt)/np.pi
Lin = 10.**-5
Lf = 0.49
dt = 0.0001
gin = gt*Lt/Lin
xin = (-np.log((3. - (xi**2)*Lin)/(3. - (xi**2)*Lt)) + np.log(Lin/Lt))/4.0
uin = -(np.log(Lin/Lt))/2.
state0 = [gin,xin,uin]
print(state0)
def eq(L, state):
g = state[0]
x = state[1]
u = state[2]
N = (-2.*g/(6.*np.pi + 5.*g))*(18./(1. - 2.*L) + 5.*np.log(1.- 2.*L) - phi1 + 6. )
B = (-(2. - N)*L) - ((g/np.pi)* (5.*np.log(1.-2.*L) - phi2 + (5.*N/40.)))
Eqs = np.zeros((3))
gdl = Eqs[0] = ((2.+N)*g)/B
xdl = Eqs[1] = -(2./(3.*(1.+w)))* (1./(1.-(xi**2)*L/3.))*(1./B)
udl = Eqs[2]= 1./B
return Eqs
ode = si.ode(eq)
# BDF method suited to stiff systems of ODEs
ode.set_integrator('vode',nsteps=500,method='bdf')
ode.set_initial_value(state0,Lin)
L = []
G = []
while ode.successful() and ode.t < Lf:
ode.integrate(ode.t + dt)
L.append(ode.t)
G.append(ode.y)
lam = np.vstack(L)
g,x,u = np.vstack(G).T
return g,x,u,lam
r= func()
L = r[3]
g = r[0]
lng = np.log10(g)
x = r[1]
u = r[2]
w = 1./3.
xi = 2.86
O_A = np.zeros(len(L))
q = np.zeros(len(L))
for i in np.arange(len(L)):
O_A[i] = xi**2*L[i]/3.
alpha = 2./ ((3.+3.*w) * (1.- (L[i]*xi**2)/3.) )
q[i] = 1./alpha - 1.
n = np.zeros(len(L)) #eta(n)
b = np.zeros(len(L))
for j in np.arange(len(L)):
n[j] =(-2.*g[j]/(6.*np.pi + 5.*g[j]))*(18./(1. - 2.*L[j]) + 5.*np.log(1.- 2.*L[j]) - 1.645 + 6. )
b[j]= (-(2. - n[j])*L[j]) - ((g[j]/np.pi)* (5.*np.log(1.-2.*L[j]) - 2.* 1.202 + ((5.*n[j])/4.)))
P = np.zeros(len(x))
for k in np.arange(len(x)):
C = (((3. - (xi**2)*L[k])/g[k])**(3./4.)) * (((2.*L[k] + (u[k]*b[k]))*xi**2) + (n[k] * (3.- L[k]*xi**2)) )
P[k] = (np.exp(3.*x[k])) * (np.exp(4.*u[k])) * C
plt.figure()
plt.plot(L,P)
plt.xlabel('Lambda ---->')
plt.ylabel('P ----->')
plt.title('lambda Vs P')
plt.show()
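There is no accepted fix in this post, but a hedged sketch of the usual knobs for this MXSTEP warning, assuming the system really is stiff rather than singular near that point: raise nsteps (and tighten rtol/atol) on the VODE integrator, or hand the right-hand side to scipy.integrate.solve_ivp with a stiff method. The toy right-hand side and tolerances below are illustrative only, not the system above.
import numpy as np
from scipy.integrate import ode, solve_ivp

def rhs(t, y):
    # simple stiff test problem standing in for eq(L, state) above
    return [-50.0 * (y[0] - np.cos(t))]

# option 1: give VODE/BDF many more internal steps per call and tighter tolerances
solver = ode(rhs).set_integrator('vode', method='bdf', nsteps=50000, rtol=1e-8, atol=1e-10)
solver.set_initial_value([0.0], 0.0)
while solver.successful() and solver.t < 1.0:
    solver.integrate(solver.t + 0.01)

# option 2: let solve_ivp choose its own steps with a stiff method
sol = solve_ivp(rhs, (0.0, 1.0), [0.0], method='BDF', rtol=1e-8, atol=1e-10)
print(solver.y, sol.y[:, -1])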

FloatingPointError: overflow encountered in double_scalars

I've set up numpy.seterr as follows:
np.seterr(invalid='raise', over ='raise', under='raise')
And I'm getting the following error:
c = beta[j,i] + oneminusbeta[j,i]
FloatingPointError: overflow encountered in double_scalars
I've checked what beta[j,i] and oneminusbeta[j,i] are at the point of crash, and these are their values:
beta: -131.340389182
oneminusbeta: 0.0
Please note, this line of addition (beta[j,i] + oneminusbeta[j,i]) has run for thousands of iterations in a loop (that performs image classification) before crashing at this point.
How can I deal with this? Is it necessary to change the type of the numpy arrays?
This is how I've initialized them:
beta = np.empty([m,n])
oneminusbeta = np.empty([m,n])
Is it possible to cast the individual values before adding them up, rather than changing the entire array declarations? Or is this even a serious issue? Would it be safe to simply turn off the numpy.seterr configuration and let the calculations go ahead without raising the error?
Edit
Someone suggested below, and I suspected as well, that the values being added shouldn't cause an overflow. Then how can I find out where the overflow is really happening?
This is my code:
epthreshold = 709
enthreshold = -708
f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " +str(beta[j,i])+ " + oneminusbeta: " + str(oneminusbeta[j,i])+")\n" )
c = beta[j,i] + oneminusbeta[j,i]
weights[i,j] = math.exp(np.clip(c, enthreshold, epthreshold))
And when I check my log file, this is the line I get:
weights[5550, 13] = math.exp(beta: -131.340389182 + oneminusbeta: 0.0)
Edit 2
Here's the rest of my code, where variables n,m and H have already been initialized to integer values:
import math
from math import log
import numba
import numpy as np
import statsmodels.api as sm
weights = np.empty([n,m])
for curr_n in range(n):
for curr_m in range(m):
weights[curr_n,curr_m] = 1.0/(n)
beta = np.empty([m,n])
oneminusbeta = np.empty([m,n])
for curr_class in range(m):
for curr_sample in range(n):
beta[curr_class,curr_sample] = 1./m
epthreshold = 709 # positive exponential threshold
enthreshold = -708
for h in range(H):
print "Boosting round %d ... " % h
z = np.empty([n,m])
for j in range(m): # computing working responses and weights, Step 2(a)(i)
for i in range(no_samples):
i_class = y[i] #get the correct class for the current sample
if h == 0:
z[i,j] = (int(j==i_class) - beta[j,i])/((beta[j,i])*(1. - beta[j,i]))
weights[i,j] = beta[j,i]*(1. - beta[j,i])
else:
if j == i_class:
z[i,j] = math.exp(np.clip(-beta[j,i],enthreshold, epthreshold))
else:
z[i,j] = -math.exp(np.clip(oneminusbeta[j,i], enthreshold, epthreshold))
f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " +str(beta[j,i])+ " + oneminusbeta: " + str(oneminusbeta[j,i])+")\n" )
c = beta[j,i] + oneminusbeta[j,i]
weights[i,j] = math.exp(np.clip(c, enthreshold, epthreshold))
g_h = np.zeros([1,1])
j = 0
# Calculating regression coefficients per class
# building the parameters per j class
for y1_w in zip(z.T, weights.T):
y1, w = y1_w
temp_g = sm.WLS(y1, X, w).fit() # Step 2(a)(ii)
if np.allclose(g_h,0):
g_h = temp_g.params
else:
g_h = np.c_[g_h, temp_g.params]
j = j + 1
if np.allclose(g,0):
g = g_h
else:
g = g + g_h # Step(2)(a)(iii)
# now set g(x), function coefficients according to new formula, step (2)(b)
sum_g = g.sum(axis=1)
for j in range(m):
diff = (g[:,j] - ((1./m) * sum_g))
g[:,j] = ((m-1.)/m) * diff
g_per_round[h,:,j] = g[:,j]
#Now computing beta, Step 2(c)...."
Q = 0.
e = 0.
for j in range(m):
# Calculating beta and oneminusbeta for class j
aj = 0.0
for i in range(no_samples):
i_class = y[i]
X1 = X[i].reshape(1, no_features)
g1 = g[:,j].reshape(no_features, 1)
gc = g[:,i_class].reshape(no_features, 1)
dot = 1. + float(np.dot(X1, g1)) - float(np.dot(X1,gc))
aj = dot
sum_e = 0.
a_q = []
a_q.append(0.)
for j2 in range(m): # calculating sum of e's except for all j except where j=i_class
if j2 != i_class: # g based on j2, not necessarily g1?
g2 = g[:,j2].reshape(no_features, 1)
dot1 = 1. + float(np.dot(X1, g2)) - float(np.dot(X1,gc))
e2 = math.exp(np.clip(dot1,enthreshold, epthreshold))
sum_e = sum_e + e2
a_q.append(dot1)
if (int(j==i_class) == 1):
a_q_arr = np.array(a_q)
alpha = np.array(a_q_arr[1:])
Q = mylogsumexp(f,a_q_arr, 1, 0)
sumalpha = mylogsumexp(f,alpha, 1, 0)
beta[j,i] = -Q
oneminusbeta[j,i] = sumalpha - Q
else:
alpha = a_q
alpha = np.array(alpha[1:])
a_q_arr = np.array(a_q)
Q = mylogsumexp(f,a_q_arr, 0, aj)
sumalpha = log(math.exp(np.clip(Q, enthreshold, epthreshold)) - math.exp(np.clip(aj, enthreshold, epthreshold)))
beta[j,i] = aj - Q
oneminusbeta[j,i] = sumalpha - Q
and the function mylogsumexp is:
def mylogsumexp(f, a, is_class, maxaj, axis=None, b=None):
np.seterr(over="raise", under="raise", invalid="raise")
threshold = -sys.float_info.max
maxthreshold = sys.float_info.max
epthreshold = 709 # positive exponential threshold
enthreshold = -708
a = asarray(a)
if axis is None:
a = a.ravel()
else:
a = rollaxis(a, axis)
if is_class == 1:
a_max = a.max(axis=0)
else:
a_max = maxaj
#bnone = " none "
if b is not None:
a_max = maxaj
b = asarray(b)
if axis is None:
b = b.ravel()
else:
b = rollaxis(b, axis)
a = np.clip(a - a_max, enthreshold, epthreshold)
midout = np.sum(np.exp(a), axis=0)
midout = 1.0 + np.clip(midout - math.exp(a_max), threshold, maxthreshold)
out = np.log(midout)
else:
a = np.clip(a - a_max, enthreshold, epthreshold)
out = np.log(np.sum(np.exp(a)))
out += a_max
if out == float("inf"):
out = maxthreshold
if out == float("-inf"):
out = threshold
return out
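As a hedged aside, not taken from the original post: SciPy ships a numerically stable log-sum-exp, so the manual clipping and exponentials in mylogsumexp can often be replaced by scipy.special.logsumexp, which factors out the maximum internally and never forms the overflowing exponentials. A minimal sketch:
import numpy as np
from scipy.special import logsumexp

a = np.array([-131.34, 0.0, 710.0, 712.0])
# naive: np.log(np.sum(np.exp(a))) overflows because exp(710) exceeds the float64 range
# stable: the maximum is subtracted first, so the sum stays finite
print(logsumexp(a))   # ~712.13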
