I'm writing code that solves the heat equation with an implicit method. The problem is that the values between the first and last layers of the matrix are NaNs. What could be the problem?
From my point of view, the main issue might be with line 105, which converts the original function into one that includes the boundary function.
Boundary functions code:
def func(x, t):
    return x*(1 - x)*np.exp(-2*t)

# boundary functions for x = 0 and x = 1
def q0(t):
    return t*np.exp(-t/0.1)*np.cos(t)  # boundary condition at x = 0

def q1(t):
    return t*np.exp(-t/0.5)*np.cos(t)  # boundary condition at x = 1

def derivative(f, x0, step):
    return (f(x0+step) - f(x0))/step

# boundary function for t = 0
def u_x0(x):
    return (-x + 1)*x
Function that solves the tridiagonal matrix equation:
def solution(a, b):
    n = len(a)
    x = [0 for k in range(0, n)]
    # forward sweep
    v = [0 for k in range(0, n)]
    u = [0 for k in range(0, n)]
    # first row (t = 0)
    v[0] = a[0][1] / (-a[0][0])
    u[0] = (-b[0]) / (-a[0][0])
    for i in range(1, n - 1):
        v[i] = a[i][i+1] / (-a[i][i] - a[i][i-1]*v[i-1])
        u[i] = (a[i][i-1]*u[i-1] - b[i]) / (-a[i][i] - a[i][i-1]*v[i-1])
    # last row (t = 1)
    v[n-1] = 0
    u[n-1] = (a[n-1][n-2]*u[n-2] - b[n-1]) / (-a[n-1][n-1] - a[n-1][n-2]*v[n-2])
    # backward substitution
    x[n-1] = u[n-1]
    for i in range(n-1, 0, -1):
        x[i-1] = v[i-1] * x[i] + u[i-1]
    return x
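As a sanity check, solution() can be compared against numpy.linalg.solve on a small made-up tridiagonal system (this 4x4 example is for illustration only, not the actual problem matrix):

import numpy as np

a = np.array([[ 2., -1.,  0.,  0.],
              [-1.,  2., -1.,  0.],
              [ 0., -1.,  2., -1.],
              [ 0.,  0., -1.,  2.]])
b = np.array([1., 0., 0., 1.])
print(solution(a, b))         # sweep result
print(np.linalg.solve(a, b))  # reference; both should print [1, 1, 1, 1]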
Coefficient matrix values:
A = -t/h**2
B = 1 + 2*t/h**2
C = -t/h**2
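The question does not show how cool_array is built, so here is a minimal sketch of one way such a coefficient matrix could be assembled (the step sizes h and tau, where tau plays the role of the t above, the grid size n, and the identity boundary rows are all assumptions):

import numpy as np

h, tau = 0.01, 0.0001      # assumed space and time steps
n = 100                    # assumed grid size, matching the 0..99 indices below
A = -tau / h**2            # sub-diagonal
B = 1 + 2*tau / h**2       # main diagonal
C = -tau / h**2            # super-diagonal
cool_array = np.zeros((n, n))
for row in range(1, n - 1):
    cool_array[row, row - 1] = A
    cool_array[row, row] = B
    cool_array[row, row + 1] = C
cool_array[0, 0] = cool_array[n - 1, n - 1] = 1.0  # identity rows for the boundary values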
Code that actually solves the matrix:
i = 1
X = []
while i < 99:
    X = solution(cool_array, f)
    k = 0
    while k < len(x_i):
        # line 105
        X[k] += 0.01*(func(x_i[k], x_i[i]) - (1 - x_i[i])*derivative(q0, x_i[i], 0.01) - x_i[i]*derivative(q1, x_i[i], 0.01))
        k += 1
    a = 1
    while a < 98:
        w_h_t[i][a] = X[a]
        a += 1
    f = X
    f[0] = w_h_t[i][0]
    f[99] = w_h_t[i][99]
    i += 1
print(w_h_t)
As far as I understand, the solution(a, b) algorithm is written properly, so I guess the problem is with the boundary functions or with line 105. The output I expect is at least an array of numbers, not NaNs.
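One way to narrow this down is to locate the first layer and grid index where a NaN appears, then inspect the inputs that produced it (a small sketch, assuming w_h_t can be viewed as a NumPy array):

import numpy as np

w = np.asarray(w_h_t, dtype=float)
bad = np.argwhere(np.isnan(w))
if bad.size:
    layer, idx = bad[0]
    print("first NaN at time layer", layer, "grid index", idx)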
I have Python code that implements Dynamic Time Warping, which I use to compare a predicted curve to my actual curve. I care about the shape of the curve but also about the distance between the two curves. I z-normalized both curves before calling the function that returns the cost. However, I got weird results. For example:
I got a cost of 0.28 for this example:
While I got 0.38 for the example below:
In the first plot, the prediction is much farther away than in the second plot. I even got the same value of 0.28 with a prediction as far as 5000 points away. What is wrong here?
Below is my code from this source:
# Dynamic Time Warping algorithm
def dp(dist_mat):
    N, M = dist_mat.shape
    # Initialize the cost matrix
    cost_mat = numpy.zeros((N + 1, M + 1))
    for i in range(1, N + 1):
        cost_mat[i, 0] = numpy.inf
    for i in range(1, M + 1):
        cost_mat[0, i] = numpy.inf
    # Fill the cost matrix while keeping traceback information
    traceback_mat = numpy.zeros((N, M))
    for i in range(N):
        for j in range(M):
            penalty = [
                cost_mat[i, j],      # match (0)
                cost_mat[i, j + 1],  # insertion (1)
                cost_mat[i + 1, j]]  # deletion (2)
            i_penalty = numpy.argmin(penalty)
            cost_mat[i + 1, j + 1] = dist_mat[i, j] + penalty[i_penalty]
            traceback_mat[i, j] = i_penalty
    # Traceback from bottom right
    i = N - 1
    j = M - 1
    path = [(i, j)]  # traceback loop is commented out because I am not interested in the path
    # while i > 0 or j > 0:
    #     tb_type = traceback_mat[i, j]
    #     if tb_type == 0:
    #         # Match
    #         i = i - 1
    #         j = j - 1
    #     elif tb_type == 1:
    #         # Insertion
    #         i = i - 1
    #     elif tb_type == 2:
    #         # Deletion
    #         j = j - 1
    #     path.append((i, j))
    # Strip infinity edges from cost_mat before returning
    cost_mat = cost_mat[1:, 1:]
    return (path[::-1], cost_mat)
I use the above code as follows:
z_actual = stats.zscore(actual)
z_pred = stats.zscore(mean_predictions)
N = actual.shape[0]
M = mean_predictions.shape[0]
dist_mat = numpy.zeros((N, M))
for i in range(N):
    for j in range(M):
        dist_mat[i, j] = abs(z_actual[i] - z_pred[j])
path, cost_mat = dp(dist_mat)
mape = cost_mat[N - 1, M - 1] / (N + M)
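As a sanity check of dp() itself, two toy sequences that are identical up to a small shift should give a small accumulated cost (the sequences below are made up for illustration):

import numpy

s1 = numpy.array([0.0, 1.0, 2.0, 1.0, 0.0])
s2 = numpy.array([0.0, 0.0, 1.0, 2.0, 1.0])
dist_mat = numpy.abs(s1[:, None] - s2[None, :])  # pairwise |s1[i] - s2[j]|
path, cost_mat = dp(dist_mat)
print(cost_mat[-1, -1])  # bottom-right entry: small for well-aligned shapes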
I'm developing the code below to check the difference between the Euler and enhanced Euler methods for the function y' = y.
In this case, I see that the further the iterations advance, the greater the difference between the values. Does anyone know why?
def euler_explicit(y_0, h, n):
    steps = [None]*(n+1)
    steps[0] = y_0
    for k in range(1, n+1):
        steps[k] = steps[k-1] + h*steps[k-1]
    return steps

def euler_aprim(y_0, h, n):
    steps = [None]*(n+1)
    steps[0] = y_0
    for k in range(1, n+1):
        steps[k] = steps[k-1] + h*0.5*(steps[k-1] + steps[k-1] + h*steps[k-1])
    return steps

def main():
    t_f = 5
    n = 5
    y_0 = 1
    t_i = 0
    h = (t_f - t_i)/n
    euler_explicit_results = euler_explicit(y_0, h, n)
    euler_aprim_results = euler_aprim(y_0, h, n)
    for i in range(0, n+1):
        print("Diff in position "+str(i)+" is:"+str(euler_explicit_results[i]-euler_aprim_results[i]))

if __name__ == "__main__":
    main()
My output is:
Diff in position 0 is:0
Diff in position 1 is:-0.5
Diff in position 2 is:-2.25
Diff in position 3 is:-7.625
Diff in position 4 is:-23.0625
Diff in position 5 is:-65.65625
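For reference, here is a quick sketch that prints both schemes next to the exact solution y(t) = e^t (reusing the functions above); with h = 1 both methods accumulate truncation error rapidly, which is consistent with the growing differences:

import math

t_i, t_f, n, y_0 = 0, 5, 5, 1
h = (t_f - t_i) / n
eul = euler_explicit(y_0, h, n)
heun = euler_aprim(y_0, h, n)
for k in range(n + 1):
    exact = math.exp(t_i + k * h)
    # per-step value of each scheme next to e^t; plain Euler lags the most
    print(k, eul[k], heun[k], exact)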
Note: this question arises because of implementation details rather than the decision stump ERM algorithm itself.
I am trying to implement the decision stump algorithm myself and compare it with a correct implementation (both implementations are shown below).
To my understanding, my implementation is essentially the same as the correct implementation: I sort x, D and y together, whereas the correct implementation does not sort D and y and uses curr_idx instead.
But after running the following test code on 10000 different test cases, I got 6213 incorrect cases.
np.random.seed(0)
cor_count = 0
err_count = 0
for iter in range(10000):
    X = np.random.randint(1, 100, size=(10, 2))
    D = np.random.rand(10)
    D /= np.sum(D)
    y = np.random.choice([-1, 1], size=(10,))
    my_res = my_decision_stump(X, D, y)
    res = decision_stump(X, D, y)
    if my_res == res:
        cor_count += 1
    else:
        err_count += 1
print("Number of correct cases {}".format(cor_count))
print("Number of error cases {}".format(err_count))
My implementation and the correct implementation are shown below.
def my_decision_stump(X, D, y):
    Fs = np.inf
    optimal_j = None
    optimal_b = None
    optimal_theta = None
    m, d = X.shape
    for j in range(d):
        record = np.hstack((X[:, j].reshape(-1, 1), D.reshape(-1, 1), y.reshape(-1, 1)))
        record_sorted = record[record[:, 0].argsort()]
        x = record_sorted[:, 0]; D = record_sorted[:, 1]; y = record_sorted[:, 2]
        x = np.hstack((x, x[-1]+1))
        F_pos = np.sum(D[y == 1])
        F_neg = np.sum(D[y == -1])
        if F_pos < Fs or F_neg < Fs:
            optimal_theta = x[0] - 1; optimal_j = j
            if F_pos < F_neg:
                Fs = F_pos; optimal_b = 1
            else:
                Fs = F_neg; optimal_b = -1
        for i in range(m):
            F_pos -= y[i] * D[i]
            F_neg += y[i] * D[i]
            if (F_pos < Fs or F_neg < Fs) and x[i] != x[i+1]:
                optimal_theta = 0.5 * (x[i] + x[i+1]); optimal_j = j
                if F_pos < F_neg:
                    Fs = F_pos; optimal_b = 1
                else:
                    Fs = F_neg; optimal_b = -1
    return (optimal_j, optimal_b, optimal_theta)
def decision_stump(X, D, y):
    Fs = np.inf
    optimal_j = None
    optimal_b = None
    optimal_theta = None
    m, d = X.shape
    for j in range(d):
        index = np.argsort(X[:, j])
        x = np.zeros(m+1)
        x[:-1] = X[index, j]
        x[-1] = x[-2] + 1
        F_pos = np.sum(D[y == 1])
        F_neg = np.sum(D[y == -1])
        if F_pos < Fs or F_neg < Fs:
            optimal_theta = x[0] - 1; optimal_j = j
            if F_pos < F_neg:
                Fs = F_pos; optimal_b = 1
            else:
                Fs = F_neg; optimal_b = -1
        for i in range(m):
            curr_idx = index[i]
            F_pos -= y[curr_idx] * D[curr_idx]
            F_neg += y[curr_idx] * D[curr_idx]
            if (F_pos < Fs or F_neg < Fs) and x[i] != x[i+1]:
                optimal_theta = 0.5 * (x[i] + x[i+1]); optimal_j = j
                if F_pos < F_neg:
                    Fs = F_pos; optimal_b = 1
                else:
                    Fs = F_neg; optimal_b = -1
    return (optimal_j, optimal_b, optimal_theta)
I am not sure what (maybe subtle) mistake I have made.
Could anyone help me? Thank you in advance.
I made a stupid mistake...
In my implementation, because I reused the variable names D and y, both D and y change on every iteration (D = record_sorted[:, 1] rebinds D, so the next iteration of the loop uses the sorted copy; the same happens with y), when they SHOULD NOT.
This example shows the great hazard of reusing variable names.
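A tiny standalone illustration of that hazard (the array and permutation here are made up): rebinding D inside the loop means the second pass starts from the already-permuted copy rather than the caller's array.

import numpy as np

D = np.array([0.5, 0.3, 0.2])
for j in range(2):
    order = np.array([2, 0, 1])  # stand-in for argsort of feature j
    D = D[order]                 # rebinds D, clobbering it for the next feature
print(D)  # [0.3 0.2 0.5] -- no longer the original order after two passes
# Fix: bind the sorted copy to a fresh name, e.g. D_sorted = D[order],
# and leave D itself untouched.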
I have implemented two functions, FFT and InverseFFT, recursively.
These are the functions:
import math
from math import e, pi
import numpy as np

def rfft(a):
    n = a.size
    if n == 1:
        return a
    i = 1j
    w_n = e ** (-2 * i * pi / float(n))
    w = 1
    a_0 = np.zeros(int(math.ceil(n / 2.0)), dtype=np.complex_)
    a_1 = np.zeros(n // 2, dtype=np.complex_)
    for index in range(0, n):
        if index % 2 == 0:
            a_0[index // 2] = a[index]
        else:
            a_1[index // 2] = a[index]
    y_0 = rfft(a_0)
    y_1 = rfft(a_1)
    y = np.zeros(n, dtype=np.complex_)
    for k in range(0, n // 2):
        y[k] = y_0[k] + w * y_1[k]
        y[k + n // 2] = y_0[k] - w * y_1[k]
        w = w * w_n
    return y
def rifft(y):
    n = y.size
    if n == 1:
        return y
    i = 1j
    w_n = e ** (2 * i * pi / float(n))
    w = 1
    y_0 = np.zeros(int(math.ceil(n / 2.0)), dtype=np.complex_)
    y_1 = np.zeros(n // 2, dtype=np.complex_)
    for index in range(0, n):
        if index % 2 == 0:
            y_0[index // 2] = y[index]
        else:
            y_1[index // 2] = y[index]
    a_0 = rifft(y_0)
    a_1 = rifft(y_1)
    a = np.zeros(n, dtype=np.complex_)
    for k in range(0, n // 2):
        a[k] = (a_0[k] + w * a_1[k]) / n
        a[k + n // 2] = (a_0[k] - w * a_1[k]) / n
        w = w * w_n
    return a
Based on the definition of the IFFT, converting the FFT function into the IFFT function can be done by changing -2*i*pi to 2*i*pi and dividing the result by N. The rfft() function works fine, but the rifft() function, after these modifications, does not work.
I compare the output of my functions with the scipy.fftpack.fft and scipy.fftpack.ifft functions.
I feed the following NumPy array:
a = np.array([1, 0, -1, 3, 0, 0, 0, 0])
The following box shows the results of rfft() function and scipy.fftpack.fft function.
//rfft(a)
[ 3.00000000+0.j -1.12132034-1.12132034j 2.00000000+3.j 3.12132034-3.12132034j -3.00000000+0.j 3.12132034+3.12132034j 2.00000000-3.j -1.12132034+1.12132034j]
//scipy.fftpack.fft(a)
[ 3.00000000+0.j -1.12132034-1.12132034j 2.00000000+3.j 3.12132034-3.12132034j -3.00000000+0.j 3.12132034+3.12132034j 2.00000000-3.j -1.12132034+1.12132034j]
And this box shows the results of rifft() function and scipy.fftpack.ifft function.
//rifft(a)
[ 0.04687500+0.j -0.01752063+0.01752063j 0.03125000-0.046875j 0.04877063+0.04877063j -0.04687500+0.j 0.04877063-0.04877063j 0.03125000+0.046875j -0.01752063-0.01752063j]
//scipy.fftpack.ifft(a)
[ 0.37500000+0.j -0.14016504+0.14016504j 0.25000000-0.375j 0.39016504+0.39016504j -0.37500000+0.j 0.39016504-0.39016504j 0.25000000+0.375j -0.14016504-0.14016504j]
The division by the size N is a global scaling factor and should be performed on the result of the recursion, not at each stage of the recursion as you have done (that divides by a decreasing factor as you go deeper into the recursion, scaling the result down too much overall). You could address this by removing the / n factor from the final loop of your original implementation, which then gets called by another function that performs the scaling:
def unscaledrifft(y):
    ...
    for k in range(0, n // 2):
        a[k] = a_0[k] + w * a_1[k]
        a[k + n // 2] = a_0[k] - w * a_1[k]
        w = w * w_n
    return a

def rifft(y):
    return unscaledrifft(y) / y.size
Alternatively, since you are performing a radix-2 FFT, the global factor N is a power of 2, N = 2**p, where p is the number of stages in the recursion. You could therefore divide by 2 at each stage of the recursion to achieve the same result:
def rifft(y):
    ...
    for k in range(0, n // 2):
        a[k] = (a_0[k] + w * a_1[k]) / 2
        a[k + n // 2] = (a_0[k] - w * a_1[k]) / 2
        w = w * w_n
    return a
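With either fix in place, a quick round-trip through the forward transform (which already matches scipy) should recover the input:

import numpy as np

a = np.array([1, 0, -1, 3, 0, 0, 0, 0])
print(np.allclose(rifft(rfft(a)), a))  # expect True once the scaling is corrected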
I am trying to create this matrix in Python using numpy vectors:
where the values come from a function. I have implemented it by repeatedly using numpy.diag, but for large dimensions it becomes very slow. Here is the code:
def makeS(N):
    vec = np.full(N, 2*v(x_range[1]))
    vec[0] *= 0.5
    S = np.diag(vec)
    vec = np.full(N-1, v(x_range[0]))
    S += np.diag(vec, 1)
    for m in xrange(1, N):
        vec = np.full(N-m, 2*v(x_range[m+1]))
        vec[0] *= 0.5
        S += np.diag(vec, -m)
    return S
where v() is the function in question and x_range is a vector of x-values. Is there a way to make this more efficient?
Edit:
Here is a full example:
import numpy as np
import math

N = 5
x_range = np.linspace(0, 1, N+1)

def v(x):
    return math.exp(x)

def makeS(N):
    vec = np.full(N, 2*v(x_range[1]))
    vec[0] *= 0.5
    S = np.diag(vec)
    vec = np.full(N-1, v(x_range[0]))
    S += np.diag(vec, 1)
    for m in xrange(1, N):
        vec = np.full(N-m, 2*v(x_range[m+1]))
        vec[0] *= 0.5
        S += np.diag(vec, -m)
    return S

print makeS(N)
which outputs
[[ 1.22140276 1. 0. 0. 0. ]
[ 1.4918247 2.44280552 1. 0. 0. ]
[ 1.8221188 2.9836494 2.44280552 1. 0. ]
[ 2.22554093 3.6442376 2.9836494 2.44280552 1. ]
[ 2.71828183 4.45108186 3.6442376 2.9836494 2.44280552]]
This is the fastest approach I could find:
def makeS(N):
    values = np.array([v(x) for x in x_range])
    values_doubled = 2 * values
    result = np.eye(N, k=1) * values[0]
    result[:, 0] = values[1:]
    for i in xrange(N - 1):
        result[i + 1, 1:i + 2] = values_doubled[1:i + 2][::-1]
    return result
With N=2000 the original takes 26.97 seconds on my machine while the new version takes 0.02339 seconds.
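If SciPy is available, the same matrix can also be built with no Python-level row loop at all, since everything below the superdiagonal is a Toeplitz matrix whose first column is halved (a sketch; it reproduces the example output above):

import numpy as np
from scipy.linalg import toeplitz

def makeS_toeplitz(N, x_range):
    values = np.array([v(x) for x in x_range])
    # Lower triangle: entry (i, j) is 2*v(x_range[i - j + 1])
    S = toeplitz(2 * values[1:N + 1], np.zeros(N))
    S[:, 0] *= 0.5                              # first column is not doubled
    S += np.diag(np.full(N - 1, values[0]), 1)  # superdiagonal holds v(x_range[0])
    return S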
Here is the complete script for evaluating timings with some additional approaches.
import numpy as np
import math
import timeit

def v(x):
    return math.exp(x)

def makeS1(N, x_range):
    vec = np.full(N, 2 * v(x_range[1]))
    vec[0] *= 0.5
    S = np.diag(vec)
    vec = np.full(N - 1, v(x_range[0]))
    S += np.diag(vec, 1)
    for m in xrange(1, N):
        vec = np.full(N - m, 2 * v(x_range[m + 1]))
        vec[0] *= 0.5
        S += np.diag(vec, -m)
    return S

def makeS2(N, x_range):
    values = np.array([v(x) for x in x_range])
    values_doubled = 2 * values
    def value_at_position(ai, aj):
        result = np.zeros((N, N))
        for i, j in zip(ai.flatten(), aj.flatten()):
            if j > i + 1:
                continue
            elif j == i + 1:
                result[i, j] = values[0]
            elif j == 0:
                result[i, j] = values[i + 1]
            else:
                result[i, j] = values_doubled[i - j + 1]
        return result
    return np.fromfunction(value_at_position, (N, N))

def makeS3(N, x_range):
    values = np.array([v(x) for x in x_range])
    values_doubled = 2 * values
    result = np.zeros((N, N))
    for i in xrange(N):
        for j in xrange(min(i + 2, N)):
            if j == i + 1:
                result[i, j] = values[0]
            elif j == 0:
                result[i, j] = values[i + 1]
            else:
                result[i, j] = values_doubled[i - j + 1]
    return result

def makeS4(N, x_range):
    values = np.array([v(x) for x in x_range])
    values_doubled = 2 * values
    result = np.eye(N, k=1) * values[0]
    result[:, 0] = values[1:]
    for i in xrange(N - 1):
        result[i + 1, 1:i + 2] = values_doubled[1:i + 2][::-1]
    return result

def main():
    N = 2000
    x_range = np.random.randn(N + 1)
    start = timeit.default_timer()
    s1 = makeS1(N, x_range)
    print 'makeS1', timeit.default_timer() - start
    start = timeit.default_timer()
    s2 = makeS2(N, x_range)
    print 'makeS2', timeit.default_timer() - start
    start = timeit.default_timer()
    s3 = makeS3(N, x_range)
    print 'makeS3', timeit.default_timer() - start
    start = timeit.default_timer()
    s4 = makeS4(N, x_range)
    print 'makeS4', timeit.default_timer() - start
    if N < 10:
        print s1
        print s2
        print s3
        print s4
    assert np.allclose(s1, s2)
    assert np.allclose(s2, s3)
    assert np.allclose(s3, s4)

main()
On my machine, this produces the output:
makeS1 26.9707232448
makeS2 11.7728229076
makeS3 0.643742975052
makeS4 0.0233912765665