Trouble inverting a matrix in Python

I have a matrix in python that I am trying to invert. However, the result of multiplying the inverted matrix by the original matrix does not yield the identity matrix.
M = np.matrix(cv)
invM = np.linalg.inv(M)
M @ invM
I am not sure what could be the problem, since this is a fairly simple operation. Has anyone else had this problem, or does anyone know how to fix this? Thanks!

Likely, your matrix is ill-conditioned, which means that the matrix is close to noninvertible. You can check the condition number of your matrix using this:
np.linalg.cond(M)
The relative precision of double-precision floats is about 1e-16. For a condition number K, you lose about a factor K in precision. If K is above 1e+15, the matrix is noninvertible for practical purposes.
If you want to solve A @ x = b for x, it is often more accurate to use x = np.linalg.solve(A, b) rather than x = np.linalg.inv(A) @ b.
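As a rough illustration (a minimal sketch, not from the original answer), compare the two approaches on a deliberately ill-conditioned system:
import numpy as np
np.random.seed(0)
n = 100
A = np.random.uniform(-1, 1, size=(n, n))
A[1, :] = A[0, :] + A[2, :] - 1e-9   # make one row nearly a linear combination -> ill-conditioned
x_true = np.random.uniform(-1, 1, size=n)
b = A @ x_true
err_solve = np.abs(np.linalg.solve(A, b) - x_true).max()
err_inv = np.abs(np.linalg.inv(A) @ b - x_true).max()
print(f'cond(A) = {np.linalg.cond(A):.3g}')
print(f'max error: solve = {err_solve:.3g}, inv(A) @ b = {err_inv:.3g}')
Because of the conditioning, both errors are far above machine precision, but solve() typically comes out at least as accurate as forming the inverse.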
Here are a few matrices with different condition numbers and the quality of their inverse:
import numpy as np
np.random.seed(1)
n = 100
def test_inv(a):
    print(f'Condition number: {np.linalg.cond(a):.3g}')
    max_err = np.abs(a @ np.linalg.inv(a) - np.eye(n)).max()
    print(f'a @ a_inv - eye: maximum error = {max_err:.3g}')
# identity matrix
test_inv(np.eye(n))
# random numbers
randmat = np.random.uniform(-1, 1, size=(n, n))
test_inv(randmat)
# random numbers, but one row is almost a linear combination of
# two other rows.
badmat = randmat.copy()
badmat[1, :] = badmat[0, :] + badmat[2, :] - 1e-9
test_inv(badmat)
The output:
Condition number: 1
a @ a_inv - eye: maximum error = 0
Condition number: 626
a @ a_inv - eye: maximum error = 2.84e-14
Condition number: 1.64e+10
a @ a_inv - eye: maximum error = 1.53e-06

m = np.matrix([[2,3],[4,5]])
n = m.I
i = m @ n
print(i)
out:
[[1. 0.]
[0. 1.]]
Try this way.

Related

Complete or Extend Orthonormal Basis in Python

I have a couple of orthonormal vectors. I would like to extend this 2-dimensional basis to a larger one. What is the fastest way of doing this in Python with NumPy?
My thoughts were the following: Generate a random vector of the required size (new_dimension > 2), perform Gram-Schmidt by subtracting scaled dot-products with the previous two. Repeat. I doubt that this is the quickest way, though...
You didn't specify the dimension of your space. If it is 3, then you can simply use the cross product of your two vectors. If it is not, then see below.
Example in 3-D
# 1. setup: an orthonormal basis of two vectors a, b
import numpy as np
np.random.seed(0)
a, b = np.random.uniform(size=(2,3))
a /= np.linalg.norm(a)
b -= a.dot(b)*a
b /= np.linalg.norm(b)
# 2. check:
>>> np.allclose([1,1,0,0], [a.dot(a), b.dot(b), a.dot(b), b.dot(a)])
True
Then, making a new vector:
# 3. solve
c = np.cross(a, b)
# 4. checks
>>> np.allclose([1,0,0], [c.dot(c), c.dot(a), c.dot(b)])
True
If the dimension of your vectors is higher, then you can pick any vector that is not in the plane defined by a, b, subtract its projection onto that plane, then normalize.
Example in higher dimensions
# 1. setup
n = 5
np.random.seed(0)
a, b = np.random.uniform(size=(2, n))
a /= np.linalg.norm(a)
b -= a.dot(b)*a
b /= np.linalg.norm(b)
# 2. check
assert np.allclose([1,1,0,0], [a.dot(a), b.dot(b), a.dot(b), b.dot(a)])
Then:
# 3. solve
ab = np.c_[a, b]
c = np.roll(a + b, 1) # any vector unlikely to be 0 or some
# linear combination of a and b
c -= (c @ ab) @ ab.T
c /= np.linalg.norm(c)
# 4. check
abc = np.c_[a, b, c]
>>> np.allclose(np.eye(3), abc.T @ abc)
True
Generalization: complement an m-basis in an n-D space
In an n-dimensional space, given an (n, m) orthonormal basis x with m s.t. 1 <= m < n (in other words, m vectors in an n-dimensional space put together as columns of x): find n - m vectors that are orthonormal and all orthogonal to x.
We can do this in one shot using SVD.
# 1. setup
# we use SVD for the setup as well, for convenience,
# but it's not necessary at all. It is sufficient that
# x.T @ x == I
n, m = 6, 2 # for example
x, _, _ = np.linalg.svd(np.random.uniform(size=(n, m)))
x = x[:, :m]
# 2. check
>>> np.allclose(x.T @ x, np.eye(m))
True
>>> x.shape
(6, 2)
So, at this point, x is orthonormal and of shape (n, m).
Find y to be one (of possibly many) orthonormal basis that is orthogonal to x:
# 3. solve
u, s, v = np.linalg.svd(x)
y = u[:, m:]
# 4. check
>>> np.allclose(y.T @ y, np.eye(n-m))
True
>>> np.allclose(x.T @ y, 0)
True
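For reference, a minimal sketch of the same complement via scipy.linalg.null_space (assuming SciPy >= 1.1 is available; it is essentially the same SVD computation):
from scipy.linalg import null_space
y_alt = null_space(x.T)  # orthonormal basis for everything orthogonal to the columns of x
print(y_alt.shape)                    # (n, n - m), i.e. (6, 4) here
print(np.allclose(x.T @ y_alt, 0))    # True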

Find values for which matrix becomes singular in Python

Let's take the following square matrix:
import numpy as np
A = np.array([[10.0, -498.0],
              [-2.0, 100.0]])
A will be singular if its determinant (A[0,0]*A[1,1]-A[0,1]*A[1,0]) is zero. For example, A will be singular if A[0,1] takes the value -500.0 (all else unchanged):
from sympy import symbols, Eq, solve
y = symbols('y')
eq = Eq(A[0,0]*A[1,1] - y*A[1,0], 0)
sol = solve(eq)
sol
How to find all values (A[0,0],A[0,1],...) for which A (or any given square matrix) becomes singular efficiently (I work with large matrices)? Many thanks in advance.
The trick is to use Laplace (cofactor) expansion to calculate the determinant. Expanding along row i, the formula is
det(A) = sum_j (-1)^(i+j) * a_ij * M_ij
where M_ij is the minor obtained by deleting row i and column j. So to make the matrix singular, you just set det(A) = 0 in the above formula and solve for the chosen a_ij. It can be done like this:
import numpy as np
def cofactor(A, i, j):
    A = np.delete(A, (i), axis=0)
    A = np.delete(A, (j), axis=1)
    return (-1)**(i+j) * np.linalg.det(A)
def make_singular(A, I, J):
    n = A.shape[0]
    s = 0
    for i in range(n):
        if i != J:
            s += A[I, i] * cofactor(A, I, i)
    M = cofactor(A, I, J)
    if M == 0:
        return 'No solution'
    else:
        return -s / M
Testing:
>>> M = np.array([[10.0, -498.0],
...               [-2.0, 100.0]])
>>> make_singular(M, 0, 1)
-500.0000000000002
>>> M = np.array([[10.0, -498.0],
...               [0, 100.0]])
>>> make_singular(M, 0, 1)
'No solution'
This thing works for square matrices...
What it does is brute-force through every candidate matrix and check whether it is singular (so there's a lot of messy output; use it if you like it, though).
Also very important: it is a recursive function that prints a matrix whenever it finds a singular one, so it eventually throws a RecursionError... :|
This is the code I have come up with; you can use it if it's okay for you.
import numpy as np
def is_singular(_temp_int: str, matrix_size: int):
    kwargs = [int(i) for i in _temp_int]
    arr = []  # Creates the matrix from the given size
    temp_count = 0
    for i in range(matrix_size):
        arr.append([])
        m = arr[i]
        for j in range(matrix_size):
            m.append(int(_temp_int[temp_count]))
            temp_count += 1
    n_array = np.array(arr)
    if int(np.linalg.det(n_array)) == 0:
        print(n_array)  # print(n_array) for a pretty output or print(arr) for single-line output of the determinant matrix
    _temp_int = str(_temp_int[:-len(str(int(_temp_int)+1))] + str(int(_temp_int)+1))
    is_singular(_temp_int, matrix_size)

# Only square matrices, so only one-digit integers as input
print("List of singular matrices in the size of '3x3': ")
is_singular('112278011', 3)
# Just give a temporary integer string which will be converted to a matrix like [[1, 1, 2], [2, 7, 8], [0, 1, 1]]
# From the provided integer string, it adds up 1 after every iteration
I think this is the code you want; let me know if it's not working.

Why non-linear response to random values is always positive?

I'm creating a non-linear response to a series of random values from {-1, +1} using a simple second-order Volterra kernel:
r(k) = sum over 1 <= i <= j <= M of w_ij * a(k-i) * a(k-j)
With a zero mean for the a(k) values I would expect r(k) to have a zero mean as well for arbitrary w values. However, I get r(k) with an always-positive mean value, while the mean of a(k) behaves as expected: it is close to zero and changes sign from run to run.
Why don't I get a similar behavior for r(k)? Is it because a(k) are pseudo-random and two different values from a are not actually independent?
Here is a code that I use:
import numpy as np
import matplotlib.pyplot as plt
import itertools
# array of random values {-1, 1}
A = np.random.randint(2, size=10000)
A = [x*2 - 1 for x in A]
# array of random weights
M = 3
w = np.random.rand(int(M*(M+1)/2))
# non-linear response to random values
R = []
for i in range(M, len(A)):
    vals = np.asarray([np.prod(x) for x in itertools.combinations_with_replacement(A[i-M:i], 2)])
    R.append(np.dot(vals, w))
print(np.mean(A), np.var(A))
print(np.mean(R), np.var(R))
Edit:
A check of whether the quadratic form employed by the kernel is positive definite fails (i.e. there are negative principal minors). The code to do the check:
import scipy.linalg as lin
wm = np.zeros((M,M))
w_index = 0
# check Sylvester's criterion
# reconstruct weights for quadratic form
for r in range(0,M):
    for c in range(r,M):
        wm[r,c] += w[w_index]/2
        wm[c,r] += w[w_index]/2
        w_index += 1
# check principal minors
for r in range(0,M):
    if lin.det(wm[:r+1,:r+1]) < 0:
        print('found negative principal minor of order', r)
I'm not certain if this is the case for Volterra kernels, but many kernels are positive definite, and some kernels, such as covariance functions, do not admit values less than zero (e.g. Squared Exponential/RBF, Rational Quadratic, Matern kernels).
If this is not the case for the Volterra kernel, you can also try seeding the RNG differently to check whether the behavior persists. Here is a looped version of your code that iterates over different random seeds:
import numpy as np
import matplotlib.pyplot as plt
import itertools
# Loop over random seeds
for i in range(10):
    # Seed the RNG
    np.random.seed(i)
    # array of random values {-1, 1}
    A = np.random.randint(2, size=10000)
    A = [x*2 - 1 for x in A]
    # array of random weights
    M = 3
    w = np.random.rand(int(M*(M+1)/2))
    # non-linear response to random values
    R = []
    for j in range(M, len(A)):
        vals = np.asarray([np.prod(x) for x in itertools.combinations_with_replacement(A[j-M:j], 2)])
        R.append(np.dot(vals, w))
    # Convert R to a numpy array to allow boolean slicing
    R = np.array(R)
    print("A: ", np.mean(A), np.var(A))
    print("R <= 0: ", R[R <= 0])
    print("R: ", np.mean(R), np.var(R))
Running this, I get the following values:
A: 0.017 0.9997109999999997
R <= 0: []
R: 1.487637375177384 0.14880206863520892
A: -0.0012 0.9999985600000002
R <= 0: []
R: 2.28108226352669 0.5926651729251319
A: 0.0104 0.9998918400000001
R <= 0: []
R: 1.6138015284426408 0.9526360372883802
A: -0.0064 0.9999590399999999
R <= 0: []
R: 0.988332642595828 0.9650456000380685
A: 0.0026 0.9999932399999998
R <= 0: [-0.75835076 -0.75835076 -0.75835076 ... -0.75835076 -0.75835076
-0.75835076]
R: 0.7352258581171865 1.2668744674748733
A: -0.0048 0.9999769599999996
R <= 0: [-0.02201476 -0.29894937 -0.29894937 ... -0.02201476 -0.29894937
-0.02201476]
R: 0.7396699663779303 1.3844391355510492
A: -0.0012 0.9999985600000002
R <= 0: []
R: 2.4343947709617475 1.6377776468054106
A: -0.0052 0.99997296
R <= 0: []
R: 0.8778918601676095 0.07656607914368625
A: 0.0086 0.99992604
R <= 0: []
R: 2.3490174001719937 0.059871902764070624
A: 0.0046 0.9999788399999996
R <= 0: []
R: 1.7699147798471178 1.8049209966313247
So as you can see, R still has some negative values. My guess is that this occurs because your kernel is positive definite.
This question ended up being about math, and not programming. Nevertheless, this is my own answer.
Simply put, when the indices of a(k-i) and a(k-j) are equal, the variables in the resulting product are not independent (because they are equal). Such a product does not have a zero mean, hence the mean value of the whole expression is shifted into the positive range.
Formally, the implemented function is a quadratic form, for which the mean value can be calculated as
E[AᵀWA] = tr(W·Σ) + μᵀ·W·μ,
where μ and Σ are the vector of expected values and the covariance matrix of the vector A, respectively.
Having a zero vector μ leaves only the first term of this equation. The resulting estimate can be computed with the following code, and it actually gives values that are close to the statistical results in the question.
# Estimate R mean
# sum weights in a main diagonal for quadratic form (matrix trace)
w_sum = 0
w_index = 0
for r in range(0,M):
    for c in range(r,M):
        if r == c:
            w_sum += w[w_index]
        w_index += 1
Rmean_est = np.var(A) * w_sum
print(Rmean_est)
This estimate relies on the assumption that elements of a with different indices are independent. Any implicit dependency due to the nature of the pseudo-random generator, if present, probably changes the resulting estimate only slightly.
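Equivalently, the same estimate can be written as a trace, reusing the symmetric weight matrix wm reconstructed in the Edit above (a one-line sketch):
# mean of the quadratic form with mu = 0 and Sigma = var(A) * I
Rmean_est2 = np.var(A) * np.trace(wm)
print(Rmean_est2)  # matches Rmean_est above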

generating random matrices in python

In the following code I have implemented Gaussian elimination with partial pivoting for a general square linear system Ax=b. I have tested my code and it produced the right output. However, now I am trying to do the following, but I am not quite sure how to code it; I'm looking for some help with this!
I want to test my implementation by solving Ax=b where A is a random 100x100 matrix and b is a random 100x1 vector.
In my code I have put in the matrices
A = np.array([[3.,2.,-4.],[2.,3.,3.],[5.,-3.,1.]])
b = np.array([[3.],[15.],[14.]])
and gotten the following correct output:
[3. 1. 2.]
[3. 1. 2.]
but now how do I change it to generate the random matrices?
here is my code below:
import numpy as np

def GEPP(A, b, doPricing = True):
    '''
    Gaussian elimination with partial pivoting.
    input: A is an n x n numpy matrix
           b is an n x 1 numpy array
    output: x is the solution of Ax=b
            with the entries permuted in
            accordance with the pivoting
            done by the algorithm
    post-condition: A and b have been modified.
    '''
    n = len(A)
    if b.size != n:
        raise ValueError("Invalid argument: incompatible sizes between " +
                         "A & b.", b.size, n)
    # k represents the current pivot row. Since GE traverses the matrix in the
    # upper right triangle, we also use k for indicating the k-th diagonal
    # column index.
    # Elimination
    for k in range(n-1):
        if doPricing:
            # Pivot
            maxindex = abs(A[k:,k]).argmax() + k
            if A[maxindex, k] == 0:
                raise ValueError("Matrix is singular.")
            # Swap
            if maxindex != k:
                A[[k,maxindex]] = A[[maxindex, k]]
                b[[k,maxindex]] = b[[maxindex, k]]
        else:
            if A[k, k] == 0:
                raise ValueError("Pivot element is zero. Try setting doPricing to True.")
        # Eliminate
        for row in range(k+1, n):
            multiplier = A[row,k]/A[k,k]
            A[row, k:] = A[row, k:] - multiplier*A[k, k:]
            b[row] = b[row] - multiplier*b[k]
    # Back Substitution
    x = np.zeros(n)
    for k in range(n-1, -1, -1):
        x[k] = (b[k] - np.dot(A[k,k+1:], x[k+1:]))/A[k,k]
    return x

if __name__ == "__main__":
    A = np.array([[3.,2.,-4.],[2.,3.,3.],[5.,-3.,1.]])
    b = np.array([[3.],[15.],[14.]])
    print(GEPP(np.copy(A), np.copy(b), doPricing = False))
    print(GEPP(A, b))
You're already using numpy. Have you considered np.random.rand?
np.random.rand(m, n) will get you a random matrix with values in [0, 1). You can further process it by scaling (multiplying) or rounding the values.
EDIT: Something like this
if __name__ == "__main__":
    A = np.round(np.random.rand(100, 100)*10)
    b = np.round(np.random.rand(100)*10)
    print(GEPP(np.copy(A), np.copy(b), doPricing = False))
    print(GEPP(A, b))
So I would use np.random.randint for this.
numpy.random.randint(low, high=None, size=None, dtype='l')
which outputs a size-shaped array of random integers from the appropriate distribution, or a single such random int if size is not provided.
low is the lower bound of the ints you want in your range
high is one greater than the upper bound in your desired range
size is the dimensions of your output array
dtype is the dtype of the result
so if I were you, I would write
A = np.random.randint(0, 11, (100, 100))
b = np.random.randint(0, 11, 100)
Basically you could create the desired matrices filled with ones and then iterate over them, setting each value to random.randint(0, 100), for example.
A matrix filled with ones is:
one_array = np.ones((100, 100))
EDIT:
like:
import random
for x in range(one_array.shape[0]):
    for y in range(one_array.shape[1]):
        one_array[x][y] = random.randint(0, 100)
A = np.random.normal(size=(100,100))
b = np.random.normal(size=(100,1))
x = np.linalg.solve(A,b)
assert max(abs(A @ x - b)) < 1e-12
Clearly, you can use different distributions than normal, like uniform.
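For example, a uniform variant (a minimal sketch; the bounds are arbitrary):
A = np.random.uniform(-1, 1, size=(100, 100))
b = np.random.uniform(-1, 1, size=(100, 1))
x = np.linalg.solve(A, b)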
You can use numpy's native rand function:
np.random.rand()
In your code just define A and b as:
A = np.random.rand(100, 100)
b = np.random.rand(100)
This will generate a 100x100 matrix and a length-100 vector (both numpy arrays) filled with random values between 0 and 1.
See the docs for this function to learn more.

Optimize A*x = B solution for a tridiagonal coefficient matrix

I have a system of equations in the form of A*x = B where [A] is a tridiagonal coefficient matrix. Using the Numpy solver numpy.linalg.solve I can solve the system of equations for x.
See the example below of how I build the tridiagonal [A] matrix and the {B} vector, and solve for x:
# Solve system of equations with a tridiagonal coefficient matrix
# uses numpy.linalg.solve
# use Python 3 print function
from __future__ import print_function
from __future__ import division
# modules
import numpy as np
import time
ti = time.clock()
#---- Build [A] array and {B} column vector
m = 1000 # size of array, make this 8000 to see time benefits
A = np.zeros((m, m)) # pre-allocate [A] array
B = np.zeros((m, 1)) # pre-allocate {B} column vector
A[0, 0] = 1
A[0, 1] = 2
B[0, 0] = 1
for i in range(1, m-1):
    A[i, i-1] = 7 # node-1
    A[i, i] = 8 # node
    A[i, i+1] = 9 # node+1
    B[i, 0] = 2
A[m-1, m-2] = 3
A[m-1, m-1] = 4
B[m-1, 0] = 3
print('A \n', A)
print('B \n', B)
#---- Solve using numpy.linalg.solve
x = np.linalg.solve(A, B) # solve A*x = B for x
print('x \n', x)
#---- Elapsed time for each approach
print('NUMPY time', time.clock()-ti, 'seconds')
So my question relates to two sections of the above example:
Since I am dealing with a tridiagonal matrix for [A], also called a banded matrix, is there a more efficient way to solve the system of equations instead of using numpy.linalg.solve?
Also, is there a better way to create the tridiagonal matrix instead of using a for-loop?
The above example runs on Linux in about 0.08 seconds according to the time.clock() function.
The numpy.linalg.solve function works fine, but I'm trying to find an approach that takes advantage of the tridiagonal form of [A] in hopes of speeding up the solution even further and then apply that approach to a more complicated example.
There are two immediate performance improvements: (1) do not use a loop, (2) use scipy.linalg.solve_banded().
I would write the code something more like
import numpy as np
import scipy.linalg as la
# Create arrays and set values
ab = np.zeros((3, m))
b = 2*np.ones(m)
ab[0] = 9
ab[1] = 8
ab[2] = 7
# Fix end points
ab[0, 1] = 2
ab[1, 0] = 1
ab[1, -1] = 4
ab[2, -2] = 3
b[0] = 1
b[-1] = 3
x = la.solve_banded((1, 1), ab, b)
There may be more elegant ways to construct the matrix, but this works.
Using %timeit in ipython, the original code took 112 ms for m=1000. This code takes 2.94 ms for m=10,000, an order-of-magnitude larger problem, yet is still almost two orders of magnitude faster! I did not have the patience to wait on the original code for m=10,000. Most of the time in the original may be spent constructing the array; I did not test this. Regardless, for large arrays it is much more efficient to store only the non-zero values of the matrix.
There is a scipy.sparse matrix type called scipy.sparse.dia_matrix which captures the structure of your matrix well (it will store 3 arrays, in "positions" 0 (diagonal), 1 (above) and -1 (below)). Using this type of matrix you can try scipy.sparse.linalg.lsqr for solving. If your problem has an exact solution, it will be found, otherwise it will find the solution in least squares sense.
from scipy import sparse
import scipy.sparse.linalg
A_sparse = sparse.dia_matrix(A)
ret_values = sparse.linalg.lsqr(A_sparse, B.ravel())  # B is the right-hand side from the question
x = ret_values[0]
However, this may not be completely optimal in terms of exploiting the tridiagonal structure; there may be a theoretical way of making this faster. What this conversion does do for you is cut down the matrix multiplication expenses to the essential: only the 3 bands are used. This, in combination with the iterative solver lsqr, should already yield a speedup.
Note: I am not proposing scipy.sparse.linalg.spsolve, because it converts your matrix to csr format. However, replacing lsqr with spsolve is worth a try, especially because spsolve can bind UMFPACK, see relevant doc on spsolve. Also, it may be of interest to take a look at this stackoverflow question and answer relating to UMFPACK
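A minimal sketch of that spsolve variant (my reading of the suggestion above; spsolve prefers CSC or CSR input, so the matrix is converted first):
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve
x_direct = spsolve(csc_matrix(A_sparse), B)  # B is the right-hand side from the question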
You could use scipy.linalg.solveh_banded.
EDIT: You CANNOT use the above as your matrix is not symmetric, and I thought it was. However, as was mentioned above in the comment, the Thomas algorithm is great for this:
a = [7] * (m - 2) + [3]
b = [1] + [8] * (m - 2) + [4]
c = [2] + [9] * (m - 2)
d = [1] + [2] * (m - 2) + [3]

# This is taken directly from the Wikipedia page also cited above
# this overwrites b and d
def TDMASolve(a, b, c, d):
    n = len(d)  # n is the number of rows; a and c have length n-1
    for i in range(n-1):
        d[i+1] -= 1. * d[i] * a[i] / b[i]
        b[i+1] -= 1. * c[i] * a[i] / b[i]
    for i in reversed(range(n-1)):
        d[i] -= d[i+1] * c[i] / b[i+1]
    return [d[i] / b[i] for i in range(n)]
This code is not optimized, nor does it use np, but if I (or any of the other fine folks here) have time, I will edit it so that it does those things. It currently times at ~10 ms for m=10000.
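As a quick sanity check, here is a small sketch (illustrative values only) comparing TDMASolve against numpy.linalg.solve on a dense matrix built from the same bands:
import numpy as np
m_test = 6
a_t = [7.0] * (m_test - 2) + [3.0]           # sub-diagonal
b_t = [1.0] + [8.0] * (m_test - 2) + [4.0]   # main diagonal
c_t = [2.0] + [9.0] * (m_test - 2)           # super-diagonal
d_t = [1.0] + [2.0] * (m_test - 2) + [3.0]   # right-hand side
A_full = np.diag(b_t) + np.diag(a_t, -1) + np.diag(c_t, 1)
x_ref = np.linalg.solve(A_full, np.array(d_t))
x_tdma = TDMASolve(list(a_t), list(b_t), list(c_t), list(d_t))  # pass copies; TDMASolve overwrites b and d
print(np.allclose(x_ref, x_tdma))  # expected True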
This probably will help.
There is a function create_tridiagonal which will create a tridiagonal matrix. There is another function, diagonal_form, which converts a square matrix into the diagonal ordered form required by the SciPy solve_banded function.
import numpy as np
def lu_decomp3(a):
    """
    c,d,e = lu_decomp3(a).
    LU decomposition of tridiagonal matrix a = [c\d\e]. On output
    {c},{d} and {e} are the diagonals of the decomposed matrix a.
    """
    n = np.diagonal(a).size
    assert(np.all(a.shape == (n,n)))  # check if square matrix
    d = np.copy(np.diagonal(a))  # without copy, an "assignment destination is read-only" error is raised
    e = np.copy(np.diagonal(a, 1))
    c = np.copy(np.diagonal(a, -1))
    for k in range(1,n):
        lam = c[k-1]/d[k-1]
        d[k] = d[k] - lam*e[k-1]
        c[k-1] = lam
    return c,d,e

def lu_solve3(c,d,e,b):
    """
    x = lu_solve(c,d,e,b).
    Solves [c\d\e]{x} = {b}, where {c}, {d} and {e} are the
    vectors returned from lu_decomp3.
    """
    n = len(d)
    y = np.zeros_like(b)
    y[0] = b[0]
    for k in range(1,n):
        y[k] = b[k] - c[k-1]*y[k-1]
    x = np.zeros_like(b)
    x[n-1] = y[n-1]/d[n-1]  # there is no x[n]; it would be out of range
    for k in range(n-2,-1,-1):
        x[k] = (y[k] - e[k]*x[k+1])/d[k]
    return x
from scipy.sparse import diags

def create_tridiagonal(size = 4):
    diag = np.random.randn(size)*100
    diag_pos1 = np.random.randn(size-1)*10
    diag_neg1 = np.random.randn(size-1)*10
    # use .toarray() so the diagonal extraction above works on a plain ndarray
    a = diags([diag_neg1, diag, diag_pos1], offsets=[-1, 0, 1], shape=(size,size)).toarray()
    return a

a = create_tridiagonal(4)
b = np.random.randn(4)*10
print('matrix a is\n = {} \n\n and vector b is \n {}'.format(a, b))
c, d, e = lu_decomp3(a)
x = lu_solve3(c, d, e, b)
print("x from our function is {}".format(x))
print("check is answer correct ({})".format(np.allclose(np.dot(a, x), b)))
## Test Scipy
from scipy.linalg import solve_banded
def diagonal_form(a, upper = 1, lower = 1):
    """
    a is a numpy square matrix
    this function converts a square matrix to diagonal ordered form
    returned matrix in ab shape which can be used directly for scipy.linalg.solve_banded
    """
    n = a.shape[1]
    assert(np.all(a.shape == (n,n)))
    ab = np.zeros((2*n-1, n))
    for i in range(n):
        ab[i,(n-1)-i:] = np.diagonal(a,(n-1)-i)
    for i in range(n-1):
        ab[(2*n-2)-i,:i+1] = np.diagonal(a,i-(n-1))
    mid_row_inx = int(ab.shape[0]/2)
    upper_rows = [mid_row_inx - i for i in range(1, upper+1)]
    upper_rows.reverse()
    upper_rows.append(mid_row_inx)
    lower_rows = [mid_row_inx + i for i in range(1, lower+1)]
    keep_rows = upper_rows + lower_rows
    ab = ab[keep_rows,:]
    return ab

ab = diagonal_form(a, upper=1, lower=1)  # for a tridiagonal matrix, upper and lower = 1
x_sp = solve_banded((1,1), ab, b)
print("is our answer the same as scipy answer ({})".format(np.allclose(x, x_sp)))
