I have constructed two matrices. For one I can compute the matrix exponential, but for the other I cannot. They are constructed in the same way and have the same structure and dimension, so I don't understand why one works and the other fails. My code is below.
import numpy as np
import math as math
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import *
sigmax = [[0, 1], [1, 0]]
sigmay = [[0, -1j], [1j, 0]]
sigmaz = [[1, 0], [0, -1]]
sigmaxx = np.kron(sigmax,sigmax)
sigmayy = np.kron(sigmay,sigmay)
sigmazz = np.kron(sigmaz,sigmaz)
sigmaxxyy = np.mat(sigmaxx) + np.mat(sigmayy)
N = 6
Hxxyy = 0
for i in range (0,N-2+1):
    Hxxyy = np.mat(Hxxyy) + np.mat(np.kron(np.kron(np.identity(2**i),2*np.mat(sigmaxxyy)),np.identity(2**(N-i-2)) ))
Hxxyy = np.mat(Hxxyy) + np.mat(np.kron(np.kron(2*np.mat(sigmax),np.identity(2**(N-2))),sigmax))+np.mat(np.kron(np.kron(2*np.mat(sigmay),np.identity(2**(N-2))),sigmay))
print(expm(Hxxyy))
Hhi = 0
for j in range (0,N-1+1):
    Hhi = np.mat(Hhi) + np.mat(np.kron( np.kron(np.identity(2**j),3*np.mat(sigmaz)),np.identity(2**(N-1-j))) )
print(expm(Hhi))
The error message is:
Traceback (most recent call last):
File "new test.py", line 20, in <module>
print(expm(Hhi))
File "/Users/sherlock/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.py", line 582, in expm
return _expm(A, use_exact_onenorm='auto')
File "/Users/sherlock/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.py", line 637, in _expm
X = _fragment_2_1(X, h.A, s)
File "/Users/sherlock/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.py", line 755, in _fragment_2_1
X[k, k] = exp_diag[k]
ValueError: setting an array element with a sequence.
Your code works in Python 3 (Python 3.4.5) but fails in Python 2 (Python 2.7.12).
There were a few changes in scipy/sparse/linalg/matfuncs.py between the scipy versions shipped with those two installations that cleaned up all the code paths to support both dense and sparse matrices.
Since the dimensions are not very big, a quick fix would be to
replace
expm(Hhi)
with
expm(np.array(Hhi))
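Another option along the same lines (an untested sketch, assuming Hhi is the dense 64x64 matrix built above): since the matrix is small, the dense routine in scipy.linalg can be used instead of the sparse one.
import numpy as np
from scipy.linalg import expm as dense_expm  # aliased so it does not clash with the star-imported sparse expm

# Hhi from the question is a 2**6 x 2**6 np.matrix; np.asarray() turns it into
# a plain ndarray, and the dense expm avoids the failing sparse code path.
print(dense_expm(np.asarray(Hhi)))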
I've written some code to minimize a weighted sum of squared residuals. I first read all the data from a .gz file and then process it with the code below (the details are irrelevant). I want to use multiprocessing to speed up the "runFit" function.
My code is below:
"""
Fit 3D lines to cylinders
"""
from timeit import default_timer as timer
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from scipy.optimize import minimize
from numba import jit
from multiprocessing import Pool
def readData(filename):
    "Read compressed data."
    return np.loadtxt(filename, delimiter=",")

#jit(nopython=True)
def weightedResiduals(unknown, wire_coords, radii, d_radii, d_zcoords):
    "Calculates the sum of the weighted residuals"
    y_intercept = unknown[0]
    z_intercept = unknown[1]
    xy_slope = unknown[2]
    xz_slope = unknown[3]
    intercept_vector = np.array([0, y_intercept, z_intercept])
    gradient_vector = np.array([1, xy_slope, xz_slope])
    gradient_vector /= np.linalg.norm(gradient_vector)
    result = 0
    for index in range(np.shape(wire_coords)[0]):
        distance = np.linalg.norm(np.cross((wire_coords[index]-intercept_vector), gradient_vector)) - radii[index]
        weight = (d_radii[index]**2 + d_zcoords[index]**2)**(-1/2)
        result += (weight * distance)**2
    return result

def runFit(inputfilename, outputfilename):
    """
    Parameters
    ----------
    inputfilename : string
        input data file name for fitting.
    outputfilename : string
        result storage file name.

    Returns
    -------
    counter : int
        number of successful fits; 100% would be twice the number
        of events (two lines per event).
    """
    counter = 0
    #Reading the required data set
    fulldata = readData(inputfilename)
    #Defining the output array and filling in the first two columns
    event_no = int(fulldata[-1,0])
    result = np.zeros((2*event_no, 10))
    result[:,0] = np.repeat(np.arange(1, event_no+1), 2)
    line_no_array = np.empty((2*event_no,))
    line_no_array[::2] = 1
    line_no_array[1::2] = 2
    result[:,1] = line_no_array

    def singleEventFit(event):
        #Using masking to obtain required rows
        mask = (fulldata==event)
        desired_rows = mask[:, 0]
        #Calculating the fitted line variables using weighted least squares
        for line in range(1,3):
            #Extracting the desired rows from the full data array
            desired_array = fulldata[np.logical_and(desired_rows,(fulldata==line)[:,1])]
            #Extracting grouped data from the desired rows
            wire_coords = desired_array[:,2:5]
            wire_x_coords = wire_coords[:,0]
            wire_y_coords = wire_coords[:,1]
            wire_z_coords = wire_coords[:,2]
            radii = desired_array[:,5]
            d_radii, d_zcoords = desired_array[:,6], desired_array[:,7]
            #Estimating an initial guess for the fitted line variables
            x_min_index = np.argmin(np.abs(wire_x_coords))
            x_max_index = np.argmax(np.abs(wire_x_coords))
            y_intercept_guess = wire_y_coords[x_min_index]
            z_intercept_guess = wire_z_coords[x_min_index]
            xy_slope_guess = (wire_y_coords[x_max_index]-wire_y_coords[x_min_index])/(wire_x_coords[x_max_index]-wire_x_coords[x_min_index])
            xz_slope_guess = (wire_z_coords[x_max_index]-wire_z_coords[x_min_index])/(wire_x_coords[x_max_index]-wire_x_coords[x_min_index])
            init = np.array([y_intercept_guess, z_intercept_guess, xy_slope_guess, xz_slope_guess])
            #Minimizing the sum of the weighted residuals
            fit_vars = minimize(weightedResiduals, init, args=(wire_coords, radii, d_radii, d_zcoords), tol=1e-5)
            if fit_vars.success == True:
                y_intercept, z_intercept = fit_vars.x[0], fit_vars.x[1]
                xy_slope, xz_slope = fit_vars.x[2], fit_vars.x[3]
                #Using the half of the inverse of the Hessian matrix as the covariance matrix to recover errors
                std_array = np.sqrt(np.diag(0.5*fit_vars.hess_inv))
                #Inputting the variables and their errors on the output array
                result[2*event+line-3, 2], result[2*event+line-3, 4] = y_intercept, xy_slope
                result[2*event+line-3, 6], result[2*event+line-3, 8] = z_intercept, xz_slope
                result[2*event+line-3, 3], result[2*event+line-3, 5] = std_array[0], std_array[2]
                result[2*event+line-3, 7], result[2*event+line-3, 9] = std_array[1], std_array[3]

    with Pool() as pool:
        pool.map(singleEventFit, [event for event in range(1, event_no+1)])
    #Returning resulting array as a text file
    np.savetxt(outputfilename, result, delimiter=',')
    return counter

start = timer()
if __name__=='__main__':
    print("Successful Plots: " + str(runFit("tendata.txt.gz", "output.txt.gz")))
end = timer()
print("Time: " + str(end-start) + "s")
However, I get the following traceback:
Traceback (most recent call last):
File "C:\Users\vanes\Downloads\Python Project\untitled0.py", line 113, in <module>
print("Successful Plots: " + str(runFit("tendata.txt.gz", "output.txt.gz")))
File "C:\Users\vanes\Downloads\Python Project\untitled0.py", line 105, in runFit
pool.map(singleEventFit, [event for event in range(1, event_no+1)])
File "C:\Users\vanes\anaconda3\lib\multiprocessing\pool.py", line 364, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "C:\Users\vanes\anaconda3\lib\multiprocessing\pool.py", line 771, in get
raise self._value
File "C:\Users\vanes\anaconda3\lib\multiprocessing\pool.py", line 537, in _handle_tasks
put(task)
File "C:\Users\vanes\anaconda3\lib\multiprocessing\connection.py", line 211, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Users\vanes\anaconda3\lib\multiprocessing\reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
AttributeError: Can't pickle local object 'runFit.<locals>.singleEventFit'
Is there any way I can use multiprocessing to speed up this for-loop?
After searching online, the usual recommendation seems to be to move the inner function to module level and make it global. However, I don't see how that can work here, since I need variables defined inside "runFit()" in order to execute the loop.
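For reference, this is roughly what that suggested refactor would look like (a hypothetical sketch only: the worker is moved to module level, the data it needs is passed in explicitly, and the actual fit is replaced by a simple per-event average, since workers running in separate processes cannot write into a shared result array anyway):
import numpy as np
from multiprocessing import Pool

def singleEventFit(args):
    # Module-level function, so multiprocessing can pickle it.
    # Everything the worker needs arrives as an argument instead of being
    # captured from an enclosing function.
    event, fulldata = args
    desired_rows = fulldata[:, 0] == event
    # Placeholder for the real per-event fit: just average the selected rows.
    return event, fulldata[desired_rows, 1:].mean(axis=0)

def runFit(fulldata):
    event_no = int(fulldata[-1, 0])
    with Pool() as pool:
        fits = pool.map(singleEventFit,
                        [(event, fulldata) for event in range(1, event_no + 1)])
    # Assemble the output array from the returned values here; writes done
    # inside the workers would be lost when their processes exit.
    return fits

if __name__ == '__main__':
    data = np.array([[1, 0.1, 0.2], [1, 0.3, 0.4], [2, 0.5, 0.6]])
    print(runFit(data))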
I can't work out how the numpy.testing.assert_allclose method calculates the relative difference between two arrays. Is it expressed as a percentage, or as a plain ratio? For example, if I have two arrays
import numpy as np
gfg1 = [1, 2, 3]
gfg2 = np.array([4, 8, 9])
np.testing.assert_allclose(gfg1, gfg2)
the following error occurs:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/anaconda3/lib/python3.7/site-packages/numpy/testing/_private/utils.py", line 1515, in assert_allclose
verbose=verbose, header=header, equal_nan=equal_nan)
File "/home/anaconda3/lib/python3.7/site-packages/numpy/testing/_private/utils.py", line 841, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=1e-07, atol=0
Mismatch: 100%
Max absolute difference: 6
Max relative difference: 0.75
The max absolute difference is clear, but how is the relative difference calculated?
If you go to the source code of assert_allclose, you will see that it calls assert_array_compare, and inside assert_array_compare the maximum relative difference is calculated as max(error[nonzero] / abs(y[nonzero])), where error is abs(x - y).
So, in your case, for x = np.array([1, 2, 3]) and y = np.array([4, 8, 9]), you get
max_rel_error == max(|1-4|/|4|, |2-8|/|8|, |3-9|/|9|) == 0.75
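A quick way to verify this by hand with plain NumPy (mirroring the formula above):
import numpy as np

x = np.array([1, 2, 3])
y = np.array([4, 8, 9])

error = np.abs(x - y)      # absolute differences: [3, 6, 6]
rel = error / np.abs(y)    # relative differences: [0.75, 0.75, 0.666...]
print(error.max())         # 6    -> "Max absolute difference"
print(rel.max())           # 0.75 -> "Max relative difference"
So it is a plain ratio relative to the second (desired) array, not a percentage; the "Mismatch: 100%" line is a separate statistic, the fraction of elements that fall outside the tolerance.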
I want to convert the following SDP — which just verifies the feasibility of the constraints — from CVX (MATLAB) to CVXPY (Python):
Ah = [1.0058, -0.0058; 1, 0];
Bh = [-1; 0];
Ch = [1.0058, -0.0058; -0.9829, 0.0056];
Dh = [-1; 1];
M = [0, 1;1, 0];
ni = size(M,1)/2;
n = size(Ah,1);
rho = 0.5;
cvx_begin sdp quiet
variable P(n,n) semidefinite
variable lambda(ni) nonnegative
Mblk = M*kron(diag(lambda),eye(2));
lambda(ni) == 1 % break homogeneity (many ways to do this...)
[Ah Bh]'*P*[Ah Bh] - rho^2*blkdiag(P,0) + [Ch Dh]'*Mblk*[Ch Dh] <= 0
cvx_end
switch cvx_status
case 'Solved'
feas = 1;
otherwise
feas = 0;
end
Below is my Python code,
import cvxpy as cvx
import numpy as np
import scipy as sp
Ah = np.array([[1.0058, -0.0058], [1, 0]])
Bh = np.array([[-1], [0]])
Ch = np.array([[1.0058, -0.0058], [-0.9829, 0.0056]])
Dh = np.array([[-1], [1]])
M = np.array([[0, 1], [1, 0]])
ni, n = M.shape[0] / 2, Ah.shape[0]
rho = 0.5
P = cvx.Semidef(n)
lamda = cvx.Variable()
Mblk = np.dot(M, np.kron(cvx.diag(lamda), np.eye(2)))
ABh = np.concatenate((Ah, Bh), axis=1)
CDh = np.concatenate((Ch, Dh), axis=1)
constraints = [lamda[-1] == 1,
np.dot(ABh.T, np.dot(P, ABh)) - rho**2*np.linalg.block_diag(P, 0) +
np.dot(CDh.T, np.dot(Mblk, CDh)) << 0]
prob = cvx.Problem(cvx.Minimize(1), constraints)
feas = prob.status is cvx.OPTIMAL
There are several errors when I run the program.
1. When I print Mblk, it shows
Traceback (most recent call last):
File
"/usr/lib/python2.7/dist-packages/IPython/core/interactiveshell.py",
line 2820, in run_code
Out[1]: exec code_obj in self.user_global_ns, self.user_ns
File "", line 1, in
Mblk
File "/usr/lib/python2.7/dist-packages/IPython/core/displayhook.py",
line 247, in call
format_dict, md_dict = self.compute_format_data(result)
File "/usr/lib/python2.7/dist-packages/IPython/core/displayhook.py",
line 157, in compute_format_data
return self.shell.display_formatter.format(result)
File "/usr/lib/python2.7/dist-packages/IPython/core/formatters.py",
line 152, in format
data = formatter(obj)
File "/usr/lib/python2.7/dist-packages/IPython/core/formatters.py",
line 481, in call
printer.pretty(obj)
File "/usr/lib/python2.7/dist-packages/IPython/lib/pretty.py", line
362, in pretty
return _default_pprint(obj, self, cycle)
File "/usr/lib/python2.7/dist-packages/IPython/lib/pretty.py", line
482, in _default_pprint
p.text(repr(obj))
File "/usr/lib/python2.7/dist-packages/numpy/core/numeric.py", line
1553, in array_repr
', ', "array(")
File "/usr/lib/python2.7/dist-packages/numpy/core/arrayprint.py", line
454, in array2string
separator, prefix, formatter=formatter)
File "/usr/lib/python2.7/dist-packages/numpy/core/arrayprint.py", line
256, in _array2string
'int' : IntegerFormat(data),
File "/usr/lib/python2.7/dist-packages/numpy/core/arrayprint.py", line
641, in init
max_str_len = max(len(str(maximum.reduce(data))),
File
"/usr/local/lib/python2.7/dist-packages/cvxpy/constraints/leq_constraint.py",
line 67, in nonzero
raise Exception("Cannot evaluate the truth value of a constraint.")
Exception: Cannot evaluate the truth value of a constraint.
2. When I step to this line,
constraints = [lamda[-1] == 1,
np.dot(ABh.T, np.dot(P, ABh)) - rho**2*np.linalg.block_diag(P, 0) +
np.dot(CDh.T, np.dot(Mblk, CDh)) << 0]
it shows
Traceback (most recent call last): File
".../sdp.py", line 22, in
np.dot(ABh.T, np.dot(P, ABh)) - rho**2*np.linalg.block_diag(P, 0) +
ValueError: setting an array element with a sequence.
How can I fix these problems?
The big issue with your code is that you can't use NumPy functions on CVXPY objects. You need to use the equivalent CVXPY functions. Here's a working version of your code:
import cvxpy as cvx
import numpy as np
import scipy as sp
Ah = np.array([[1.0058, -0.0058], [1, 0]])
Bh = np.array([[-1], [0]])
Ch = np.array([[1.0058, -0.0058], [-0.9829, 0.0056]])
Dh = np.array([[-1], [1]])
M = np.array([[0, 1], [1, 0]])
ni, n = M.shape[0] / 2, Ah.shape[0]
rho = 0.5
P = cvx.Semidef(n)
lamda = cvx.Variable()
Mblk = M*lamda*np.eye(2)
ABh = cvx.hstack(Ah, Bh)
CDh = cvx.hstack(Ch, Dh)
zeros = np.zeros((n,1))
constraints = [lamda[-1] == 1,
ABh.T*P*ABh - rho**2*cvx.bmat([[P,zeros],[zeros.T, 0]]) +
CDh.T*Mblk*CDh << 0]
prob = cvx.Problem(cvx.Minimize(1), constraints)
prob.solve()
feas = prob.status is cvx.OPTIMAL
I removed the kron function because it wasn't doing anything here and CVXPY doesn't currently support Kronecker products with a variable left-hand side. I can add it if you need it.
I have the following code:
import numpy as np
def J(x, y):
    return np.matrix([[8-(4 * y), -4 * y], [y, -5 + x]])
x_0 = np.matrix([[1], [1]])
test = J(x_0[0], x_0[1])
When I go to run it I receive the following error:
Traceback (most recent call last):
File "broyden.py", line 15, in <module>
test = J(x_0[0][0], x_0[1][0])
File "broyden.py", line 12, in J
return np.matrix([[8-(4 * y), -4 * y], [y, -5 + x]])
File "/home/collin/anaconda/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.py", line 261, in __new__
raise ValueError("matrix must be 2-dimensional")
ValueError: matrix must be 2-dimensional
I don't understand why I'm getting this error. Everything appears to be 2-d.
The type of x_0[0] is still numpy.matrixlib.defmatrix.matrix, not a scalar value.
You need to extract a scalar value to use as a matrix element. Try this code:
test = J(x_0.item(0), x_0.item(1))
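To see the difference (a small check, assuming x_0 is defined as in the question):
import numpy as np

x_0 = np.matrix([[1], [1]])
print(type(x_0[0]))       # a 1x1 numpy.matrix, not a number
print(type(x_0.item(0)))  # a plain Python scalar

# With scalars, J() builds an ordinary 2x2 matrix of numbers, so the
# "matrix must be 2-dimensional" error disappears.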
Hi all, what I want should be really simple for somebody here. I want to remove a row from a NumPy array in a loop, like:
for i in range(len(self.Finalweight)):
    if self.Finalweight[i] >= self.cutoffOutliers:
        "remove line[i] from self.wData"
I'm trying to remove outliers from a dataset. The full code of the method is:
def calculate_Outliers(self):
    def calcWeight(Value):
        pFinal = abs(Value - self.pMed)/ self.pDev_abs_Med
        gradFinal = abs(gradient(Value) - self.gradMed) / self.gradDev_abs_Med
        return pFinal * gradFinal
    self.pMed = median(self.wData[:,self.yColum-1])
    self.pDev_abs_Med = median(abs(self.wData[:,self.yColum-1] - self.pMed))
    self.gradMed = median(gradient(self.wData[:,self.yColum-1]))
    self.gradDev_abs_Med = median(abs(gradient(self.wData[:,self.yColum-1]) - self.gradMed))
    self.workingData= self.wData[calcWeight(self.wData)<self.cutoffOutliers]
    self.xData = self.workingData[:,self.xColum-1]
    self.yData = self.workingData[:,self.yColum-1]
I'm getting the following error:
ile "bin/dmtools", line 201, in plot_gride
self.calculate_Outliers()
File "bin/dmtools", line 188, in calculate_Outliers
self.workingData= self.wData[calcWeight(self.wData)>self.cutoffOutliers]
ValueError: too many indices for array
There is actually a tool in NumPy made specifically for masking out outliers and invalid data points: masked arrays (numpy.ma). For example:
x = numpy.array([1, 2, 3, -1, 5])
mx = numpy.ma.masked_array(x, mask=[0, 0, 0, 1, 0])
print mx.mean()
prints
2.75
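Applied to the row-removal case in the question, here is a small sketch (with made-up stand-ins for self.wData, self.Finalweight and self.cutoffOutliers) showing both plain boolean row selection and the masked-array variant:
import numpy as np

# Hypothetical stand-ins for self.wData, self.Finalweight and self.cutoffOutliers:
wData = np.array([[1.0, 2.0],
                  [3.0, 4.0],
                  [5.0, 6.0]])
Finalweight = np.array([0.2, 7.5, 0.9])
cutoffOutliers = 5.0

# Boolean indexing keeps only the rows whose weight is below the cutoff,
# so no explicit loop-and-delete is needed.
keep = Finalweight < cutoffOutliers
workingData = wData[keep]
print(workingData)                      # rows 0 and 2 survive

# The same selection as a masked array, keeping the original shape:
row_mask = np.tile((~keep)[:, None], (1, wData.shape[1]))
masked = np.ma.masked_array(wData, mask=row_mask)
print(masked.mean(axis=0))              # column means over the kept rows only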