Using SymPy, calculate the limit of a sequence whose terms alternate in sign - python

I am trying to use SymPy to find the limit of the sequence
((-1)**(n))*(2**(-n))
using the PyCharm debugger and the IPython console.
The Python module is:
import numpy as np
from plotly.graph_objs import Scatter, Figure
from sympy import Symbol, srepr, limit_seq, oo
from sympy.abc import n
import plotly.plotly as py

py.sign_in('****', '****************')

sym_func = 1 + ((-1)**(n))*(2**(-n))

def rng_func(i):
    try:
        return 1 + np.power(-1, i)/(np.power(2, i))
    except ZeroDivisionError as e:
        raise ValueError('Invalid inputs') from e

class Sequence(object):
    def __init__(self, i):
        self.sum = 0
        self.dom = np.arange(1, i + 1, 1)
        self.rng = np.array([])
        self.is_positive = True
        self.is_alternating = True

        for i in self.dom:
            rng_val = rng_func(i)
            self.rng = np.append(self.rng, [rng_val])
            self.sum += rng_val

        sign_array = np.sign(self.rng)

        for i in self.dom:
            if self.rng[i - 1] <= 0:
                self.is_positive = False
                break

        for i in self.dom:
            if sign_array[0] == -1:
                alt_sign = np.power(-1, i)
            else:
                alt_sign = np.power(-1, i - 1)
            if alt_sign != sign_array[i - 1]:
                self.is_alternating = False
                break

seq = Sequence(10)
data = [Scatter(x=seq.dom, y=seq.rng)]
fig = Figure(data=data)
py.image.save_as(fig, filename='plots/I3.png')
The IPython Console:
Connected to pydev debugger (build 171.4424.56)
Backend Qt5Agg is interactive backend. Turning interactive mode on.
import sys; print('Python %s on %s' % (sys.version, sys.platform))
Python 3.5.2 |Anaconda custom (64-bit)| (default, Jul 2 2016, 17:53:06)
Type "copyright", "credits" or "license" for more information.
IPython 5.1.0 -- An enhanced Interactive Python.
In[1]: limit_seq(((-1)**(n))*(2**(-n)), n)
In[2]: limit_seq((1**(n))*(2**(-n)), n)
Out[3]: 0
My question is: why does SymPy not return a value for the limit in In[1] but does for In[2]? The only difference is the 1 versus the -1 in the numerator.
Is there a different way to create an alternating sign in SymPy?

As of now (SymPy 1.1.1), limit_seq does not support oscillating sequences such as those with (-1)**n. Its documentation says this in a somewhat technical way:
The terms should be built from rational functions, indefinite sums, and indefinite products over an indeterminate n. A term is admissible if the scope of all product quantifiers are asymptotically positive. Every admissible term is asymptotically monotonous.
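For instance, the decaying factor on its own is admissible, while the oscillating product is not (a quick check mirroring In[1] and In[2] from the question; in SymPy 1.1.1 the second call simply returns None):

from sympy import limit_seq
from sympy.abc import n

print(limit_seq(2**(-n), n))             # 0: a rational-style, asymptotically monotone term
print(limit_seq((-1)**n * 2**(-n), n))   # None in SymPy 1.1.1: the oscillating factor is not handled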
As a workaround, you can use this:
from sympy import Dummy, limit_seq

def mylimit_seq(seq, n):
    n_ = Dummy("n", integer=True, positive=True)
    L1 = limit_seq(seq.subs(n, 2*n_), n_)      # limit over the even-indexed terms
    L2 = limit_seq(seq.subs(n, 2*n_ + 1), n_)  # limit over the odd-indexed terms
    if L1 == L2:
        return L1
Then mylimit_seq((-1)**n * 2**(-n), n) returns 0.
In a future release, limit_seq will work for such sequences. You can also get this functionality today by cloning SymPy from GitHub.

I separated the sequence into its even-indexed and odd-indexed terms, and reflected the odd-indexed terms about the x-axis. That subsequence becomes
(1)*(2**(-n)), which is then identical to the subsequence of even-indexed terms.
The function calculating the limit remains the same:
def mylimit_seq(seq, n):
    try:
        L1 = limit_seq(seq.subs(n, 2*n), n)
        L2 = limit_seq(seq.subs(n, 2*n + 1), n)
    except:
        return np.NaN

    if L1 == L2:
        return L1
    else:
        return np.NaN
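For reference, a minimal self-contained sketch of that even/odd split applied to the sequence from the question (whether each limit_seq call evaluates depends on the SymPy version in use):

import numpy as np
from sympy import limit_seq
from sympy.abc import n

seq = 1 + (-1)**n * 2**(-n)
try:
    even_limit = limit_seq(seq.subs(n, 2*n), n)     # subsequence of even-indexed terms
    odd_limit = limit_seq(seq.subs(n, 2*n + 1), n)  # subsequence of odd-indexed terms
    print(even_limit if even_limit == odd_limit else np.nan)
except Exception:
    print(np.nan)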

Related

sympy tensor product of pauli matrices

Is there a way to use sympy's TensorProduct from the sympy.physics.quantum module together with the Pauli matrices from sympy.physics.paulialgebra?
My problem is the following:
physically: I have a Hamiltonian written as a product of (fermionic) ladder operators, and I need to implement the Jordan-Wigner transformation to "translate" it into a spin Hamiltonian.
with sympy: I have used sympy's AnnihilateFermion and CreateFermion functions from sympy.physics.secondquant and implemented the Jordan-Wigner transformation manually. The substitution from the ladder operators to the spin operators works fine. Ideally I would then use tensor_product_simp and evaluate_pauli_product to simplify the result, but sympy does not like this mixing. Any suggestions?
Below is the full minimal example. Thanks a lot for any help.
import sympy as sym
import sympy.physics.secondquant as sec
from sympy.physics.quantum import TensorProduct, Dagger, tensor_product_simp, IdentityOperator
from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product

def JW(n, N_sites):
    factors = []
    for l in range(N_sites):
        if l < n-1:
            factors.append(-sym.I*Pauli(3))
        elif l == n-1:
            factors.append((Pauli(1)-sym.I*Pauli(2))/2)
        else:
            # add identity below
            factors.append(Pauli(1)*Pauli(1))
    res = factors[0]
    for l in range(1, n):
        res = TensorProduct(res, factors[l])
    return res

if __name__ == '__main__':
    N_sites = 4
    loci = sym.symbols('l1:{}'.format(N_sites + 1))

    chi_s = [sec.AnnihilateFermion(loci[k]) for k in range(N_sites)]
    chi_dag_s = [sec.CreateFermion(loci[k]) for k in range(N_sites)]

    H = sum((chi_dag_s[n] * chi_s[n + 1] - chi_dag_s[n + 1] * chi_s[n]) for n in range(N_sites - 1))

    list_of_subs = []
    for n in range(N_sites):
        list_of_subs.append((chi_s[n], JW(n, N_sites)))
        list_of_subs.append((chi_dag_s[n], Dagger(JW(n, N_sites))))

    H_new = H.subs(list_of_subs)
    print(H_new)
    print(tensor_product_simp(sym.expand(H_new)))

numpy: how to set precision printing poly1d

I want to set the precision (and, more generally, the format) when printing a polynomial. For example:
>>> import numpy as np
>>> beta = np.asarray([-3.61380614191654e-09, 2.489464876955e-05, -0.0836384579176789, 143.213931472633])
>>> p = np.poly1d(beta)
>>> p
poly1d([-3.61380614e-09, 2.48946488e-05, -8.36384579e-02, 1.43213931e+02])
>>> print(p)
            3             2
-3.614e-09 x + 2.489e-05 x - 0.08364 x + 143.2
>>> np.set_printoptions(precision=8) # this has no effect
>>> print(p)
            3             2
-3.614e-09 x + 2.489e-05 x - 0.08364 x + 143.2
Calling print on the polynomial, I would like to get something similar to:
                 3                  2
-3.61380614e-09 x + 2.48946487e-05 x - 0.08363845 x + 143.21393147
[Edit]
I tried using numpy.polynomial as A. Donda suggests, but I get a different behaviour:
Python 3.8.6 (default, Jan 27 2021, 15:42:20)
[GCC 10.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import numpy as np
>>> beta = np.asarray([-3.61380614191654e-09, 2.489464876955e-05, -0.0836384579176789, 143.213931472633])
>>> p = np.polynomial.Polynomial(np.flip(beta))
>>> p
Polynomial([ 1.4321393147e+02, -8.3638457918e-02, 2.4894648770e-05,
-3.6138061419e-09], domain=[-1, 1], window=[-1, 1])
>>> print(p)
poly([ 1.4321393147e+02 -8.3638457918e-02 2.4894648770e-05 -3.6138061419e-09])
Printing the polynomial with print(p) does not give me the expected result.
[Edit2]
Ubuntu 20.10 ships numpy 1.18.4. Forcing a newer version (1.20.1) with pip3, I get the desired result.
The __str__ method of poly1d uses a custom function to format numbers:
def fmt_float(q):
    s = '%.4g' % q
    if s.endswith('.0000'):
        s = s[:-5]
    return s
So, you are right, it is not influenced by NumPy's printoptions.
But the documentation for poly1d says that it is "part of the old polynomial API" and that one should use numpy.polynomial instead:
>>> p = np.polynomial.Polynomial(np.flip(beta))
>>> p
Polynomial([ 1.432139e+02, -8.363846e-02, 2.489465e-05, -3.613806e-09], domain=[-1, 1], window=[-1, 1])
>>> print(p)
143.213931472633 - 0.0836384579176789·x¹ + 2.489464876955e-05·x² - 3.61380614191654e-09·x³
The np.flip is necessary because, unlike poly1d, Polynomial expects the coefficients in increasing order of power.
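A small check of that ordering difference (reusing the beta array from above):

import numpy as np

beta = np.asarray([-3.61380614191654e-09, 2.489464876955e-05,
                   -0.0836384579176789, 143.213931472633])

print(np.poly1d(beta).coeffs[0])                        # coefficient of x**3 (highest power first)
print(np.polynomial.Polynomial(np.flip(beta)).coef[0])  # constant term (lowest power first)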
That seems to solve your concrete problem, getting the coefficients in high precision. However, Polynomial.__str__ still doesn't respect printoptions:
>>> np.set_printoptions(precision=3)
>>> print(p)
143.213931472633 - 0.0836384579176789·x¹ + 2.489464876955e-05·x² - 3.61380614191654e-09·x³
You could file an issue on the NumPy repository.
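In the meantime, one possible workaround (just a sketch, not part of NumPy's API) is to build the string yourself from Polynomial.coef with whatever precision you need:

import numpy as np

beta = np.asarray([-3.61380614191654e-09, 2.489464876955e-05,
                   -0.0836384579176789, 143.213931472633])
p = np.polynomial.Polynomial(np.flip(beta))

# format each coefficient explicitly, here with 8 digits after the decimal point
terms = ["{:+.8e}*x**{}".format(c, k) for k, c in enumerate(p.coef)]
print(" ".join(terms))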

Trying to optimize my complex function to execute in polynomial time

I have this code that generates all 2**40 possible binary numbers, and from these binary numbers I try to get all the vectors that satisfy my objective function's conditions, which are:
1- each vector in the matrix must contain exactly 20 ones (1);
2- the sum s = s + (the index of the one + 1) * the rank of the one must equal 4970.
I wrote this code, but it would take a very long time, maybe months, to give the results. Now I am looking for an alternative approach or an optimization of this code, if possible.
import time
from multiprocessing import Process
from multiprocessing import Pool
import numpy as np
import itertools
import numpy

CC = 20

# test whether the vector contains exactly 20 ones
def test1numebers(v, x=1, x_l=CC):
    c = 0
    for i in range(len(v)):
        if(v[i] == x):
            c += 1
    if c == x_l:
        return True
    else:
        return False

# s = s + (index+1) * the rank of that 1
def objectif_function(v, x=1):
    s = 0
    for i in range(len(v)):
        if(v[i] == x):
            s = s + ((i+1)*nthi(v, i))
    return s

# count how many 1s appear up to (and including) index i of the vector
def nthi(v, i):
    c = 0
    for j in range(0, i+1):
        if(v[j] == 1):
            c += 1
    return c

# generate all 2**40 possible binary vectors
def generateMatrix(N):
    l = itertools.product([0, 1], repeat=N)
    return l

# count the valid vectors that match the objective function
def main_algo(N=40, S=4970):
    #N = 40
    m = generateMatrix(N)
    #S = 4970
    c = 0
    ii = 0
    t_start = time.time()   # start timer
    for i in m:
        ii += 1
        print("\n count:", ii)
        xx = i
        if(test1numebers(xx)):
            if(objectif_function(xx) == S):
                c += 1
                print('found one')
                print('\n', xx, '\n')
        if ii >= 1000000:
            break
    t_end = time.time()
    print('time taken for 10**6 is: ', t_end-t_start)
    print(c)

#main_algo()

if __name__ == '__main__':
    '''p = Process(target=main_algo, args=(40,4970,))
    p.start()
    p.join()'''
    p = Pool(150)
    print(p.map(main_algo, [40, 4970]))
While you could make a lot of improvements in readability and make your code more pythonic, the main recommendation is to use numpy, which is the fastest way of working with matrices.
Avoid processing matrices in an element-by-element ("pixel by pixel") loop. With numpy you can do those calculations on all the data at once, much faster.
numpy can also generate matrices very quickly; you could build a random 0/1 matrix in fewer lines of code and much faster, as sketched below.
I also recommend installing OpenBLAS, ATLAS and LAPACK, which make linear algebra calculations considerably faster.
I hope this helps.
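A minimal sketch of that vectorized idea (the batch size and names are illustrative, not taken from your code): check both conditions for a whole batch of 0/1 vectors at once instead of looping element by element.

import numpy as np

rng = np.random.default_rng(0)
batch = rng.integers(0, 2, size=(100_000, 40), dtype=np.int8)  # random 0/1 matrix, one vector per row

ones_count = batch.sum(axis=1)                    # condition 1: number of ones per vector
ranks = np.cumsum(batch, axis=1)                  # running count of ones (the "rank" of each one)
positions = np.arange(1, batch.shape[1] + 1)      # 1-based indices
scores = (batch * ranks * positions).sum(axis=1)  # condition 2: sum of (index + 1) * rank over the ones

matches = batch[(ones_count == 20) & (scores == 4970)]
print(len(matches), "matching vectors in this batch")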

How to convert a number to base 16 with math (not using strings)?

Say I have
1009732533765201
and I want:
0x1009732533765201 which is
1155581383011619329
You can do this in programming languages with strings, like this:
int('1009732533765201', 16)
but I want the pure math way: converting 1009732533765201 to its base-16 reading, 1155581383011619329.
I tried int('1009732533765201', 16), but this uses a string and is slow for large numbers; I'm looking for a math-only angle.
Here is a math way I know how to do it:
0x1009732533765201 = 1155581383011619329
Here is a python way to do it:
int('1009732533765201',16)
but I can only do the first, math version manually. How can this be accomplished: converting to 0x1009732533765201 without building a string by concatenating '0x' with '1009732533765201', and without using eval?
Is there any way to take 1009732533765201 and convert it to the same output as 0x1009732533765201, getting its integer value without using int('1009732533765201', 16)? My goal is to find a faster approach.
ANSWERED BY PARTHIAN SHOT: here is the result of his approach, which is exactly what I was looking for, a way to do this without int():
orig = 1009732533765201
num = orig
result = 0
i = 0

while num != 0:
    result += (num % 10) * (16 ** i)
    num //= 10
    i += 1

print(orig, num, result, "%x" % (result))
1009732533765201 0 1155581383011619329 1009732533765201
As I said in my comment, Python knows, out of the box, how to deal with base 16 numbers. Just go ahead and assign the base 16 value to a variable.
Here is an example:
Python 3.7.4 (default, Aug 12 2019, 14:45:07)
[GCC 9.1.1 20190605 (Red Hat 9.1.1-2)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> i=0x16
>>> i
22
>>> i=0xAA
>>> i
170
>>>
And as I said, that works for other bases, like base 2:
>>> i=0b1010
>>> i
10
>>>
And base 8:
>>> i=0o12
>>> i
10
>>>
The idea here is that we're looking at each digit of the original decimal number one at a time, from right to left, and multiplying it by 16 ** i instead of 10 ** i, then adding it to the new number.
The result is the equivalent of interpreting the original decimal representation of the number as if it were hexadecimal.
#!/usr/bin/env python

orig = 34029235
num = orig
result = 0
i = 0

while num != 0:
    result += (num % 10) * (16 ** i)
    num //= 10
    i += 1

print(orig, num, result, "%x" % (result))
And running that code gets us...
bash$ ./main.py
(34029235, 0, 872583733, '34029235')
bash$
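For comparison, here is a left-to-right variant of the same idea (still only arithmetic, no strings): peel off the decimal digits first, then fold them back in with weight 16 instead of 10.

def reinterpret_decimal_as_hex(num):
    # collect the decimal digits, least significant first
    digits = []
    while num:
        digits.append(num % 10)
        num //= 10
    # fold them back in, most significant first, base 16 instead of base 10
    result = 0
    for d in reversed(digits):
        result = result * 16 + d
    return result

print(reinterpret_decimal_as_hex(1009732533765201))   # 1155581383011619329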

Implicitly restarted Lanczos method

I want to write a simple toy code for the implicitly restarted Lanczos method.
Without implicit restarting the code works perfectly, but when I turn on the restart I cannot get a proper solution.
To my knowledge, the newly constructed w should be orthogonal to all of the new Lanczos vectors. For the first restart the orthogonality is well preserved, but from the second restart on it breaks down significantly and the program does not find the proper eigenvalues.
I have already spent several tens of hours trying to fix it and almost gave up. Here is my Python code:
"""
Author: Sunghwan Choi
Date Created: June 19, 2017
Python Version: 2.7 or 3.5
Reference for Lanczos algorithm
http://www.netlib.org/utk/people/JackDongarra/etemplates/node104.html
Reference for implicit restart
http://www.netlib.org/utk/people/JackDongarra/etemplates/node118.html
"""
import numpy as np
from scipy.sparse.linalg import eigsh
#from scipy.sparse import eye
from scipy.sparse import coo_matrix
from numpy import eye
def clustering(eigvals,eigvecs,tol=1e-2):
ret_eigvals=[]
ret_eigvecs=[]
for i in range(len(eigvals)):
for ret_eigval, ret_eigvec in zip (ret_eigvals,ret_eigvecs):
if (np.abs(eigvals[i]/ret_eigval-1.0)<tol ):
break
else:
ret_eigvals.append(eigvals[i])
ret_eigvecs.append(eigvecs[:,i])
ret_eigvals=np.array(ret_eigvals)
ret_eigvecs=np.array(ret_eigvecs).T
return ret_eigvals,ret_eigvecs
def check_conv(matrix, cal_eigval, cal_eigvec, tol):
indices=[]
for i in range(len(cal_eigval)):
if(np.linalg.norm(matrix.dot(cal_eigvec[:,i]) - cal_eigval[i]*cal_eigvec[:,i])< tol):
indices.append(i)
return indices
################ input
size=1600
max_step=20000
which='SA'
#implicit=False
implicit=True
energy_range=[0.0,6.0]
tol = 1e-5
n_eig=6
n_tol_check=40 # n_tol_check>n_eig ==0
######################
# generate 1D harmonic oscillator
h=0.1
matrix=-5/2*eye(size)
matrix+=4/3*(eye(size,k=1)+eye(size,k=-1))
matrix+=-1/12*(eye(size,k=2)+eye(size,k=-2))
matrix=-0.5*matrix/(h*h)
distance =lambda index: (index-size/2)*h
matrix+=np.diagflat( list(map( lambda i: 0.5*distance(i)**2, range(size))))
# solve eigenvalue problem to check validity
true_eigval,true_eigvec = eigsh(matrix,k=50,which=which)
indices = np.all([true_eigval>energy_range[0], true_eigval<energy_range[1]],axis=0)
true_eigval = true_eigval[indices]
true_eigvec = true_eigvec[:,indices]
#initialize variables
alpha=[]; beta=[]
index_v=0
restart_interval = n_tol_check+n_eig if implicit is not False else max_step
T = np.zeros((restart_interval,restart_interval))
v = np.zeros((size,restart_interval))
#Q=np.eye(restart_interval)
#generate initial vector
np.random.seed(1)
initial_vec = np.random.random(size)
#initial_vec = np.loadtxt("tmp")
w = v[:,index_v] = initial_vec/np.linalg.norm(initial_vec)
init_beta = np.linalg.norm(w)
# start Lanczos i_step
for i_step in range(max_step):
if (i_step is 0):
v[:,index_v] = w/init_beta
else:
v[:,index_v] = w/T[index_v,index_v-1]
w=matrix.dot(v[:,index_v])
if (i_step is 0):
w=w-init_beta*v[:,index_v-1]
else:
w=w-T[index_v,index_v-1]*v[:,index_v-1]
T[index_v,index_v]=np.dot(w,v[:,index_v])
w -=T[index_v,index_v]*v[:,index_v]
#check convergence
if ((i_step+1)%n_tol_check==n_eig and i_step>n_eig):
# calculate eigenval of T matrix
cal_eigval, cal_eigvec_= np.linalg.eigh(T[:index_v+1,:index_v+1])
cal_eigvec = np.dot(v[:,:index_v+1],cal_eigvec_)
#check tolerance
conv_indices = check_conv(matrix, cal_eigval, cal_eigvec,tol)
#filter energy_range
indices = np.all([cal_eigval[conv_indices]>energy_range[0], cal_eigval[conv_indices]<energy_range[1]],axis=0)
#check clustering
conv_cal_eigval,conv_cal_eigvec = clustering((cal_eigval[conv_indices])[indices], (cal_eigvec[conv_indices])[indices])
if (len(conv_cal_eigval)>=n_eig):
break
# implicit restarting
if (implicit is True):
Q=np.eye(restart_interval)
# do shift & QR decomposition
indices = np.argsort(np.abs(cal_eigval-np.mean(energy_range)))
for index in indices[n_eig:]:
new_Q,new_R = np.linalg.qr(T-cal_eigval[index]*np.eye(len(T)))
T = np.dot(new_Q.T,np.dot(T,new_Q))
v = np.dot(v,new_Q)
Q = np.dot(Q,new_Q)
w=v[:,n_eig]*T[n_eig,n_eig-1]+w*Q[-1,n_eig-1]
v[:,n_eig:]=0.0
T[:,n_eig:] = 0.0
T[n_eig:,:] = 0.0
#for debug
#print(np.dot(w.T, v))
# reset index
index_v=n_eig-1
index_v+=1
T[index_v,index_v-1]=np.linalg.norm(w)
T[index_v-1,index_v]=np.linalg.norm(w)
else:
print("not converged")
exit(-1)
print ("energy window: (", energy_range[0],",",energy_range[1],")")
print ("true eigenvalue")
print(true_eigval)
print ("eigenvalue from Lanczos w/ implicit restart (",i_step+1,")")
print(conv_cal_eigval)
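One way to check the orthogonality claim numerically (a small diagnostic sketch; the helper below is illustrative and not part of the code above) is to measure how far the active Lanczos vectors drift from orthonormality at each step:

import numpy as np

def orthogonality_error(v, k):
    # largest entry of |V^T V - I| over the first k Lanczos vectors
    V = v[:, :k]
    return np.max(np.abs(V.T.dot(V) - np.eye(k)))

# e.g. inside the i_step loop, right after v[:, index_v] is updated:
#     print(i_step, orthogonality_error(v, index_v + 1))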
