Jacobi Method Outputting Wrong Eigenvalues - python

I am working on an eigenvalue calculator using the Jacobi method, and it runs without errors. However, it finds neither the correct eigenvalues nor the correct eigenvectors; for some reason, I always get eigenvalues of 0. I think it may not be saving the matrix I input for MatrixA.
(Link to Jacobi method in case you are not familiar: http://fourier.eng.hmc.edu/e176/lectures/ch1/node1.html)
import numpy as np
import bettertimeit as time
import matplotlib as plt

def Jacobi(A):
    n = A.shape[0]  # matrix size #columns = #lines
    maxit = 100     # maximum number of iterations
    eps = 1.0e-15   # accuracy goal
    pi = np.pi
    info = 0        # return flag
    ev = np.zeros(n,float)     # initialize eigenvalues
    U = np.zeros((n,n),float)  # initialize eigenvectors
    for i in range(0,n): U[i,i] = 1.0
    for t in range(0,maxit):
        s = 0  # compute sum of off-diagonal elements in A(i,j)
        for i in range(0,n): s = s + np.sum(np.abs(A[i,(i+1):n]))
        if (s < eps):  # diagonal form reached
            info = t
            for i in range(0,n): ev[i] = A[i,i]
            break
        else:
            limit = s/(n*(n-1)/2.0)  # average value of off-diagonal elements
            for i in range(0,n-1):      # loop over lines of matrix
                for j in range(i+1,n):  # loop over columns of matrix
                    if (np.abs(A[i,j]) > limit):  # determine (i,j) such that |A(i,j)| is larger than the
                                                  # average value of off-diagonal elements
                        denom = A[i,i] - A[j,j]  # denominator of Eq. (3.61)
                        if (np.abs(denom) < eps): phi = pi/4  # Eq. (3.62)
                        else: phi = 0.5*np.arctan(2.0*A[i,j]/denom)  # Eq. (3.61)
                        si = np.sin(phi)
                        co = np.cos(phi)
                        for k in range(i+1,j):
                            store = A[i,k]
                            A[i,k] = A[i,k]*co + A[k,j]*si  # Eq. (3.56)
                            A[k,j] = A[k,j]*co - store*si   # Eq. (3.57)
                        for k in range(j+1,n):
                            store = A[i,k]
                            A[i,k] = A[i,k]*co + A[j,k]*si  # Eq. (3.56)
                            A[j,k] = A[j,k]*co - store*si   # Eq. (3.57)
                        for k in range(0,i):
                            store = A[k,i]
                            A[k,i] = A[k,i]*co + A[k,j]*si
                            A[k,j] = A[k,j]*co - store*si
                        store = A[i,i]
                        A[i,i] = A[i,i]*co*co + 2.0*A[i,j]*co*si + A[j,j]*si*si  # Eq. (3.58)
                        A[j,j] = A[j,j]*co*co - 2.0*A[i,j]*co*si + store*si*si   # Eq. (3.59)
                        A[i,j] = 0.0  # Eq. (3.60)
                        for k in range(0,n):
                            store = U[k,j]
                            U[k,j] = U[k,j]*co - U[k,i]*si  # Eq. (3.66)
                            U[k,i] = U[k,i]*co + store*si   # Eq. (3.67)
        info = -t  # in case no convergence is reached set info to a negative value "-t"
    return ev,U,t

n = int(input("Enter the matrix size: "))
A = np.zeros((n, n))
for i in range(n):
    A[i] = input().split(" ")
MatrixA = np.array(A)
print("A= ")
print(A)
for i in range(A.shape[0]):
    row = ["{}*x{}".format(A[i, j], j + 1) for j in range(A.shape[1])]

# Jacobi method
ev,U,t = Jacobi(A)
print("JACOBI METHOD: Number of rotations = ",t)
print("Eigenvalues = ",ev)
print("Eigenvectors = ")
print(U)
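Not part of the original post; for debugging, a quick sanity check is to compare the Jacobi routine against numpy.linalg.eigh on a small symmetric matrix (the Jacobi method assumes A is symmetric). A minimal sketch, with A_test as an illustrative matrix:

import numpy as np

# Sketch: sanity-check the Jacobi routine above against numpy's reference eigensolver.
A_test = np.array([[4.0, 1.0, 2.0],
                   [1.0, 3.0, 0.0],
                   [2.0, 0.0, 5.0]])

ev, U, t = Jacobi(A_test.copy())   # Jacobi modifies its argument in place, so pass a copy
ref = np.linalg.eigh(A_test)[0]    # reference eigenvalues, sorted ascending
print(np.sort(ev), ref)            # the two lists should agree if the routine is correct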

Related

Convex optimization using CVXPY in python

import random
import math
import cvxpy as cp
import numpy as np

U = 10   # int(input("Enter number of users : "))
K = 600  # int(input("Enter number of content in database : "))
# ncache = int(input("Enter number of cache : "))
# cache_size = int(input("Enter cache size : "))
sigma = 1     # size of content, currently assumed to be 1 GB for all content
lamda = 0.12  # currency units per GB of requested content
Nu = 10       # number of contents recommended to the user
dis = 0.05
R_ui = np.empty((U, K), dtype=float)  # R_ui is revenue per content
# Probability of user following the recommendations
alpha_u = np.random.uniform(0.6, 1, U)
y0 = np.random.randint(2, size=(U, K))  # input variable
# MOVING TO FORMULA 1
U0_cp = 0  # U0_cp is the initial utility of the content provider
for i in range(U):
    for j in range(K):
        U0_cp = U0_cp + (alpha_u[i]*y0[i][j]*(R_ui[i][j]-sigma*lamda)/Nu)
# MOVING TO FORMULA 2
C_j = 10  # number of caches
X = np.empty((K, C_j), dtype=float)
for i in range(K):
    for j in range(C_j-1):
        X[i][j] = random.randint(0, 1)
    X[i][9] = 1
# X = np.random.randint(2, size=(K, C_j))  # cache input variable
C_u = 3  # subset of caches the user has access to
C_cost = np.random.uniform(0, 0.002, C_j)
C_cost[9] = 0.05  # cost of retrieval from a cache
C_cost.round(4)
C_cm = np.empty((U, C_u), dtype=float)  # user-cache mapping
flag = True
temp = 0
for i in range(U):
    for j in range(0, C_u-1):
        flag = True
        while flag:
            temp = np.random.choice(C_cost)
            if temp in C_cm[i]:
                continue
            else:
                C_cm[i][j] = temp
                flag = False
    C_cm[i][2] = 0.05
    C_cm[i].sort()
C_cm.round(4)
K_ui = np.empty((U, K), dtype=float)  # cost retrieval matrix
temp = 0
temp1 = 1
temp2 = 0
for i in range(U):
    for j in range(K):
        for k in range(C_u):
            temp2 = sigma * C_cm[i][k] * X[j][list(C_cost).index(C_cm[i][k])]
            for l in range(0, list(C_cost).index(C_cm[i][k])-1):
                temp1 = temp1*(1-X[j][l])
            temp = temp+temp2*temp1
        K_ui[i][j] = temp
U0_cdn = 0  # initial utility of the CDN
for i in range(U):
    for j in range(K):
        U0_cdn = U0_cdn + (alpha_u[i]*y0[i][j]*(sigma*lamda-K_ui[i][j])/Nu)
# Formula 3
y_ui = cp.Variable()
Y = np.empty((U, K), dtype=float)
for i in range(U):
    for j in range(K):
        U0_cp = (alpha_u[i]*y0[i][j]*(R_ui[i][j]-sigma*lamda)/Nu)
        U0_cdn = (alpha_u[i]*y0[i][j]*(sigma*lamda-K_ui[i][j])/Nu)
        formula = cp.Maximize(cp.log((alpha_u[i]*y_ui*(R_ui[i][j]-sigma*lamda*(1+dis*(y0[i][j-1])))/Nu)-(alpha_u[i]*y0[i][j]*(R_ui[i][j]-sigma*lamda)/Nu))
                              + cp.log((alpha_u[i]*y_ui*(sigma*lamda*(1+dis*(y0[i][j]-1))-K_ui[i][j])/Nu)-(alpha_u[i]*y0[i][j]*(sigma*lamda-K_ui[i][j])/Nu)))
        constraint = [(alpha_u[i]*y_ui*(R_ui[i][j]-sigma*lamda*(1+dis*(y0[i][j-1])))/Nu)-(alpha_u[i]*y0[i][j]*(R_ui[i][j]-sigma*lamda)/Nu) >= 0,
                      (alpha_u[i]*y_ui*(sigma*lamda*(1+dis*(y0[i][j]-1))-K_ui[i][j])/Nu)-(alpha_u[i]*y0[i][j]*(sigma*lamda-K_ui[i][j])/Nu) >= 0,
                      y_ui <= 1, y_ui >= 0]
        objective = cp.Problem(formula, constraint)
        Y[i][j] = objective.solve(verbose=True)
print(Y)
I am stuck trying to implement the above collaboration problem using CVXPY; kindly share a possible implementation approach.
I was trying to maximize the matrix Y in the collaboration problem, but I do not know how to maximize over a matrix using CVXPY, so I applied CVXPY to each element of the matrix separately; however, it is not working.
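Not part of the original post; regarding maximizing over the whole matrix at once, CVXPY supports matrix-valued variables, so a single problem can replace the U*K scalar problems in the loop above. A minimal sketch with illustrative placeholder names and data (n_users, n_items, and the weight matrix W are not from the model above):

import cvxpy as cp
import numpy as np

# Sketch: one optimization over a whole matrix variable instead of one problem per element.
n_users, n_items = 4, 6
W = np.random.uniform(0.1, 1.0, (n_users, n_items))  # illustrative per-element weights

Y = cp.Variable((n_users, n_items))                  # matrix decision variable
objective = cp.Maximize(cp.sum(cp.multiply(W, Y)))   # elementwise-weighted sum (linear, hence concave)
constraints = [Y >= 0, Y <= 1]                       # elementwise bounds
prob = cp.Problem(objective, constraints)
prob.solve()
print(Y.value)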

Dynamic Time Warping returns a small value for far-away curves

I have Python code that implements Dynamic Time Warping, which I use to compare a predicted curve to my actual curve. I care about the shape of the curve but also about the distance between the two curves. I z-normalized the two curves before calling the function that returns the cost. However, I got weird results. For example:
I got a cost of 0.28 for this example:
while I got 0.38 for the example below:
In the first plot, the prediction is much farther away than in the second plot. I even got the same value of 0.28 with predictions that are very far away, such as 5000 points further. What is wrong here?
Below is my code from this source:
# Dynamic Time Warping algorithm
import numpy

def dp(dist_mat):
    N, M = dist_mat.shape
    # Initialize the cost matrix
    cost_mat = numpy.zeros((N + 1, M + 1))
    for i in range(1, N + 1):
        cost_mat[i, 0] = numpy.inf
    for i in range(1, M + 1):
        cost_mat[0, i] = numpy.inf
    # Fill the cost matrix while keeping traceback information
    traceback_mat = numpy.zeros((N, M))
    for i in range(N):
        for j in range(M):
            penalty = [
                cost_mat[i, j],      # match (0)
                cost_mat[i, j + 1],  # insertion (1)
                cost_mat[i + 1, j]]  # deletion (2)
            i_penalty = numpy.argmin(penalty)
            cost_mat[i + 1, j + 1] = dist_mat[i, j] + penalty[i_penalty]
            traceback_mat[i, j] = i_penalty
    # Traceback from bottom right
    i = N - 1
    j = M - 1
    path = [(i, j)]  # The traceback is commented out because I am not interested in the path
    # while i > 0 or j > 0:
    #     tb_type = traceback_mat[i, j]
    #     if tb_type == 0:
    #         # Match
    #         i = i - 1
    #         j = j - 1
    #     elif tb_type == 1:
    #         # Insertion
    #         i = i - 1
    #     elif tb_type == 2:
    #         # Deletion
    #         j = j - 1
    #     path.append((i, j))
    # Strip infinity edges from cost_mat before returning
    cost_mat = cost_mat[1:, 1:]
    return (path[::-1], cost_mat)
I use the above code as below:
from scipy import stats

z_actual = stats.zscore(actual)
z_pred = stats.zscore(mean_predictions)
N = actual.shape[0]
M = mean_predictions.shape[0]
dist_mat = numpy.zeros((N, M))
for i in range(N):
    for j in range(M):
        dist_mat[i, j] = abs(z_actual[i] - z_pred[j])
path, cost_mat = dp(dist_mat)
mape = cost_mat[N - 1, M - 1] / (N + M)
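Not part of the original post; note that z-normalization removes the offset and scale of each series, so two curves with the same shape can get a small cost no matter how far apart they sit. A minimal diagnostic sketch, assuming dp(), actual, and mean_predictions from above, that reports the cost both with and without z-normalization:

# Sketch: compare the shape-only cost (z-normalized) with a cost that keeps the offset.
def dtw_cost(a, b):
    d = numpy.abs(a[:, None] - b[None, :])  # pairwise |a_i - b_j| distance matrix
    _, cm = dp(d)
    return cm[-1, -1] / (len(a) + len(b))

shape_cost = dtw_cost(stats.zscore(actual), stats.zscore(mean_predictions))
raw_cost = dtw_cost(actual, mean_predictions)  # keeps offset/scale information
print(shape_cost, raw_cost)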

How to use argmin() and find the minimum value of an array

I'm new to Python, so the code may not be the best. I'm trying to find the minimum total cost (TotalC) and the corresponding m, k, and xM values that go with this minimum cost, but I'm not sure how to do this. I have tried using min(TotalC); however, this gives an error within the loop, and outside the loop it only returns the value of TotalC and not the corresponding m, k, and xM values. Any help would be appreciated. This section is at the end of the code; I have included my entire code below.
I have also tried
minIndex = TotalC.argmin()
but I'm not sure how to use it, and it only returns 0 each time.
import numpy as np
import matplotlib.pyplot as plt

def Load(x):
    Fpeak = (1000 + (9*(x**2) - (183*x))) * 1000  # Fpeak in N
    td = (20 - ((0.12)*(x**2)) + (4.2*x)) / 1000  # td in s
    return Fpeak, td

#####################################################################################################
####################### Part 2 ########################
def displacement(m, k, x, dt):  # Displacement function
    Fpeak, td = Load(x)  # Load function from step 1
    w = np.sqrt(k/m)     # Natural circular frequency
    T = 2 * np.pi / w    # Natural period of blast (s)
    time = np.arange(0, 2*T, 0.001)  # Time array with range (0 - 2*T) with steps of 2*T/100
    zt = []  # Create a list to store displacement values
    for t in time:
        if t <= td:
            zt.append((Fpeak/k) * (1 - np.cos(w*t)) + (Fpeak/(k*td)) * ((np.sin(w*t)/w) - t))
        else:
            zt.append((Fpeak/(k*w*td)) * (np.sin(w*t) - np.sin(w*(t-td))) - ((Fpeak/k) * np.cos(w*t)))
    zmax = max(zt)  # Find the max displacement from the list of zt values
    return zmax     # Return max displacement

k = 1E6
m = 200
dt = 0.0001
x = 0
z = displacement(m, k, x, dt)

###################################################################################
############### Part 3 #######################
# k = 1E6 , m = 200 kg , deflection = 0.1 m
k_values = np.arange(1E6, 7E6, ((7E6-1E6)/10))    # List of k values between min and max (1E6 and 7E6)
m_values = np.arange(200, 1200, ((1200-200)/10))  # List of m values between min and max (200 kg and 1200 kg)
xM = []
for k in k_values:      # values of k
    for m in m_values:  # values of m within the k loop
        def bisector(m, k, dpoint, dt):  # dpoint = decimal point accuracy
            xL = 0
            xR = 10
            xM = (xL + xR)/2
            zmax = 99
            while round(zmax, dpoint) != 0.1:
                zmax = displacement(m, k, xM, dt)
                if zmax > 0.1:
                    xL = xM
                    xM = (xL + xR)/2
                else:
                    xR = xM
                    xM = (xL + xR)/2
            return xM
        xM = bisector(m, k, 4, 0.001)
        print('xM value =', xM)

#####################################################
####### Step 4
def cost(m, k, xM):
    Ck = 900 + 825*((k/1E6)**2) - (1725*(k/1E6))
    Cm = 10*m - 2000
    Cx = 2400*((xM**2)/4)
    TotalC = Ck + Cm + Cx
    minIndex = TotalC.argmin(0)
    print(minIndex)
    return TotalC

TotalC = cost(m, k, xM)
minIndex = TotalC.argmin()
print(minIndex)
print([xM, m, k, TotalC])
argmin() returns the index of the minimum value. If you are looking for the minimum itself, try using .min(). There is also the possibility that 0 is the lowest value in your array, so bear that in mind.
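Expanding on that with a sketch that is not part of the original answer: because TotalC here is a single number for one (m, k) pair, argmin() on it will always return 0. The usual pattern is to collect every candidate's cost together with its parameters and then index with np.argmin. A minimal sketch, assuming k_values, m_values, and bisector from the question:

import numpy as np

# Sketch: evaluate the cost for every (k, m) candidate, then pick the cheapest one.
results = []  # each entry: (TotalC, m, k, xM)
for k in k_values:
    for m in m_values:
        xM = bisector(m, k, 4, 0.001)
        Ck = 900 + 825*((k/1E6)**2) - (1725*(k/1E6))
        Cm = 10*m - 2000
        Cx = 2400*((xM**2)/4)
        results.append((Ck + Cm + Cx, m, k, xM))

costs = np.array([r[0] for r in results])
best = results[np.argmin(costs)]  # row with the smallest total cost
print("minimum TotalC =", best[0], "with m =", best[1], ", k =", best[2], ", xM =", best[3])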

Generalized minimal residual (GMRES) method with an SSOR right preconditioner

I am trying to implement the GMRES algorithm with a right preconditioner P for solving the linear system Ax = b. The code runs without errors; however, it gives an imprecise result, because the error I get is very large. For the plain GMRES method (without the preconditioning matrix, i.e. removing P from the algorithm), the error I get is around 1e-12 and it converges with the same matrix.
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
from scipy.linalg import norm as norm
import scipy.sparse as sp
from scipy.sparse import diags

"""The program splits the matrix into D: diagonal; L: strictly lower matrix; U: strictly upper matrix,
satisfying A = D - L - U."""
def splitMat(A):
    n, m = A.shape
    if (n == m):
        diagval = np.diag(A)
        D = diags(diagval, 0).toarray()
        L = (-1)*np.tril(A, -1)
        U = (-1)*np.triu(A, 1)
    else:
        print("A needs to be a square matrix")
    return (L, D, U)

"""Preconditioning matrix for symmetric successive over-relaxation (SSOR):"""
def P_SSOR(A, w):
    ## Split up matrix A:
    L, D, U = splitMat(A)
    Comp1 = (D - w*U)
    Comp2 = (D - w*L)
    Comp1inv = np.linalg.inv(Comp1)
    Comp2inv = np.linalg.inv(Comp2)
    P = w*(2-w)*np.matmul(Comp1inv, np.matmul(D, Comp2inv))
    return P

"""GMRES_SSOR using right preconditioner P:
A - matrix of the linear system Ax = b
x0 - initial guess
tol - tolerance
maxit - maximum number of iterations"""
def myGMRES_SSOR(A, x0, b, tol, maxit):
    matrixSize = A.shape[0]
    e = np.zeros((maxit+1, 1))
    rr = 1
    rstart = 2
    X = x0
    w = 1.9  ## in SSOR
    P = P_SSOR(A, w)  ### preconditioning matrix
    ### Starting the GMRES ####
    for rs in range(0, rstart+1):
        ### first check the residual:
        if rr < tol:
            break
        else:
            r0 = (b - A.dot(x0))
            rho = norm(r0)
            e[0] = rho
            H = np.zeros((maxit+1, maxit))
            Qcol = np.zeros((matrixSize, maxit+1))
            Qcol[:, 0:1] = r0/rho
            for k in range(1, maxit+1):
                ### Arnoldi procedure ##
                Qcol[:, k] = np.matmul(np.matmul(A, P), Qcol[:, k-1])  ### This step applies P
                for j in range(0, k):
                    H[j, k-1] = np.dot(np.transpose(Qcol[:, k]), Qcol[:, j])
                    Qcol[:, k] = Qcol[:, k] - (np.dot(H[j, k-1], Qcol[:, j]))
                H[k, k-1] = norm(Qcol[:, k])
                Qcol[:, k] = Qcol[:, k]/H[k, k-1]
                ### QR decomposition step ###
                n = k
                Q = np.zeros((n+1, n))
                R = np.zeros((n, n))
                R[0, 0] = norm(H[0:n+2, 0])
                Q[:, 0] = H[0:n+1, 0] / R[0, 0]
                for j in range(0, n+1):
                    t = H[0:n+1, j-1]
                    for i in range(0, j-1):
                        R[i, j-1] = np.dot(Q[:, i], t)
                        t = t - np.dot(R[i, j-1], Q[:, i])
                    R[j-1, j-1] = norm(t)
                    Q[:, j-1] = t / R[j-1, j-1]
                g = np.dot(np.transpose(Q), e[0:k+1])
                Y = np.dot(np.linalg.inv(R), g)
                Res = e[0:n] - np.dot(H[0:n, 0:n], Y[0:n])
                rr = norm(Res)
                #### second check on the residual ###
                if rr < tol:
                    break
            #### Updating the solution with the preconditioning matrix ####
            X = X + np.matmul(np.matmul(P, Qcol[:, 0:k]), Y)  ### This step applies P
    return X

######
A = np.random.rand(100, 100)
x = np.random.rand(100, 1)
b = np.matmul(A, x)
x0 = np.zeros((100, 1))
maxit = 100
tol = 0.00001
x = myGMRES_SSOR(A, x0, b, tol, maxit)
res = b - np.matmul(A, x)
print(norm(res))
print("Solution with gmres\n", np.matmul(A, x))
print("---------------------------------------")
print("b matrix:", b)
I hope someone can help me figure this out!
I'm not sure where you got your "symmetric successive over-relaxation" (SSOR) code from, but it appears to be wrong. You also seem to be assuming that A is a symmetric matrix, but in your random test case it is not.
Following SSOR's Wikipedia entry, I replaced your P_SSOR function with
def P_SSOR(A, w):
    L, D, U = splitMat(A)
    P = 2/(2-w) * (1/w*D+L)*np.linalg.inv(D)*(1/w*D+L).T
    return P
and your test matrix with
A = np.random.rand(100,100)
A = A + A.T
and your code works, down to a 12-digit residual error.
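As an additional sanity check that is not part of the original answer, one can compare against SciPy's built-in GMRES on the same symmetrized test matrix. A minimal sketch, assuming the random symmetric A suggested above:

import numpy as np
from scipy.sparse.linalg import gmres
from scipy.linalg import norm

# Sketch: reference solution from SciPy's GMRES on the symmetrized test problem.
A = np.random.rand(100, 100)
A = A + A.T                # symmetrize, as suggested above
x_true = np.random.rand(100)
b = A @ x_true

x_ref, info = gmres(A, b)  # info == 0 means the solver converged
print("scipy gmres residual:", norm(b - A @ x_ref), "info:", info)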

Monte Carlo simulation of a system of Lennard-Jones + FENE potential

I want to generate two linear chains of 20 monomers each, at some distance from each other. The following code generates a single chain. Could someone help me with how to generate the second chain?
The two chains are fixed to a surface, i.e. the first monomer of each chain is fixed while the rest of the monomers move freely in the x, y, and z directions, but the z component of the monomers should stay positive.
Something like this:
import numpy as np
import numba as nb
#import pandas as pd

#nb.jit()
def gen_chain(N):
    x = np.zeros(N)
    y = np.zeros(N)
    z = np.linspace(0, (N)*0.9, num=N)
    return np.column_stack((x, y, z)), np.column_stack((x1, y1, z1))
    #coordinates = np.loadtxt('2GN_50_T_10.txt', skiprows=199950)
    #return coordinates

#nb.jit()
def lj(rij2):
    sig_by_r6 = np.power(sigma**2 / rij2, 3)
    sig_by_r12 = np.power(sigma**2 / rij2, 6)
    lje = 4 * epsilon * (sig_by_r12 - sig_by_r6)
    return lje

#nb.jit()
def fene(rij2):
    return (-0.5 * K * np.power(R, 2) * np.log(1 - ((np.sqrt(rij2) - r0) / R)**2))

#nb.jit()
def total_energy(coord):
    # Non-bonded energy.
    e_nb = 0.0
    for i in range(N):
        for j in range(i - 1):
            ri = coord[i]
            rj = coord[j]
            rij = ri - rj
            rij2 = np.dot(rij, rij)
            if (rij2 < rcutoff_sq):
                e_nb += lj(rij2)
    # Bonded FENE potential energy.
    e_bond = 0.0
    for i in range(1, N):
        ri = coord[i]
        rj = coord[i - 1]  # Can be [i+1] ??
        rij = ri - rj
        rij2 = np.dot(rij, rij)
        e_bond += fene(rij2)
    return e_nb + e_bond

#nb.jit()
def move(coord):
    trial = np.ndarray.copy(coord)
    for i in range(1, N):
        while True:
            delta = (2 * np.random.rand(3) - 1) * max_delta
            trial[i] += delta
            #while True:
            if trial[i,2] > 0.0:
                break
            trial[i] -= delta
    return trial

#nb.jit()
def accept(delta_e):
    beta = 1.0 / T
    if delta_e < 0.0:
        return True
    random_number = np.random.rand(1)
    p_acc = np.exp(-beta * delta_e)
    if random_number < p_acc:
        return True
    return False

if __name__ == "__main__":
    # FENE potential parameters.
    K = 40.0
    R = 0.3
    r0 = 0.7
    # L-J potential parameters
    sigma = 0.5716
    epsilon = 1.0
    # MC parameters
    N = 20  # Number of monomers
    rcutoff = 2.5 * sigma
    rcutoff_sq = rcutoff * rcutoff
    max_delta = 0.01
    n_steps = 100000
    T = 10
    # MAIN PART OF THE CODE
    coord = gen_chain(N)
    energy_current = total_energy(coord)
    traj = open('2GN_20_T_10.xyz', 'w')
    traj_txt = open('2GN_20_T_10.txt', 'w')
    for step in range(n_steps):
        if step % 1000 == 0:
            traj.write(str(N) + '\n\n')
            for i in range(N):
                traj.write("C %10.5f %10.5f %10.5f\n" % (coord[i][0], coord[i][1], coord[i][2]))
                traj_txt.write("%10.5f %10.5f %10.5f\n" % (coord[i][0], coord[i][1], coord[i][2]))
            print(step, energy_current)
        coord_trial = move(coord)
        energy_trial = total_energy(coord_trial)
        delta_e = energy_trial - energy_current
        if accept(delta_e):
            coord = coord_trial
            energy_current = energy_trial
    traj.close()
I expect the chain of particles to collapse into a globule.
There is a problem with the logic of the MC you are implementing.
To perform an MC step you need to ATTEMPT a move, evaluate the energy of the new state, and then accept or reject it according to a random number.
In your code there is not the slightest sign of an attempt to move a particle.
You need to move one (or more) of them, evaluate the energy, and then update your coordinates.
By the way, I suppose this is not your entire code; there are many parameters that are not defined, like the "K" and the "R0" in your FENE potential.
The FENE potential models bond interactions. What your code is saying is that all particles within the cutoff are bonded by FENE springs, and that the bonds are not fixed but rather defined by the cutoff. With r_cutoff = 3.0, larger than the equilibrium distance of the LJ well, you are essentially considering that each particle is potentially bonded to many others. You are treating the FENE potential as a non-bonded one.
For the bond interactions you should ignore the cutoff and only evaluate the energy for the pairs that are actually bonded according to your topology, which means that first you need to define a topology. I suggest generating a linear molecule of N atoms in a box big enough to contain the whole stretched molecule, and considering the i-th atom as bonded to the (i-1)-th atom, for i = 2, ..., N. In this way the topology is well defined and persistent. Then consider both interactions separately, non-bonded and bonded, and add them at the end.
Something like this, in pseudo-code:
e_nb = 0
for particle i = 1 to N:
    for particle j = 1 to i-1:
        if dist(i, j) < rcutoff:
            e_nb += lj(i, j)
e_bond = 0
for particle i = 2 to N:
    e_bond += fene(i, i-1)
e_tot = e_nb + e_bond
Below you can find a modified version of your code. To make things simpler, in this version there is no box and no boundary conditions, just a chain in free space. The chain is initialized as a linear sequence of particles each distant 80% of R0 from the next, since R0 is the maximum length of the FENE bond. The code considers that particle i is bonded with i+1 and the bond is not broken. This code is just a proof of concept.
#!/usr/bin/python
import numpy as np

def gen_chain(N, R):
    x = np.linspace(0, (N-1)*R*0.8, num=N)
    y = np.zeros(N)
    z = np.zeros(N)
    return np.column_stack((x, y, z))

def lj(rij2):
    sig_by_r6 = np.power(sigma/rij2, 3)
    sig_by_r12 = np.power(sig_by_r6, 2)
    lje = 4.0 * epsilon * (sig_by_r12 - sig_by_r6)
    return lje

def fene(rij2):
    return (-0.5 * K * R0**2 * np.log(1 - (rij2/R0**2)))

def total_energy(coord):
    # Non-bonded
    e_nb = 0
    for i in range(N):
        for j in range(i-1):
            ri = coord[i]
            rj = coord[j]
            rij = ri - rj
            rij2 = np.dot(rij, rij)
            if (rij2 < rcutoff):
                e_nb += lj(rij2)
    # Bonded
    e_bond = 0
    for i in range(1, N):
        ri = coord[i]
        rj = coord[i-1]
        rij = ri - rj
        rij2 = np.dot(rij, rij)
        e_bond += fene(rij2)
    return e_nb + e_bond

def move(coord):
    trial = np.ndarray.copy(coord)
    for i in range(N):
        delta = (2.0 * np.random.rand(3) - 1) * max_delta
        trial[i] += delta
    return trial

def accept(delta_e):
    beta = 1.0/T
    if delta_e <= 0.0:
        return True
    random_number = np.random.rand(1)
    p_acc = np.exp(-beta*delta_e)
    if random_number < p_acc:
        return True
    return False

if __name__ == "__main__":
    # FENE parameters
    K = 40
    R0 = 1.5
    # LJ parameters
    sigma = 1.0
    epsilon = 1.0
    # MC parameters
    N = 50  # number of particles
    rcutoff = 3.5
    max_delta = 0.01
    n_steps = 10000000
    T = 1.5
    coord = gen_chain(N, R0)
    energy_current = total_energy(coord)
    traj = open('traj.xyz', 'w')
    for step in range(n_steps):
        if step % 1000 == 0:
            traj.write(str(N) + '\n\n')
            for i in range(N):
                traj.write("C %10.5f %10.5f %10.5f\n" % (coord[i][0], coord[i][1], coord[i][2]))
            print(step, energy_current)
        coord_trial = move(coord)
        energy_trial = total_energy(coord_trial)
        delta_e = energy_trial - energy_current
        if accept(delta_e):
            coord = coord_trial
            energy_current = energy_trial
    traj.close()
The code prints the current configuration at every step; you can just load it up in VMD and see how it behaves. The bonds will not show correctly at first in VMD: you must use a bead representation for the particles and define the bonds manually or with a script within VMD. In any case, you don't need to see the bonds to notice that the chain does not collapse.
Please bear in mind that if you want to simulate a chain at a certain density, you need to be careful to generate the correct topology. I recommend the EMC package to efficiently generate polymers at the desired thermodynamic conditions. It is by no means a trivial problem, especially for larger chains.
By the way, your code had an error in the FENE energy evaluation: rij2 is already the squared distance, and you squared it again.
Below you can see how the total energy behaves as a function of the number of steps for T = 1.0, N = 20, rcutoff = 3.5, and also the last configuration after 10 thousand steps.
And below for N = 50, T = 1.5, max_delta = 0.01, K = 40, R0 = 1.5, rcutoff = 3.5, and 10 million steps. This is the last configuration.
You can find the full "trajectory" (which isn't really a trajectory, since this is MC) here; it's under 6 MB.
