Move from one dimension to three dimensions - Python

I have a simple simulation setup which generates one-dimensional numpy arrays using draws from np.random.normal.
class Brownian_motion_Langevin:
    def solve(self):
        dB = self.sigma * np.random.normal(size=len(self.steps))
        r2 = self.initial_y + np.cumsum(dB)
        # Append solutions
        self.values = r2
Now I need to change the solve function to return a three-dimensional array. The easiest way I know is to rerun the code three times to get three one-dimensional arrays, which is not very good! Can anyone suggest an efficient/smart way to extend the function to three dimensions?
Currently the output of the code has shape (5, 10001), where the first element is the number of simulation runs and the second is the number of steps. What I expect is (5, 10001, 3), where the third element is the number of spatial dimensions. Here is the complete reproducible code. Thanks!
#!/usr/bin/env python
#
# Python imports
import numpy as np
import h5py

class Brownian_motion_Langevin:
    def solve(self):
        dB = self.sigma * np.random.normal(size=len(self.steps))
        r2 = self.initial_y + np.cumsum(dB)
        # Append solutions
        self.values = r2

    def __init__(self, diffusion_coefficient, initial_y, simulation_time, delta_t):
        """
        :param diffusion_coefficient:
        :param initial_y: 1
        :param delta_t: dt - change in time - size of each interval
        :param simulation_time: total time for simulation
        """
        # Initial parameters
        self.diffusion_coefficient = diffusion_coefficient
        self.initial_y = initial_y
        # Define time
        self.simulation_time = simulation_time
        # Get dt
        self.delta_t = delta_t
        self.steps = np.arange(0, np.floor(self.simulation_time / self.delta_t) + 1)
        self.times = self.steps * self.delta_t
        # Speed up calculations
        self.sigma = (2 * self.diffusion_coefficient * self.delta_t) ** 0.5
        # Simulate the diffusion process
        self.solution = []
        self.solve()

# Define parameters for the process
n = 5      # Number of simulations
Dc = 1     # Dc - Diffusion coefficient
y0 = 0     # y0 - starting point
tt = 1e2   # tt - total time for each simulation
dt = 0.01  # dt - integration time step

# Run simulations
motions = []
for i in range(0, n):
    motions.append(Brownian_motion_Langevin(diffusion_coefficient=Dc,
                                            initial_y=y0,
                                            simulation_time=tt,
                                            delta_t=dt))
values = np.array([m.values for m in motions])
print(values.shape)  # this outputs (5, 10001)

####
# FIXME make values 3-dimensional
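A minimal sketch of one approach, assuming the three coordinates are independent Brownian components sharing the same sigma: draw the increments for all steps and all three dimensions in a single call, then cumulative-sum along the step axis.

    def solve(self):
        # Increments for every step and every dimension in one draw;
        # cumsum along axis 0 integrates each of the 3 columns separately.
        dB = self.sigma * np.random.normal(size=(len(self.steps), 3))
        self.values = self.initial_y + np.cumsum(dB, axis=0)

With this change each m.values has shape (10001, 3), so np.array([m.values for m in motions]) gives the desired (5, 10001, 3).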

Related

Generate a graph for the diffusion equation

I have code that solves the diffusion equation (concentration as a function of time and space):
∂²C/∂x² − ∂C/∂t = 0
I discretized it to the following form:
C[n+1,j] = C[n,j] + (dt/dx²)·(C[n,j+1] − 2·C[n,j] + C[n,j-1])
I am trying to generate the following graph, but I haven't had much success. Could anyone help me with this? Many thanks!
The graph that I obtain:
The code that I have to reproduce the diffusion equation:
import numpy as np
import matplotlib.pyplot as plt

dt = 0.001   # grid size for time (s)
dx = 0.05    # grid size for space (m)
x_max = 1    # in m
t_max = 1    # total time in s
C0 = 1       # concentration

# function to calculate concentration profiles based on a
# finite difference approximation to the 1D diffusion
# equation:
def diffusion(dt, dx, t_max, x_max, C0):
    # diffusion number:
    s = dt/dx**2
    x = np.arange(0, x_max+dx, dx)
    t = np.arange(0, t_max+dt, dt)
    r = len(t)
    a = len(x)
    C = np.zeros([r, a])  # initial condition
    C[:, 0] = C0          # boundary condition on left side
    C[:, -1] = 0          # boundary condition on right side
    for n in range(0, r-1):      # time
        for j in range(1, a-1):  # space
            C[n+1, j] = C[n, j] + s*(C[n, j-1] -
                                     2*C[n, j] + C[n, j+1])
    return x, C, r, a
# note that this can be written without the for-loop
# in space, but it is easier to read it this way

x, C, r, a = diffusion(dt, dx, t_max, x_max, C0)

# plotting:
plt.figure()
plt.xlim([0, 1])
plt.ylim([0, 1])
plot_times = np.arange(0, 1, 0.02)
for t in plot_times:
    plt.plot(x, C[int(t/dt), :], 'Gray', label='numerical')
plt.xlabel('Membrane position x', fontsize=12)
plt.ylabel('Concentration', fontsize=12)
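One detail worth checking with this explicit scheme: the update is only stable when the diffusion number s = dt/dx² is at most 1/2 (here s = 0.001/0.05² = 0.4, so it is stable), and when the code runs as a plain script the figure only appears after plt.show(). A minimal sketch of both checks:

s = dt / dx**2
# The explicit (FTCS) update develops growing oscillations for s > 0.5
assert s <= 0.5, f"unstable: s = {s:.3f} > 0.5; reduce dt or coarsen dx"
plt.show()  # required to display the figure outside interactive sessions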

Optimization of time-averaged MSD

I am trying to run a simulation of nT = 100 tracks, each of N = 10*10**6 steps with dt = 0.02, and then compute the time-averaged MSD defined as follows:
def calc_msd_1D(x, nLags):
    N = len(x)
    inv_sq_np = 1./np.sqrt(N)
    msd = np.zeros(nLags)
    for delta in range(0, nLags):
        r = 0
        #msd_array = np.zeros(N)
        for i in range(N-delta):
            r += (x[i+delta] - x[i])**2
        msd[delta] = 1/(N-delta) * r
        # msd[0] -= 2*np.random.normal(0,1)**2
        #msd[1:] += 2*np.random.normal(0,1)**2
    return msd
The MSDs are then computed for each trajectory using a class-based structure:
class Trajectory_Analysis_MSD:
    def __init__(self, X, Y, nP, dT):
        # save parameters
        self.dT = dT
        self.X = X
        self.Y = Y
        self.nP = nP

    def getMSD(self, nLags):
        # initialize memory
        self.MSD_x = np.zeros(nLags)
        self.MSD_y = np.zeros(nLags)
        # calculate the correlations for components
        self.MSD_x = calc_msd_1D(self.X, nLags)
        self.MSD_y = calc_msd_1D(self.Y, nLags)
        # calculate the msd
        self.msd = (self.MSD_x + self.MSD_y)
Unfortunately the computation is very expensive in time: at the moment I have to subsample the trajectories down to 50000 points to store the MSD (average time ≈ 26 min). Is there a way to compute the time-averaged MSD for the entire track of each trajectory, preferably without storing every data point?
This for loop:
r = 0
for i in range(N-delta):
    r += (x[i+delta] - x[i])**2
will be very slow for large N because it iterates in pure Python. I'm guessing this (or similar code you have elsewhere) is your bottleneck.
Try to vectorize your code, so all the inner loops run inside numpy, not Python:
r = np.sum((x[delta:N] - x[0:N-delta])**2)
You don't even need the N variable (note that this slicing only works for delta > 0, since x[:-0] is an empty slice, so handle the zero lag separately):
msd[delta] = np.mean((x[delta:] - x[:-delta])**2)
And maybe you can use a ready-made function like np.correlate in this case.
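Putting this together, a fully vectorized version of the whole function might look like the following sketch (same signature as the original calc_msd_1D; msd[0] is left at zero, since the displacement over a zero lag is identically zero):

import numpy as np

def calc_msd_1D(x, nLags):
    # Time-averaged MSD: one numpy reduction per lag instead of a
    # pure-Python inner loop over all N - delta time origins.
    x = np.asarray(x, dtype=float)
    msd = np.zeros(nLags)
    for delta in range(1, nLags):
        msd[delta] = np.mean((x[delta:] - x[:-delta])**2)
    return msd

This reduces the Python-level work from O(N·nLags) iterations to nLags numpy calls; an FFT-based autocorrelation would cut the cost further, but the version above is usually fast enough.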

MCMC method 1D Ferromagnetic Ising Model

My question is about coding a 1-dimensional Ising model in Python using a Markov chain Monte Carlo (MCMC) method.
I have the following Hamiltonian
$$H = - \sum_{i=1}^{L-1}\sigma_{i}\sigma_{i+1} - B\sum_{i=1}^{L}\sigma_{i}$$
I want to write a python function that generates a Markov chain where at each step, it calculates and saves the magnetization (per site) and the energy.
The energy is the Hamiltonian itself, and I will define the magnetization as:
$$\frac{1}{L}\sum_{i}\sigma_{i}$$
My probability distribution would be:
$$p(\sigma) = e^{-\beta H(\sigma)}$$ where $\beta = T^{-1}$.
For the Markov chain I will implement a Metropolis-Hastings algorithm, using the acceptance ratio
$$\frac{P(\sigma')}{P(\sigma)} = e^{\beta(H(\sigma)-H(\sigma'))}.$$
My idea is to always accept transitions with
$$H(\sigma') < H(\sigma)$$
and to accept transitions with
$$H(\sigma') > H(\sigma)$$
only with probability
$$P = e^{\beta(H(\sigma)-H(\sigma'))}.$$
So let me set a few parameters such as:
$L=20$ - Lattice Size
$T=2$ - Temperature
$B=0$ - Magnetic Field
After the calculations I will need to plot a histogram of the magnetization, and the energy versus step number. I have no issue with this part.
My Python knowledge isn't great, but I have included my rough (incomplete) draft below. I don't think I am making much progress. Any help would be great.
#Coding attempt MCMC 1-Dimensional Ising Model
import numpy as np
import matplotlib.pyplot as plt

#Shape of Lattice L
L = 20
Shape = (20,20)
#Spin Configuration
spins = np.random.choice([-1,1],Shape)
#Magnetic moment
moment = 1
#External magnetic field
field = np.full(Shape, 0)
#Temperature
Temperature = 2
Beta = Temperature**(-1)
#Interaction (ferromagnetic if positive, antiferromagnetic if negative)
interaction = 1

#Using Probability Distribution given
def get_probability(Energy1, Energy2, Temperature):
    return np.exp((Energy1 - Energy2) / Temperature)

def get_energy(spins):
    return -np.sum(
        interaction * spins * np.roll(spins, 1, axis=0) +
        interaction * spins * np.roll(spins, -1, axis=0) +
        interaction * spins * np.roll(spins, 1, axis=1) +
        interaction * spins * np.roll(spins, -1, axis=1)
    )/2 - moment * np.sum(field * spins)

#Introducing Metropolis Hastings Algorithm
x_now = np.random.uniform(-1, 1)  #initial value
d = 10**(-1)  #delta
y = []
for i in range(L-1):
    #generating next value
    x_proposed = np.random.uniform(x_now - d, x_now + d)
    #accepting or rejecting the value
    if np.random.rand() < np.exp(-np.abs(x_proposed))/(np.exp(-np.abs(x_now))):
        x_now = x_proposed
    if i % 100 == 0:
        y.append(x_proposed)
Here I changed your code to solve the problem the way I always do.
Please check the code and formulas very carefully.
#Coding attempt MCMC 1-Dimensional Ising Model
import numpy as np
import matplotlib.pyplot as plt

#Shape of Lattice L
L = 20
#Shape = (20)
#Number of Monte Carlo samples
MC_samples = 1000
#Spin Configuration
spins = np.random.choice([-1,1],L)
print(spins)
#Magnetic moment
moment = 1
#External magnetic field
field = 0
#Temperature
Temperature = 2
Beta = Temperature**(-1)
#Interaction (ferromagnetic if positive, antiferromagnetic if negative)
interaction = 1

#Using Probability Distribution given
def get_probability(delta_energy, Temperature):
    return np.exp(-delta_energy / Temperature)

def get_energy(spins):
    energy = 0
    for i in range(L):
        energy = energy + interaction*spins[i-1]*spins[i]
    energy = energy - field*sum(spins)
    return energy

def delta_energy(spins, random_spin):
    #If you do flip one random spin, the change in energy is:
    #(By using a reduced formula that only involves the spin
    # and its neighbours)
    if random_spin == L:
        PBC = 0
    else:
        PBC = random_spin + 1
    return -2*interaction*(spins[random_spin-1]*spins[random_spin] +
                           spins[random_spin]*spins[PBC] + field*spins[random_spin])

#Introducing Metropolis Hastings Algorithm
#x_now = np.random.uniform(-1, 1) #initial value
#d = 10**(-1) #delta
#y = []
magnetization = []
energy = []
for i in range(MC_samples):
    #Each Monte Carlo step consists of L random spin moves
    for j in range(L):
        #Choosing a random spin
        random_spin = np.random.randint(L-1, size=(1))
        #Computing the change in energy of this spin flip
        delta = delta_energy(spins, random_spin)
        #Metropolis accept-rejection:
        if delta < 0:
            #Accept the move if it lowers the energy
            spins[random_spin] = -spins[random_spin]
        else:
            #If it raises the energy, compute the acceptance probability
            probability = get_probability(delta, Temperature)
            random = np.random.rand()
            if random <= probability:
                #Accept the move
                spins[random_spin] = -spins[random_spin]
    #After the MC step, we measure the system
    magnetization.append(sum(spins)/L)
    energy.append(get_energy(spins))
print(magnetization, energy)
#Do histograms and plots
At the end of the simulation, the variables magnetization and energy are lists that contain the measured values at each MC step.
You can use these directly to compute the histograms and plots.
Note: the energy list holds the total energy of the system, not the energy per site.
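For instance, a minimal plotting sketch, just one way to use those two lists:

import matplotlib.pyplot as plt

# Histogram of the magnetization per site, and the total energy per MC step
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.hist(magnetization, bins=30)
ax1.set_xlabel('Magnetization per site')
ax1.set_ylabel('Counts')
ax2.plot(energy)
ax2.set_xlabel('MC step')
ax2.set_ylabel('Total energy')
plt.show()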
I was looking for a simple implementation of a 1D Ising model and came across this post. While I am no expert in the field, I did write my master's thesis on a related topic. I implemented the code in Oriol Cabanas Tirapu's answer and found a few bugs (I think).
Below is my adapted version of their code. Hopefully it is useful for someone.
#Coding attempt MCMC 1-Dimensional Ising Model
import numpy as np
import matplotlib.pyplot as plt

#Using Probability Distribution given
def get_probability(delta_energy, Temperature):
    return np.exp(-delta_energy / Temperature)

def get_energy(spins):
    energy = 0
    for i in range(len(spins)):
        energy = energy + interaction*spins[i-1]*spins[i]
    energy = energy - field*sum(spins)
    return energy

def delta_energy(spins, random_spin):
    #If you do flip one random spin, the change in energy is:
    #(By using a reduced formula that only involves the spin
    # and its neighbours)
    if random_spin == L-1:
        PBC = 0
    else:
        PBC = random_spin + 1
    old = -interaction*(spins[random_spin-1]*spins[random_spin] + spins[random_spin]*spins[PBC]) - field*spins[random_spin]
    new = interaction*(spins[random_spin-1]*spins[random_spin] + spins[random_spin]*spins[PBC]) + field*spins[random_spin]
    return new - old

def metropolis(L=100, MC_samples=1000, Temperature=1, interaction=1, field=0):
    # initializing
    #Spin Configuration
    spins = np.random.choice([-1,1],L)
    Beta = Temperature**(-1)

    #Introducing Metropolis Hastings Algorithm
    data = []
    magnetization = []
    energy = []
    for i in range(MC_samples):
        #Each Monte Carlo step consists of L random spin moves
        for j in range(L):
            #Choosing a random spin
            random_spin = np.random.randint(0, L, size=(1))
            #Computing the change in energy of this spin flip
            delta = delta_energy(spins, random_spin)
            #Metropolis accept-rejection:
            if delta < 0:
                #Accept the move if it lowers the energy
                spins[random_spin] = -spins[random_spin]
                #print('change')
            else:
                #If it raises the energy, compute the acceptance probability
                probability = get_probability(delta, Temperature)
                random = np.random.rand()
                if random <= probability:
                    #Accept the move
                    spins[random_spin] = -spins[random_spin]
        data.append(list(spins))
        #After the MC step, we measure the system
        magnetization.append(sum(spins)/L)
        energy.append(get_energy(spins))
    return data, magnetization, energy

def record_state_statistics(data, n=4):
    ixs = tuple()
    sub_sample = [[d[i] for i in range(n)] for d in data]
    # get state number
    state_nums = [int(sum([((j+1)/2)*2**i for j, i in zip(reversed(d), range(len(d)))])) for d in sub_sample]
    return state_nums

# setting up problem
L = 200            # size of system
MC_samples = 1000  # number of samples
Temperature = 1    # "temperature" parameter
interaction = 1    # strength of interaction between nearest neighbours
field = 0          # external field

# running MCMC
data = metropolis(L=L, MC_samples=MC_samples, Temperature=Temperature, interaction=interaction, field=field)
results = record_state_statistics(data[0], n=4)  # I was also interested in the probability of each micro-state in a sub-section of the system

# Plotting
plt.figure(figsize=(20,10))

plt.subplot(2,1,1)
plt.imshow(np.transpose(data[0]))
plt.xticks([])
plt.yticks([])
plt.axis('tight')
plt.ylabel('Space', fontdict={'size':20})
plt.title('Critical dynamics in a 1-D Ising model', fontdict={'size':20})

plt.subplot(2,1,2)
plt.plot(data[2], 'r')
plt.xlim((0, MC_samples))
plt.xticks([])
plt.yticks([])
plt.ylabel('Energy', fontdict={'size':20})
plt.xlabel('Time', fontdict={'size':20})

Progressively filter/smooth a signal in python (to straight line on the left to no filtering on the right)

A picture is worth a thousand words (sorry for the shoddy work):
Ideally the solution should preserve the value and the slope at both ends.
If, in addition, the position and sharpness of the transition can be adjusted, it is perfect.
But I have not found any solution yet...
Thank you very much for your help
Here is a piece of code to get started:
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
import numpy as np

def round_up_to_odd(f):
    # np.int was removed from recent NumPy releases; plain int works
    return int(np.ceil(f / 2.) * 2 + 1)

def generateRandomSignal(n=1000, seed=None):
    """
    Parameters
    ----------
    n : integer, optional
        Number of points in the signal. The default is 1000.

    Returns
    -------
    sig : numpy array
    """
    np.random.seed(seed)
    print("Seed was:", seed)
    steps = np.random.choice(a=[-1, 0, 1], size=(n-1))
    roughSig = np.concatenate([np.array([0]), steps]).cumsum(0)
    sig = savgol_filter(roughSig, round_up_to_odd(n/20), 6)
    return sig

n = 1000
t = np.linspace(0, 10, n)
seed = np.random.randint(0, high=100000)
#seed = 45136
sig = generateRandomSignal(seed=seed)

###############################
# ????
# sigFilt = adaptiveFilter(sig)
###############################

# Plot
plt.figure()
plt.plot(t, sig, label="Signal")
# plt.plot(t, sigFilt, label="Signal filtered")
plt.legend()
Simple convolution does smoothing. However, as the question asks, here we need strong smoothing at the start and no smoothing towards the end. I used a moving-average approach with a dynamic window size. In the example below, the window size changes linearly.
def dynamic_smoothing(x, start_window_length=None, end_window_length=1):
    # Moving average whose window shrinks linearly from
    # start_window_length (strong smoothing) down to
    # end_window_length (almost no smoothing).
    if start_window_length is None:
        start_window_length = len(x) // 2
    smoothed = list()
    for i in range(len(x)):
        # compute window half-length for this position
        a = i / len(x)
        w = int(np.round((1.0-a) * start_window_length + a * end_window_length))
        # the window must stay inside the array
        w0 = max(0, i - w)
        w1 = min(len(x), i + w + 1)
        smoothed.append(sum(x[w0:w1]) / (w1 - w0))
    return np.array(smoothed)
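A quick usage sketch with the signal generated above, matching the commented-out lines in the question's starter code:

sigFilt = dynamic_smoothing(sig, start_window_length=len(sig)//2, end_window_length=1)

plt.figure()
plt.plot(t, sig, label="Signal")
plt.plot(t, sigFilt, label="Signal filtered")
plt.legend()
plt.show()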

Is it possible to loop to a certain value and carry on further calculations with this value?

I am new here and new to programming, so excuse me if the question is not formulated clearly enough.
For a uni assignment, my lab partner and I are programming a predator-prey system.
In this predator-prey system, there is a certain load factor 'W0'.
We want to find a load factor W0, accurate to 5 significant digits, such that the number of predators (wnum[1] in our code) never drops below 250. We need the code to find this value of W0 and then carry on further calculations with it. Here is what we've tried so far, but Python does not seem to give any response:
# Import important stuff and settings
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

print('Results of Group 4')

def W0():
    W0 = 2.0
    while any(wnum[1]) < 250:
        W0 = W0 - 0.0001
    return W0

def W(t):
    if 0 <= t < 3/12:
        Wt = 0
    elif 3/12 <= t <= 8/12:
        Wt = W0
    elif 8/12 < t < 1:
        Wt = 0
    else:
        Wt = W(t - 1)
    return Wt

# Define the right-hand-side function
def rhsf(t, y):
    y1 = y[0]
    y2 = y[1]
    f1 = (2 - 2*10**-3*y2)*y1 - W(t)*y1
    f2 = (-3.92 + 7*10**-3*y1)*y2
    return np.array([f1, f2])

# Define one step of the RK4 method
def RK4Step(tn, wn, Dt, f):
    # tn = current time
    # wn = known approximation at time tn
    # Dt = the time step to use
    # f = the right-hand-side function to use
    # wnplus1 = the new approximation at time tn+Dt
    k1 = Dt*f(tn, wn)
    k2 = Dt*f(tn + 0.5*Dt, wn + 0.5*k1)
    k3 = Dt*f(tn + 0.5*Dt, wn + 0.5*k2)
    k4 = Dt*f(tn + Dt, wn + k3)
    wnplus1 = wn + 1/6*(k1 + 2*k2 + 2*k3 + k4)
    return wnplus1

# Define the complete RK4 method
def RK4Method(t0, tend, Dt, f, y0):
    # t0 = initial time of simulation
    # tend = final time of simulation
    # Dt = the time step to use
    # f = the right-hand-side function to use
    # y0 = the initial values
    # calculate the number of time steps to take
    N = int(np.round((tend - t0)/Dt))
    # make the list of times t which we want the solution
    time = np.linspace(t0, tend, num=N+1)
    # make sure Dt matches with the number of time steps
    Dt = (tend - t0)/N
    # Allocate memory for the approximations
    # row i represents all values of variable i at all times
    # column j represents all values of all variables at time t_j
    w = np.zeros((y0.size, N+1))
    # store the (given) initial value
    w[:, 0] = y0
    # Perform all time steps
    for n, tn in enumerate(time[:-1]):
        w[:, n+1] = RK4Step(tn, w[:, n], Dt, f)
    return time, w

# Set all known values and settings
t0 = 0.0
tend = 10.0
y0 = np.array([600.0, 1000.0])
Dt = 0.5/(2**7)

# Execute the method
tnum, wnum = RK4Method(t0, tend, Dt, rhsf, y0)

# Make a nice table
alldata = np.concatenate(([tnum], wnum), axis=0).transpose()
table = pd.DataFrame(alldata, columns=['t', 'y1(t)', 'y2(t)'])
print('\nA nice table of the simulation:\n')
print(table)

# Make a nice picture
plt.close('all')
plt.figure()
plt.plot(tnum, wnum[0,:], label='$y_1$', marker='o', linestyle='-')
plt.plot(tnum, wnum[1,:], label='$y_2$', marker='o', linestyle='-')
plt.xlabel('$t$')
plt.ylabel('$y(t)$')
plt.title('Simulation')
plt.legend()

# Do an error computation
# Execute the method again with a doubled time step
tnum2, wnum2 = RK4Method(t0, tend, 2.0*Dt, rhsf, y0)
# Calculate the global truncation errors at the last simulated time
errors = (wnum[:, -1] - wnum2[:, -1])/(2**4 - 1)
print('\nThe errors are ', errors[0], ' for y1 and ', errors[1], ' for y2 at time t=', tnum[-1])
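One way to structure the search, as a sketch under the assumption that the minimum predator count decreases monotonically as W0 grows: wrap the simulation in a function of W0 and bisect on it. This reuses RK4Method from the script above, and the bracket [0, 2] mirrors the starting guess W0 = 2.0.

def min_predators(W0_value):
    # Re-run the whole simulation for a given load factor and return
    # the smallest predator count seen over the simulated interval.
    def W(t):
        if 0 <= t < 3/12:
            return 0
        elif 3/12 <= t <= 8/12:
            return W0_value
        elif 8/12 < t < 1:
            return 0
        else:
            return W(t - 1)
    def f(t, y):
        y1, y2 = y
        return np.array([(2 - 2*10**-3*y2)*y1 - W(t)*y1,
                         (-3.92 + 7*10**-3*y1)*y2])
    _, w = RK4Method(t0, tend, Dt, f, y0)
    return w[1, :].min()

# Bisection, assuming min_predators decreases as W0 increases:
lo, hi = 0.0, 2.0
while (hi - lo) > 1e-5 * hi:   # roughly 5 significant digits
    mid = 0.5 * (lo + hi)
    if min_predators(mid) >= 250:
        lo = mid   # constraint satisfied, try a larger load factor
    else:
        hi = mid   # too much harvesting, back off
W0_found = lo
print('Load factor W0 ≈', W0_found)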
