In the following code, I want to optimize the objective function using Optuna.
"""
Additional modules
pip install optuna
pip install scikit-optimize
"""
import time
from py_wake.examples.data.hornsrev1 import V80
from py_wake.examples.data.hornsrev1 import Hornsrev1Site # We work with the Horns Rev 1 site, which comes already set up with PyWake.
from py_wake import BastankhahGaussian
import numpy as np
import optuna
from py_wake.turbulence_models import GCLTurbulence
from py_wake.deflection_models.jimenez import JimenezWakeDeflection
from py_wake.wind_turbines.power_ct_functions import PowerCtFunctionList, PowerCtTabular
def newSite(x,y):
xNew=np.array([x[0]+560*i for i in range(4)])
yNew=np.array([y[0]+560*i for i in range(4)])
x_newsite=np.array([xNew[0],xNew[0],xNew[0],xNew[1],xNew[1],xNew[1],xNew[2],xNew[2],xNew[2]])
y_newsite=np.array([yNew[0],yNew[1],yNew[2],yNew[0],yNew[1],yNew[2],yNew[0],yNew[1],yNew[2]])
return (x_newsite,y_newsite)
def objective(trial):
site = Hornsrev1Site()
x, y = site.initial_position.T
x_newsite,y_newsite=newSite(x,y)
windTurbines = V80()
# We ask values of c from optuna.
c = []
for i in range(9):
for l in range(360):
for k in range(23):
varname = f'c{i}'
minv, maxv, stepv = 0, 1, 1
c.append(trial.suggest_int(varname, minv, maxv, step=stepv))
C=np.array(c)
C=C.reshape((9,360,23))
for item in range(9):
for j in range(10,370,10):
for i in range(j-10,j):
C[item][i]=C[item][j-5]
windTurbines.powerCtFunction = PowerCtFunctionList(
key='operating',
powerCtFunction_lst=[PowerCtTabular(ws=[0, 100], power=[0, 0], power_unit='w', ct=[0, 0]), # 0=No power and ct
windTurbines.powerCtFunction], # 1=Normal operation
default_value=1)
print(C)
operating = np.ones((9,360,23)) # shape=(#wt,wd,ws)
operating[C <= 0.5]=0
wf_model = BastankhahGaussian(site, windTurbines,deflectionModel=JimenezWakeDeflection(),turbulenceModel=GCLTurbulence())
# run wind farm simulation
sim_res = wf_model(
x_newsite, y_newsite, # wind turbine positions
h=None, # wind turbine heights (defaults to the heights defined in windTurbines)
wd=None, # Wind direction (defaults to site.default_wd (0,1,...,360 if not overridden))
ws=None, # Wind speed (defaults to site.default_ws (3,4,...,25 m/s if not overridden))
operating=operating
)
for i in range(9):
for l in range(360):
for k in range(23):
if sim_res.TI_eff[i][l][k]-0.14 > 0 :
sim_res.Power[i][l][k]=sim_res.Power[i][l][k]-10000*(sim_res.TI_eff[i][l][k]-0.14)**2
print(-float(np.sum(sim_res.Power))/1e+11)
return -float(np.sum(sim_res.Power)/1e+11)
def optuna_hpo():
t0 = time.perf_counter()
num_trials = 300
sampler = optuna.integration.SkoptSampler()
study = optuna.create_study(sampler=sampler, direction="maximize")
study.optimize(objective, n_trials=num_trials)
print(f"Best params: {study.best_params}")
print(f"Best value: {study.best_value}\n")
print(f'elapse: {round(time.perf_counter() - t0)}s')
# start
optuna_hpo()
The problem is that the initial guess c is an array of shape (9, 360, 23), but when I run print(f"Best params: {study.best_params}") it only prints 9 values of c, while I need 9*360*23. So I think Optuna only changes these 9 values, not all of the c values.
I have also tried another way to write c like this:
for i in range(9*360*23):
varname = f'c{i}'
minv, maxv, stepv = 0, 1, 1
c.append(trial.suggest_int(varname, minv, maxv, step=stepv))
but this way the code stops after 7 or 8 iterations because of a memory problem. I have also run it on a university server, so I don't think the machine is the issue. So now I want to know: is there any way to include all of the c values in the optimization? I mean, instead of changing only 9 of them, changing all of them.
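To make the idea concrete, this is roughly what I have in mind (just a sketch, not tested): ask Optuna for one value per turbine, per 10-degree wind-direction sector and per wind speed, and repeat it over the 10 directions of each sector. That matches the averaging loop in my objective function and reduces the number of parameters from 9*360*23 to 9*36*23 (still a lot, but much smaller):
# Sketch: one suggested value per (turbine, 10-degree sector, wind speed),
# broadcast afterwards to the full (9, 360, 23) array.
c = np.empty((9, 36, 23))
for i in range(9):
    for s in range(36):
        for k in range(23):
            c[i, s, k] = trial.suggest_int(f'c_{i}_{s}_{k}', 0, 1)
# repeat every sector value over its 10 individual wind directions
C = np.repeat(c, 10, axis=1)   # shape (9, 360, 23)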
I am trying to make a program that tells me when a note has been pressed.
I have the following notes exported as a .wav file (The C Major Scale 4 times with different rhythms, dynamics and in different octaves):
I can get the volumes of my sound file using the following code:
from scipy.io import wavfile
def get_volume(file):
sr, data = wavfile.read(file)
if data.ndim > 1:
data = data[:, 0]
return data
volumes = get_volume("FILE")
Here is some information about the output:
Max: 27851
Min: -25664
Mean: -0.7569383391943734
A Sample from the array: [ -7987 -8615 -8983 -9107 -9019 -8750 -8324 -7752 -7033 -6156
-5115 -3920 -2610 -1245 106 1377 2520 3515 4364 5077
5659 6113 6441 6639 6708 6662 6518 6288 5962 5525
4963 4265 3420 2418 1264 -27 -1429 -2901 -4388 -5814
-7101 -8186 -9028 -9614 -9955 -10077 -10012 -9785 -9401 -8846]
And here is what I get when I plot the volumes array (x is the index, y is the volume):
I want to get the indices of the start and end of the notes, like the ones in the image (I did this by hand, so it's not accurate):
When I looked at the data I realized that it is a 1D array, and I also noticed that when a note gets louder or quieter it is not smooth; it is like a zigzag, but there is still a trend. So I can't just take the gradient (slope) at each point. So I thought about grouping the samples into batches, getting the average gradient of each batch, and doing the calculations with that, like so:
def get_average_gradient(arr):
# Calculates average gradient
return sum([i - (sum(arr) / len(arr)) for i in arr]) / len(arr)
def get_note_start_end(arr_size, batch_size, arr):
# Finds start and end indices
ranges = []
curr_range = [0]
prev_slope = curr_slope = "NO SLOPE"
has_ended = False
for i, j in enumerate(arr):
if j > 0:
curr_slope = "INCREASING"
elif j < 0:
curr_slope = "DECREASING"
else:
curr_slope = "NO SLOPE"
if prev_slope == "DECREASING" and not has_ended:
if i == len(arr) - 1 or arr[i + 1] < 0:
if curr_slope != "DECREASING":
curr_range.append((i + 1) * batch_size + batch_size)
ranges.append(curr_range)
curr_range = [(i + 1) * batch_size + batch_size + 1]
has_ended = True
if has_ended and curr_slope == "INCREASING":
has_ended = False
prev_slope = curr_slope
ranges[-1][-1] = arr_size - 1
return ranges
def get_notes(batch_size, arr):
# Gets the gradients of the batches
out = []
for i in range(0, len(arr), batch_size):
if i + batch_size > len(arr):
gradient = get_average_gradient(arr[i:])
else:
gradient = get_average_gradient(arr[i: i+batch_size])
# print(gradient, i)
out.append(gradient)
return get_note_start_end(len(arr), batch_size, out)
notes = get_notes(128, volumes)
The problem with this is that if the batch size is too small, it returns the indices of small peaks which aren't notes on their own. If the batch size is too big, the program misses the start and end indices.
I also tried to get the notes by using the silence between them.
Here is the code I used:
from pydub import AudioSegment, silence
audio = intro = AudioSegment.from_wav("C - Major - Test.wav")
dBFS = audio.dBFS
notes = silence.detect_nonsilent(audio, min_silence_len=50, silence_thresh=dBFS-10)
This worked the best, but it still wasn't good enough. Here is what I got:
It detected some notes pretty well, but it wasn't able to identify notes accurately if a note didn't become very quiet before a different one was played (like in the second and fourth scales).
I have been thinking about this problem for days and I have basically tried most, if not all, of the good(?) ideas I had. I am new to analysing audio files. Maybe I am using the wrong data to do what I want. Maybe I need to use the frequency data (I tried getting it, but couldn't make sense of it).
Frequency code:
from scipy.fft import *
from scipy.io import wavfile
import matplotlib.pyplot as plt
def get_freq(file, start_time, end_time):
sr, data = wavfile.read(file)
if data.ndim > 1:
data = data[:, 0]
else:
pass
# Fourier Transform
N = len(data)
yf = rfft(data)
xf = rfftfreq(N, 1 / sr)
return xf, yf
FILE = "C - Major - Test.wav"
plt.plot(*get_freq(FILE, 0, 10))
plt.show()
And the frequency graph:
And here is the .wav file:
https://drive.google.com/file/d/1CERH-eovu20uhGoV1_O3B2Ph-4-uXpiP/view?usp=sharing
Any help is appreciated :)
I think this is what you need:
First you convert the negative numbers into positive ones (absolute value) and smooth the curve to eliminate noise; to find the lower peaks you work with the negated signal.
from scipy.io import wavfile
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
import numpy as np
from scipy.signal import savgol_filter
def get_volume(file):
sr, data = wavfile.read(file)
if data.ndim > 1:
data = data[:, 0]
return data
v1 = abs(get_volume("test.wav"))
#Smooth the curve
volumes=savgol_filter(v1,10000 , 3)
lv=volumes*-1
#find peaks
peaks,_ = find_peaks(volumes,distance=8000,prominence=300)
lpeaks,_= find_peaks(lv,distance=8000,prominence=300)
# plot them
plt.plot(volumes)
plt.plot(peaks,volumes[peaks],"x")
plt.plot(lpeaks,volumes[lpeaks],"o")
plt.plot(np.zeros_like(volumes), "--", color="gray")
plt.show()
Plot with your test file, x marks the high peaks and o the lower peaks
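If you also want explicit start and end indices for each note (as in your hand-drawn image), a rough sketch (not tested on your file) is to pair every high peak with the closest low peaks around it:
# Rough sketch: treat the low peaks as approximate note boundaries around
# each high peak to get (start, end) sample indices per note.
note_ranges = []
for p in peaks:
    before = lpeaks[lpeaks < p]        # low peaks before this note's maximum
    after = lpeaks[lpeaks > p]         # low peaks after it
    start = before[-1] if before.size else 0
    end = after[0] if after.size else len(volumes) - 1
    note_ranges.append((int(start), int(end)))
print(note_ranges)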
This article presents two Python libraries (Aubio, librosa) that achieve what you need and includes examples of how to use them: How to Use Python to Detect Music Onsets by Lynn Zheng
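For example, with librosa the onsets can be obtained in a few lines; this is only a sketch along the lines of that article (requires pip install librosa, not verified on this particular file):
import librosa

y, sr = librosa.load("C - Major - Test.wav")
# onset times in seconds; use units='samples' for sample indices instead
onsets = librosa.onset.onset_detect(y=y, sr=sr, units='time')
print(onsets)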
I have this formula that is used to predict athletic performance based on daily stress.
It is based on 5 constants unique to each person. I'm trying to find these constants from daily stress data and performance tests that have been done. I'm new to programming and I don't know where to start.
see the formula
Performance(t) = k1 * Fitness(t) - k2 * Fatigue(t) + P0
Fitness(t) = Fitness(t-1) * exp(-1/t1) + stress(t)    (yesterday's fitness with decay, plus today's stress)
Fatigue(t) = Fatigue(t-1) * exp(-1/t2) + stress(t)    (yesterday's fatigue with decay, plus today's stress)
This is a sample of the data: data
thank you
import pandas as pd
import numpy as np
import math
from scipy import optimize
data = pd.read_csv('data_mod1.csv')
TSS = data['stress'].fillna(0)
arr = np.array(TSS)
#data = data.dropna()
def Banister(x, t1, t2, k1, k2, c):
    # rebuild the fitness/fatigue series on every call; if these lists live at
    # module level they keep growing across the many calls made by curve_fit
    a = [x[0]]
    b = [x[0]]
    for v in x[1:]:
        a.append(a[-1]*np.exp(-1/t1) + v)
        b.append(b[-1]*np.exp(-1/t2) + v)
    data['fit'] = pd.Series(a)
    data['fat'] = pd.Series(b)
    data['perf'] = ((data['fit']*k1) - (data['fat']*k2)) + c
    return data['perf']
from scipy.optimize import curve_fit
fit = curve_fit(Banister, arr, data[data.index], p0=[20, 10, 1, 2, 50])
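One note on that last line: the third argument of curve_fit has to be the measured performance values (ydata), and data[data.index] does not look like it selects those. A minimal sketch, assuming the CSV has a column named performance holding the test results (adjust the name to your file):
# Sketch: 'performance' is a placeholder for whatever column in data_mod1.csv
# holds the measured performance values.
y_measured = data['performance'].values
popt, pcov = curve_fit(Banister, arr, y_measured, p0=[20, 10, 1, 2, 50])
print("t1, t2, k1, k2, c =", popt)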
I'm trying to implement a simple Monte Carlo in Python (which I'm fairly new to). Coming from C, I'm probably going down the wrong path, since my code is far too slow for what I'm asking: I have a hard-sphere-like potential (see V_pot(r) in the code) for 60 3D particles with periodic boundary conditions (PBC), so I defined the following functions
import timeit
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy import inf
#
L, kb, d, eps, DIM = 100, 1, 1, 1, 3
r_c, T = L/2, eps/(.5*kb)
beta = 1/(kb*T)
#
def dist(A, B):
d = A - B
d -= L*np.around(d/L)
return np.sqrt(np.sum(d**2))
#
def V_pot(r):
V = -eps*(d**6/r**6 - d**6/r_c**6)
if r > r_c:
V = 0
elif r < d:
V = inf
return V
#
def ener(config):
V_jk_val, j = 0, N
#
while (j > 0):
j -= 1
i = 0
while (i < j):
V_jk_val += V_pot(dist(config[j,:], config[i,:]))
i += 1
#
return V_jk_val
#
def acc(en_n, en_o):
d_en = en_n-en_o
if (d_en <= 0):
acc_val = 1
else:
acc_val = np.exp(-beta*(d_en))
return acc_val
#
then, starting from the configuration (where every line of the array represents the coordinates of a 3D particle)
config = np.array([[16.24155657, 57.41672173, 94.39565792],
[76.38121764, 55.88334066, 5.72255163],
[38.41393783, 58.09432145, 6.26448054],
[86.44286438, 61.37100899, 91.97737383],
[37.7315366 , 44.52697269, 23.86320444],
[ 0.59231801, 39.20183376, 89.63974115],
[38.00998141, 3.84363202, 52.74021401],
[99.53480756, 69.97688928, 21.43528924],
[49.62030291, 93.60889503, 15.73723259],
[54.49195524, 0.6431965 , 25.37401196],
[33.82527814, 25.37776021, 67.4320553 ],
[64.61952893, 46.8407798 , 4.93960443],
[60.47322732, 16.48140136, 33.26481306],
[19.71667792, 46.56999616, 35.61044526],
[ 5.33252557, 4.44393836, 60.55759256],
[44.95897856, 7.81728046, 10.26000715],
[86.5548395 , 49.74079452, 4.80480133],
[52.47965686, 42.831448 , 22.03890639],
[ 2.88752006, 59.84605062, 22.75760029],
[ 9.49231045, 42.08653603, 40.63380097],
[13.90093641, 74.40377984, 32.62917915],
[97.44839233, 90.47695772, 91.60794836],
[51.29501624, 27.03796277, 57.09525454],
[10.30180295, 21.977336 , 69.54173272],
[59.61327648, 14.29582325, 11.70942289],
[89.52722796, 26.87758644, 76.34934637],
[82.03736088, 78.5665713 , 23.23587395],
[79.77571695, 66.140968 , 53.6784269 ],
[82.86070472, 40.82189833, 51.48739072],
[99.05647523, 98.63386809, 6.33888993],
[31.02997123, 66.99709163, 95.88332332],
[97.71654767, 59.24793618, 5.20183793],
[ 6.79964473, 45.01258652, 48.69477807],
[93.34977049, 55.20537774, 82.35693526],
[17.35577815, 20.45936211, 29.27981422],
[55.51942207, 52.22875901, 3.6616131 ],
[61.45612224, 36.50170405, 62.89796773],
[23.55822368, 7.09069623, 37.38274914],
[39.57082799, 58.95457592, 48.0304924 ],
[93.94997617, 64.34383203, 77.63346308],
[17.47989107, 90.01113402, 81.00648645],
[86.79068539, 66.35768515, 56.64402907],
[98.71924121, 38.33749023, 73.4715132 ],
[ 0.42356139, 78.32172925, 15.19883322],
[77.75572529, 2.60088767, 56.4683935 ],
[49.76486142, 3.01800153, 93.48019286],
[42.54483899, 4.27174457, 4.38942325],
[66.75777178, 41.1220603 , 19.64484167],
[19.69520773, 41.09230171, 2.51986091],
[73.20493772, 73.16590392, 99.19174281],
[94.16756184, 72.77653334, 10.32128552],
[29.95281655, 27.58596604, 85.12791195],
[ 2.44803886, 32.82333962, 41.6654683 ],
[23.9665915 , 49.94906612, 37.42701059],
[30.40282934, 39.63854309, 47.16572743],
[56.04809276, 30.19705527, 29.15729635],
[ 2.50566522, 70.37965564, 16.78016719],
[28.39713572, 4.04948368, 27.72615789],
[26.11873563, 41.49557167, 14.38703697],
[81.91731981, 12.10514972, 12.03083427]])
I run the 5000 Monte Carlo steps of the simulation with the following code
N = 60
TIME_MC = 5000
DELTA_LIST = [d]
#d/6, d/3, d, 2*d, 3*d
np.random.seed(19680801)
en_mc_delta = np.zeros((TIME_MC, len(DELTA_LIST)))
start = timeit.default_timer()
config_tmp = config
#
for iD, Delta in enumerate(DELTA_LIST):
t=0
while (t < TIME_MC):
for k in range(N):
RND = np.random.rand()
config_tmp[k,:] = config[k,:] + Delta*(np.random.random_sample((1,3))-.5)
en_o, en_n = ener(config), ener(config_tmp)
ACC = acc(en_n, en_o)
if (RND < ACC):
config[k,:] = config_tmp[k,:]
en_o = en_n
en_mc_delta[t][iD] = en_o
t += 1
stop = timeit.default_timer()
print('Time: ', stop-start)
following the Metropolis acceptance rule for the move proposed with config_tmp[k,:] = config[k,:] + Delta*(np.random.random_sample((1,3))-.5).
I made some attempts to check where the code gets stuck and I found that the function ener (partly because of the function dist) is extremely slow: it takes something like ~0.02 s to calculate the energy of a configuration, which means around ~6000 s to run the complete simulation (60 particles, 5000 proposed moves).
The outer for loop is just there to compute the results for different values of Delta.
Running this code with TIME_MC=60 gives you an idea of how slow it is (~218 s), while the same thing takes just a few seconds when implemented in C. I have read some other questions about how to speed up Python code, but I can't see how to apply that here.
EDIT:
I'm now almost sure that the problem is in the function dist: just calculating the PBC distance between two 3D vectors takes around ~0.0012 s, which adds up to crazy long times when you calculate it 5000*60 times.
Note that this is a partial answer continued from comments on the original question.
Here's an example of how "unrolling" numpy's function can improve performance when replaced with a more direct calculation of the distance. Note that this was not verified to be equivalent, especially concerning the rounding. The principle still applies, I think.
import random
import time
import numpy as np
L = 100
inv_L = 0.01
vec_length = 10
repetitions = 100000
def dist_np(A, B):
d = A - B
d -= L*np.around(d/L)
return np.sqrt(np.sum(d**2))
def dist_direct(A, B):
sum = 0
for i in range(0, len(A)):
diff = (A[0,i] - B[0,i])
diff -= L * int(diff * inv_L)
sum += diff * diff
return np.sqrt(sum)
vec1 = np.zeros((1,vec_length))
vec2 = np.zeros((1,vec_length))
for i in range(0, vec_length):
vec1[0,i] = random.random()
vec2[0,i] = random.random()
print("with numpy method:")
start = time.time()
for i in range(0, repetitions):
dist_np(vec1, vec2)
print("done in {}".format(time.time() - start))
print("with direct method:")
start = time.time()
for i in range(0, repetitions):
dist_direct(vec1, vec2)
print("done in {}".format(time.time() - start))
Output:
with numpy method:
done in 6.332799911499023
with direct method:
done in 1.0938000679016113
Play around with the average vector length and the repetitions to see where the sweet spot is. I expect the performance gain is not constant when varying these meta-parameters.
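If you want to go further, the whole energy calculation in ener can also be vectorised so that all pairwise PBC distances are computed at once. This is only a sketch (not benchmarked against the original code), reusing the L, d, eps and r_c values from the question as defaults:
import numpy as np

def ener_vectorized(config, L=100, d=1, eps=1, r_c=50):
    # pairwise displacement vectors with the minimum-image convention
    delta = config[:, None, :] - config[None, :, :]
    delta -= L * np.around(delta / L)
    r = np.sqrt((delta ** 2).sum(axis=-1))
    # keep each pair only once (upper triangle, excluding the diagonal)
    iu = np.triu_indices(len(config), k=1)
    r = r[iu]
    V = -eps * (d**6 / r**6 - d**6 / r_c**6)
    V[r > r_c] = 0.0
    V[r < d] = np.inf
    return V.sum()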
I'm new to PyCharm and so far my impression of the debugger is that it's marvellous! However, it behaves weirdly in my code and I cannot figure out what is going wrong.
If I set a breakpoint on these lines of code and then press "step over" or "step into my code", it runs until the end, ignoring all other upcoming breakpoints. Any idea what I am doing wrong? Breakpoints before these lines work perfectly fine.
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
my code
You need to pip install deap, efel and brian2 for the code to run.
# DEAP
# https://github.com/DEAP/deap/tree/54b83e2cc7a73be7657cb64b01033a31584b891d
# import array
import matplotlib.pyplot as plt
import pandas as pd
from scipy.io import loadmat
import random, numpy, os, efel, scipy, math, time, array, json
from deap import algorithms, base, creator, tools, benchmarks
from deap.benchmarks.tools import diversity, convergence # , hypervolume
from machine_hh_model_v02 import brian_hh_model
# parallel processing
# from scoop import futures
parallel_processing = "no"
# Starting values
channels = {"ENa": 65,
"EK": -90,
"El": -70,
"ECa": 120,
"gNa": 0.05,
"gK": 0.005,
"gL": 1e-4,
"gM": 8e-5,
"gCa": 1e-5}
# Boundaries
bounds = {"ENa": [50, 70],
"EK": [-100, -80],
"El": [-50, -100],
"ECa": [100, 120],
"gNa": [0, 1],
"gK": [0, 1],
"gL": [0, 1],
"gM": [0, 1],
"gCa": [0, 1]}
low, up = [x[0] for x in bounds.values()], [x[1] for x in bounds.values()]
# Set parameters
ext = 2.5 # external current stimulation [nA]
num_gen = 2 # number of generations
num_parents = 40 # number of parents
num_params = len(channels) # number of parameters to optimize
prob_crossover = 0.9 # probability that crossover takes place
# How to generate individuals
def initIndividual(container, sigma):
return container(random.gauss(x, sigma) for x in channels.values())
# CREATOR
# http://deap.readthedocs.io/en/master/tutorials/basic/part1.html
# The create() function takes at least two arguments, a name for the newly created class and a base class. Any
# subsequent argument becomes an attribute of the class. Neg. weights relate to minimizing, pos. weights to maximizing
# problems.
# -- define fitness problem (which params are min./max. problems with which weight)
creator.create("FitnessMulti", base.Fitness, weights=tuple(numpy.ones(num_params) * -1))
# Next we will create the class Individual, which will inherit the class list and contain our previously defined
# FitnessMulti class in its fitness attribute. Note that upon creation all our defined classes will be part of the
# creator container and can be called directly.
# -- associate fitness problem to individuals, that are going to be created
creator.create("Individual", list, fitness=creator.FitnessMulti)
# TOOLBOX
# http://deap.readthedocs.io/en/master/examples/ga_onemax.html
# http://deap.readthedocs.io/en/master/api/tools.html#module-deap.tools
# All the objects we will use on our way, an individual, the population, as well as all functions, operators, and
# arguments will be stored in a DEAP container called Toolbox. It contains two methods for adding and removing content,
# register() and unregister().
toolbox = base.Toolbox()
# The newly introduced register() method takes at least two arguments: an alias and a function. Toolbox.attr_bool(),
# when called, will draw a random integer between -100 and 100. Toolbox.attr_float(), when called, will draw a random
# floating point number.
# -- how to generate values for each individual
# toolbox.register("attr_float", random.uniform, -100, 100)
# toolbox.register("attr_float", lambda: [random.gauss(x, 5) for x in channels.values()])
# Our individuals will be generated using the function initRepeat(). Its first argument is a container class, the
# Individual one we defined in the previous section. This container will be filled using the method attr_float(),
# provided as second argument, and will contain 10 integers, as specified using the third argument. When called, the
# individual() method will thus return an individual initialized with what would be returned by calling the attr_float()
# method 100 times.
# -- how and how many individuals to create
# toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, num_params)
toolbox.register("individual", initIndividual, creator.Individual, sigma=1)
# Finally, the population() method uses the same paradigm.
# -- how and how many parents to create
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# LOAD EXPERIMENTAL DATA
# set path
# mainpath = r'C:\OwnCloud\Masterarbeit' # mainpath
# pathfit = os.path.join(mainpath, r'fitness_params') # fitness files
# os.chdir(os.path.join(mainpath, pathfit)) # change directory
# load fitness file
# xl = pd.ExcelFile('fitness.xlsx') # load excel file
# xl_mean = xl.parse("median") # load sheet 'mean' containing mean/median values
# xl_var = xl.parse("quartile-to-median distance") # load sheet 'std containing std/quantiles values
xl_mean = pd.read_json("median")
xl_var = pd.read_json("distance")
########################
############ SOMETHING IS WRONG HERE
########################
# EFEL: median
def efel_stats(features):
# get latency of first spike
if features['peak_time'].any():
features['first_peak_time'] = features['peak_time'][0]
del features['peak_time']
# get median
for key, val in features.items():
if val is None or numpy.isnan(val) or not val:
features[key] = 9999
else:
features[key] = scipy.nanmedian(val)
# get median
# if features['Spikecount'] == 0:
# for key, val in f#eatures.items( ):
# features[key] = 9999
# else:
# for key, val in features.items():
# features[key] = scipy.nanmedian(val)
return features
# ERROR FUNCTION
# The returned value must be iterable and of a length equal to the number of objectives (weights).
def error_function(external_current, indi, xl_mean=xl_mean, xl_var=xl_var):
# output variable
allerrors = []
# BRIAN: run model
stim_start, stim_duration = 500, 1000
voltage, time = brian_hh_model(1, 0, stim_start, stim_duration,
ENa=indi[0], EK=indi[1], El=indi[2], ECa=indi[3], gNa=indi[4], gK=indi[5],
gL=indi[6], gM=indi[7], gCa=indi[8])
# EFEL: extract features and get median
feature_names = ['Spikecount', 'peak_voltage', 'min_AHP_values', 'AP_begin_voltage', 'spike_half_width',
'voltage_base', 'steady_state_voltage_stimend',
'AP_begin_time', 'peak_time']
trace = {'T': time, 'V': voltage, 'stim_start': [stim_start], 'stim_end': [stim_start + stim_duration]}
features = efel.getFeatureValues([trace], feature_names)[0]
features = efel_stats(features)
# # ERROR FUNCTION: get error value
for feature, value in features.items():
# median for one external current (experimental data)
experiment_vals = xl_mean.loc[xl_mean['stimulus'] == external_current, feature].values[0]
error = float(abs(value - experiment_vals))
# my model can produce the same, less or more #spikes, peakvoltage, ...
if value == experiment_vals:
error = 0.
elif value < experiment_vals:
error = error / float(xl_var.loc[xl_var['stimulus'] == external_current, feature].values[0][0])
elif value > experiment_vals:
error = error / float(xl_var.loc[xl_var['stimulus'] == external_current, feature].values[0][1])
# append error value of this feature
allerrors.append(error)
return allerrors
# GENETIC OPERATORS
# Within DEAP there are two ways of using operators. We can 1) simply call a function from the tools module or
# 2) register it with its arguments in a toolbox, as we have already seen for our initialization methods. The second
# option allows us to easily switch between the operators if desired.
# see http://deap.readthedocs.io/en/master/api/tools.html#module-deap.tools
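# Illustration of the two usage styles described above (cxTwoPoint is only an example):
#   1) call the operator directly from tools:   child1, child2 = tools.cxTwoPoint(ind1, ind2)
#   2) register it in the toolbox under an alias and call the alias:
#        toolbox.register("mate", tools.cxTwoPoint)
#        child1, child2 = toolbox.mate(ind1, ind2)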
# Crossover
# Register the crossover function to the toolbox
# tools.cxOnePoint --> one point crossover
# tools.cxTwoPoint --> two-point crossover
toolbox.register("mate", tools.cxSimulatedBinary, eta=20.0)
# Mutation
# Register the mutation function to the toolbox
# tools.mutGaussian --> applies a gaussian mutation of mean mu and standard deviation sigma on the input individual. The indpb argument is the probability of each attribute to be mutated.
# tools.mutPolynomialBounded --> Polynomial mutation as implemented in original NSGA-II algorithm in C by Deb.
# toolbox.register("mutate", tools.mutPolynomialBounded, eta=20, low=low, up=up, indpb=1.0/num_params)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=2, indpb=0.9)
# Selection
# tools.sortNondominated(individuals, k, first_front_only=False) --> Sort the first k individuals into different nondomination levels using the “Fast Nondominated Sorting Approach” proposed by Deb et al., see [Deb2002].
# tools.sortLogNondominated(individuals, k, first_front_only=False) --> Sort individuals in pareto non-dominated fronts using the Generalized Reduced Run-Time Complexity Non-Dominated Sorting Algorithm presented by Fortin et al. (2013).
toolbox.register("select", tools.selNSGA2)
# Evaluation
# Register error function in the toolbox.
# The evaluation will be performed by calling the alias "evaluate".
# toolbox.register("evaluate", error_function, ext, model_vals)
# ALGORITHM
# Now that everything is ready, we can start to write our own algorithm. It is usually done in a main function.
if parallel_processing=="yes":
toolbox.register("map", futures.map)
def main():
# register statistics to the toolbox to maintain stats of the evolution
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "min"
###
### NSGA-II algorithm as in "Deb 2002: A Fast and Elitist Multiobjective Genetic Algorithm: NSGA-II"
### https://github.com/DEAP/deap/blob/master/examples/ga/nsga2.py
###
# create random parent population pop
pop = toolbox.population(n=num_parents)
# register error function
toolbox.register("evaluate", error_function, ext)
# evaluate parent population
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# assign crowding distance to the individuals, no actual selection is done
pop = toolbox.select(pop, len(pop))
# print logbook
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
print(logbook.stream)
# print(record)
# Begin the generational process
for gen in range(1, num_gen):
# increase the variance in my population
offspring = tools.selTournamentDCD(pop, len(pop))
# I have no idea why
offspring = [toolbox.clone(ind) for ind in offspring]
# crossover
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= prob_crossover:
toolbox.mate(ind1, ind2)
# mutation
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, num_parents)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
print(logbook.stream)
# print(record)
# print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))
return pop, logbook
# create pareto front (all non-dominated individuals that ever lived)
# pareto = tools.ParetoFront()
if __name__ == "__main__":
pop, logbook = main()
print(logbook)
print("POPULATION", pop)
for indi in pop:
stim_start, stim_duration = 500, 1000
voltage, time = brian_hh_model(1, 1, stim_start, stim_duration,
ENa=indi[0], EK=indi[1], El=indi[2], ECa=indi[3], gNa=indi[4], gK=indi[5],
gL=indi[6], gM=indi[7], gCa=indi[8])
print("INDIVIDUAL ", indi)
machine_hh_model_v02
# brian user guide
# http://brian2.readthedocs.io/en/2.0rc/user/index.html
from brian2 import *
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style()
def brian_hh_model(input_current, plotflag, stim_start, stim_duration, **parameter):
# C++ standalone mode
# At the beginning of the script, i.e. after the import statements, add:
# set_device('cpp_standalone', build_on_run=False)
# ********
# Handling units
# You can generate a physical quantity by multiplying a scalar or vector value with its physical unit:
# tau = 20*ms --> 20. ms
# rates = [10, 20, 30] * Hz --> [ 10. 20. 30.] Hz
# Most Brian functions will also complain about non-specified or incorrect units:
# G = NeuronGroup(10, 'dv/dt = -v/tau: volt', dt=0.5) --> "dt" has wrong dimensions, dimensions were (1) (s)
# Directly get the unitless value of a state variable by appending an underscore to the name
# print(rates_) --> [ 10. 20. 30.]
# ********
# Default parameters
# Set default parameters
default = {"ENa": 65, "EK": -90, "El": -70, "ECa": 120, "gNa": 0.05, "gK": 0.005, "gL": 1e-4, "gM": 8e-5,
"gCa": 1e-5}
# Use default parameter, if not defined as an input parameter
for key, val in default.items():
if key not in parameter:
parameter[key] = val
# Parameters
# Extract parameters that were given as an input (as a dictionary).
d = 96 * umetre # 79.8 umetre
area = d*d*3.141
Cm = 1*ufarad*cm**-2 * area # 1 ufarad
ENa = parameter["ENa"]*mV
EK = parameter["EK"]*mV
El = parameter["El"]*mV
ECa = parameter["ECa"]*mV
g_na = parameter["gNa"]*siemens*cm**-2 * area # 0.05 siemens
g_kd = parameter["gK"]*siemens*cm**-2 * area # 0.005 siemens
gl = parameter["gL"]*siemens*cm**-2 * area # 1e-4 siemens
gm = parameter["gM"]*siemens*cm**-2 * area # 8e-5 siemens
gCa = parameter["gCa"]*siemens*cm**-2 * area
tauMax = 4000 * ms
VT = -63*mV
# Equations
# Define both state variables and continuous-updates on these variables through differential equations. An Equation is
# a set of single lines in a string:
# 1. dx/dt = f : unit (differential equation)
# 2. x = f : unit (subexpression)
# 3. x : unit (parameter)
# There are three special units, "1" -> floating point number, "boolean" and "integer"
# Some special variables are defined: t, dt (time) and xi (white noise). Some other variable names (e.g. _pre) are
# forbidden. Flags -- dx/dt = f : unit (constant), parameter will not be changed during a run.
# The model
eqs = Equations('''
Im = gm * p * (v-EK) : amp
Ica = gCa * q*q * r * (v-ECa) : amp
dv/dt = (gl*(El-v) - g_na*(m*m*m)*h*(v-ENa) - g_kd*(n*n*n*n)*(v-EK) - Im - Ica + I)/Cm : volt
dm/dt = 0.32*(mV**-1)*(13.*mV-v+VT)/
(exp((13.*mV-v+VT)/(4.*mV))-1.)/ms*(1-m)-0.28*(mV**-1)*(v-VT-40.*mV)/
(exp((v-VT-40.*mV)/(5.*mV))-1.)/ms*m : 1
dn/dt = 0.032*(mV**-1)*(15.*mV-v+VT)/
(exp((15.*mV-v+VT)/(5.*mV))-1.)/ms*(1.-n)-.5*exp((10.*mV-v+VT)/(40.*mV))/ms*n : 1
dh/dt = 0.128*exp((17.*mV-v+VT)/(18.*mV))/ms*(1.-h)-4./(1+exp((40.*mV-v+VT)/(5.*mV)))/ms*h : 1
# K+ current
dp/dt = (1/(1+exp(-(v-VT+35.*mV)/(10.*mV))) - p) / (tauMax / (3.3 * exp((v - VT + 35.*mV)/(20.*mV) + exp(-(v - VT + 35.*mV)/(20.*mV))))) : 1
# Ca2+ current
dq/dt = 0.055*(mV**-1) * (-27.*mV - v) / (exp((-27.*mV - v) / (3.8*mV)) - 1.)/ms * (1.-q) - 0.94*exp((-75.*mV - v) / (17.*mV))/ms*q : 1
dr/dt = 0.000457 * exp((-13.*mV - v) / (50.*mV))/ms * (1.-r) - 0.0065 / (1. + exp((-15.*mV - v) / (28.*mV)))/ms*r : 1
I : amp
''')
# NeuronGroup
# The core of every simulation is a NeuronGroup, a group of neurons that share the same equations defining their
# properties. Minimum inputs are "number of neurons" and "model description in the form of equations". Threshold and
# refractoriness are only used for emitting spikes. To make a neuron non-excitable for a certain time period after a
# spike, the refractory keyword can be used.
# G = NeuronGroup(10, 'dv/dt = -v/tau : volt', threshold='v > -50*mV', reset='v = -70*mV', refractory=5*ms)
# Dictionary
# You can set multiple initial values at once using a dictionary and the Group.get_states() and Group.set_states()
# methods.
# initial_values = {'v': 1, 'tau': 10*ms}
# group.set_states(initial_values)
# group.v[:] --> 1)
# states = group.get_states()
# states['v'] --> 1)
group = NeuronGroup(1, eqs, method="exponential_euler")
group.v = El
group.I = 0*nA
# Recording
# Recording variables during a simulation is done with “monitor” objects. Specifically, spikes are recorded with
# SpikeMonitor, the time evolution of variables with StateMonitor and the firing rate of a population of neurons with
# PopulationRateMonitor. You can get all the stored values in a monitor with the Group.get_states().
# In this example, we record two variables v and u, and record from indices 0, 10 and 100 --> three neurons.
# G = NeuronGroup(...)
# M = StateMonitor(G, ('v', 'u'), record=[0, 10, 100])
# M.v[1] will return the values for the second recorded neuron which is the neuron with the index 10.
M = StateMonitor(group, 'v', record=0)
# Run the model
# The command run(100*ms) runs the simulation for 100 ms.
run(stim_start*ms)
group.I[0] = input_current*nA # current injection at one end
run(stim_duration*ms)
group.I = 0*nA
run(stim_start*ms)
# C++ standalone mode
# After the last run() call, call device.build() explicitly:
# device.build(directory='output', compile=True, run=True, debug=False)
# Timing
# profiling_summary(show=5) -- show the 5 objects that took the longest
# profiling_summary(show=2)
# Output
time = M.t/ms
voltage = M.v[0]/mV
# plot output
if plotflag:
plt.plot(time, voltage)
xlabel('Time [ms]')
ylabel('Membrane potential [mV]')
plt.show()
# For multiple calls
# device.reinit()
# device.activate()
return (voltage, time)
#brian_hh_model(1, 1, 500, 1000, ENa=65, EK=-90, El=-70, ECa=120, gNa=0.05, gK=0.005, gL=1e-4, gM=8e-5, gCa=1e-5)
Thanks a lot in advance!!
This is my Python code. I must use an embedded-atom potential for my cluster. The structure I imported is a bimetallic nanocluster of 1013 atoms. I need to run MD at a fixed temperature of 3000 K. The temperature starts at 0 K, jumps to 53573 K and then to 50656 K before giving the error message.
from ase.md.langevin import Langevin
from ase.io.trajectory import Trajectory
from ase import units
import numpy as np
from ase import Atoms
from ase.calculators.eam import EAM
from ase.io import write, read
# Kelvin (Set up the temperature)
T = 3000
#Import the global minimum structure
fName= 'globalminstr.xyz'
#Interatomic Potential
atoms = read(fName)
calc= EAM(potential='Zope-Ti-Al-2003.eam.alloy')
atoms.set_calculator(calc)
#Langevin dynamics
dyn = Langevin(atoms, 5 * units.fs, T * units.kB, 0.002)
def printenergy(a=atoms): # store a reference to atoms in the definition.
"""Function to print the potential, kinetic and total energy."""
epot = a.get_potential_energy() / len(a)
ekin = a.get_kinetic_energy() / len(a)
print('Energy per atom: Epot = %.3feV Ekin = %.3feV (T=%3.0fK) '
'Etot = %.3feV' % (epot, ekin, ekin / (1.5 * units.kB), epot + ekin))
dyn.attach(printenergy, interval=50)
# Saving the positions of all atoms after every 100th time step.
traj = Trajectory('moldyn.traj', 'w', atoms)
dyn.attach(traj.write, interval=50)
# Running dynamics
printenergy()
dyn.run(5000)
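For reference, a minimal sketch of how the ASE molecular-dynamics examples usually start from a thermalised state instead of zero velocities (assuming a recent ASE version in which Langevin and MaxwellBoltzmannDistribution accept the temperature_K keyword; the 2 fs timestep is an assumption, not taken from the original script):
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.md.langevin import Langevin
from ase import units

# give the atoms Maxwell-Boltzmann velocities at the target temperature
MaxwellBoltzmannDistribution(atoms, temperature_K=T)
dyn = Langevin(atoms, timestep=2 * units.fs, temperature_K=T, friction=0.002)
dyn.run(5000)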