Memory error - double recursion at fault? - python

I want to generate well-formed formulas in Python, but I am running into a memory error. I think I am accidentally doing some double recursion, but I am not certain. I am using Python 3.8.3 and am not really formally trained. Any tips are welcome. Here's my code:
from string import Template

vars = ['w', 'x', 'y', 'z', '$x', '$y', 's($x, $y)']
mxy = Template('m($x, $y)')

stage1 = []
for var1 in vars:
    for var2 in vars:
        stage1.append(mxy.substitute(x=var1, y=var2))

def extractFunctions(x):
    ans = []
    for formula in x:
        if '$' in formula:
            ans.append(formula)
    return ans

def stageSub(stageSet, iterations):
    currentStageSet = stageSet
    wffs = []
    newTemplates = extractFunctions(currentStageSet)
    for phormula in newTemplates:
        if ('$x' in phormula) and ('$y' not in phormula):
            for varx in currentStageSet:
                wffs.append(Template(phormula).substitute(x=varx))
        elif '$y' in phormula and '$x' not in phormula:
            for vary in currentStageSet:
                wffs.append(Template(phormula).substitute(y=vary))
        elif '$x' in phormula and '$y' in phormula:
            for varx in currentStageSet:
                for vary in currentStageSet:
                    wffs.append(Template(phormula).substitute(x=varx, y=vary))
    iterations = iterations - 1
    print(iterations)
    if iterations == 0:
        return wffs
    if iterations > 0:
        print('this happened', iterations)
        return stageSub(wffs, iterations)

stage2 = stageSub(stage1, 2)
print(len(stage2))
If you run stageSub(stage1, 1) (so just 1 iteration) it does actually halt.
Here is the error and traceback:
1
this happened 1
Traceback (most recent call last):
File "d:\Python\ringSingleAxiom\generatingWffs.py", line 48, in <module>
stage2 = stageSub(stage1, 2)
File "d:\Python\ringSingleAxiom\generatingWffs.py", line 46, in stageSub
return stageSub(wffs, iterations)
File "d:\Python\ringSingleAxiom\generatingWffs.py", line 38, in stageSub
wffs.append(Template(phormula).substitute(x = varx, y = vary))
MemoryError
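For what it's worth, the growth here looks combinatorial rather than a double-recursion bug: every template containing both $x and $y expands to len(currentStageSet)**2 new strings per pass. A rough counting sketch (a hypothetical helper that mirrors the substitution logic above, without building any strings):
def estimate_next_size(formulas):
    # Count how many strings the next stageSub pass would append.
    n = len(formulas)
    total = 0
    for f in formulas:
        has_x, has_y = '$x' in f, '$y' in f
        if has_x and has_y:
            total += n * n   # double substitution: quadratic blow-up
        elif has_x or has_y:
            total += n       # single substitution: linear growth
    return total
By this count, the 49 formulas in stage1 already produce 36,897 strings after one pass (15 double-placeholder templates at 49**2 each, plus 18 single-placeholder templates at 49 each), so the second pass would need on the order of a billion substitutions. That, not the recursion depth, is what exhausts memory: the function only recurses once per iteration.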

Related

KeyError in a recursive function in python

I am defining a main_meal_model function that lists a lunch plan and takes in a day value. I have previously defined a random_main_meal_dataset function that creates a database of all the foods to be eaten at lunch, which takes in the same day value. For some reason I can't omit a hard-coded day value in the random_main_meal_dataset function; otherwise it gives either a KeyError or a NameError.
I have tried every day of the week, and everything works as long as my hard-coded value matches the day value I send when calling the main_meal_model function, but as soon as I try to make this correspondence automatic it raises KeyError: 'Monday' or NameError: name 'day' is not defined at day_data = data[day].
Full error messages:
Traceback (most recent call last):
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 104, in <module>
print(main_meal_model('Monday', 70, 2000, data, 'Lunch'))
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 72, in main_meal_model
day_data = data[day]
KeyError: 'Monday'
or
Traceback (most recent call last):
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 103, in <module>
print(main_meal_model('Monday', 70, 2000, data, 'Lunch'))
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 71, in main_meal_model
day_data = data[day]
NameError: name 'day' is not defined
Here is the part of the code I suppose is causing the problem
https://pastebin.com/w8XQ8rTn
split_values_day = np.linspace(0, len(data), 8).astype(int)
split_values_day[-1] = split_values_day[-1] - 1

def random_main_meal_dataset(data, day):
    data = data[data['meal'].str.contains('Main Dishes|Condiments|Side Dishes', na=False)]
    frac_data = data.sample(frac=1).reset_index().drop('index', axis=1)
    day_data = []
    for s in range(len(split_values_day) - 1):
        day_data.append(
            frac_data.loc[split_values_day[s]:split_values_day[s + 1]])
    return dict(zip(day, day_data))

# define a lunch / dinner model that takes in prob, kg, calories, data and makes a lunch / dinner plan for the day
def main_meal_model(day, kg, calories, data, meal):
    data = random_main_meal_dataset(data, day=['Monday'])
    G = extract_gram(build_nutritional_values(kg, calories))
    E = G['Carbohydrates Grams']
    F = G['Fat Grams']
    P = G['Protein Grams']
    day_data = data[day]
    day_data = day_data[day_data.calories != 0]
    food = day_data.name.tolist()
    c = day_data.calories.tolist()
    x = pulp.LpVariable.dicts(
        "x", indices=food, lowBound=0, upBound=1.5, cat='Continuous', indexStart=[])
    e = day_data.carbohydrate.tolist()
    f = day_data.total_fat.tolist()
    p = day_data.protein.tolist()
    div_meal = meal_split[meal]
    prob = pulp.LpProblem("Diet", LpMinimize)
    prob += pulp.lpSum([x[food[i]] * c[i] for i in range(len(food))])
    prob += pulp.lpSum([x[food[i]] * e[i] for i in range(len(x))]) >= E * 0.35
    prob += pulp.lpSum([x[food[i]] * f[i] for i in range(len(x))]) >= F * 0.35
    prob += pulp.lpSum([x[food[i]] * p[i] for i in range(len(x))]) >= P * 0.35
    prob.solve(PULP_CBC_CMD(msg=0))
    variables = []
    values = []
    for v in prob.variables():
        variable = v.name
        value = v.varValue
        variables.append(variable)
        values.append(value)
    values = np.array(values).round(2).astype(float)
    sol = pd.DataFrame(np.array([food, values]).T,
                       columns=['Food', 'Quantity'])
    sol['Quantity'] = sol.Quantity.astype(float)
    sol = sol[sol['Quantity'] != 0.0]
    sol.Quantity = sol.Quantity * 100
    sol = sol.rename(columns={'Quantity': 'Quantity (g)'})
    return sol

print(main_meal_model('Monday', 70, 2000, data, 'Lunch'))
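It's hard to be certain without the full pastebin, but one thing that would produce exactly KeyError: 'Monday' is the dict(zip(day, day_data)) line: zip iterates over day, so if the hard-coded day=['Monday'] is replaced with the bare string day=day, zip pairs the individual characters 'M', 'o', 'n', ... with the data frames, and the later data[day] lookup misses. A minimal sketch of passing the caller's day through as a one-element list (same function names as the question):
def main_meal_model(day, kg, calories, data, meal):
    # Wrap the requested day in a list so dict(zip(day, day_data))
    # keys the dict by the whole string, not by its characters.
    data = random_main_meal_dataset(data, day=[day])
    day_data = data[day]
    # ... rest of the function unchanged ...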

Stable Baselines3 Parameter Logits has invalid values

I am trying to run stable baselines on sports games but keep getting the following error
Traceback (most recent call last):
File "/home/dev/Desktop/Projects/AI/NBA2/stable_baselines_run.py", line 35, in <module>
model.learn(total_timesteps=10000)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/a2c/a2c.py", line 189, in learn
return super(A2C, self).learn(
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 234, in learn
continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 166, in collect_rollouts
actions, values, log_probs = self.policy.forward(obs_tensor)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/policies.py", line 566, in forward
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/policies.py", line 607, in _get_action_dist_from_latent
return self.action_dist.proba_distribution(action_logits=mean_actions)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/distributions.py", line 326, in proba_distribution
self.distribution = [Categorical(logits=split) for split in th.split(action_logits, tuple(self.action_dims), dim=1)]
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/distributions.py", line 326, in <listcomp>
self.distribution = [Categorical(logits=split) for split in th.split(action_logits, tuple(self.action_dims), dim=1)]
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/torch/distributions/categorical.py", line 64, in __init__
super(Categorical, self).__init__(batch_shape, validate_args=validate_args)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/torch/distributions/distribution.py", line 53, in __init__
raise ValueError("The parameter {} has invalid values".format(param))
ValueError: The parameter logits has invalid values
I have removed all NaNs (replaced them with 0) and normalised the data so that everything is between 0 and 1, but I still cannot find the invalid value.
Here is my custom environment:
import gym
from gym import spaces
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

INITIAL_BALANCE = 100

class BettingEnv(gym.Env):
    # metadata = {'render.modes': ['human']}

    def __init__(self, df, results, INITIAL_BALANCE=100):
        self.df = df
        self.results = results
        self.initial_balance = INITIAL_BALANCE
        self.balance = INITIAL_BALANCE
        self.profit = 0
        self.starting_point = np.random.randint(len(self.df) - len(self.df) * 0.1)  # Start anywhere but in the end 10%
        self.timestep = 0
        self.games_won = 0
        self.game_bets = []
        self.game_number = self.starting_point + self.timestep
        self.action_space = spaces.MultiDiscrete([3, 10])
        self.observation_space = spaces.Box(
            low = self.df.min().min(),   # Lowest value found in df
            high = self.df.max().max(),  # Search the df for the max value (this may change with different data)
            shape = (df.shape[1],),      # shape of one row of the df
            # dtype = np.float16
        )
        print('First ob: ', self.df.loc[self.game_number])

    def _next_obs(self):
        print('Get next obs')
        # Get next game row
        obs = self.df.loc[self.timestep]
        print('next obs success')
        return obs

    def _print_bet_csv(self):
        # Create bet_info_df
        bet_info_df = pd.DataFrame(self.game_bets)
        results_df = self.results.reset_index()
        # Merge dfs
        self.merged_df = pd.merge(bet_info_df, results_df, on=['index', 'Home Odds', 'Vis Odds', 'Home Win'])
        self.merged_df.set_index('index', inplace=True)
        # Print df
        self.merged_df.to_csv('./temp/MLB Bot Betting DF.csv', index=True)

    def _print_bet_chart(self):
        x_axis = [i for i in range(self.timestep)]
        plt.plot(x_axis, self.merged_df['Bankroll'])
        plt.title('Bankroll')
        plt.ylabel('Dollars')
        plt.xlabel('Games')
        plt.savefig('./temp/NBA_Bot_Betting.png')

    def _take_action(self, action):
        print('Start action')
        # Init
        action_type = action[0]
        amount = action[1] + 1
        self.game_number = self.starting_point + self.timestep
        game_result = self.results['Home Win'][self.game_number]
        odds = 0
        bet_on = 'NA'
        # VISITOR BET
        if action_type == 0:
            bet_on = 'False'
            # Find vis odds
            odds = self.results['Vis Odds'][self.game_number]
            if odds == 0:
                amount = 0
            # Place bet
            self.balance -= amount
            # Check if win
            if game_result == False:
                self.balance += round(amount * odds, 2)
                self.games_won += 1
        # NO BET
        if action_type == 1:
            bet_on = 'No bet'
        # HOME BET
        if action_type == 2:
            bet_on = 'True'
            # Find home odds
            odds = self.results['Home Odds'][self.game_number]
            if odds == 0:
                amount = 0
            # Place bet
            self.balance -= amount
            # Check win
            if game_result == True:
                self.balance += round(amount * odds, 2)
                self.games_won += 1
        self.balance = round(self.balance, 2)
        bet_info = {
            'index': self.game_number,
            'Home Odds': self.results['Home Odds'][self.game_number],
            'Vis Odds': self.results['Vis Odds'][self.game_number],
            'Bet on': bet_on,
            'Home Win': game_result,
            'Amount': amount,
            'Odds': odds,
            'Bankroll': self.balance
        }
        self.game_bets.append(bet_info)
        print('Finish action')
        return bet_info

    def step(self, action):
        print('Start step')
        info = self._take_action(action)
        self.timestep += 1
        # Reward
        gamma = (self.timestep / len(self.df))  # time discount
        self.profit = self.balance - self.initial_balance
        reward = self.profit * gamma
        # Done
        done = self.balance <= 0
        # Obs
        obs = self._next_obs()
        # If last game, print results and start from beginning
        # test the printing of csv
        if self.timestep == 2500:
            self._print_bet_csv()
            self._print_bet_chart()
            self.game_bets = []
            print('Starting point: ', self.starting_point)
            print('Chart printed')
        print('Finished Step')
        return obs, reward, done, info

    def reset(self):
        self.initial_balance = INITIAL_BALANCE
        self.balance = INITIAL_BALANCE
        self.profit = 0
        self.starting_point = np.random.randint(len(self.df) - len(self.df) * 0.1)  # Start anywhere but in the end 10%
        self.timestep = 0
        self.games_won = 0
        self.game_bets = []

    def render(self, mode='human', close=False):
        print('Timestep: ', self.timestep)
        print('Profit: ', self.profit)
        print('Games Won: ', self.games_won)
        print('Balance: ', self.balance)
Here is the file I run the environment from:
import time
start_time = time.time()
import os
import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3 import PPO, A2C
from Betting_env import BettingEnv
data = pd.read_csv('Scraping/Games and Stats.csv')
df = data.drop(['Date', 'Home', 'Visitor', 'Home PTS', 'Vis PTS', 'Home Points Dif', 'Home Win'], axis=1)
df = df.astype(float)
normed = (df-df.min())/(df.max()-df.min())
normed = normed.round(10)
env = DummyVecEnv([lambda: BettingEnv(normed, data, INITIAL_BALANCE=100)])
model = A2C('MlpPolicy', env, verbose=0)
model.learn(total_timesteps=10000)
save_path = os.path.join('Training', 'Saved Models', 'Betting_Model_A2C')
model.save(save_path)
end_time = time.time()
total_time = end_time - start_time
print(round(total_time / 60 / 60), ' Hours ', round(total_time / 60), ' Minutes')
UPDATE:
After using the VecCheckNan() wrapper and check_env() function from stable_baselines3, I get the following error messages.
VecCheckNan() gives:
Traceback (most recent call last):
File "/home/dev/Desktop/Projects/AI/NBA2/stable_baselines_run.py", line 51, in <module>
model.learn(total_timesteps=10000)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/ppo/ppo.py", line 299, in learn
return super(PPO, self).learn(
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 226, in learn
total_timesteps, callback = self._setup_learn(
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/base_class.py", line 420, in _setup_learn
self._last_obs = self.env.reset() # pytype: disable=annotation-type-mismatch
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/vec_env/vec_check_nan.py", line 46, in reset
self._check_val(async_step=False, observations=observations)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/vec_env/vec_check_nan.py", line 84, in _check_val
raise ValueError(msg)
ValueError: found nan in observations.
Originated from the environment observation (at reset)
I have printed out the first observations and there are no NaNs in there.
check_env() gives:
Traceback (most recent call last):
File "/home/dev/Desktop/Projects/AI/NBA2/stable_baselines_run.py", line 42, in <module>
check_env(env)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/env_checker.py", line 245, in check_env
assert isinstance(
AssertionError: Your environment must inherit from the gym.Env class cf https://github.com/openai/gym/blob/master/gym/core.py
I have gym.Env in my Betting_Env class.
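One hedged note on that assertion: check_env() expects a plain gym.Env, and in the run file the environment is wrapped in DummyVecEnv before anything else sees it. If the wrapped object is what was passed to check_env, that alone would trigger this exact error. A sketch:
from stable_baselines3.common.env_checker import check_env
from Betting_env import BettingEnv

# Check the raw environment, not the DummyVecEnv wrapper.
raw_env = BettingEnv(normed, data, INITIAL_BALANCE=100)
check_env(raw_env)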
I had the same error. In my case the problem was the custom reset function of my environment. I found that in the BaseAlgorithm class in base_class.py of stable_baselines3, at line 429 (in my case), is this line of code:
self._last_obs = self.env.reset()  # pytype: disable=annotation-type-mismatch
where my environment was supposed to return an observation, but mine returned None.
So you could try returning an observation in your reset function.
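For the BettingEnv above, a minimal sketch of such a reset (assuming the first observation should be the row at the new starting point, as in __init__):
def reset(self):
    self.initial_balance = INITIAL_BALANCE
    self.balance = INITIAL_BALANCE
    self.profit = 0
    # int() because randint needs an integer bound
    self.starting_point = np.random.randint(int(len(self.df) - len(self.df) * 0.1))
    self.timestep = 0
    self.games_won = 0
    self.game_bets = []
    self.game_number = self.starting_point + self.timestep
    # Gym expects reset() to return the first observation, not None.
    return self.df.loc[self.game_number].values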
Although you are replacing NaNs and ensuring that there are none, the error is most probably due to some of the values being np.inf or -np.inf.
Try:
df = df.replace([np.inf, -np.inf], np.nan)
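That is, something like this before normalising (a sketch; note, as an extra caveat, that a constant column, where max equals min, divides by zero during normalisation and also yields NaN):
# Turn infinities into NaN, then fill, so the normalised frame
# contains no inf/NaN observations.
df = df.replace([np.inf, -np.inf], np.nan).fillna(0)
normed = (df - df.min()) / (df.max() - df.min())  # beware columns where max == min
normed = normed.round(10)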
Hope it works!
You have commented this out:
# dtype = np.float16
So you may already have tried this. But it is possible your dataframe contains a float32 and your observation space is expecting the wrong dtype. Try changing to:
dtype = np.float32
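For instance, with the same Box arguments as in the question's __init__ (a sketch):
self.observation_space = spaces.Box(
    low=self.df.min().min(),
    high=self.df.max().max(),
    shape=(df.shape[1],),
    dtype=np.float32,  # match the dtype of the rows the env actually returns
)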

NoneType error while finding minima with Scipy Optimize

I am trying to find the global minimum of a function using scipy.optimize methods and keep running into NoneType issues. I have tried multiple algorithms, including differential_evolution, shgo, and brute, but keep running into errors.
Here is the setup:
def sizing_trade_study(ranges, payload):
    with open("config.yml", "r") as yml:
        cfg = yaml.load(yml)
    first_int = True
    km = []
    for range in ranges:
        km.append(range * 1000)
    print(km)
    params = (km, payload)
    if first_int:
        x0 = [float(cfg['design_variables']['initial_guess']['prop_radius']),
              float(cfg['design_variables']['initial_guess']['speed']),
              float(cfg['design_variables']['initial_guess']['battery_mass']),
              float(cfg['design_variables']['initial_guess']['motor_mass']),
              float(cfg['design_variables']['initial_guess']['mtow'])]
        lb = [float(cfg['design_variables']['lower_bound']['prop_radius']),
              float(cfg['design_variables']['lower_bound']['speed']),
              float(cfg['design_variables']['lower_bound']['battery_mass']),
              float(cfg['design_variables']['lower_bound']['motor_mass']),
              float(cfg['design_variables']['lower_bound']['mtow'])]  # Min cruise at 1.3 * VStall
        ub = [float(cfg['design_variables']['upper_bound']['prop_radius']),
              float(cfg['design_variables']['upper_bound']['speed']),
              float(cfg['design_variables']['upper_bound']['battery_mass']),
              float(cfg['design_variables']['upper_bound']['motor_mass']),
              float(cfg['design_variables']['upper_bound']['mtow'])]
        # bounds = (slice(lb[0], ub[0]), slice(lb[1], ub[1]), slice(lb[2], ub[2]), slice(lb[3], ub[3]), slice(lb[4], ub[4]))
        # bounds = [(lb[0], ub[0]), (lb[1], ub[1]), (lb[2], ub[2]), (lb[3], ub[3]), (lb[4], ub[4])]
        bounds = optimize.Bounds(lb, ub)
        result = optimize.differential_evolution(objective_function, bounds, args=(params,))
        print(result)

def objective_function(x, *params):
    global trials
    trials = trials + 1
    print(trials)
    performance.compute_performance(x, params[0][0], params[0][1])
Here is the function I am trying to optimize:
import yaml
import simple_mission
import reserve_mission
import config_weight

def compute_performance(x, range, payload):
    rprop = x[0]
    speed = x[1]
    battery = x[2]
    motors = x[3]
    mtow = x[4]
    w = mtow * 9.8
    with open("config.yml", "r") as yml:
        cfg = yaml.load(yml)
    bat_energy_density = int(cfg['performance']['bat_energy_density'])
    motor_power_density = int(cfg['performance']['motor_power_density'])
    discharge_depth = float(cfg['performance']['discharge_depth'])
    e_nominal, flight_time, hover_output, cruise_output = simple_mission.run_simple_mission(rprop, speed, w, range)
    reserve_e = reserve_mission.reserve_mission(rprop, speed, w, range)
    mass = config_weight.config_weight(battery, motors, rprop, w, mtow, hover_output, cruise_output, payload)
    batt = reserve_e - battery * bat_energy_density * discharge_depth / 1000
    motor = hover_output.pow_hover / 1000 - motors * motor_power_density
    weight = mass - w
    return batt + motor + weight
The failure doesn't happen immediately but after a couple of runs of the optimizer function. For example, with differential_evolution, it always happens after the 75th trial.
Here is the stacktrace:
69
70
71
72
73
74
75
76
Traceback (most recent call last):
File "sizing_trade_study.py", line 62, in <module>
sizing_trade_study(args.ranges, args.payload)
File "sizing_trade_study.py", line 42, in sizing_trade_study
result = optimize.differential_evolution(objective_function, bounds, args=(params,))
File "/usr/local/anaconda3/envs/simple_mission/lib/python3.7/site-packages/scipy/optimize/_differentialevolution.py", line 308, in differential_evolution
ret = solver.solve()
File "/usr/local/anaconda3/envs/simple_mission/lib/python3.7/site-packages/scipy/optimize/_differentialevolution.py", line 759, in solve
next(self)
File "/usr/local/anaconda3/envs/simple_mission/lib/python3.7/site-packages/scipy/optimize/_differentialevolution.py", line 1082, in __next__
self.constraint_violation[candidate]):
File "/usr/local/anaconda3/envs/simple_mission/lib/python3.7/site-packages/scipy/optimize/_differentialevolution.py", line 1008, in _accept_trial
return energy_trial <= energy_orig
TypeError: '>=' not supported between instances of 'float' and 'NoneType'
Any help is greatly appreciated!
The issue is with one of the retrieved values from your bounds or objective_function, which in turn is being passed in as a NoneType to energy_orig within differential_evolution()
Source: https://github.com/scipy/scipy/blob/master/scipy/optimize/_differentialevolution.py
if feasible_orig and feasible_trial:
    return energy_trial <= energy_orig
You should make sure that no key value from your config.yml or the other function parameters is empty. It's hard to tell which one is the problem. However, you could wrap the call in a try/except so that it doesn't stop on the 75th trial in the meantime.
try:
    result = optimize.differential_evolution(
        objective_function,
        bounds,
        args=(params,),
    )
except TypeError:
    import pdb; pdb.set_trace()
I've set a pdb breakpoint, which will let you inspect the values of each parameter; feel free to swap it out for a pass if you need to continue swiftly.
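Separately, and hedged since only fragments are posted: objective_function above calls performance.compute_performance(...) but never returns its value, so every trial evaluates to None, which would produce exactly this comparison failure inside _accept_trial. A minimal sketch of that fix:
def objective_function(x, *params):
    global trials
    trials = trials + 1
    print(trials)
    # Return the objective value; without the return, the function
    # yields None and SciPy's energy comparison fails.
    return performance.compute_performance(x, params[0][0], params[0][1])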

How do I raise the memory limit in python?

I have written a Python (2.7) script, but it uses a lot of memory, so I get an out-of-memory error. Is it possible to raise the memory limit?
My code (or the github):
from itertools import combinations
import numpy

# Find the unused members and put this in a other group
def findMembers(listIn, listMembers):
    lengthlist2 = (len(listMembers) - len(listIn[0]))
    group2 = [0] * lengthlist2  # making the other groups based on the length of the first group
    for i in listIn:
        wichRow = 0
        for x in listMembers:
            if not (x in i):
                group2[wichRow] = x
                wichRow += 1
    listIn.append(group2)
    return listIn

# you give a list of members and the numbers of groups
# you get back all the possibilities of combinations
def findCombinations(listMembers, numbersOfGroups):
    groupTemp = []  # list needed to save correctly all the combinations
    group = []      # list needed for keep it simple
    newGroup = []   # list that will be returned
    for listPossibilities in combinations(listMembers, (len(listMembers) / numbersOfGroups)):
        groupTemp.append(list(listPossibilities))
        group.append(groupTemp)  # saving all the possibilities
        groupTemp = []
    for k in group:
        # place the unused members in group2
        k = (findMembers(k, listMembers))
        if numbersOfGroups > 2:
            groupTemp = []
            groupTemp = findCombinations(k[1], numbersOfGroups - 1)
            for i in groupTemp:
                listTemp = []
                listTemp.append(k[0])
                listTemp.extend(i)
                newGroup.append(listTemp)
        else:
            newGroup = group
    return newGroup

# Calculate the happiness of the group
def findHappiness(tabel, listIn):
    happiness = 0
    for i in listIn:
        for j in i:
            for k in i:
                happiness += tabel[j][k]
    return happiness

def buildTabel(members):  # build a random survey
    tabel = numpy.random.random((members, members))
    return tabel

def calculateHappiness(group):
    print "Finding all the happiness: "
    maxhappiness = 0
    i = 0
    for x in group:
        happiness = findHappiness(tabel, x)
        if happiness > maxhappiness:
            maxhappiness = happiness
            y = x
        progress = int(round((((i) * 1.0 / (len(group))) * 100.0)))
        update_progress(progress)
        i += 1
    print "\n Best solution: ", y, " with: ", maxhappiness, " happiness"

def update_progress(progress):
    print '\r[{0}] {1}%'.format('#' * (progress / 5), progress),

if __name__ == "__main__":
    members = 24  # members of the group
    numbersOfGroups = 3
    tabel = buildTabel(members)  # preferences will be stored here
    listMembers = (range(members))  # members of the group that need to be divided
    print "Searching all the combinations..."
    group = findCombinations(listMembers, numbersOfGroups)  # find all the combinations (recursive)
    print len(group), " combinations"
    calculateHappiness(group)  # calculate the happiest group and print
the error:
Searching all the combinations...
Traceback (most recent call last):
File "main.py", line 75, in <module>
calculateHappiness(group) #calculate the most happiest group and print
File "main.py", line 38, in findCombinations
newGroup = group
MemoryError
I'm using Windows 10 64-bit with 6 GB of RAM. Is it possible to use virtual RAM or disk space from my hard drive?
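Rather than raising a memory limit, one option is to never materialise the full list of combinations at all: generate one grouping at a time and keep only the best so far. A sketch (a hypothetical partitions generator standing in for findCombinations; it assumes the group size divides the number of members, and reuses findHappiness and tabel from the question):
from itertools import combinations

def partitions(members, group_size):
    # Lazily yield every way to split `members` into groups of `group_size`.
    if len(members) <= group_size:
        yield [list(members)]
        return
    first, rest = members[0], members[1:]
    # Pin `first` into the first group so each partition is produced once.
    for others in combinations(rest, group_size - 1):
        remaining = [m for m in rest if m not in others]
        for tail in partitions(remaining, group_size):
            yield [[first] + list(others)] + tail

best, best_happiness = None, 0
for grouping in partitions(list(range(members)), members // numbersOfGroups):
    happiness = findHappiness(tabel, grouping)
    if happiness > best_happiness:
        best, best_happiness = grouping, happiness
This keeps memory use flat no matter how many groupings exist, at the cost of not knowing the total count up front.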

SciPy: TypeError when using scipy.optimize.minimize

I'm encountering a vague error when attempting to minimise a function using scipy.optimize.minimize. The error I get is,
Traceback (most recent call last):
File "general_fd.py", line 103, in <module>:
a_tmp[index] = minimize(iF.hamiltonian,0,(x,u_last,m_tmp,dt,dx,k*dt,index),tol=1e-3)
TypeError: float() argument must be a string or number
where
def hamiltonian(alphas, x_array, u_array, m_array, dt, dx, time, index):
    sigma2 = Sigma_local(time, x_array[index], alphas, m_array[index])**2
    movement = f_global(time, x_array[index], alphas)
    L_var = L_global(time, x_array[index], alphas, m_array[index])
    dx2 = dx**2
    if index == 0:
        sigma2R = Sigma_local(time, x_array[index+1], alphas, m_array[index])**2
        tmp = u_array[0]*(abs(movement)/dx - sigma2/dx2) + u_array[1]*(sigma2R/dx2 - abs(movement)/dx) + L_var
    elif index == x_array.size - 1:
        sigma2L = Sigma_local(time, x_array[index-1], alphas, m_array[index])**2
        tmp = u_array[-1]*(abs(movement)/dx - sigma2/dx2) + u_array[-2]*(sigma2L/dx2 - abs(movement)/dx) + L_var
    else:
        sigma2L = Sigma_local(time, x_array[index-1], alphas, m_array[index])**2
        sigma2R = Sigma_local(time, x_array[index+1], alphas, m_array[index])**2
        tmp = u_array[index]*(abs(movement)/dx - sigma2/dx2) + u_array[index+1]*(sigma2R/(2*dx2) + min(movement, 0)/dx) + u_array[index-1]*(sigma2L/(2*dx2) - max(movement, 0)/dx) + L_var
    return tmp[0]

def Sigma_local(time, x, a, m):
    return 0*x

def f_global(time, x_array, a_array):
    return 0.1*a_array*x_array

def L_global(time, x_array, a_array, m_array):  # general cost
    return a_array + np.sqrt(x_array) + a_array**2
The above code is pretty ugly, as I've modified it somewhat in order to try to find the error; my apologies. I've found that minimize runs a few trial evaluations of hamiltonian before throwing the error at the return statement in hamiltonian. I've used minimize on other, similar functions without this error occurring, and I'm honestly quite stumped. Any help would be appreciated.
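Hedged, since line 103 is only visible in the traceback: scipy.optimize.minimize returns an OptimizeResult object, not a number. If a_tmp is a NumPy float array, the assignment a_tmp[index] = minimize(...) forces NumPy to call float() on that object, which raises exactly this TypeError. A sketch of the likely fix, mirroring the call from the traceback (a_tmp, x, u_last, m_tmp, dt, dx, k, and index are the question's own names):
# minimize() returns an OptimizeResult; pull out the field you want
# before storing it in a float array.
res = minimize(iF.hamiltonian, 0, (x, u_last, m_tmp, dt, dx, k*dt, index), tol=1e-3)
a_tmp[index] = res.x[0]  # the minimising alpha (use res.fun for the minimum value)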
