I am trying to run Stable Baselines3 on sports games but keep getting the following error:
Traceback (most recent call last):
File "/home/dev/Desktop/Projects/AI/NBA2/stable_baselines_run.py", line 35, in <module>
model.learn(total_timesteps=10000)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/a2c/a2c.py", line 189, in learn
return super(A2C, self).learn(
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 234, in learn
continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 166, in collect_rollouts
actions, values, log_probs = self.policy.forward(obs_tensor)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/policies.py", line 566, in forward
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/policies.py", line 607, in _get_action_dist_from_latent
return self.action_dist.proba_distribution(action_logits=mean_actions)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/distributions.py", line 326, in proba_distribution
self.distribution = [Categorical(logits=split) for split in th.split(action_logits, tuple(self.action_dims), dim=1)]
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/distributions.py", line 326, in <listcomp>
self.distribution = [Categorical(logits=split) for split in th.split(action_logits, tuple(self.action_dims), dim=1)]
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/torch/distributions/categorical.py", line 64, in __init__
super(Categorical, self).__init__(batch_shape, validate_args=validate_args)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/torch/distributions/distribution.py", line 53, in __init__
raise ValueError("The parameter {} has invalid values".format(param))
ValueError: The parameter logits has invalid values
I have removed all NaNs (replaced them with 0) and normalised the data so that every value is between 0 and 1, but I still cannot find the invalid value.
Here is my custom environment:
import gym
from gym import spaces
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
INITIAL_BALANCE = 100
class BettingEnv(gym.Env):
    # metadata = {'render.modes': ['human']}

    def __init__(self, df, results, INITIAL_BALANCE=100):
        self.df = df
        self.results = results
        self.initial_balance = INITIAL_BALANCE
        self.balance = INITIAL_BALANCE
        self.profit = 0
        self.starting_point = np.random.randint(len(self.df) - len(self.df) * 0.1)  # Start anywhere but in the end 10%
        self.timestep = 0
        self.games_won = 0
        self.game_bets = []
        self.game_number = self.starting_point + self.timestep
        self.action_space = spaces.MultiDiscrete([3, 10])
        self.observation_space = spaces.Box(
            low=self.df.min().min(),    # Lowest value found in df
            high=self.df.max().max(),   # Search the df for the max value (this may change with different data)
            shape=(df.shape[1],),       # shape of one row of the df
            # dtype = np.float16
        )
        print('First ob: ', self.df.loc[self.game_number])

    def _next_obs(self):
        print('Get next obs')
        # Get next game row
        obs = self.df.loc[self.timestep]
        print('next obs success')
        return obs

    def _print_bet_csv(self):
        # Create bet_info_df
        bet_info_df = pd.DataFrame(self.game_bets)
        results_df = self.results.reset_index()
        # Merge dfs
        self.merged_df = pd.merge(bet_info_df, results_df, on=['index', 'Home Odds', 'Vis Odds', 'Home Win'])
        self.merged_df.set_index('index', inplace=True)
        # Print df
        self.merged_df.to_csv('./temp/MLB Bot Betting DF.csv', index=True)

    def _print_bet_chart(self):
        x_axis = [i for i in range(self.timestep)]
        plt.plot(x_axis, self.merged_df['Bankroll'])
        plt.title('Bankroll')
        plt.ylabel('Dollars')
        plt.xlabel('Games')
        plt.savefig('./temp/NBA_Bot_Betting.png')

    def _take_action(self, action):
        print('Start action')
        # Init
        action_type = action[0]
        amount = action[1] + 1
        self.game_number = self.starting_point + self.timestep
        game_result = self.results['Home Win'][self.game_number]
        odds = 0
        bet_on = 'NA'
        # VISITOR BET
        if action_type == 0:
            bet_on = 'False'
            # Find vis odds
            odds = self.results['Vis Odds'][self.game_number]
            if odds == 0:
                amount = 0
            # Place bet
            self.balance -= amount
            # Check if win
            if game_result == False:
                self.balance += round(amount * odds, 2)
                self.games_won += 1
        # NO BET
        if action_type == 1:
            bet_on = 'No bet'
        # HOME BET
        if action_type == 2:
            bet_on = 'True'
            # Find home odds
            odds = self.results['Home Odds'][self.game_number]
            if odds == 0:
                amount = 0
            # Place bet
            self.balance -= amount
            # Check win
            if game_result == True:
                self.balance += round(amount * odds, 2)
                self.games_won += 1
        self.balance = round(self.balance, 2)
        bet_info = {
            'index': self.game_number,
            'Home Odds': self.results['Home Odds'][self.game_number],
            'Vis Odds': self.results['Vis Odds'][self.game_number],
            'Bet on': bet_on,
            'Home Win': game_result,
            'Amount': amount,
            'Odds': odds,
            'Bankroll': self.balance
        }
        self.game_bets.append(bet_info)
        print('Finish action')
        return bet_info

    def step(self, action):
        print('Start step')
        info = self._take_action(action)
        self.timestep += 1
        # Reward
        gamma = (self.timestep / len(self.df))  # time discount
        self.profit = self.balance - self.initial_balance
        reward = self.profit * gamma
        # Done
        done = self.balance <= 0
        # Obs
        obs = self._next_obs()
        # If last game, print results and start from beginning
        # test the printing of csv
        if self.timestep == 2500:
            self._print_bet_csv()
            self._print_bet_chart()
            self.game_bets = []
            print('Starting point: ', self.starting_point)
            print('Chart printed')
        print('Finished Step')
        return obs, reward, done, info

    def reset(self):
        self.initial_balance = INITIAL_BALANCE
        self.balance = INITIAL_BALANCE
        self.profit = 0
        self.starting_point = np.random.randint(len(self.df) - len(self.df) * 0.1)  # Start anywhere but in the end 10%
        self.timestep = 0
        self.games_won = 0
        self.game_bets = []

    def render(self, mode='human', close=False):
        print('Timestep: ', self.timestep)
        print('Profit: ', self.profit)
        print('Games Won: ', self.games_won)
        print('Balance: ', self.balance)
Here is the file I run the environment from:
import time
start_time = time.time()
import os
import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3 import PPO, A2C
from Betting_env import BettingEnv
data = pd.read_csv('Scraping/Games and Stats.csv')
df = data.drop(['Date', 'Home', 'Visitor', 'Home PTS', 'Vis PTS', 'Home Points Dif', 'Home Win'], axis=1)
df = df.astype(float)
normed = (df-df.min())/(df.max()-df.min())
normed = normed.round(10)
env = DummyVecEnv([lambda: BettingEnv(normed, data, INITIAL_BALANCE=100)])
model = A2C('MlpPolicy', env, verbose=0)
model.learn(total_timesteps=10000)
save_path = os.path.join('Training', 'Saved Models', 'Betting_Model_A2C')
model.save(save_path)
end_time = time.time()
total_time = end_time - start_time
print(round(total_time / 60 / 60), ' Hours ', round(total_time / 60), ' Minutes')
UPDATE:
After using the VecCheckNan() and check_env() functions from stable_baselines3, I get the following error messages.
VecCheckNan() gives:
Traceback (most recent call last):
File "/home/dev/Desktop/Projects/AI/NBA2/stable_baselines_run.py", line 51, in <module>
model.learn(total_timesteps=10000)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/ppo/ppo.py", line 299, in learn
return super(PPO, self).learn(
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 226, in learn
total_timesteps, callback = self._setup_learn(
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/base_class.py", line 420, in _setup_learn
self._last_obs = self.env.reset() # pytype: disable=annotation-type-mismatch
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/vec_env/vec_check_nan.py", line 46, in reset
self._check_val(async_step=False, observations=observations)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/vec_env/vec_check_nan.py", line 84, in _check_val
raise ValueError(msg)
ValueError: found nan in observations.
Originated from the environment observation (at reset)
I have printed out the first observations and there are no NaNs in there.
check_env() gives:
Traceback (most recent call last):
File "/home/dev/Desktop/Projects/AI/NBA2/stable_baselines_run.py", line 42, in <module>
check_env(env)
File "/home/dev/anaconda3/envs/sb/lib/python3.9/site-packages/stable_baselines3/common/env_checker.py", line 245, in check_env
assert isinstance(
AssertionError: Your environment must inherit from the gym.Env class cf https://github.com/openai/gym/blob/master/gym/core.py
My BettingEnv class does inherit from gym.Env.
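For reference, check_env expects the unwrapped gym.Env rather than the DummyVecEnv wrapper (a VecEnv is not a gym.Env subclass, which is what the assertion above complains about), and VecCheckNan wraps the vectorised env. A minimal sketch of that wiring, using the names from the run script above (not necessarily the exact calls used):

from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.vec_env import DummyVecEnv, VecCheckNan

# check_env wants the raw environment, not the vectorised wrapper
check_env(BettingEnv(normed, data, INITIAL_BALANCE=100))

# VecCheckNan raises as soon as a NaN/inf shows up in observations, actions or rewards
env = DummyVecEnv([lambda: BettingEnv(normed, data, INITIAL_BALANCE=100)])
env = VecCheckNan(env, raise_exception=True)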
I had the same error. In my case the problem was the custom reset function of my environment. I found that the BaseAlgorithm class in base_class.py of stable_baselines3 contains this line of code (line 429 in my case):
self._last_obs = self.env.reset()  # pytype: disable=annotation-type-mismatch
which means the environment's reset must return an observation (or at least None).
So you could try returning an observation from your reset function.
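A minimal sketch of what that could look like for the BettingEnv above (reusing the existing _next_obs helper; this shows the idea, not the poster's actual fix):

def reset(self):
    self.initial_balance = INITIAL_BALANCE
    self.balance = INITIAL_BALANCE
    self.profit = 0
    self.starting_point = np.random.randint(len(self.df) - len(self.df) * 0.1)  # Start anywhere but in the end 10%
    self.timestep = 0
    self.games_won = 0
    self.game_bets = []
    # Return the first observation so BaseAlgorithm._setup_learn gets a valid self._last_obs
    return self._next_obs()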
Although you are replacing NaNs and ensuring that there are none, the error is most probably due to some of the values being np.inf or -np.inf.
Try:
df = df.replace([np.inf, -np.inf], np.nan)
Hope it works!
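A quick sanity check along those lines, assuming the normed DataFrame from the run script (a sketch, not the poster's code):

import numpy as np

# Look for infinities and NaNs before handing the data to the environment
print(np.isinf(normed.to_numpy()).any(), normed.isna().any().any())

# Replace infinities with NaN, then fill every remaining NaN
normed = normed.replace([np.inf, -np.inf], np.nan).fillna(0)

Note that the min-max normalisation itself can introduce NaNs: for a constant column, df.max() - df.min() is zero and the 0/0 division yields NaN, which would also explain the "found nan in observations" error from VecCheckNan.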
You have commented this out:
# dtype = np.float16
So you may already have tried this. But it is possible your dataframe contains float32 values and your observation space is expecting the wrong dtype. Try changing it to:
dtype = np.float32
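Applied to the observation space in the question, that would look roughly like this (a sketch; only the dtype is new compared to the question's code):

self.observation_space = spaces.Box(
    low=self.df.min().min(),    # Lowest value found in df
    high=self.df.max().max(),   # Highest value found in df
    shape=(df.shape[1],),       # shape of one row of the df
    dtype=np.float32,
)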
Related
I am defining a main_meal_model function that takes in a day value and lists a lunch plan. I have previously defined a random_main_meal_dataset function, which builds a dataset of all the foods that are to be eaten at lunch and takes in the same day value. For some reason I cannot omit the hard-coded day value in the random_main_meal_dataset function; otherwise it gives either a KeyError or a NameError.
I have tried every day of the week and it works as long as the hard-coded value matches the day I pass when calling the main_meal_model function, but as soon as I try to make the correspondence automatic it raises KeyError: 'Monday' or NameError: name 'day' is not defined at day_data = data[day].
Full error messages:
Traceback (most recent call last):
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 104, in <module>
print(main_meal_model('Monday', 70, 2000, data, 'Lunch'))
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 72, in main_meal_model
day_data = data[day]
KeyError: 'Monday
or
Traceback (most recent call last):
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 103, in <module>
print(main_meal_model('Monday', 70, 2000, data, 'Lunch'))
File "c:\Users\Leonix\Desktop\CS50 Final Project\test.py", line 71, in main_meal_model
day_data = data[day]
NameError: name 'day' is not defined
Here is the part of the code I suspect is causing the problem:
https://pastebin.com/w8XQ8rTn
split_values_day = np.linspace(0, len(data), 8).astype(int)
split_values_day[-1] = split_values_day[-1] - 1

def random_main_meal_dataset(data, day):
    data = data[data['meal'].str.contains('Main Dishes|Condiments|Side Dishes', na=False)]
    frac_data = data.sample(frac=1).reset_index().drop('index', axis=1)
    day_data = []
    for s in range(len(split_values_day) - 1):
        day_data.append(
            frac_data.loc[split_values_day[s]:split_values_day[s + 1]])
    return dict(zip(day, day_data))

# define a lunch / dinner model that takes in prob, kg, calories, data and makes a lunch / dinner plan for the day
def main_meal_model(day, kg, calories, data, meal):
    data = random_main_meal_dataset(data, day=['Monday'])
    G = extract_gram(build_nutritional_values(kg, calories))
    E = G['Carbohydrates Grams']
    F = G['Fat Grams']
    P = G['Protein Grams']
    day_data = data[day]
    day_data = day_data[day_data.calories != 0]
    food = day_data.name.tolist()
    c = day_data.calories.tolist()
    x = pulp.LpVariable.dicts(
        "x", indices=food, lowBound=0, upBound=1.5, cat='Continuous', indexStart=[])
    e = day_data.carbohydrate.tolist()
    f = day_data.total_fat.tolist()
    p = day_data.protein.tolist()
    div_meal = meal_split[meal]
    prob = pulp.LpProblem("Diet", LpMinimize)
    prob += pulp.lpSum([x[food[i]] * c[i] for i in range(len(food))])
    prob += pulp.lpSum([x[food[i]] * e[i] for i in range(len(x))]) >= E * 0.35
    prob += pulp.lpSum([x[food[i]] * f[i] for i in range(len(x))]) >= F * 0.35
    prob += pulp.lpSum([x[food[i]] * p[i] for i in range(len(x))]) >= P * 0.35
    prob.solve(PULP_CBC_CMD(msg=0))
    variables = []
    values = []
    for v in prob.variables():
        variable = v.name
        value = v.varValue
        variables.append(variable)
        values.append(value)
    values = np.array(values).round(2).astype(float)
    sol = pd.DataFrame(np.array([food, values]).T,
                       columns=['Food', 'Quantity'])
    sol['Quantity'] = sol.Quantity.astype(float)
    sol = sol[sol['Quantity'] != 0.0]
    sol.Quantity = sol.Quantity * 100
    sol = sol.rename(columns={'Quantity': 'Quantity (g)'})
    return sol

print(main_meal_model('Monday', 70, 2000, data, 'Lunch'))
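For context on the KeyError: dict(zip(day, day_data)) behaves very differently depending on whether day is a list of day names or a bare string, since zipping a string iterates over its characters. A minimal sketch, independent of the meal data:

chunks = ['chunk0', 'chunk1', 'chunk2']

print(dict(zip(['Monday'], chunks)))  # {'Monday': 'chunk0'} -> data['Monday'] works
print(dict(zip('Monday', chunks)))    # {'M': 'chunk0', 'o': 'chunk1', 'n': 'chunk2'} -> KeyError: 'Monday'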
While learning Python, I decided to try to create a genetic algorithm and got stuck at the mutation step.
I will be glad for any advice both on solving this problem and in general on the architecture and style of the code.
one_generation = genlib.create_generation()
print(genlib.almost_generation(one_generation))
This code returns the error:
Traceback (most recent call last):
File "/home/rosrobot/PycharmProjects/gen2/main.py", line 23, in \<module\>
print(genlib.almost_generation(one_generation))
File "/home/rosrobot/PycharmProjects/gen2/genlib.py", line 83, in almost_generation
updated_generation.loc\[creature_index\] = sample\[updated_generation.columns\]
File "/home/rosrobot/PycharmProjects/gen2/venv/lib64/python3.10/site-packages/pandas/core/indexing.py", line 716, in __setitem__
iloc.\_setitem_with_indexer(indexer, value, self.name)
File "/home/rosrobot/PycharmProjects/gen2/venv/lib64/python3.10/site-packages/pandas/core/indexing.py", line 1682, in \_setitem_with_indexer
self.\_setitem_with_indexer_missing(indexer, value)
File "/home/rosrobot/PycharmProjects/gen2/venv/lib64/python3.10/site-packages/pandas/core/indexing.py", line 1998, in \_setitem_with_indexer_missing
raise ValueError("cannot set a row with mismatched columns")
ValueError: cannot set a row with mismatched columns
Process finished with exit code 1
Functions in 'genlib' file:
import random as rnd
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pd.plotting.register_matplotlib_converters()
def create_creature(gen_length=10,
                    creature_name='one'):
    creature = pd.Series(data=[rnd.randint(0, 1) for i in range(gen_length)],
                         name=creature_name)
    return creature

def create_generation(generation_size=50,
                      gen_length=10):
    generation = pd.DataFrame(data=[create_creature(creature_name=(str(i + 1)) + 'th',
                                                    gen_length=gen_length) for i in range(generation_size)])
    generation['quality'] = generation.sum(axis=1)
    return generation

def __indexes_of_quality(generation):
    """
    :rtype: pd.Series
    """
    for i in generation.quality.unique():
        print('quality = ', i, ': ',
              generation.loc[generation.quality == i, 'quality'].index.values,
              '\n',
              '__')

def create_many_generations(number_of_generations=10,
                            generation_size=50,
                            gen_length=10):
    list_of_dataframes = pd.Series(data=[create_generation(generation_size=generation_size,
                                                           gen_length=gen_length
                                                           ) for i in range(number_of_generations)],
                                   name='creature_name')
    return list_of_dataframes

def one_generation_pyplot(generation):
    sns.barplot(x=generation.index,
                y=generation.sort_values('quality').quality)
    plt.show()

def many_generations_pyplot(list_of_generations):
    qualities = [sum(generation.quality) for generation in list_of_generations]
    sns.lineplot(data=qualities)
    plt.show()

def __mutation(creature: pd.Series) -> pd.Series:
    point = rnd.randint(0, len(creature))
    creature[point] = int(not creature[point].values)
    return creature

def almost_generation(generation):
    sample = generation.sample()
    sample = __mutation(sample)
    updated_generation = pd.DataFrame(columns=generation.columns)
    for creature_index in generation.index:
        if creature_index == sample.index:
            print(creature_index, ' == ', sample.index)
            updated_generation.loc[creature_index] = sample[updated_generation.columns]
        else:
            updated_generation.loc[creature_index] = generation.loc[creature_index]
    return updated_generation
I tried to convert "sample" to str, and also tried using loc, iloc and append
In your almost_generation function, change the line within your if block to assign the values:
if creature_index == sample.index:
    print(creature_index, ' == ', sample.index)
    updated_generation.loc[creature_index] = sample[updated_generation.columns].values
You can just simplify your entire function as follows:
def almost_generation(generation):
    sample = generation.sample()
    sample = __mutation(sample)
    generation.loc[sample.index] = sample[generation.columns].values
    return generation
I want to generate well-formed formulas in Python, but I am running into a memory error. I think I am accidentally doing some double recursion, but I am not certain. I am using Python 3.8.3 and am not really formally trained. Any tips are welcome. Here's my code:
from string import Template
vars = ['w','x','y','z', '$x', '$y', 's($x, $y)']
mxy = Template('m($x, $y)')
stage1 = []
for var1 in vars:
    for var2 in vars:
        stage1.append(mxy.substitute(x=var1, y=var2))

def extractFunctions(x):
    ans = []
    for formula in x:
        if '$' in formula:
            ans.append(formula)
    return ans

def stageSub(stageSet, iterations):
    currentStageSet = stageSet
    wffs = []
    newTemplates = extractFunctions(currentStageSet)
    for phormula in newTemplates:
        if ('$x' in phormula) and ('$y' not in phormula):
            for varx in currentStageSet:
                wffs.append(Template(phormula).substitute(x=varx))
        elif '$y' in phormula and '$x' not in phormula:
            for vary in currentStageSet:
                wffs.append(Template(phormula).substitute(y=vary))
        elif '$x' in phormula and '$y' in phormula:
            for varx in currentStageSet:
                for vary in currentStageSet:
                    wffs.append(Template(phormula).substitute(x=varx, y=vary))
    iterations = iterations - 1
    print(iterations)
    if iterations == 0:
        return wffs
    if iterations > 0:
        print('this happened', iterations)
        return stageSub(wffs, iterations)

stage2 = stageSub(stage1, 2)
print(len(stage2))
If you run stageSub(stage1, 1) (so just 1 iteration) it does actually halt.
Here is the error and traceback:
1
this happened 1
Traceback (most recent call last):
File "d:\Python\ringSingleAxiom\generatingWffs.py", line 48, in <module>
stage2 = stageSub(stage1, 2)
File "d:\Python\ringSingleAxiom\generatingWffs.py", line 46, in stageSub
return stageSub(wffs, iterations)
File "d:\Python\ringSingleAxiom\generatingWffs.py", line 38, in stageSub
wffs.append(Template(phormula).substitute(x = varx, y = vary))
MemoryError
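For scale: stageSub only calls itself once per iteration, so this is not double recursion, but every template that still contains both $x and $y generates len(currentStageSet)**2 new strings, so the list size roughly squares on each pass. A quick count of what a single pass produces (a sketch that only assumes the stage1 list built above):

templates = [f for f in stage1 if '$' in f]
both = sum(('$x' in f) and ('$y' in f) for f in templates)
single = len(templates) - both
# size of wffs after one pass over stage1
print(len(stage1), single * len(stage1) + both * len(stage1) ** 2)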
I am quite new to python so please bear with me.
Currently, this is my code:
import pandas as pd
import statistics
import matplotlib.pyplot as plt
import math
from datetime import datetime
start_time = datetime.now()
gf = pd.read_csv(r"/Users/aaronhuang/Documents/Desktop/ffp/exfileCLEAN2.csv",
skiprows=[1])
bf = pd.read_csv(r"/Users/aaronhuang/Documents/Desktop/ffp/2SeconddatasetCLEAN.csv",
skiprows=[1])
df = (input("Which data set? "))
magnitudes = (df['Magnitude '].values)
times = df['Time '].values
average = statistics.mean(magnitudes)
sd = statistics.stdev(magnitudes)
below = sd * 3
class data_set:
    def __init__(self, index):
        self.mags = []
        self.i = index
        self.mid_time = df['Time '][index]
        self.mid_mag = df['Magnitude '][index]
        self.times = []
        ran = 80
        for ii in range(ran):
            self.times.append(df['Time '][self.i + ii - ran / 2])
            self.mags.append(df['Magnitude '][self.i + ii - ran / 2])

data = []
today = float(input("What is the range? "))
i = 0
while (i < len(df['Magnitude '])):
    if (abs(df['Magnitude '][i]) <= (average - below)):
        # check if neighbours
        t = df['Time '][i]
        tt = True
        for d in range(len(data)):
            if abs(t - data[d].mid_time) <= today:
                # check if closer to center
                if df['Magnitude '][i] < data[d].mid_mag:
                    data[d] = data_set(i)
                    print("here")
                tt = False
                break
        if tt:
            data.append(data_set(i))
    i += 1
print("found values")

# graphing
height = 2  # Change this for number of columns
width = math.ceil(len(data) / height)
if width < 2:
    width = 2
fig, axes = plt.subplots(width, height, figsize=(30, 30))
row = 0
col = 0
for i in range(len(data)):
    axes[row][col].plot(data[i].times, data[i].mags)
    col += 1
    if col > height - 1:
        col = 0
        row += 1
plt.show()

end_time = datetime.now()
print('Duration: {}'.format(end_time - start_time))
Currently, the error produced is this:
/Users/aaronhuang/.conda/envs/EXTTEst/bin/python "/Users/aaronhuang/PycharmProjects/EXTTEst/Code sandbox.py"
Which data set? gf
Traceback (most recent call last):
File "/Users/aaronhuang/PycharmProjects/EXTTEst/Code sandbox.py", line 14, in <module>
magnitudes = int(df['Magnitude '].values)
TypeError: string indices must be integers
Process finished with exit code 1
I am trying to let the user choose which data file the rest of the code runs on.
So if the user types gf, I would like the code to use the first data file.
Any help would be appreciated. Thank you.
Why not use an if-statement at the beginning? Try this:
Instead of:
gf = pd.read_csv(r"/Users/aaronhuang/Documents/Desktop/ffp/exfileCLEAN2.csv",
skiprows=[1])
bf = pd.read_csv(r"/Users/aaronhuang/Documents/Desktop/ffp/2SeconddatasetCLEAN.csv",
skiprows=[1])
df = (input("Which data set? "))
Use this:
choice = input("Which data set? ")
if choice == "gf":
df = pd.read_csv(r"/Users/aaronhuang/Documents/Desktop/ffp/exfileCLEAN2.csv",
skiprows=[1])
elif choice == "bf":
df = pd.read_csv(r"/Users/aaronhuang/Documents/Desktop/ffp/2SeconddatasetCLEAN.csv",
skiprows=[1])
else:
print("Error. Your choice is not valid")
df = ""
break
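An equivalent, slightly more compact variant maps the choices to their file paths (a sketch using the same two paths):

paths = {
    "gf": r"/Users/aaronhuang/Documents/Desktop/ffp/exfileCLEAN2.csv",
    "bf": r"/Users/aaronhuang/Documents/Desktop/ffp/2SeconddatasetCLEAN.csv",
}
choice = input("Which data set? ")
if choice not in paths:
    raise SystemExit("Error. Your choice is not valid")
df = pd.read_csv(paths[choice], skiprows=[1])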
def simulate(self, timesteps, **kwargs):
    pos = {comp: i for i, comp in enumerate(kwargs)}
    population = np.zeros(len(pos), dtype='int')
    for comp in pos:
        population[pos[comp]] = kwargs[comp]  # line where the error is
    values = []
    values.append(population)
    comps = list(self.transitions.nodes)
    time = np.arange(1, timesteps, 1, dtype='int')
    for t in time:
        pop = values[-1]
        new_pop = values[-1].copy()
        N = np.sum(pop)
I am getting this ValueError and I'm not sure how to fix it. Any suggestions?
The error happens when I call this:
if population is None:
    population = SIR1.values_.iloc[-1].copy()
else:
    population = pd.concat([population, SIR1.values_.iloc[-1]])

S0 = population.S
I00 = population.I
R0 = population.R

Quarantine.simulate(365 - 74, S=S0, I=I00, R=R0)  ### Line causing the error