Following the guide at: https://www.youtube.com/watch?v=8dY3nQRcsac&list=PLTWFMbPFsvz3CeozHfeuJIXWAJMkPtAdS&index=7
When I run the Python program, there is an error pointing at the python-neat config file.
It looks like it has something to do with the genome variable.
My python-neat config file:
#--- parameters for the XOR-2 experiment ---#
[NEAT]
fitness_criterion = max
fitness_threshold = 10000
pop_size = 20
reset_on_extinction = True
[DefaultGenome]
# node activation options
activation_default = sigmoid
activation_mutate_rate = 0.05
activation_options = sigmoid
# node aggregation options
aggregation_default = sum
aggregation_mutate_rate = 0.05
aggregation_options = sum
# node bias options
bias_init_mean = 0.0
bias_init_stdev = 1.0
bias_max_value = 30.0
bias_min_value = -30.0
bias_mutate_power = 0.5
bias_mutate_rate = 0.7
bias_replace_rate = 0.1
# genome compatibility options
compatibility_disjoint_coefficient = 1.0
compatibility_weight_coefficient = 0.5
# connection add/remove rates
conn_add_prob = 0.5
conn_delete_prob = 0.5
# connection enable options
enabled_default = True
enabled_mutate_rate = 0.01
feed_forward = False
initial_connection = unconn nected
# node add/remove rates
node_add_prob = 0.5
node_delete_prob = 0.2
# network parameters
num_hidden = 0
num_inputs = 1120
num_outputs = 12
# node response options
response_init_mean = 1.0
response_init_stdev = 0.0
response_max_value = 30.0
response_min_value = -30.0
response_mutate_power = 0.0
response_mutate_rate = 0.0
response_replace_rate = 0.0
# connection weight options
weight_init_mean = 0.0
weight_init_stdev = 1.0
weight_max_value = 30
weight_min_value = -30
weight_mutate_power = 0.5
weight_mutate_rate = 0.8
weight_replace_rate = 0.1
[DefaultSpeciesSet]
compatibility_threshold = 205
[DefaultStagnation]
species_fitness_func = max
max_stagnation = 50
species_elitism = 0
[DefaultReproduction]
elitism = 3
survival_threshold = 0.2
The error from the terminal:
'config-feedforward')
File "/home/gym/OPAI/lib/python3.6/site-packages/neat/config.py", line 189, in __init__
self.genome_config = genome_type.parse_config(genome_dict)
File "/home/gym/OPAI/lib/python3.6/site-packages/neat/genome.py", line 158, in parse_config
return DefaultGenomeConfig(param_dict)
File "/home/gym/OPAI/lib/python3.6/site-packages/neat/genome.py", line 72, in __init__
assert self.initial_connection in self.allowed_connectivity
AssertionError
The config-loading code from the python-neat script:
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')
The issue is on the line that reads "initial_connection = unconn nected".
There's a typo: there shouldn't be a break in 'unconnected'. It should read as follows:
"initial_connection = unconnected"
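With the stray space, the parsed value is not one of the connectivity options neat-python accepts, which is why the assert self.initial_connection in self.allowed_connectivity check in genome.py fails. For reference, the corrected block of the config would read (only the last line changes):

feed_forward = False
initial_connection = unconnected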
I am trying to determine at what flow "pressure_hill" is equal to "pressure_returnline" in the function def calculateflow(Q).
I am trying to use sympy's solve but get the error message below.
pressure_returnline and pressure_hill each calculate a pressure drop that depends on the water flow value. I know that both will have the same pressure drop, and I want to calculate what flow I have in each pipe.
When running the code I get TypeError: cannot determine truth value of Relational.
Is there a better way than using the sympy solver?
Thanks // Sebastian
import numpy as np
import fluids
from scipy import stats
# #input h and D0
# h = 100 # mm
D0 = 149# mm
# flow = 5/60 #m3/min
# density = 999 #kg/m3
pi = 3.1416
# mu = 1E-3 # viscosity
# length = 10 # meter
# #calculation
def valve_pos(pos, control_sig, time):  ## Change / Secound
    cntrl_max = 254
    cntrl_min = 1
    cntrl_mid = 127
    time_forw = 1
    time_back = 1.5
    time_forw_min = 6
    time_back_min = 5
    min_pos = 5
    max_pos = 149
    cntrl_time = 0
    cntrl_time_min = 0
    way = 1
    if control_sig == cntrl_mid:
        return 0
    if control_sig > cntrl_mid:
        cntrl_time = time_forw
        cntrl_time_min = time_forw_min
        way = 1
    else:
        cntrl_time = time_back
        cntrl_time_min = time_back_min
        way = -1
    coeff = stats.linregress([cntrl_min, cntrl_mid, cntrl_max], [-1, 0, 1])
    rate = ((max_pos - min_pos) / cntrl_time) * (control_sig*coeff[0] + coeff[1]) * time
    rate_min = ((max_pos - min_pos) / cntrl_time_min) * way * time
    if abs(rate) < abs(rate_min):
        rate = rate_min
    if ((pos + rate) > max_pos) & (way > 0):
        rate = 0
    if ((pos - rate) < min_pos) & (way < 0):
        rate = 0
    return rate

def velocity(D0, flow):
    d = D0/1000
    area = (d**2*pi)/4
    w0 = flow/area  # flow velocity m/s
    return w0

def knife_valve_pressure_loss(h, D0, density, flow):
    w0 = velocity(D0, flow)
    if h/D0 < 0.9 and h/D0 >= 0.2:
        a = np.array([7.661175, -72.63827, 345.7625, -897.8331, 1275.939, -938.8331, 278.8193])
        i = np.array([0, 1, 2, 3, 4, 5, 6])
        dzeta = np.exp(2.3*sum(a*(h/D0)**i))
    elif h/D0 >= 0.9:
        dzeta = 0.6 - 0.6 * (h/D0)
    elif h/D0 < 0.2 and h/D0 >= 0.0:
        dzeta = 13114 * (h/D0)**2 - 5216.1 * h/D0 + 553.17
    else:
        print('formula cannot be used for this h/D0')
        return 0
    pressure_loss = dzeta * density * 0.5 * w0**2
    if pressure_loss < 0:
        pressure_loss = 0
    return pressure_loss/100000

def pipe_losses(D0, flow, density, length, mu=1E-3):
    w0 = velocity(D0, flow)
    Re = fluids.Reynolds(V=w0, D=D0/1000, rho=1000, mu=mu)
    fd = fluids.friction_factor(Re, eD=1E-5/0.05)
    K = fluids.K_from_f(fd=fd, L=length, D=D0/1000)
    K += fluids.exit_normal()
    K += 4*fluids.bend_rounded(Di=D0/1000, angle=90, fd=fd)
    K += 3.6
    pressure_loss_pipe_fittings = fluids.dP_from_K(K, rho=density, V=w0)/100000
    liftheight_loss = 2/10.2
    return pressure_loss_pipe_fittings + liftheight_loss

def pressure_hill(flow, h=149, length=17, D0=139, density=999):
    p_valve = knife_valve_pressure_loss(h, D0, density, flow)
    p_system = pipe_losses(D0, flow, density, length)
    return p_valve + p_system

def pressure_returnline(flow, h=149, length=1, D0=139, density=999):
    p_valve = knife_valve_pressure_loss(h, D0, density, flow)
    p_system = pipe_losses((76/1000), flow, density, 0.5)
    return p_valve + p_system

def calculateflow(Q):
    from sympy import symbols, solve, Eq, Symbol
    x, y = symbols('x,y')
    sol = solve(pressure_hill(x) - pressure_returnline(y), (x, y))
    sol

calculateflow(0.1)
Traceback (most recent call last):
File "<ipython-input-59-42d41eef9605>", line 1, in <module>
calculateflow(0.1)
File "system_losses.py", line 122, in calculateflow
sol = solve(pressure_hill(x) - pressure_returnline(y) ,(x,y))
File "system_losses.py", line 109, in pressure_hill
p_valve = knife_valve_pressure_loss (h,D0, density, flow)
File "system_losses.py", line 90, in knife_valve_pressure_loss
if pressure_loss < 0:
File "relational.py", line 398, in __bool__
raise TypeError("cannot determine truth value of Relational")
TypeError: cannot determine truth value of Relational
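A possible way around this (my suggestion, not part of the original post): pressure_hill and pressure_returnline are ordinary numerical functions built on numpy and fluids, so passing a sympy symbol through them reaches the if pressure_loss < 0: check, which cannot be evaluated for a symbolic expression; that is exactly what the TypeError is saying. A numeric root finder sidesteps the problem. A minimal sketch with scipy.optimize.brentq follows; the bracket values are assumptions and must enclose a flow range over which the difference changes sign:

from scipy import optimize

def pressure_difference(flow):
    # difference between the two pressure drops at the same flow
    return pressure_hill(flow) - pressure_returnline(flow)

# flow at which both branches see the same pressure drop;
# the bracket [1e-4, 0.1] is only a guess for the search interval
flow_equal = optimize.brentq(pressure_difference, 1e-4, 0.1)
print(flow_equal)

If the two pipes instead share a known total flow Q, the same idea can be applied to pressure_hill(x) - pressure_returnline(Q - x).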
I'm trying to run the following code from python-neat, which lets you apply genetic algorithms to neural networks. It is a very interesting concept, but I cannot get the example code to run.
"""
2-input XOR example -- this is most likely the simplest possible example.
"""
from __future__ import print_function
import os
import neat
import visualize
# 2-input XOR inputs and expected outputs.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [ (0.0,), (1.0,), (1.0,), (0.0,)]
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        genome.fitness = 4.0
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        for xi, xo in zip(xor_inputs, xor_outputs):
            output = net.activate(xi)
            genome.fitness -= (output[0] - xo[0]) ** 2
def run(config_file):
    # Load configuration.
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_file)

    # Create the population, which is the top-level object for a NEAT run.
    p = neat.Population(config)

    # Add a stdout reporter to show progress in the terminal.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    p.add_reporter(neat.Checkpointer(5))

    # Run for up to 300 generations.
    winner = p.run(eval_genomes, 300)

    # Display the winning genome.
    print('\nBest genome:\n{!s}'.format(winner))

    # Show output of the most fit genome against training data.
    print('\nOutput:')
    winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
    for xi, xo in zip(xor_inputs, xor_outputs):
        output = winner_net.activate(xi)
        print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))

    node_names = {-1: 'A', -2: 'B', 0: 'A XOR B'}
    visualize.draw_net(config, winner, True, node_names=node_names)
    visualize.plot_stats(stats, ylog=False, view=True)
    visualize.plot_species(stats, view=True)

    p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')
    p.run(eval_genomes, 10)
if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config-feedforward')
    run(config_path)
Here is the config file
#--- parameters for the XOR-2 experiment ---#
[NEAT]
fitness_criterion = max
fitness_threshold = 3.9
pop_size = 150
reset_on_extinction = False
[DefaultGenome]
# node activation options
activation_default = sigmoid
activation_mutate_rate = 0.0
activation_options = sigmoid
# node aggregation options
aggregation_default = sum
aggregation_mutate_rate = 0.0
aggregation_options = sum
# node bias options
bias_init_mean = 0.0
bias_init_stdev = 1.0
bias_max_value = 30.0
bias_min_value = -30.0
bias_mutate_power = 0.5
bias_mutate_rate = 0.7
bias_replace_rate = 0.1
# genome compatibility options
compatibility_disjoint_coefficient = 1.0
compatibility_weight_coefficient = 0.5
# connection add/remove rates
conn_add_prob = 0.5
conn_delete_prob = 0.5
# connection enable options
enabled_default = True
enabled_mutate_rate = 0.01
feed_forward = True
initial_connection = full
# node add/remove rates
node_add_prob = 0.2
node_delete_prob = 0.2
# network parameters
num_hidden = 0
num_inputs = 2
num_outputs = 1
# node response options
response_init_mean = 1.0
response_init_stdev = 0.0
response_max_value = 30.0
response_min_value = -30.0
response_mutate_power = 0.0
response_mutate_rate = 0.0
response_replace_rate = 0.0
# connection weight options
weight_init_mean = 0.0
weight_init_stdev = 1.0
weight_max_value = 30
weight_min_value = -30
weight_mutate_power = 0.5
weight_mutate_rate = 0.8
weight_replace_rate = 0.1
[DefaultSpeciesSet]
compatibility_threshold = 3.0
[DefaultStagnation]
species_fitness_func = max
max_stagnation = 20
species_elitism = 2
[DefaultReproduction]
elitism = 2
survival_threshold = 0.2
Lastly here is the version of python-neat I have installed
pip show neat-python
Name: neat-python
Version: 0.92
Summary: A NEAT (NeuroEvolution of Augmenting Topologies) implementation
Home-page: https://github.com/CodeReclaimers/neat-python
Could anyone please help me resolve the following error? So far I have tried installing from GitHub and installing from pip, and both give me the same error on the example code. I have also tried variant code, but I get the same error.
****** Running generation 0 ******
Traceback (most recent call last):
File "driver.py", line 34, in <module>
winner = p.run(eval_genomes)
File "/home/j/anaconda3/lib/python3.8/site-packages/neat_python-0.92-py3.8.egg/neat/population.py", line 88, in run
File "driver.py", line 18, in eval_genomes
output = net.activate(xi)
File "/home/j/anaconda3/lib/python3.8/site-packages/neat_python-0.92-py3.8.egg/neat/nn/feed_forward.py", line 13, in activate
RuntimeError: Expected 3 inputs, got 2
You are using example code written for a newer version.
Example Source
NOTE: That page shows the source and configuration file for the current version of neat-python available on GitHub. If you are using version 0.92 installed from PyPI, make sure you get the script and config file from the archived source for that release.
I checked your code against https://neat-python.readthedocs.io/en/latest/xor_example.html and it looks the same, but check your installed version. Try the archived source for that release:
https://github.com/CodeReclaimers/neat-python/releases/tag/v0.92
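For example (a sketch; the tag name and the examples/xor path reflect the repository layout and may have changed), you can check out the matching release and run the example that ships with it:

git clone --branch v0.92 https://github.com/CodeReclaimers/neat-python.git
cd neat-python/examples/xor
python evolve-feedforward.py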
if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config-feedforward') #<----
    run(config_path)
In this "if" block, change the name of the config file to 'config-feedforward.txt' instead of 'config-feedforward'.
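That is, assuming the file on disk is actually saved as config-feedforward.txt, the marked line would become:

config_path = os.path.join(local_dir, 'config-feedforward.txt')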
What am I doing wrong below? I have installed Anaconda on my Mac and Numba along with it. I am trying to run Numba on my Python code, following the instructions given in the section Python with Numba here: https://www.continuum.io/blog/developer/accelerating-python-libraries-numba-part-1
I need to run this code overnight by changing zzzz from 5 to 40. I am just using zzzz = 5 now as a test case. Here is my code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from numba import double
from numba.decorators import jit, autojit
#jit
def Kcrit():
    # The data of the plot will be added in these lists
    x_data_plot = []
    y_data_plot = []
    zzzz = 5
    for N in range(2, zzzz, 2):
        #Constants and parameters
        epsilon = 0.01
        if N in range(2, 11, 2):
            K00 = np.logspace(0, 3, 100, 10)
        elif N in range(12, 21, 2):
            K00 = np.logspace(3, 3.75, 100, 10)
        elif N in range(22, 31, 2):
            K00 = np.logspace(3.75, 4.15, 100, 10)
        else:
            K00 = np.logspace(4.15, 4.5, 100, 10)
        len1 = len(K00)
        y0 = [0]*(3*N/2+3)
        Kplot = np.zeros((len1, 1))
        Pplot = np.zeros((len1, 1))
        S = [np.zeros((len1, 1)) for kkkk in range(N/2+1)]
        KS = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        PS = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        Splot = [np.zeros((len1, 1)) for kkkk in range(N/2+1)]
        KSplot = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        PSplot = [np.zeros((len1, 1)) for kkkk in range(N/2)]
        for series in range(0, len1):
            K0 = K00[series]
            Q = 10
            r1 = 0.0001
            r2 = 0.001
            a = 0.001
            d = 0.001
            k = 0.999
            S10 = 1e5
            P0 = 1
            tf = 1e10
            time = np.linspace(0, tf, len1)

            #Defining dy/dt's
            def f(y, t):
                for alpha in range(0, (N/2+1)):
                    S[alpha] = y[alpha]
                for beta in range((N/2)+1, N+1):
                    KS[beta-N/2-1] = y[beta]
                for gamma in range(N+1, 3*N/2+1):
                    PS[gamma-N-1] = y[gamma]
                K = y[3*N/2+1]
                P = y[3*N/2+2]

                # The model equations
                ydot = np.zeros((3*N/2+3, 1))
                B = range((N/2)+1, N+1)
                G = range(N+1, 3*N/2+1)
                runsumPS = 0
                runsum1 = 0
                runsumKS = 0
                runsum2 = 0
                for m in range(0, N/2):
                    runsumPS = runsumPS + PS[m]
                    runsum1 = runsum1 + S[m+1]
                    runsumKS = runsumKS + KS[m]
                    runsum2 = runsum2 + S[m]
                    ydot[B[m]] = a*K*S[m]-(d+k+r1)*KS[m]
                for i in range(0, N/2-1):
                    ydot[G[i]] = a*P*S[i+1]-(d+k+r1)*PS[i]
                for p in range(1, N/2):
                    ydot[p] = -S[p]*(r1+a*K+a*P)+k*KS[p-1]+d*(PS[p-1]+KS[p])
                ydot[0] = Q-(r1+a*K)*S[0]+d*KS[0]+k*runsumPS
                ydot[N/2] = k*KS[N/2-1]-(r2+a*P)*S[N/2]+d*PS[N/2-1]
                ydot[G[N/2-1]] = a*P*S[N/2]-(d+k+r2)*PS[N/2-1]
                ydot[3*N/2+1] = (d+k+r1)*runsumKS-a*K*runsum2
                ydot[3*N/2+2] = (d+k+r1)*(runsumPS-PS[N/2-1])- \
                                a*P*runsum1+(d+k+r2)*PS[N/2-1]

                ydot_new = []
                for j in range(0, 3*N/2+3):
                    ydot_new.extend(ydot[j])
                return ydot_new

            # Initial conditions
            y0[0] = S10
            for i in range(1, 3*N/2+1):
                y0[i] = 0
            y0[3*N/2+1] = K0
            y0[3*N/2+2] = P0

            # Solve the DEs
            soln = odeint(f, y0, time, mxstep = 5000)
            for alpha in range(0, (N/2+1)):
                S[alpha] = soln[:, alpha]
            for beta in range((N/2)+1, N+1):
                KS[beta-N/2-1] = soln[:, beta]
            for gamma in range(N+1, 3*N/2+1):
                PS[gamma-N-1] = soln[:, gamma]

            for alpha in range(0, (N/2+1)):
                Splot[alpha][series] = soln[len1-1, alpha]
            for beta in range((N/2)+1, N+1):
                KSplot[beta-N/2-1][series] = soln[len1-1, beta]
            for gamma in range(N+1, 3*N/2+1):
                PSplot[gamma-N-1][series] = soln[len1-1, gamma]

            u1 = 0
            u2 = 0
            u3 = 0
            for alpha in range(0, (N/2+1)):
                u1 = u1 + Splot[alpha]
            for beta in range((N/2)+1, N+1):
                u2 = u2 + KSplot[beta-N/2-1]
            for gamma in range(N+1, 3*N/2+1):
                u3 = u3 + PSplot[gamma-N-1]

            K = soln[:, 3*N/2+1]
            P = soln[:, 3*N/2+2]
            Kplot[series] = soln[len1-1, 3*N/2+1]
            Pplot[series] = soln[len1-1, 3*N/2+2]
            utot = u1+u2+u3

        #Plot
        Kcrit = abs((Q/r2)*(1+epsilon)-utot)
        v, i = Kcrit.min(0), Kcrit.argmin(0)

        # Save the new points for x and y
        x_data_plot.append(N)
        y_data_plot.append(K00[i])

    # Make the plot of all the points together
    plt.plot(x_data_plot, y_data_plot)
    plt.xlabel('N', fontsize = 20)
    plt.ylabel('$K_{crit}$', fontsize = 20)
    plt.show()
This is what I typed in the terminal:
from numba import autojit
numba_k = autojit()(Kcrit)
This is my error message:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'Kcrit' is not defined
Note that I am not asking whether Numba will actually speed up my code (that is an entirely different issue). I am just asking how to run it!
Help appreciated.
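A note on the error itself (my reading, not something stated in the post): NameError: name 'Kcrit' is not defined means the interpreter session in which numba_k = autojit()(Kcrit) was typed never executed the def Kcrit(): block, so there is no function object to wrap. You would either run or import the script first in that same session, or apply the decorator where the function is defined. A minimal sketch, assuming the code above is saved as kcrit_model.py (a hypothetical filename):

# Option 1: import the function into the session, then wrap it
from numba import autojit          # old Numba API, as used in the post
from kcrit_model import Kcrit      # hypothetical module name

numba_k = autojit()(Kcrit)
numba_k()

# Option 2: decorate it directly in kcrit_model.py instead
# @autojit
# def Kcrit():
#     ...

Note that autojit has been removed from recent Numba releases, which use @numba.jit instead.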
I have an object which has 16 float attributes, and I want to pick one of them randomly. I created a method which generates a random number between 0 and 15, but it is not really what I want; I think it's a pretty dirty method. Do you have a cleaner approach?
import json
import urllib2
from random import randint

class Currencies(object):
    EURToUSD = 0.0
    EURToJPY = 0.0
    EURToBTC = 0.0
    USDToEUR = 0.0
    USDToBTC = 0.0
    USDToJPY = 0.0
    BTCToEUR = 0.0
    BTCToJPY = 0.0
    BTCToUSD = 0.0
    JPYToEUR = 0.0
    JPYToUSD = 0.0
    JPYToBTC = 0.0
    EURToEUR = 0.0
    JPYToJPY = 0.0
    USDToUSD = 0.0
    BTCToBTC = 0.0

    def __init__(self):
        try:
            rates = urllib2.urlopen("http://fx.priceonomics.com/v1/rates/")
        except urllib2.URLError as e:
            return e.reason
        res = json.load(rates)
        self.EURToEUR = 1.000000
        self.USDToUSD = 1.000000
        self.JPYToJPY = 1.000000
        self.BTCToBTC = 1.000000
        self.EURToUSD = res['EUR_USD']
        self.EURToJPY = res['EUR_JPY']
        self.EURToBTC = res['EUR_BTC']
        self.USDToEUR = res['USD_EUR']
        self.USDToBTC = res['USD_BTC']
        self.USDToJPY = res['USD_JPY']
        self.BTCToEUR = res['BTC_EUR']
        self.BTCToJPY = res['BTC_JPY']
        self.BTCToUSD = res['BTC_USD']
        self.JPYToEUR = res['JPY_EUR']
        self.JPYToUSD = res['JPY_USD']
        self.JPYToBTC = res['JPY_BTC']

    def getRandomRate(self):
        randomNumber = randint(0, 15)
        if(randomNumber == 1):
            return self.EURToUSD
        if(...)  # ...and so on, one branch for each of the 16 attributes
You could just select a random attribute from the vars(self) dictionary, filtering on names that match a pattern:
def getRandomRate(self):
    return random.choice([v for attr, v in vars(self).items()
                          if len(attr) == 8 and attr[3:5] == 'To'])
This picks a random value from all attributes whose name is 8 characters long and contains 'To' in the middle.
A short demo using your class:
>>> import random
>>> c = Currencies()
>>> vars(c)
{'EURToBTC': u'0.0108027', 'BTCToJPY': u'12816.8350063', 'JPYToJPY': 1.0, 'USDToBTC': u'0.0094131', 'JPYToBTC': u'0.0000702', 'BTCToUSD': u'125.2057142', 'USDToUSD': 1.0, 'USDToJPY': u'116.1736146', 'EURToEUR': 1.0, 'JPYToUSD': u'0.0084464', 'BTCToEUR': u'92.3549138', 'USDToEUR': u'0.8820255', 'BTCToBTC': 1.0, 'JPYToEUR': u'0.0065705', 'EURToUSD': u'1.2338648', 'EURToJPY': u'126.4193644'}
>>> [v for attr, v in vars(c).items() if len(attr) == 8 and attr[3:5] == 'To']
[u'0.0108027', u'12816.8350063', 1.0, u'0.0094131', u'0.0000702', u'125.2057142', 1.0, u'116.1736146', 1.0, u'0.0084464', u'92.3549138', u'0.8820255', 1.0, u'0.0065705', u'1.2338648', u'126.4193644']
>>> random.choice([v for attr, v in vars(c).items() if len(attr) == 8 and attr[3:5] == 'To'])
u'0.0108027'
So the list comprehension extracted the 16 values for each of the conversion rates, and random.choice() then picks one of those rates at random.
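As an aside (my suggestion, not part of the original answer): if the rates were stored in a dictionary keyed by currency pair rather than in 16 separate attributes, picking one at random would not need any attribute-name filtering at all. A minimal sketch:

import random

class CurrencyRates(object):
    def __init__(self, rates):
        # rates: a dict such as {'EUR_USD': 1.2338648, 'EUR_JPY': 126.4193644, ...}
        self.rates = dict(rates)

    def get_random_rate(self):
        # pick a random (pair, rate) entry
        pair = random.choice(list(self.rates))
        return self.rates[pair]

Whether that restructuring is worth it depends on how the rest of the code uses the individual attributes.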
I have a function called calculate_cost which calculates the performance of a supplier for different S_range (stocking level) values. The function works, but the plots are not smooth. Is there a way to smooth them in Python?
import numpy
import scipy.stats
import scipy.integrate
import scipy.misc
import matplotlib
import math
import pylab
from scipy.stats import poisson
def calculate_cost(s, h, d, r, k, alphaR):
    cost = 0.0
    for i in range(0, alphaR + 1):
        #i = i-1
        binom = math.factorial(r) / ((math.factorial(i)) * (math.factorial(r - i)))
        func = scipy.stats.poisson.cdf(s, d)
        cost += ((k/r) * binom * (func ** i) * ((1.0-func) ** (r-i)))
    for p in range(s):
        cost += h*(s-p)*scipy.stats.poisson.pmf(p, d)  #This a formula
    return cost
graphs = []
class Graph:
    def __init__(self):
        self.label = ""
        self.h = 0
        self.d = 0
        self.r = 0
        self.k = 0
        self.alphaR = 0
graph = Graph()
graph.label = "A"
graph.h = 1.0
graph.d = 10
graph.r = 30
graph.k = 283.0
graph.alphaR = 23
graphs.append(graph)
graph = Graph()
graph.label = "B"
graph.h = 1.0
graph.d = 10
graph.r = 30
graph.k = 146.0
graph.alphaR = 24
#graph.LineStyle = '*-'
graphs.append(graph)
graph = Graph()
graph.label = "C"
graph.h = 1.0
graph.d = 10
graph.r = 30
graph.k = 92.0
graph.alphaR = 25
#graph.LineStyle = '*-'
graphs.append(graph)
graph = Graph()
graph.label = "D"
graph.h = 1.0
graph.d = 10
graph.r = 30
graph.k = 80.0
graph.alphaR = 26
#graph.LineStyle = '*-'
graphs.append(graph)
graph = Graph()
graph.label = "E"
graph.h = 1.0
graph.d = 10
graph.r = 30
graph.k = 77.0
graph.alphaR = 27
#graph.LineStyle = '*-'
graphs.append(graph)
s_range = numpy.arange(0,21,1)
for graph in graphs:
    cost = []
    for s in s_range:
        cost.append(calculate_cost(s, graph.h, graph.d, graph.r, graph.k, graph.alphaR))
    matplotlib.pyplot.plot(s_range, cost, label = graph.label)
pylab.legend()
matplotlib.pyplot.xlabel(' S_range')
matplotlib.pyplot.ylabel('Cost')
pylab.show()
One solution would be to use the scipy.interpolate.interp1d function with kind='cubic':
from scipy import interpolate
....
s_range = numpy.arange(0, 21, 1)
for graph in graphs:
    cost = []
    for s in s_range:
        cost.append(calculate_cost(s, graph.h, graph.d, graph.r, graph.k, graph.alphaR))
    f = interpolate.interp1d(s_range, cost, kind='cubic')
    s_range_new = numpy.arange(0, 20, 0.1)
    cost_new = f(s_range_new)
    matplotlib.pyplot.plot(s_range_new, cost_new, label = graph.label)
pylab.legend()
matplotlib.pyplot.xlabel(' S_range')
matplotlib.pyplot.ylabel('Cost')
pylab.show()
This gives you a smoother curve.
Be careful how you use this, as these are only interpolated points, not real data points.
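If you would rather smooth the computed points than interpolate exactly through them, a smoothing spline is another option (again my suggestion, not from the original answer; the smoothing factor s=1.0 is an arbitrary starting value to tune):

from scipy.interpolate import UnivariateSpline

spline = UnivariateSpline(s_range, cost, k=3, s=1.0)  # s controls how closely the spline follows the data
cost_smooth = spline(s_range_new)
matplotlib.pyplot.plot(s_range_new, cost_smooth, label = graph.label)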
Hope this helps