How to automate variable definitions in PuLP - Python

I'm trying to automate the model definition in PuLP.
Right now, I have the following model:
import pulp as pl
" Cost parameters"
p1 = 200 # Cost per unit 1
p2 = 300 # Cost per unit 2
" VARIABLES"
k0101 = pl.LpVariable("k0101", 0, 1, pl.LpInteger)
k0102 = pl.LpVariable("k0102", 0, 1, pl.LpInteger)
k0201 = pl.LpVariable("k0201", 0, 1, pl.LpInteger)
k0202 = pl.LpVariable("k0202", 0, 1, pl.LpInteger)
###### DEMAND
x010101 = pl.LpVariable("x010101", lowBound = 0)
x010102 = pl.LpVariable("x010102", lowBound = 0)
x010103 = pl.LpVariable("x010103", lowBound = 0)
x010104 = pl.LpVariable("x010104", lowBound = 0)
x010201 = pl.LpVariable("x010201", lowBound = 0)
x010202 = pl.LpVariable("x010202", lowBound = 0)
x010203 = pl.LpVariable("x010203", lowBound = 0)
x010204 = pl.LpVariable("x010204", lowBound = 0)
x020101 = pl.LpVariable("x020101", lowBound = 0)
x020102 = pl.LpVariable("x020102", lowBound = 0)
x020103 = pl.LpVariable("x020103", lowBound = 0)
x020104 = pl.LpVariable("x020104", lowBound = 0)
x020201 = pl.LpVariable("x020201", lowBound = 0)
x020202 = pl.LpVariable("x020202", lowBound = 0)
x020203 = pl.LpVariable("x020203", lowBound = 0)
x020204 = pl.LpVariable("x020204", lowBound = 0)
# Problem
z = pl.LpProblem("optimizator", pl.LpMinimize)
"OBJECTIVE FUNCTION"
z += ((p1) * (x010101 + x010102 + x010103 + x010104) + (p1) * (x010201 + x010202 + x010203 + x010204) + (p2) * (x020101 + x020102 + x020103 + x020104) + (p2) * (x020201 + x020202 + x020203 + x020204) + (p1) * (x010101 + x010102 + x010103 + x010104) + (p1) * (x010201 + x010202 + x010203 + x010204) + (p2) * (x020101 + x020102 + x020103 + x020104) + (p2) * (x020201 + x020202 + x020203 + x020204))
" CONSTRAINTS "
z += x010101 + x020101 >= 15 * k0101
" SOLUTION "
print(z)
estado = z.solve()
print(pl.LpStatus[estado])
"TOTAL COST:"
print(pl.value(z.objective))
I would like to simplify these variable definitions, so that I can define more variables with less code.
Does anyone know how I can define my variables and parameters as dictionaries, and use them in the objective function and the constraints?

It would help to explain the problem a bit more: the objective function as written has duplicate terms, and it is hard to understand conceptually what you are trying to minimize.
That being said, you can use lpSum to express the sum of each variable times its cost.
# create the variables
k_variable_names = ('k0101', 'k0102', 'k0201', 'k0202')
k_variables = {var: pl.LpVariable(var, cat=pl.LpBinary)
               for var in k_variable_names}
x_variable_names = ('x010101' ...)
x_variables = {var: pl.LpVariable(var, lowBound=0)
               for var in x_variable_names}

# objective function
z += (
    pl.lpSum([var * 2 * p1 for var_name, var in x_variables.items() if 'x010' in var_name]) +
    pl.lpSum([var * 2 * p2 for var_name, var in x_variables.items() if 'x020' in var_name])
)
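If you want to avoid spelling out every name at all, a minimal sketch of the usual PuLP idiom is to build the variables with LpVariable.dicts over index sets and combine them with lpSum (the index sets and the cost dictionary below are placeholders, not taken from the original model):
import pulp as pl

# hypothetical index sets -- adapt to whatever the digit pairs in k0101 / x010101 encode
units = [1, 2]
sites = [1, 2]
periods = [1, 2, 3, 4]
cost = {1: 200, 2: 300}  # p1, p2 keyed by unit

k = pl.LpVariable.dicts("k", [(u, s) for u in units for s in sites], cat=pl.LpBinary)
x = pl.LpVariable.dicts("x", [(u, s, t) for u in units for s in sites for t in periods],
                        lowBound=0)

z = pl.LpProblem("optimizator", pl.LpMinimize)
z += pl.lpSum(cost[u] * x[(u, s, t)] for (u, s, t) in x)       # objective
z += pl.lpSum(x[(u, 1, 1)] for u in units) >= 15 * k[(1, 1)]   # one constraint as an example
Constraints can then be added in loops over the same index sets instead of one line per variable.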

Related

Invalid index to scalar variable error when trying to use scipy.optimize.curve_fit

I have a function with several parameters that I want to optimize to fit some existing data.
The function runs fine on its own, but when I try to pass it through the scipy.optimize.curve_fit function, I get this error:
IndexError: invalid index to scalar variable.
I don't understand why the function works on its own yet fails inside curve_fit.
What can I do?
The original function used dictionaries and I thought that might be the problem, but I modified it and it still doesn't work.
This is the function I'm using:
def function_test(xy, X1, X2, X3, X4):
    precip = xy[0]
    potential_evap = xy[1]
    nUH1 = int(math.ceil(X4))
    nUH2 = int(math.ceil(2.0*X4))
    uh1_ordinates = [0] * nUH1
    uh2_ordinates = [0] * nUH2
    UH1 = [0] * nUH1
    UH2 = [0] * nUH2
    for t in range(1, nUH1 + 1):
        uh1_ordinates[t - 1] = s_curves1(t, X4) - s_curves1(t-1, X4)
    for t in range(1, nUH2 + 1):
        uh2_ordinates[t - 1] = s_curves2(t, X4) - s_curves2(t-1, X4)
    production_store = X1*0.60  # S
    routing_store = X3*0.70  # R
    qsim = []
    for j in range(2191):
        if precip[j] > potential_evap[j]:
            net_evap = 0
            scaled_net_precip = (precip[j] - potential_evap[j])/X1
            if scaled_net_precip > 13:
                scaled_net_precip = 13.
            tanh_scaled_net_precip = tanh(scaled_net_precip)
            reservoir_production = (X1 * (1 - (production_store/X1)**2) * tanh_scaled_net_precip) / (1 + production_store/X1 * tanh_scaled_net_precip)
            routing_pattern = precip[j] - potential_evap[j] - reservoir_production
        else:
            scaled_net_evap = (potential_evap[j] - precip[j])/X1
            if scaled_net_evap > 13:
                scaled_net_evap = 13.
            tanh_scaled_net_evap = tanh(scaled_net_evap)
            ps_div_x1 = (2 - production_store/X1) * tanh_scaled_net_evap
            net_evap = production_store * (ps_div_x1) / \
                (1 + (1 - production_store/X1) * tanh_scaled_net_evap)
            reservoir_production = 0
            routing_pattern = 0
        production_store = production_store - net_evap + reservoir_production
        percolation = production_store / (1 + (production_store/2.25/X1)**4)**0.25
        routing_pattern = routing_pattern + (production_store - percolation)
        production_store = percolation
        for i in range(0, len(UH1) - 1):
            UH1[i] = UH1[i+1] + uh1_ordinates[i]*routing_pattern
        UH1[-1] = uh1_ordinates[-1] * routing_pattern
        for j in range(0, len(UH2) - 1):
            UH2[j] = UH2[j+1] + uh2_ordinates[j]*routing_pattern
        UH2[-1] = uh2_ordinates[-1] * routing_pattern
        groundwater_exchange = X2 * (routing_store / X3)**3.5
        routing_store = max(0, routing_store + UH1[0] * 0.9 + groundwater_exchange)
        R2 = routing_store / (1 + (routing_store / X3)**4)**0.25
        QR = routing_store - R2
        routing_store = R2
        QD = max(0, UH2[0]*0.1 + groundwater_exchange)
        Q = QR + QD
        qsim.append(Q)
    return qsim
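For context, a minimal sketch of how curve_fit hands data to this model (the array names below are placeholders, not from the post): curve_fit passes xdata through to the function unchanged as its first argument, so xy[0] and xy[1] only stay full series if xy itself is, for example, a 2×N array:
import numpy as np
from scipy.optimize import curve_fit

# precip_obs, evap_obs and q_obs are hypothetical observed series of length 2191
xy = np.vstack([precip_obs, evap_obs])   # xy[0] and xy[1] remain arrays, not scalars
popt, pcov = curve_fit(function_test, xy, q_obs, p0=[300.0, 1.0, 40.0, 2.0])
If xdata is supplied in a form where xy[0] is a single number, indexing it with precip[j] raises exactly the "invalid index to scalar variable" error.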

Why docplex optimization gives no answer

I am working on an optimization model which is set to maximize NPV. I am getting some results; however, cap_ele should represent the maximum value of the el_to_ele_t series, yet after running the code both the integer cap_ele and the series el_to_ele_t remain at 0.
Below are my variables:
prod_hpp_grid_t = mdl.continuous_var_dict(time, lb=0, ub=cap_grid, name='Power sold to grid')
prod_h2_t = mdl.continuous_var_dict(time, lb=0, name='Hydrogen output')
P_curt = mdl.continuous_var_dict(time, lb=0, name='Curtailment')
el_to_ele_t = mdl.continuous_var_dict(time, lb=0, name='El consumption from Electrolyser')
cap_wtg = mdl.integer_var(lb=0, name='Wind capacity')
cap_pv = mdl.integer_var(lb=0, name='Solar capacity')
cap_ele = mdl.integer_var(lb=0, name= "Electrolysis capacity")
And here is the objective function:
mdl.maximize(
    -(cost_invest_wtg * cap_wtg + \
      cost_invest_pv * cap_pv + \
      cost_invest_ele * cap_ele + \
      mdl.sum(
          (mdl.sum(
              price_spot[t] * prod_hpp_grid_t[t] + \
              price_h2s[t] * prod_h2_t[t] for t in time) - \
           (cost_onm_wtg * cap_wtg + \
            cost_onm_pv * cap_pv + \
            cost_onm_ele * cap_ele)
          ) / np.power(1 + discount_f, i)
          for i in range(1, life_t_hpp + 1)
      )
    )
)
And these are the constraints and the solver:
for t in time:
    mdl.add_constraint(prod_hpp_grid_t[t] == prod_wtg_t[t] * cap_wtg + prod_pv_t[t] * cap_pv - P_curt[t] - el_to_ele_t[t])
    mdl.add_constraint(prod_h2_t[t] == el_to_ele_t[t] * cons_el_kg_ele)
    mdl.add_constraint(cap_ele >= el_to_ele_t[t])
######### Solving the problem
sol = mdl.solve(log_output=False)
prod_hpp_grid_ts = pd.DataFrame.from_dict(sol.get_value_dict(prod_hpp_grid_t), orient='index')
prod_hpp_grid_ts = prod_hpp_grid_ts.reset_index()
P_curt = pd.DataFrame.from_dict(sol.get_value_dict(P_curt), orient='index')
P_curt = P_curt.reset_index()
prod_h2_ts = pd.DataFrame.from_dict(sol.get_value_dict(prod_h2_t), orient='index')
prod_h2_ts = prod_h2_ts.reset_index()
el_to_ele_ts = pd.DataFrame.from_dict(sol.get_value_dict(el_to_ele_t), orient='index')
el_to_ele_ts = el_to_ele_ts.reset_index()
Could there be any reason why I'm not getting the desired answer?
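As a first diagnostic sketch (not part of the original code), it can help to confirm that the solver actually reached an optimal solution and to inspect the objective value before reading the variables back:
sol = mdl.solve(log_output=True)   # show the CPLEX log instead of hiding it
print(mdl.solve_details.status)    # e.g. 'integer optimal solution'
print(sol.objective_value)         # objective at the returned solution
print(sol.get_value(cap_ele))      # capacity the solver actually chose
If the status is optimal, a zero cap_ele and el_to_ele_t usually means that zero is what the posted objective rewards, rather than the solver failing.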

NN implementation - updating after back propagation changes the dimensions

As part of an assignment, we need to implement a NN.
I calculate the forward result, then run back propagation, and then update the weights (all for the same instance).
When I then try to calculate the forward value of the same instance again, I get an error saying the dimensions are wrong.
class MyNN:
    def __init__(self, learning_rate, layer_sizes):
        self.learning_rate = learning_rate
        self.layer_sizes = layer_sizes
        self.model_params = {}
        self.memory = {}
        self.grads = {}

        # Initializing weights
        for layer_index in range(len(layer_sizes) - 1):
            W_input = layer_sizes[layer_index + 1]
            W_output = layer_sizes[layer_index]
            self.model_params['W_' + str(layer_index + 1)] = np.random.randn(W_input, W_output) * 0.1
            self.model_params['b_' + str(layer_index + 1)] = np.random.randn(W_input) * 0.1

    def forward_single_instance(self, x):
        a_i_1 = x
        self.memory['a_0'] = x
        for layer_index in range(len(self.layer_sizes) - 1):
            W_i = self.model_params['W_' + str(layer_index + 1)]
            b_i = self.model_params['b_' + str(layer_index + 1)]
            z_i = np.dot(W_i, a_i_1) + b_i
            a_i = 1/(1+np.exp(-z_i))
            self.memory['a_' + str(layer_index + 1)] = a_i
            a_i_1 = a_i
        return a_i_1

    def log_loss(self, y_hat, y):
        '''
        Logistic loss, assuming a single value in y_hat and y.
        '''
        m = y_hat[0]
        cost = -y[0]*np.log(y_hat[0]) - (1 - y[0])*np.log(1 - y_hat[0])
        return cost

    def backward_single_instance(self, y):
        a_output = self.memory['a_' + str(len(self.layer_sizes) - 1)]
        dz = a_output - y
        for layer_index in range(len(self.layer_sizes) - 1, 0, -1):
            a_l_1 = self.memory['a_' + str(layer_index - 1)]
            dW = np.dot(dz.reshape(-1, 1), a_l_1.reshape(1, -1))
            db = dz.transpose()
            self.grads['dW_' + str(layer_index)] = dW
            self.grads['db_' + str(layer_index)] = db
            W_l = self.model_params['W_' + str(layer_index)]
            dz = (a_l_1 * (1 - a_l_1)).reshape(-1, 1) * np.dot(W_l.T, dz.reshape(-1, 1))

    def update(self):
        for layer_index in range(len(self.layer_sizes) - 1):
            Wi = 'W_' + str(layer_index + 1)
            bi = 'b_' + str(layer_index + 1)
            dWi = 'dW_' + str(layer_index + 1)
            dbi = 'db_' + str(layer_index + 1)
            W_i = self.model_params[Wi]
            b_i = self.model_params[bi]
            dW_i = self.grads[dWi]
            db_i = self.grads[dbi]
            self.model_params[Wi] = W_i - self.learning_rate * dW_i
            self.model_params[bi] = b_i - self.learning_rate * db_i
Then, for testing, I wrote this code:
nn = MyNN(0.01, [3, 2, 1])
x = np.random.randn(3)
y = np.random.randn(1)
y_hat = nn.forward_single_instance(x)
print(y_hat)
nn.backward_single_instance(y)
nn.update()
y_hat = nn.forward_single_instance(x)
This is the error that is printed:
x
[ 0.57072262 1.8578982 -1.48560691]
x
[[0.53932246 0.57051188]]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-99-d8d9152fef18> in <module>()
----> 1 y_hat = nn.forward_single_instance(x)
2 print(y_hat)
3
4
5 l = nn.log_loss(y_hat, y)
<ipython-input-89-f354993c95f9> in forward_single_instance(self, x)
24 W_i = self.model_params['W_' + str(layer_index + 1)]
25 b_i = self.model_params['b_' + str(layer_index + 1)]
---> 26 z_i = np.dot(W_i, a_i_1) + b_i
27 a_i = 1/(1+np.exp(-z_i))
28 self.memory['a_' + str(layer_index + 1)] = a_i
ValueError: shapes (1,2) and (1,2) not aligned: 2 (dim 1) != 1 (dim 0)
The problem is in the b_i dimensions, and I can't figure out why.
I tried variations of the b_i shape (row vector, column vector), and all of them throw the same exception.
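Tracing the shapes (this analysis is mine, not from the assignment) suggests one likely culprit: for the hidden layers dz is a column vector, so db = dz.transpose() stores a (1, k) row, and b_i - self.learning_rate * db_i in update() then broadcasts the (k,) bias into a 2-D array, which is what breaks the next forward pass. A minimal sketch of that change:
# inside backward_single_instance: keep the bias gradient 1-D so it matches b_i's shape
db = dz.reshape(-1)   # instead of db = dz.transpose()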

FloatingPointError: overflow encountered in double_scalars

I've set up numpy.seterr as follows:
np.seterr(invalid='raise', over ='raise', under='raise')
And I'm getting the following error:
c = beta[j,i] + oneminusbeta[j,i]
FloatingPointError: overflow encountered in double_scalars
I've checked what beta[j,i] and oneminusbeta[j,i] are at the point of crash, and these are their values:
beta: -131.340389182
oneminusbeta: 0.0
Please note that this addition (beta[j,i] + oneminusbeta[j,i]) has already run for thousands of iterations in a loop (that performs image classification) before crashing at this point.
How can I deal with this? Is it necessary to change the type of the numpy arrays?
This is how I've initialized them:
beta = np.empty([m,n])
oneminusbeta = np.empty([m,n])
Is it possible to cast the individual values before adding them up, rather than changing the entire array declarations? Or is this even a serious issue? Would it be safe to simply turn off the numpy.seterr configuration and let the calculations go ahead without raising the error?
Edit
Someone suggested below, and I suspected as well, that the values being added shouldn't cause an overflow. Then how can I find out where the overflow is really happening?
This is my code:
epthreshold = 709
enthreshold = -708
f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " +str(beta[j,i])+ " + oneminusbeta: " + str(oneminusbeta[j,i])+")\n" )
c = beta[j,i] + oneminusbeta[j,i]
weights[i,j] = math.exp(np.clip(c, enthreshold, epthreshold))
And when I check my log file, this is the line I get:
weights[5550, 13] = math.exp(beta: -131.340389182 + oneminusbeta: 0.0)
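Since that sum on its own cannot overflow a double, one way to find where the error really originates (a sketch, not from the original post) is to route NumPy's floating-point errors through a callback that logs a stack trace every time a flag is raised, without aborting the run:
import traceback
import numpy as np

def fp_error_handler(kind, flag):
    # called by NumPy at the exact operation that sets the error flag
    print("Floating-point error:", kind, "flag:", flag)
    traceback.print_stack()

np.seterrcall(fp_error_handler)
np.seterr(all='call')   # or keep 'raise' and wrap suspect blocks in np.errstate(...)
Note this only covers NumPy operations; math.exp raises a plain OverflowError, so it has to be checked separately.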
Edit 2
Here's the rest of my code, where the variables n, m and H have already been initialized to integer values:
import numba
import numpy as np
import statsmodels.api as sm

weights = np.empty([n,m])
for curr_n in range(n):
    for curr_m in range(m):
        weights[curr_n,curr_m] = 1.0/(n)

beta = np.empty([m,n])
oneminusbeta = np.empty([m,n])

for curr_class in range(m):
    for curr_sample in range(n):
        beta[curr_class,curr_sample] = 1./m

epthreshold = 709 # positive exponential threshold
enthreshold = -708

for h in range(H):
    print "Boosting round %d ... " % h
    z = np.empty([n,m])
    for j in range(m): # computing working responses and weights, Step 2(a)(i)
        for i in range(no_samples):
            i_class = y[i] # get the correct class for the current sample
            if h == 0:
                z[i,j] = (int(j==i_class) - beta[j,i])/((beta[j,i])*(1. - beta[j,i]))
                weights[i,j] = beta[j,i]*(1. - beta[j,i])
            else:
                if j == i_class:
                    z[i,j] = math.exp(np.clip(-beta[j,i], enthreshold, epthreshold))
                else:
                    z[i,j] = -math.exp(np.clip(oneminusbeta[j,i], enthreshold, epthreshold))
                f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " +str(beta[j,i])+ " + oneminusbeta: " + str(oneminusbeta[j,i])+")\n" )
                c = beta[j,i] + oneminusbeta[j,i]
                weights[i,j] = math.exp(np.clip(c, enthreshold, epthreshold))

    g_h = np.zeros([1,1])
    j = 0
    # Calculating regression coefficients per class
    # building the parameters per j class
    for y1_w in zip(z.T, weights.T):
        y1, w = y1_w
        temp_g = sm.WLS(y1, X, w).fit() # Step 2(a)(ii)
        if np.allclose(g_h, 0):
            g_h = temp_g.params
        else:
            g_h = np.c_[g_h, temp_g.params]
        j = j + 1

    if np.allclose(g, 0):
        g = g_h
    else:
        g = g + g_h # Step (2)(a)(iii)

    # now set g(x), function coefficients according to new formula, step (2)(b)
    sum_g = g.sum(axis=1)
    for j in range(m):
        diff = (g[:,j] - ((1./m) * sum_g))
        g[:,j] = ((m-1.)/m) * diff
        g_per_round[h,:,j] = g[:,j]

    # Now computing beta, Step 2(c)
    Q = 0.
    e = 0.
    for j in range(m):
        # Calculating beta and oneminusbeta for class j
        aj = 0.0
        for i in range(no_samples):
            i_class = y[i]
            X1 = X[i].reshape(1, no_features)
            g1 = g[:,j].reshape(no_features, 1)
            gc = g[:,i_class].reshape(no_features, 1)
            dot = 1. + float(np.dot(X1, g1)) - float(np.dot(X1, gc))
            aj = dot
            sum_e = 0.
            a_q = []
            a_q.append(0.)
            for j2 in range(m): # calculating sum of e's for all j except where j=i_class
                if j2 != i_class: # g based on j2, not necessarily g1?
                    g2 = g[:,j2].reshape(no_features, 1)
                    dot1 = 1. + float(np.dot(X1, g2)) - float(np.dot(X1, gc))
                    e2 = math.exp(np.clip(dot1, enthreshold, epthreshold))
                    sum_e = sum_e + e2
                    a_q.append(dot1)
            if (int(j==i_class) == 1):
                a_q_arr = np.array(a_q)
                alpha = np.array(a_q_arr[1:])
                Q = mylogsumexp(f, a_q_arr, 1, 0)
                sumalpha = mylogsumexp(f, alpha, 1, 0)
                beta[j,i] = -Q
                oneminusbeta[j,i] = sumalpha - Q
            else:
                alpha = a_q
                alpha = np.array(alpha[1:])
                a_q_arr = np.array(a_q)
                Q = mylogsumexp(f, a_q_arr, 0, aj)
                sumalpha = log(math.exp(np.clip(Q, enthreshold, epthreshold)) - math.exp(np.clip(aj, enthreshold, epthreshold)))
                beta[j,i] = aj - Q
                oneminusbeta[j,i] = sumalpha - Q
and the function mylogsumexp is:
def mylogsumexp(f, a, is_class, maxaj, axis=None, b=None):
    np.seterr(over="raise", under="raise", invalid="raise")
    threshold = -sys.float_info.max
    maxthreshold = sys.float_info.max
    epthreshold = 709 # positive exponential threshold
    enthreshold = -708
    a = asarray(a)
    if axis is None:
        a = a.ravel()
    else:
        a = rollaxis(a, axis)
    if is_class == 1:
        a_max = a.max(axis=0)
    else:
        a_max = maxaj
    #bnone = " none "
    if b is not None:
        a_max = maxaj
        b = asarray(b)
        if axis is None:
            b = b.ravel()
        else:
            b = rollaxis(b, axis)
        a = np.clip(a - a_max, enthreshold, epthreshold)
        midout = np.sum(np.exp(a), axis=0)
        midout = 1.0 + np.clip(midout - math.exp(a_max), threshold, maxthreshold)
        out = np.log(midout)
    else:
        a = np.clip(a - a_max, enthreshold, epthreshold)
        out = np.log(np.sum(np.exp(a)))
    out += a_max
    if out == float("inf"):
        out = maxthreshold
    if out == float("-inf"):
        out = threshold
    return out
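As a side note (an alternative, not something used in the post), SciPy ships a numerically safe log-sum-exp, so the hand-rolled version can often be replaced by:
from scipy.special import logsumexp   # lived in scipy.misc in older SciPy releases

out = logsumexp(a)   # log(sum(exp(a))) computed without overflowing the intermediate exp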

Help with Windows Geometry in Python

Why are the commands to change the window position before and after sleep(3.00) being ignored?
if self.selectedM.get() == 'Bump':
    W1 = GetSystemMetrics(1) + 200
    print W1
    w1.wm_geometry("+100+" + str(W1))
    w2.wm_geometry("+100+" + str(W1))
    w3.wm_geometry("+100+" + str(W1))
    w4.wm_geometry("+100+" + str(W1))
    self.rvar.set(0)
    self.rvar2.set(0)
    self.rvar3.set(0)
    self.rvar4.set(0)
    s = self.wm_geometry()
    geomPatt = re.compile(r"(\d+)?x?(\d+)?([+-])(\d+)([+-])(\d+)")
    m = geomPatt.search(s)
    X3 = m.group(4)
    Y3 = m.group(6)
    M = int(Y3) - 150
    P = M + 150
    MH = W1
    MUH = Y3
    while Y3 > M:
        sleep(0.0009)
        Y3 = int(Y3) - 1
        self.update_idletasks()
        self.wm_geometry("+" + str(X3) + "+" + str(Y3))
        print 1
    Alpha = 1.0
    #while 0.0 < Alpha :
    #    Alpha = Alpha - 0.01
    #    self.attributes("-alpha", Alpha)
    #    sleep(0.005)
    self.wm_geometry("+" + str(X3) + "+" + str(MH))
    sleep(3.00)
    self.wm_geometry("+" + str(X3) + "+" + str(MUH))
    #while 1.0 > Alpha :
    #    Alpha = Alpha + 0.01
    #    self.attributes("-alpha", Alpha)
    #    sleep(0.005)
    while Y3 < P:
        sleep(0.0009)
        Y3 = int(Y3) + 1
        self.update_idletasks()
        self.wm_geometry("+" + str(X3) + "+" + str(Y3))
The answer to your question is that you don't give the system a chance to update the display. The display is updated by the event loop, but you never re-enter the event loop after either of the wm_geometry calls surrounding the sleep(3.00) call. They aren't being ignored; you're simply changing the geometry again before the system has had a chance to redraw.
Does the answer to the question Having Trouble with Tkinter Transparency help you solve this problem too?
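As a sketch of the idea in that answer (not the poster's exact code), forcing a redraw right after each geometry change is enough to make the moves around the sleep visible:
# let Tk process the pending geometry change before blocking in sleep()
self.wm_geometry("+" + str(X3) + "+" + str(MH))
self.update_idletasks()
sleep(3.00)
self.wm_geometry("+" + str(X3) + "+" + str(MUH))
self.update_idletasks()
A more idiomatic Tkinter approach is to schedule the second move with self.after(3000, ...) instead of sleeping, so the event loop keeps running throughout.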
