I'm new to Python. I was writing a transportation problem solver, and when I called PuLP it raised an error.
from pulp import LpProblem, LpVariable, LpStatus, LpMinimize, GLPK, value
M = 3
N = 5
a = range(1,M+1)
a1 = range(M)
b = range(1,N+1)
b1 = range(N)
xindx = [(a[i],b[j]) for j in b1 for i in a1]
model = LpProblem("Transportation_LP_Problem",LpMinimize)
x = LpVariable.dicts("X",xindx,0,None)
model += 2190 * x[1,1] + 46650 * x[1,2] + 25110 * x[1,3] + 8040 * x[1,4] + 6720 * x[1,5] \
+ 1800*x[2,1] + 24600 * x[2,2] + 50610 * x[2,3] + 46200 * x[2,4] + 57780 * x[2,5] \
+ 1500*x[3,1] + 45960 * x[3,2] + 24420 * x[3,3] + 7350 * x[3,4] + 6030 * x[3,5],"Transportation_cost"
model += x[1,1] + x[1,2] + x[1,3] + x[1,4] + x[1,5] <= 300.0, "Supply_Pt_1"
model += x[2,1] + x[2,2] + x[2,3] + x[2,4] + x[2,5] <= 260.0, "Supply_Pt_2"
model += x[3,1] + x[3,2] + x[3,3] + x[3,4] + x[3,5] <= 258.0, "Supply_Pt_3"
model += x[1,1] + x[2,1] + x[3,1] >= 200.0, "Demand_Pt_1"
model += x[1,2] + x[2,2] + x[3,2] >= 100.0, "Demand_Pt_2"
model += x[1,3] + x[2,3] + x[3,3] >= 250.0, "Demand_Pt_3"
model += x[1,4] + x[2,4] + x[3,4] >= 185.0, "Demand_Pt_4"
model += x[1,5] + x[2,5] + x[3,5] >= 100.0, "Demand_Pt_5"
model.solve(GLPK())
print ("Status:",LpStatus[model.status])
for v in model.variables():
    print(v.name, "=", v.varValue)
print ("Objective Function", value(model.objective))
The error comes from the call model.solve(GLPK()):
in actualSolve
raise PulpSolverError("PuLP: cannot execute " + self.path)
pulp.apis.core.PulpSolverError: PuLP: cannot execute glpsol.exe
How can I install glpsol.exe, or otherwise fix this?
Install the glpk-utils and try again.
https://winglpk.sourceforge.net/
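If glpsol.exe is installed but not on your PATH, you can also point PuLP at it explicitly, or fall back to the CBC solver that ships with PuLP. A minimal sketch; the Windows path below is only an example of where winglpk might have been unzipped:

from pulp import GLPK, PULP_CBC_CMD

# Option 1: give PuLP the full path to glpsol.exe (adjust to your install location)
model.solve(GLPK(path=r"C:\glpk-4.65\w64\glpsol.exe"))

# Option 2: use the CBC solver bundled with PuLP, which needs no separate install
model.solve(PULP_CBC_CMD())   # or simply model.solve()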
I have a function with different parameters that I want to optimize to fit some existing data.
The function runs fine on its own, but when I try to pass it through the scipy.optimize.curve_fit function, I get this error:
IndexError: invalid index to scalar variable.
I don't understand why the function works on its own yet fails with an error inside curve_fit. What can I do?
The original function used dictionaries and I thought that might be the problem, but I modified it and it still doesn't work.
This is the function I'm using:
def function_test(xy, X1, X2, X3, X4):
    precip = xy[0]
    potential_evap = xy[1]
    nUH1 = int(math.ceil(X4))
    nUH2 = int(math.ceil(2.0*X4))
    uh1_ordinates = [0] * nUH1
    uh2_ordinates = [0] * nUH2
    UH1 = [0] * nUH1
    UH2 = [0] * nUH2
    for t in range(1, nUH1 + 1):
        uh1_ordinates[t - 1] = s_curves1(t, X4) - s_curves1(t-1, X4)
    for t in range(1, nUH2 + 1):
        uh2_ordinates[t - 1] = s_curves2(t, X4) - s_curves2(t-1, X4)
    production_store = X1*0.60  # S
    routing_store = X3*0.70  # R
    qsim = []
    for j in range(2191):
        if precip[j] > potential_evap[j]:
            net_evap = 0
            scaled_net_precip = (precip[j] - potential_evap[j])/X1
            if scaled_net_precip > 13:
                scaled_net_precip = 13.
            tanh_scaled_net_precip = tanh(scaled_net_precip)
            reservoir_production = (X1 * (1 - (production_store/X1)**2) * tanh_scaled_net_precip) / (1 + production_store/X1 * tanh_scaled_net_precip)
            routing_pattern = precip[j] - potential_evap[j] - reservoir_production
        else:
            scaled_net_evap = (potential_evap[j] - precip[j])/X1
            if scaled_net_evap > 13:
                scaled_net_evap = 13.
            tanh_scaled_net_evap = tanh(scaled_net_evap)
            ps_div_x1 = (2 - production_store/X1) * tanh_scaled_net_evap
            net_evap = production_store * (ps_div_x1) / \
                (1 + (1 - production_store/X1) * tanh_scaled_net_evap)
            reservoir_production = 0
            routing_pattern = 0
        production_store = production_store - net_evap + reservoir_production
        percolation = production_store / (1 + (production_store/2.25/X1)**4)**0.25
        routing_pattern = routing_pattern + (production_store - percolation)
        production_store = percolation
        for i in range(0, len(UH1) - 1):
            UH1[i] = UH1[i+1] + uh1_ordinates[i]*routing_pattern
        UH1[-1] = uh1_ordinates[-1] * routing_pattern
        for j in range(0, len(UH2) - 1):
            UH2[j] = UH2[j+1] + uh2_ordinates[j]*routing_pattern
        UH2[-1] = uh2_ordinates[-1] * routing_pattern
        groundwater_exchange = X2 * (routing_store / X3)**3.5
        routing_store = max(0, routing_store + UH1[0] * 0.9 + groundwater_exchange)
        R2 = routing_store / (1 + (routing_store / X3)**4)**0.25
        QR = routing_store - R2
        routing_store = R2
        QD = max(0, UH2[0]*0.1 + groundwater_exchange)
        Q = QR + QD
        qsim.append(Q)
    return qsim
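For what it's worth, curve_fit calls the model as function_test(xdata, X1, X2, X3, X4) and converts xdata to an array, so a common cause of "invalid index to scalar variable" is that xy[0] ends up being a single number rather than the precip series. A minimal sketch of how the call is typically set up, assuming precip and potential_evap are 1-D arrays of length 2191 and q_obs is the observed series (these names and the initial guesses are assumptions):

import numpy as np
from scipy.optimize import curve_fit

# Stack the two driver series into one 2-row array so that, inside the model,
# xy[0] is precip and xy[1] is potential_evap.
xdata = np.vstack([precip, potential_evap])

p0 = [350.0, 0.0, 90.0, 1.7]  # example starting values for X1..X4
popt, pcov = curve_fit(function_test, xdata, q_obs, p0=p0)
print(popt)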
I'm trying to solve a complex system. You can visualize it as a set of points/nodes with spring-damper elements connected between them; each node carries the forces from all the connected springs and dampers, plus a gravitational force, since each spring and damper has a specific mass.
I'm using classes to build the grid and the initial conditions, but I'm not sure how exactly to calculate the new positions and accelerations using the Runge-Kutta 4 method.
This is the part where the Runge-Kutta integration is defined:
rows = 5
columns = 6

class Runga_kutta4():
    def __init__(self, node, u0, v0, t):
        self.u0 = u0
        self.v0 = v0
        self.t = t

        self.u = u = 0, 0
        self.ux = u[0]
        self.uy = u[1]
        self.v = v = 0, 0
        self.vx = v[0]
        self.vy = v[1]

        f = Forces(u0, u, v0, v)
        self.Node_Forces = f.nodeforces(node)

        self.dt = t[1] - t[0]

        results = self.calculation()
        return results

    # Returns the acceleration a
    def acceleration(self, Node_Forces):
        """
        F = m * a
        a = F / m
        F_sys = F_externe - (F_damping + Springs) - F_g
        """
        a_list = []
        for (f, m) in zip(Node_Forces, Masses.Lattice_Mass()):
            ax = f[0]/m[0]
            ay = f[1]/m[1]
            a_list.append((ax, ay))
        return a_list.reshape(5, 6)

    def calculation(self):
        for i in range(self.t.size - 1):
            # F at time step t / 2
            f_t_05_x = (self.Node_Forces[0][i + 1] - self.Node_Forces[0][i]) / 2 + self.Node_Forces[0][i]
            f_t_05_y = (self.Node_Forces[1][i + 1] - self.Node_Forces[1][i]) / 2 + self.Node_Forces[1][i]

            u1x = self.ux[i]
            v1x = self.vx[i]
            u1y = self.uy[i]
            v1y = self.vy[i]
            a1x = self.acceleration(self.Node_Forces[0][i])
            a1y = self.acceleration(self.Node_Forces[1][i])

            u2x = self.ux[i] + v1x * self.dt / 2
            v2x = self.vx[i] + a1x * self.dt / 2
            u2y = self.uy[i] + v1y * self.dt / 2
            v2y = self.vy[i] + a1y * self.dt / 2
            a2x = self.acceleration(f_t_05_x)
            a2y = self.acceleration(f_t_05_y)

            u3x = self.ux[i] + v2x * self.dt / 2
            v3x = self.vx[i] + a2x * self.dt / 2
            u3y = self.uy[i] + v2y * self.dt / 2
            v3y = self.vy[i] + a2y * self.dt / 2
            a3x = self.acceleration(f_t_05_x)
            a3y = self.acceleration(f_t_05_y)

            u4x = self.ux[i] + v3x * self.dt
            v4x = self.vx[i] + a3x * self.dt
            u4y = self.uy[i] + v3y * self.dt
            v4y = self.vy[i] + a3y * self.dt
            a4x = self.acceleration(self.Node_Forces[0][i + 1])
            a4y = self.acceleration(self.Node_Forces[1][i + 1])

            self.ux[i + 1] = self.ux[i] + self.dt / 6 * (v1x + 2 * v2x + 2 * v3x + v4x)
            self.vx[i + 1] = self.vx[i] + self.dt / 6 * (a1x + 2 * a2x + 2 * a3x + a4x)
            self.uy[i + 1] = self.uy[i] + self.dt / 6 * (v1y + 2 * v2y + 2 * v3y + v4y)
            self.vy[i + 1] = self.vy[i] + self.dt / 6 * (a1y + 2 * a2y + 2 * a3y + a4y)

        self.u = (self.ux, self.uy)
        self.v = (self.vx, self.vy)

        return self.u, self.v


l = Lattice(3)
t0, te, dt = 0, 3, 0.001  # s
t = np.linspace(t0, te, round((te-t0)/dt + 1))

for node in l.latticeNodes():
    position0 = 0, 0
    velocity0 = 0, 0
    state0 = np.append(position0, velocity0)
    new_state = Runga_kutta4(node, position0, velocity0, t)

visualise(l)
[photo of the system]
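One way to structure the RK4 update is to treat the node positions u and velocities v together as the state and re-evaluate the acceleration from the force model at every stage. The following is only a sketch under the assumption that you can write a function acceleration(u, v, t) returning F/m for every node; that function and the array shapes are assumptions, not your exact classes:

def rk4_step(u, v, t, dt, acceleration):
    """Advance node positions u and velocities v by one RK4 step.

    u, v : arrays of node positions/velocities (e.g. shape (rows, columns, 2))
    acceleration(u, v, t) : returns an acceleration array of the same shape.
    """
    k1_u = v
    k1_v = acceleration(u, v, t)

    k2_u = v + 0.5 * dt * k1_v
    k2_v = acceleration(u + 0.5 * dt * k1_u, v + 0.5 * dt * k1_v, t + 0.5 * dt)

    k3_u = v + 0.5 * dt * k2_v
    k3_v = acceleration(u + 0.5 * dt * k2_u, v + 0.5 * dt * k2_v, t + 0.5 * dt)

    k4_u = v + dt * k3_v
    k4_v = acceleration(u + dt * k3_u, v + dt * k3_v, t + dt)

    u_next = u + dt / 6.0 * (k1_u + 2 * k2_u + 2 * k3_u + k4_u)
    v_next = v + dt / 6.0 * (k1_v + 2 * k2_v + 2 * k3_v + k4_v)
    return u_next, v_next

# Example usage (shapes assumed):
# u = np.zeros((rows, columns, 2)); v = np.zeros((rows, columns, 2))
# for i in range(t.size - 1):
#     u, v = rk4_step(u, v, t[i], dt, acceleration)

Interpolating a precomputed force array at t + dt/2, as in the class above, only works if the forces do not depend on the intermediate positions and velocities; for a spring-damper lattice they do, so the stage accelerations should be recomputed from the stage states.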
I'm working on an optimization problem, converting a hard-coded, inflexible solution into a functional, flexible one. I'm struggling with how to use multiple dictionaries in a function of a PuLP optimization problem. My best guess would be to possibly use nested for loops, but I can't wrap my head around how to do this. Below is what my current hard-coded solution looks like.
import pulp
part_numbers = {"Part A", "Part B"}
employees = {"Employee A", "Employee B", "Employee C", "Employee D", "Employee E", "Employee F", "Employee G", "Employee H"}
efficiency = {85, .75, .5, .75, .59, .40, .87, .37, .65, .85, .85, .5, .4, .8, .3, .92}
exptime = {20, 10}
model += ((
(pulp.lpSum(
( (exptime[0] * qty_produced[part_numbers[0], employees[0]])/ efficiency[0])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[1]])/ efficiency[1])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[2]])/ efficiency[2])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[3]]) / efficiency[3])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[4]]) / efficiency[4])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[5]]) / efficiency[5])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[6]]) / efficiency[6])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[7]]) / efficiency[7])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[0]])/ efficiency[8])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[1]])/ efficiency[9])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[2]])/ efficiency[10])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[3]]) / efficiency[11])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[4]]) / efficiency[12])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[5]]) / efficiency[13])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[6]]) / efficiency[14])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[7]]) / efficiency[15])
))/(len(part_numbers)*(len(employees)))))
model += ((exptime[0] * qty_produced[part_numbers[0], employees[0]])/efficiency[0]) + ((exptime[1] * qty_produced[part_numbers[1], employees[0]])/efficiency[8]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[1]])/efficiency[1]) + ((exptime[1] * qty_produced[part_numbers[1], employees[1]])/efficiency[9]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[2]])/efficiency[2]) + ((exptime[1] * qty_produced[part_numbers[1], employees[2]])/efficiency[10]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[3]])/efficiency[3]) + ((exptime[1] * qty_produced[part_numbers[1], employees[3]])/efficiency[11]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[4]])/efficiency[4]) + ((exptime[1] * qty_produced[part_numbers[1], employees[4]])/efficiency[12]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[5]])/efficiency[5]) + ((exptime[1] * qty_produced[part_numbers[1], employees[5]])/efficiency[13]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[6]])/efficiency[6]) + ((exptime[1] * qty_produced[part_numbers[1], employees[6]])/efficiency[14]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[7]])/efficiency[7]) + ((exptime[1] * qty_produced[part_numbers[1], employees[7]])/efficiency[15]) <= 530
model.solve()
pulp.LpStatus[model.status]
Please note that the iterables you provided are not dictionaries but sets. Dictionaries map keys to values, while sets are just collections of unique values.
I'm not sure about the logic of your calculation for the last part, but I hope this gives you a head start on how to loop through the nesting. Another thing to keep in mind: if your iterables have the same length, consider using enumerate (or zip) so you can reduce the nesting of your loops.
# Step 1: handles the initial calculation of the values to be applied in pulp.lpSum
def func(part, empl, eff, exptime):
    val = 0
    for indx, time in enumerate(exptime):  # assumes you have the same data length
        for prt in part:
            for employee in empl:
                for efficiency in eff:
                    val += (time * qty_produced[prt, employee]) / efficiency
    return val / (len(part) * len(empl))

# Step 2:
def model_func(func, part, empl, eff, exptime):
    len_emp = len(employees) // 2
    len_part = len(part) // 2
    len_eff = len(efficiency) // 2
    len_exp = len(exptime) // 2
    model = 0
    func_result = func(part, empl, eff, exptime)
    model += pulp.lpSum(func_result)
    for xp1, xp2 in zip(part_numbers[:len_part], part_numbers[len_exp:]):
        for empl1, empl2 in zip(employees[:len_emp], employees[len_emp:]):
            for eff1, eff2 in zip(efficiency[:len_eff], efficiency[len_eff:]):
                for exp1, exp2 in zip(exptime[:len_exp], exptime[len_exp:]):
                    # e.g. model += (exp1 * qty_produced[xp1, empl1]) / eff1 + (exp2 * qty_produced[xp2, empl2]) / eff2
                    pass
    return model

# Call your function
model_func(func, part_numbers, employees, efficiency, exptime)  # should return your model output
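To show where the loops can go, here is a minimal dictionary-driven sketch, assuming the data are reshaped into real dictionaries keyed by part and by (part, employee); the numbers, names, and objective sense are placeholders, not your data:

import pulp

part_numbers = ["Part A", "Part B"]
employees = ["Employee A", "Employee B"]          # extend as needed
exptime = {"Part A": 20, "Part B": 10}
efficiency = {("Part A", "Employee A"): 0.85,     # one entry per (part, employee) pair
              ("Part A", "Employee B"): 0.75,
              ("Part B", "Employee A"): 0.65,
              ("Part B", "Employee B"): 0.85}

model = pulp.LpProblem("Scheduling", pulp.LpMinimize)
qty_produced = pulp.LpVariable.dicts(
    "qty", [(p, e) for p in part_numbers for e in employees], lowBound=0)

# Objective: average expected time over all (part, employee) pairs
n_pairs = len(part_numbers) * len(employees)
model += pulp.lpSum((exptime[p] / efficiency[p, e]) * qty_produced[p, e]
                    for p in part_numbers for e in employees) * (1.0 / n_pairs), "Average_expected_time"

# One capacity constraint per employee
for e in employees:
    model += pulp.lpSum((exptime[p] / efficiency[p, e]) * qty_produced[p, e]
                        for p in part_numbers) <= 530, "Capacity_" + e.replace(" ", "_")

With the data keyed this way, adding a part or an employee only changes the dictionaries, not the model-building code.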
I have this data:
data = """
r = !(7225 + -2932 + 1 * -4293), (i, dc, r), i[qo] = void(1 * 7333 + 9158 + -16491);
c = (t, -20 * -28 + -8172 + 8750),
i = 1706 + 6792 + 14 * -607;
{}, [8709 * -1 + 46925 + 1 * 3786]
"""
How can I match all those math operations?
I'd like to match them and replace them with their results, e.g.:
data = """
r = !(0), (i, dc, r), i[qo] = void(0);
c = (t, 1138),
i = 0;
{}, [42002]
"""
Any idea?
def conv(data):
    import re
    for m in reversed(list(re.finditer(
            r'(?:[\*\/\+\-\s]*\d+(?:\.\d*)?){1,}', data))):
        data = (data[:m.span(0)[0]] +
                str(eval(data[m.span(0)[0]:m.span(0)[1]])) + data[m.span(0)[1]:])
    return data
data = """
r = !(7225 + -2932 + 1 * -4293), (i, dc, r), i[qo] = void(1 * 7333 + 9158 + -16491);
c = (t, -20 * -28 + -8172 + 8750),
i = 1706 + 6792 + 14 * -607;
{}, [8709 * -1 + 46925 + 1 * 3786]
"""
print(conv(data))
Output:
r = !(0), (i, dc, r), i[qo] = void(0);
c = (t,1138),
i =0;
{}, [42002]
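A note on the approach: the matches are processed in reverse so that earlier spans are not shifted by the replacements. The same effect can be had in a single pass with re.sub and a replacement callback; this is just a sketch using the same pattern, and it shares the caveat that eval should only be run on trusted input:

import re

def conv_sub(data):
    # Evaluate each arithmetic run and substitute the result in one pass.
    pattern = r'(?:[\*\/\+\-\s]*\d+(?:\.\d*)?){1,}'
    return re.sub(pattern, lambda m: str(eval(m.group(0))), data)

print(conv_sub(data))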
As part of an assignment, we need to implement a neural network.
I calculate the forward result, then run back-propagation, and then update the weights (all for the same instance).
When I try to calculate the forward value of the same instance again, I get an error that the dimensions are wrong.
import numpy as np

class MyNN:
    def __init__(self, learning_rate, layer_sizes):
        self.learning_rate = learning_rate
        self.layer_sizes = layer_sizes
        self.model_params = {}
        self.memory = {}
        self.grads = {}

        # Initializing weights
        for layer_index in range(len(layer_sizes) - 1):
            W_input = layer_sizes[layer_index + 1]
            W_output = layer_sizes[layer_index]
            self.model_params['W_' + str(layer_index + 1)] = np.random.randn(W_input, W_output) * 0.1
            self.model_params['b_' + str(layer_index + 1)] = np.random.randn(W_input) * 0.1

    def forward_single_instance(self, x):
        a_i_1 = x
        self.memory['a_0'] = x
        for layer_index in range(len(self.layer_sizes) - 1):
            W_i = self.model_params['W_' + str(layer_index + 1)]
            b_i = self.model_params['b_' + str(layer_index + 1)]
            z_i = np.dot(W_i, a_i_1) + b_i
            a_i = 1/(1+np.exp(-z_i))
            self.memory['a_' + str(layer_index + 1)] = a_i
            a_i_1 = a_i
        return a_i_1

    def log_loss(self, y_hat, y):
        '''
        Logistic loss, assuming a single value in y_hat and y.
        '''
        m = y_hat[0]
        cost = -y[0]*np.log(y_hat[0]) - (1 - y[0])*np.log(1 - y_hat[0])
        return cost

    def backward_single_instance(self, y):
        a_output = self.memory['a_' + str(len(self.layer_sizes) - 1)]
        dz = a_output - y

        for layer_index in range(len(self.layer_sizes) - 1, 0, -1):
            a_l_1 = self.memory['a_' + str(layer_index - 1)]
            dW = np.dot(dz.reshape(-1, 1), a_l_1.reshape(1, -1))
            db = dz.transpose()
            self.grads['dW_' + str(layer_index)] = dW
            self.grads['db_' + str(layer_index)] = db
            W_l = self.model_params['W_' + str(layer_index)]
            dz = (a_l_1 * (1 - a_l_1)).reshape(-1, 1) * np.dot(W_l.T, dz.reshape(-1, 1))

    def update(self):
        for layer_index in range(len(self.layer_sizes) - 1):
            Wi = 'W_' + str(layer_index + 1)
            bi = 'b_' + str(layer_index + 1)
            dWi = 'dW_' + str(layer_index + 1)
            dbi = 'db_' + str(layer_index + 1)
            W_i = self.model_params[Wi]
            b_i = self.model_params[bi]
            dW_i = self.grads[dWi]
            db_i = self.grads[dbi]
            self.model_params[Wi] = W_i - self.learning_rate * dW_i
            self.model_params[bi] = b_i - self.learning_rate * db_i
Then, for testing, I wrote this code:
nn = MyNN(0.01, [3, 2, 1])
x = np.random.randn(3)
y = np.random.randn(1)
y_hat = nn.forward_single_instance(x)
print(y_hat)
nn.backward_single_instance(y)
nn.update()
y_hat = nn.forward_single_instance(x)
This is the error that is printed:
x
[ 0.57072262 1.8578982 -1.48560691]
x
[[0.53932246 0.57051188]]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-99-d8d9152fef18> in <module>()
----> 1 y_hat = nn.forward_single_instance(x)
2 print(y_hat)
3
4
5 l = nn.log_loss(y_hat, y)
<ipython-input-89-f354993c95f9> in forward_single_instance(self, x)
24 W_i = self.model_params['W_' + str(layer_index + 1)]
25 b_i = self.model_params['b_' + str(layer_index + 1)]
---> 26 z_i = np.dot(W_i, a_i_1) + b_i
27 a_i = 1/(1+np.exp(-z_i))
28 self.memory['a_' + str(layer_index + 1)] = a_i
ValueError: shapes (1,2) and (1,2) not aligned: 2 (dim 1) != 1 (dim 0)
The problem is in b_i's dimensions, and I can't figure out why.
I tried variations of b_i's shape (row vector, column vector), and all of them throw the same exception.
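A possible explanation, reasoning only from the code shown: when backward_single_instance reaches the first layer, dz has shape (2, 1), so db = dz.transpose() stores a (1, 2) gradient while b_1 has shape (2,). The update then broadcasts b_1 to shape (1, 2), and on the next forward pass z_1 becomes (1, 2), which is exactly what breaks np.dot(W_2, a_1). A minimal sketch of the kind of change that keeps the bias gradients one-dimensional (treat it as a hypothesis to verify, not a definitive fix):

# inside backward_single_instance, keep db 1-D so it matches the shape of b_i
db = dz.reshape(-1)        # instead of db = dz.transpose()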