I'm working on an optimization problem, converting a hard-coded, inflexible solution into a functional, flexible one. I'm struggling with how to use multiple dictionaries in a function within a PuLP optimization problem. My best guess would be to use nested for loops, but I can't wrap my head around how to do this. Below is what my current hard-coded solution looks like.
import pulp
part_numbers = {"Part A", "Part B"}
employees = {"Employee A", "Employee B", "Employee C", "Employee D", "Employee E", "Employee F", "Employee G", "Employee H"}
efficiency = {85, .75, .5, .75, .59, .40, .87, .37, .65, .85, .85, .5, .4, .8, .3, .92}
exptime = {20, 10}
model += ((
(pulp.lpSum(
( (exptime[0] * qty_produced[part_numbers[0], employees[0]])/ efficiency[0])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[1]])/ efficiency[1])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[2]])/ efficiency[2])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[3]]) / efficiency[3])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[4]]) / efficiency[4])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[5]]) / efficiency[5])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[6]]) / efficiency[6])
+ ((exptime[0] * qty_produced[part_numbers[0], employees[7]]) / efficiency[7])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[0]])/ efficiency[8])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[1]])/ efficiency[9])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[2]])/ efficiency[10])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[3]]) / efficiency[11])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[4]]) / efficiency[12])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[5]]) / efficiency[13])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[6]]) / efficiency[14])
+ ((exptime[1] * qty_produced[part_numbers[1], employees[7]]) / efficiency[15])
))/(len(part_numbers)*(len(employees)))))
model += ((exptime[0] * qty_produced[part_numbers[0], employees[0]])/efficiency[0]) + ((exptime[1] * qty_produced[part_numbers[1], employees[0]])/efficiency[8]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[1]])/efficiency[1]) + ((exptime[1] * qty_produced[part_numbers[1], employees[1]])/efficiency[9]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[2]])/efficiency[2]) + ((exptime[1] * qty_produced[part_numbers[1], employees[2]])/efficiency[10]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[3]])/efficiency[3]) + ((exptime[1] * qty_produced[part_numbers[1], employees[3]])/efficiency[11]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[4]])/efficiency[4]) + ((exptime[1] * qty_produced[part_numbers[1], employees[4]])/efficiency[12]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[5]])/efficiency[5]) + ((exptime[1] * qty_produced[part_numbers[1], employees[5]])/efficiency[13]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[6]])/efficiency[6]) + ((exptime[1] * qty_produced[part_numbers[1], employees[6]])/efficiency[14]) <= 530
model += ((exptime[0] * qty_produced[part_numbers[0], employees[7]])/efficiency[7]) + ((exptime[1] * qty_produced[part_numbers[1], employees[7]])/efficiency[15]) <= 530
model.solve()
pulp.LpStatus[model.status]
Please note that the iterables you provided are not dictionaries but sets. Dictionaries store key-value pairs, while sets only hold unique values.
I'm not sure about the logic of your calculation for the last part, but I hope this gives you a head start on how to loop through the nesting. Another thing to keep in mind: if your sequences have the same length, consider using enumerate so you can reduce the nesting of your loops.
# Step 1: handles the initial calculation of the values to be passed to pulp.lpSum
def func(part, empl, eff, exptime):
    val = 0
    for indx, time in enumerate(exptime):  # assumes you have the same data length
        for prt in part:
            for employee in empl:
                for efficiency in eff:
                    val += (time * qty_produced[prt, employee]) / efficiency
    return val / (len(part) * len(empl))
# Step 2:
def model_func(func, part, empl, eff, exptime):
    # note: slicing requires ordered sequences (lists/tuples), not sets
    len_emp = len(empl) // 2
    len_part = len(part) // 2
    len_eff = len(eff) // 2
    len_exp = len(exptime) // 2
    model = 0
    func_result = func(part, empl, eff, exptime)
    model += pulp.lpSum(func_result)
    for xp1, xp2 in zip(part[:len_part], part[len_part:]):
        for empl1, empl2 in zip(empl[:len_emp], empl[len_emp:]):
            for eff1, eff2 in zip(eff[:len_eff], eff[len_eff:]):
                for exp1, exp2 in zip(exptime[:len_exp], exptime[len_exp:]):
                    # as an example:
                    # model += (exp1 * qty_produced[xp1, empl1]) / eff1 + (exp2 * qty_produced[xp2, empl2]) / eff2 <= 530
                    pass
    return model
# call your function
model_func(func,part_numbers,employees,efficiency,exptime) # should return your model output
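As a further illustration of the flexible approach, here is a minimal, self-contained sketch of how the same model can be written with actual dictionaries keyed by (part, employee) pairs, so the objective and the per-employee 530-minute constraints are generated by loops instead of being written out term by term. The data values, the objective sense, and the two-employee example are placeholders; only the 530 limit and the qty_produced name come from the question.
import pulp

# placeholder data: extend these to your real eight employees and measured values
part_numbers = ["Part A", "Part B"]
employees = ["Employee A", "Employee B"]
exptime = {"Part A": 20, "Part B": 10}  # expected time per part
efficiency = {("Part A", "Employee A"): 0.85, ("Part A", "Employee B"): 0.75,
              ("Part B", "Employee A"): 0.65, ("Part B", "Employee B"): 0.85}

# the objective sense is a guess here; use whichever your real model needs
model = pulp.LpProblem("scheduling", pulp.LpMinimize)
qty_produced = pulp.LpVariable.dicts(
    "qty", [(p, e) for p in part_numbers for e in employees], lowBound=0)

# objective: average time, built from a generator instead of 16 hand-written terms
model += pulp.lpSum(
    exptime[p] * qty_produced[p, e] / efficiency[p, e]
    for p in part_numbers for e in employees
) / (len(part_numbers) * len(employees))

# one 530-minute capacity constraint per employee, replacing the eight hard-coded lines
for e in employees:
    model += pulp.lpSum(
        exptime[p] * qty_produced[p, e] / efficiency[p, e] for p in part_numbers
    ) <= 530, f"capacity_{e.replace(' ', '_')}"

model.solve()
print(pulp.LpStatus[model.status])
The key idea is that every piece of data lives in a dictionary indexed the same way as the decision variables, so adding parts or employees only means extending the input dictionaries.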
I'm new to Python. I was writing a transportation problem solver, and when I called PuLP I got an error.
from pulp import LpProblem, LpVariable, LpStatus, LpMinimize, GLPK, value
M = 3
N = 5
a = range(1,M+1)
a1 = range(M)
b = range(1,N+1)
b1 = range(N)
xindx = [(a[i],b[j]) for j in b1 for i in a1]
model = LpProblem("Transportation_LP_Problem",LpMinimize)
x = LpVariable.dicts("X",xindx,0,None)
model += 2190 * x[1,1] + 46650 * x[1,2] + 25110 * x[1,3] + 8040 * x[1,4] + 6720 * x[1,5] \
+ 1800*x[2,1] + 24600 * x[2,2] + 50610 * x[2,3] + 46200 * x[2,4] + 57780 * x[2,5] \
+ 1500*x[3,1] + 45960 * x[3,2] + 24420 * x[3,3] + 7350 * x[3,4] + 6030 * x[3,5],"Transportation_cost"
model += x[1,1] + x[1,2] + x[1,3] + x[1,4] + x[1,5] <= 300.0, "Supply_Pt_1"
model += x[2,1] + x[2,2] + x[2,3] + x[2,4] + x[2,5] <= 260.0, "Supply_Pt_2"
model += x[3,1] + x[3,2] + x[3,3] + x[3,4] + x[3,5] <= 258.0, "Supply_Pt_3"
model += x[1,1] + x[2,1] + x[3,1] >= 200.0, "Demand_Pt_1"
model += x[1,2] + x[2,2] + x[3,2] >= 100.0, "Demand_Pt_2"
model += x[1,3] + x[2,3] + x[3,3] >= 250.0, "Demand_Pt_3"
model += x[1,4] + x[2,4] + x[3,4] >= 185.0, "Demand_Pt_4"
model += x[1,5] + x[2,5] + x[3,5] >= 100.0, "Demand_Pt_5"
model.solve(GLPK())
print ("Status:",LpStatus[model.status])
for v in model.variables():
    print(v.name, "=", v.varValue)
print ("Objective Function", value(model.objective))
The error occurs at model.solve(GLPK()):
in actualSolve
raise PulpSolverError("PuLP: cannot execute " + self.path)
pulp.apis.core.PulpSolverError: PuLP: cannot execute glpsol.exe
How can I install glpsol.exe or otherwise fix this?
Install the glpk-utils and try again.
https://winglpk.sourceforge.net/
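If glpsol is installed but PuLP still cannot find it, two other options are worth knowing. This is only a sketch; the Windows path below is an example, adjust it to your installation:
import pulp

# Option 1: point PuLP at the glpsol executable explicitly (example path)
model.solve(pulp.GLPK_CMD(path=r"C:\glpk\w64\glpsol.exe"))

# Option 2: skip GLPK entirely and use the CBC solver bundled with PuLP
model.solve(pulp.PULP_CBC_CMD())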
I have a function with several parameters that I want to optimize to fit some existing data.
The function runs fine on its own, but when I try to pass it through scipy.optimize.curve_fit, I get this error:
IndexError: invalid index to scalar variable.
I don't understand why the function works on its own without any errors but fails here. What can I do?
The original function used dictionaries, and I thought that might be the problem, but I modified it and it still doesn't work.
This is the function I'm using:
import math
from math import tanh  # or numpy's tanh, whichever the original code imports

# s_curves1 and s_curves2 are helper functions defined elsewhere in the original code
def function_test(xy, X1, X2, X3, X4):
    precip = xy[0]
    potential_evap = xy[1]
    nUH1 = int(math.ceil(X4))
    nUH2 = int(math.ceil(2.0*X4))
    uh1_ordinates = [0] * nUH1
    uh2_ordinates = [0] * nUH2
    UH1 = [0] * nUH1
    UH2 = [0] * nUH2
    for t in range(1, nUH1 + 1):
        uh1_ordinates[t - 1] = s_curves1(t, X4) - s_curves1(t-1, X4)
    for t in range(1, nUH2 + 1):
        uh2_ordinates[t - 1] = s_curves2(t, X4) - s_curves2(t-1, X4)
    production_store = X1*0.60  # S
    routing_store = X3*0.70  # R
    qsim = []
    for j in range(2191):
        if precip[j] > potential_evap[j]:
            net_evap = 0
            scaled_net_precip = (precip[j] - potential_evap[j])/X1
            if scaled_net_precip > 13:
                scaled_net_precip = 13.
            tanh_scaled_net_precip = tanh(scaled_net_precip)
            reservoir_production = (X1 * (1 - (production_store/X1)**2) * tanh_scaled_net_precip) / (1 + production_store/X1 * tanh_scaled_net_precip)
            routing_pattern = precip[j]-potential_evap[j]-reservoir_production
        else:
            scaled_net_evap = (potential_evap[j] - precip[j])/X1
            if scaled_net_evap > 13:
                scaled_net_evap = 13.
            tanh_scaled_net_evap = tanh(scaled_net_evap)
            ps_div_x1 = (2 - production_store/X1) * tanh_scaled_net_evap
            net_evap = production_store * (ps_div_x1) / \
                (1 + (1 - production_store/X1) * tanh_scaled_net_evap)
            reservoir_production = 0
            routing_pattern = 0
        production_store = production_store - net_evap + reservoir_production
        percolation = production_store / (1 + (production_store/2.25/X1)**4)**0.25
        routing_pattern = routing_pattern + (production_store-percolation)
        production_store = percolation
        for i in range(0, len(UH1) - 1):
            UH1[i] = UH1[i+1] + uh1_ordinates[i]*routing_pattern
        UH1[-1] = uh1_ordinates[-1] * routing_pattern
        for j in range(0, len(UH2) - 1):
            UH2[j] = UH2[j+1] + uh2_ordinates[j]*routing_pattern
        UH2[-1] = uh2_ordinates[-1] * routing_pattern
        groundwater_exchange = X2 * (routing_store / X3)**3.5
        routing_store = max(0, routing_store + UH1[0] * 0.9 + groundwater_exchange)
        R2 = routing_store / (1 + (routing_store / X3)**4)**0.25
        QR = routing_store - R2
        routing_store = R2
        QD = max(0, UH2[0]*0.1+groundwater_exchange)
        Q = QR + QD
        qsim.append(Q)
    return qsim
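For context on how curve_fit drives a function like this: it always calls f(xdata, *params), so both input series have to arrive packed into the single first argument, typically as a 2-row array. If xdata is instead passed as a flat 1-D array, xy[0] becomes a NumPy scalar, and indexing a scalar raises exactly this "invalid index to scalar variable" IndexError. A minimal sketch of the intended call, where precip_data, pet_data and observed_q are hypothetical 1-D arrays of equal length and the p0 values are arbitrary placeholders:
import numpy as np
from scipy.optimize import curve_fit

# stack the two forcing series so that xy[0] is precipitation and xy[1] is potential evaporation
xy = np.vstack([precip_data, pet_data])

# starting guesses for X1..X4 (placeholders, not calibrated values);
# with the hard-coded loop length, the series would need 2191 entries each
popt, pcov = curve_fit(function_test, xy, observed_q, p0=[350.0, 0.0, 90.0, 1.7])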
I am trying to return the value pairs for all three sequences, but I only get a single value pair, for the first sequence. Does anyone know what is happening? The example sequences are "ALLKAIIAI", "AHHAKKAKLLA", "APPALLAIIKAMMA"; see the code below:
def ComputeSeqs():
    input_seq = ["ALLKAIIAI", "AHHAKKAKLLA", "APPALLAIIKAMMA"]
    for sequence in input_seq:
        ANDN920101={'A':4.35,'L':4.17,'R':4.38,'K':4.36,'N':4.75,
                    'M':4.52,'D':4.76,'F':4.66,'C':4.65,'P':4.44,
                    'Q':4.37,'S':4.50,'E':4.29,'T':4.35,'G':3.97,
                    'W':4.70,'H':4.63,'Y':4.60,'I':3.95,'V':3.95}
        ARGP820101={'A':0.61,'L':1.53,'R':0.60,'K':1.15,'N':0.06,
                    'M':1.18,'D':0.46,'F':2.02,'C':1.07,'P':1.95,
                    'Q':0.0,'S':0.05,'E':0.47,'T':0.05,'G':0.07,
                    'W':2.65,'H':0.61,'Y':1.88,'I':2.22,'V':1.32}
        aaindex_values = []
        aaindex_listT = [ANDN920101, ARGP820101]
        for i in aaindex_listT:
            a_a = ((sequence.count("A") * i["A"])) / len(sequence)
            c_c = ((sequence.count("C") * i["C"])) / len(sequence)
            d_d = ((sequence.count("D") * i["D"])) / len(sequence)
            e_e = ((sequence.count("E") * i["E"])) / len(sequence)
            f_f = ((sequence.count("F") * i["F"])) / len(sequence)
            g_g = ((sequence.count("G") * i["G"])) / len(sequence)
            h_h = ((sequence.count("H") * i["H"])) / len(sequence)
            i_i = ((sequence.count("I") * i["I"])) / len(sequence)
            k_k = ((sequence.count("K") * i["K"])) / len(sequence)
            l_l = ((sequence.count("L") * i["L"])) / len(sequence)
            m_m = ((sequence.count("M") * i["M"])) / len(sequence)
            n_n = ((sequence.count("N") * i["N"])) / len(sequence)
            p_p = ((sequence.count("P") * i["P"])) / len(sequence)
            q_q = ((sequence.count("Q") * i["Q"])) / len(sequence)
            r_r = ((sequence.count("R") * i["R"])) / len(sequence)
            s_s = ((sequence.count("S") * i["S"])) / len(sequence)
            t_t = ((sequence.count("T") * i["T"])) / len(sequence)
            v_v = ((sequence.count("V") * i["V"])) / len(sequence)
            w_w = ((sequence.count("W") * i["W"])) / len(sequence)
            y_y = ((sequence.count("Y") * i["Y"])) / len(sequence)
            aaindex_comp = round(((a_a + c_c + d_d + e_e + f_f + g_g + h_h + i_i + k_k + l_l + m_m + n_n + p_p + q_q + r_r + s_s + t_t + v_v + w_w + y_y) / 20),3)
            aaindex_values.append(aaindex_comp)
        return aaindex_values

print(ComputeSeqs())
You need to initialize aaindex_values before the loop and return it after the loop.
You never build a nested list of results from summing the multipliers of each dictionary in aaindex_listT. This is easiest done with a list comprehension, and you can loop over each dictionary and use sum() rather than creating 20 separate variables.
def ComputeSeqs():
    input_seq = ["ALLKAIIAI", "AHHAKKAKLLA", "APPALLAIIKAMMA"]
    ANDN920101={'A':4.35,'L':4.17,'R':4.38,'K':4.36,'N':4.75,
                'M':4.52,'D':4.76,'F':4.66,'C':4.65,'P':4.44,
                'Q':4.37,'S':4.50,'E':4.29,'T':4.35,'G':3.97,
                'W':4.70,'H':4.63,'Y':4.60,'I':3.95,'V':3.95}
    ARGP820101={'A':0.61,'L':1.53,'R':0.60,'K':1.15,'N':0.06,
                'M':1.18,'D':0.46,'F':2.02,'C':1.07,'P':1.95,
                'Q':0.0,'S':0.05,'E':0.47,'T':0.05,'G':0.07,
                'W':2.65,'H':0.61,'Y':1.88,'I':2.22,'V':1.32}
    aaindex_listT = [ANDN920101, ARGP820101]
    aaindex_values = []
    for sequence in input_seq:
        aaindex_comp = [sum(sequence.count(key) * value for key, value in i.items()) / len(sequence)
                        for i in aaindex_listT]
        aaindex_values.append(aaindex_comp)
    return aaindex_values

print(ComputeSeqs())
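One small difference from the question's calculation: the original also divided each score by 20 and rounded to three decimals. If you want to keep that behaviour, the comprehension can be adjusted like this (same logic, just with the extra scaling and rounding):
aaindex_comp = [round(sum(sequence.count(key) * value for key, value in i.items())
                      / len(sequence) / 20, 3)
                for i in aaindex_listT]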
I'm trying to solve a complex system that you can visualize as a set of points/nodes with a spring-damper system connected between the nodes. Each point carries the forces from all of its connected springs and dampers, in addition to the gravitational forces on it, since each spring and damper has a specific mass.
I'm using classes to build the grid and the initial conditions, but I'm not sure how exactly to calculate the new positions and accelerations using Runge-Kutta 4.
This is the part where the Runge-Kutta integration is defined:
rows = 5
columns = 6


class Runga_kutta4():
    def __init__(self, node, u0, v0, t):
        self.u0 = u0
        self.v0 = v0
        self.t = t
        self.u = u = 0, 0
        self.ux = u[0]
        self.uy = u[1]
        self.v = v = 0, 0
        self.vx = v[0]
        self.vy = v[1]
        f = Forces(u0, u, v0, v)
        self.Node_Forces = f.nodeforces(node)
        self.dt = t[1] - t[0]
        results = self.calculation()
        return results

    # Returns the acceleration a
    def acceleration(self, Node_Forces):
        """
        F = m * a
        a = F / m
        F_sys = F_externe - (F_damping + Springs) - F_g
        """
        a_list = []
        for (f, m) in zip(Node_Forces, Masses.Lattice_Mass()):
            ax = f[0] / m[0]
            ay = f[1] / m[1]
            a_list.append((ax, ay))
        return a_list.reshape(5, 6)

    def calculation(self):
        for i in range(self.t.size - 1):
            # F at time step t / 2
            f_t_05_x = (self.Node_Forces[0][i + 1] - self.Node_Forces[0][i]) / 2 + self.Node_Forces[0][i]
            f_t_05_y = (self.Node_Forces[1][i + 1] - self.Node_Forces[1][i]) / 2 + self.Node_Forces[1][i]

            u1x = self.ux[i]
            v1x = self.vx[i]
            u1y = self.uy[i]
            v1y = self.vy[i]
            a1x = self.acceleration(self.Node_Forces[0][i])
            a1y = self.acceleration(self.Node_Forces[1][i])

            u2x = self.ux[i] + v1x * self.dt / 2
            v2x = self.vx[i] + a1x * self.dt / 2
            u2y = self.uy[i] + v1y * self.dt / 2
            v2y = self.vy[i] + a1y * self.dt / 2
            a2x = self.acceleration(f_t_05_x)
            a2y = self.acceleration(f_t_05_y)

            u3x = self.ux[i] + v2x * self.dt / 2
            v3x = self.vx[i] + a2x * self.dt / 2
            u3y = self.uy[i] + v2y * self.dt / 2
            v3y = self.vy[i] + a2y * self.dt / 2
            a3x = self.acceleration(f_t_05_x)
            a3y = self.acceleration(f_t_05_y)

            u4x = self.ux[i] + v3x * self.dt
            v4x = self.vx[i] + a3x * self.dt
            u4y = self.uy[i] + v3y * self.dt
            v4y = self.vy[i] + a3y * self.dt
            a4x = self.acceleration(self.Node_Forces[0][i + 1])
            a4y = self.acceleration(self.Node_Forces[1][i + 1])

            self.ux[i + 1] = self.ux[i] + self.dt / 6 * (v1x + 2 * v2x + 2 * v3x + v4x)
            self.vx[i + 1] = self.vx[i] + self.dt / 6 * (a1x + 2 * a2x + 2 * a3x + a4x)
            self.uy[i + 1] = self.uy[i] + self.dt / 6 * (v1y + 2 * v2y + 2 * v3y + v4y)
            self.vy[i + 1] = self.vy[i] + self.dt / 6 * (a1y + 2 * a2y + 2 * a3y + a4y)

        self.u = (self.ux, self.uy)
        self.v = (self.vx, self.vy)
        return self.u, self.v


l = Lattice(3)
t0, te, dt = 0, 3, 0.001  # s
t = np.linspace(t0, te, round((te-t0)/dt + 1))

for node in l.latticeNodes():
    position0 = 0, 0
    velocity0 = 0, 0
    state0 = np.append(position0, velocity0)
    new_state = Runga_kutta4(node, position0, velocity0, t)

visualise(l)
photo of the system
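For reference, this is the classic RK4 update that the calculation() method is aiming for, written for a single node with position u, velocity v and a hypothetical acceleration function a(u, v, t). It is only a generic sketch of the scheme in plain NumPy, not a drop-in replacement for the class above:
import numpy as np

def rk4_step(u, v, t, dt, a):
    """One RK4 step for u' = v, v' = a(u, v, t); u and v are 2-D numpy vectors."""
    k1_u = v
    k1_v = a(u, v, t)

    k2_u = v + 0.5 * dt * k1_v
    k2_v = a(u + 0.5 * dt * k1_u, v + 0.5 * dt * k1_v, t + 0.5 * dt)

    k3_u = v + 0.5 * dt * k2_v
    k3_v = a(u + 0.5 * dt * k2_u, v + 0.5 * dt * k2_v, t + 0.5 * dt)

    k4_u = v + dt * k3_v
    k4_v = a(u + dt * k3_u, v + dt * k3_v, t + dt)

    u_next = u + dt / 6.0 * (k1_u + 2 * k2_u + 2 * k3_u + k4_u)
    v_next = v + dt / 6.0 * (k1_v + 2 * k2_v + 2 * k3_v + k4_v)
    return u_next, v_next
The important detail is that each stage's acceleration is evaluated at the intermediate positions and velocities (and at t + dt/2 or t + dt), rather than reusing one force value per step.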
I got the error
File "C:/Users/", line 70, in <module>
cal_PIN()
File "C:/Users/", line 67, in cal_PIN
cal_likelihood(selling_5_tick, buying_5_tick)
File "C:/Users/", line 48, in cal_likelihood
raise valueErr(result.message)
valueErr: Desired error not necessarily achieved due to precision loss.
I want to estimate the parameters of the PIN model. The log-transformed likelihood function is shown in the attached photo. The parameters to be estimated are (α, δ, μ, εB, εS). I wrote three nested for statements to set the initial values, and I'm trying to use scipy.optimize.minimize to estimate the parameters by maximum likelihood estimation.
import time
import scipy
from scipy.optimize import minimize


def f(params, *args):
    # args={k1, k2, k3, kmi, buying_array[i], selling_array[i]}
    k1 = args[0]
    k2 = args[1]
    k3 = args[2]
    kmi = args[3]
    buying_array = args[4]
    selling_array = args[5]
    ini_a, ini_h, ini_eS, ini_eB = params
    return (-1) * (ini_a * ini_h * scipy.exp(k1 - kmi) + ini_a * (1 - ini_h) * scipy.exp(k2 - kmi) + (
        1 - ini_a) * scipy.exp(k3 - kmi) +
        (buying_array * scipy.log(ini_eB + ini_h) + selling_array * scipy.log(ini_eS + ini_h) - (
            ini_eB + ini_eS) + kmi))


def cal_likelihood(selling_array, buying_array):
    for ini_a in [0.1, 0.3, 0.5, 0.7, 0.9]:
        for ini_h in [0.1, 0.3, 0.5, 0.7, 0.9]:
            for z in [0.1, 0.3, 0.5, 0.7, 0.9]:
                time.sleep(1)
                i = 0
                for i in range(0, len(buying_array)):
                    ini_eB = z * selling_array[i]
                    cal_u = (buying_array[i] - ini_eB) / (ini_a * (1 - ini_h))
                    ini_eS = selling_array[i] - (ini_a * ini_h * cal_u)
                    k1 = ((-1.0) * (cal_u) - buying_array[i] * scipy.log(1 + (cal_u / ini_eB)))
                    k2 = ((-1.0) * (cal_u) - selling_array[i] * scipy.log(1 + (cal_u / ini_eS)))
                    k3 = (-1.0) * buying_array[i] * scipy.log(1 + (cal_u / ini_eB)) - selling_array[i] * scipy.log(1 + (cal_u / ini_eS))
                    kmi = max(k1, k2, k3)
                    initial_guess = [ini_a, ini_h, ini_eB, ini_eS]
                    result = minimize(f, initial_guess,
                                      args=(k1, k2, k3, kmi, buying_array[i], selling_array[i]))
                    if result.success:
                        fitted_params = result.x
                        print(fitted_params[0])
                    else:
                        raise ValueError(result.message)


def cal_PIN():
    buying_5_tick = []
    selling_5_tick = []
    buying_5_tick.append(4035)
    buying_5_tick.append(3522)
    buying_5_tick.append(4073)
    buying_5_tick.append(3154)
    buying_5_tick.append(9556)
    selling_5_tick.append(1840)
    selling_5_tick.append(2827)
    selling_5_tick.append(4095)
    selling_5_tick.append(2602)
    selling_5_tick.append(2230)

    cal_likelihood(selling_5_tick, buying_5_tick)
I expected values where 0 < α < 1 and 0 < δ < 1, but something is wrong.
Well, since you raise the error yourself, it is clear that the minimization fails with the warning: Desired error not necessarily achieved due to precision loss.
Taken from a scipy issue:
This warning occurs when line search could not find a step size
meeting both Wolfe conditions and the Polak-Ribiere descent condition
within a certain number of iterations.
The results of your minimization don't appear to be bounded, since your objective function is just -1 times some values. This produces rather large derivatives and can lead to an ill-conditioned Hessian; such an ill-conditioned matrix then makes the line search fail.
One option would be to change the objective's return statement to
return 1 / (ini_a * ini_h * scipy.exp(k1 - kmi) + ini_a * (1 - ini_h) * scipy.exp(k2 - kmi) + (
1 - ini_a) * scipy.exp(k3 - kmi) +
(buying_array * scipy.log(ini_eB + ini_h) + selling_array * scipy.log(ini_eS + ini_h) - (
ini_eB + ini_eS) + kmi))
This leads to the results being in the required range of 0 < value < 1.
If this is not an acceptable solution for you, try changing solvers; you can find the available options in the documentation.
Also, some tips and tricks for your code: you can use itertools.product to avoid such deeply nested loops, and instead of appending each value one by one, just declare the list directly.
Here are those suggestions applied in working code.
import time
import scipy
from scipy.optimize import minimize
import itertools


def f(params, *args):
    # args={k1, k2, k3, kmi, buying_array[i], selling_array[i]}
    k1 = args[0]
    k2 = args[1]
    k3 = args[2]
    kmi = args[3]
    buying_array = args[4]
    selling_array = args[5]
    ini_a, ini_h, ini_eS, ini_eB = params
    return 1 / (ini_a * ini_h * scipy.exp(k1 - kmi) + ini_a * (1 - ini_h) * scipy.exp(k2 - kmi) + (
        1 - ini_a) * scipy.exp(k3 - kmi) +
        (buying_array * scipy.log(ini_eB + ini_h) + selling_array * scipy.log(ini_eS + ini_h) - (
            ini_eB + ini_eS) + kmi))


def cal_likelihood(selling_array, buying_array):
    list_iteration = [0.1, 0.3, 0.5, 0.7, 0.9]
    for (ini_a, ini_h, z) in itertools.product(*[list_iteration, list_iteration, list_iteration]):
        time.sleep(1)
        for i in range(0, len(buying_array)):
            ini_eB = z * selling_array[i]
            cal_u = (buying_array[i] - ini_eB) / (ini_a * (1 - ini_h))
            ini_eS = selling_array[i] - (ini_a * ini_h * cal_u)
            k1 = ((-1.0) * (cal_u) - buying_array[i] * scipy.log(1 + (cal_u / ini_eB)))
            k2 = ((-1.0) * (cal_u) - selling_array[i] * scipy.log(1 + (cal_u / ini_eS)))
            k3 = (-1.0) * buying_array[i] * scipy.log(1 + (cal_u / ini_eB)) - selling_array[i] * scipy.log(1 + (cal_u / ini_eS))
            kmi = max(k1, k2, k3)
            initial_guess = [ini_a, ini_h, ini_eB, ini_eS]
            result = minimize(f, initial_guess,
                              args=(k1, k2, k3, kmi, buying_array[i], selling_array[i]))
            if result.success:
                fitted_params = result.x
                print(fitted_params[0])
            else:
                raise ValueError(result.message)


def cal_PIN():
    buying_5_tick = [4035, 3522, 4073, 3154, 9556]
    selling_5_tick = [1840, 2827, 4095, 2602, 2230]
    cal_likelihood(selling_5_tick, buying_5_tick)


cal_PIN()
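As a concrete illustration of the "try changing solvers" suggestion above: the algorithm can be switched through minimize's method argument. Nelder-Mead is just one derivative-free example and not necessarily the best choice for this likelihood:
# derivative-free alternative to the default BFGS line search (a sketch, not a tuned choice)
result = minimize(f, initial_guess,
                  args=(k1, k2, k3, kmi, buying_array[i], selling_array[i]),
                  method='Nelder-Mead')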