I implemented the algorithm of Moschopoulos that gives the density and c.d.f. of a sum of gamma random variables. A C++ implementation exists in the dcoga R package, but I need mine to handle arbitrary-precision numbers through the mpmath library.
The major problem with the following code is the runtime: some parameters of the class (the _delta slot) need to be updated and re-computed 'on the fly' when needed, and that takes a lot of time. I ran cProfile on a simple example so you can see quickly where the problem is, but I don't know enough to make it faster. See for yourself by running the following:
import mpmath as mp
import numpy as np
import scipy.stats.distributions as sc_dist
def gamma_density(x, a, b):
    """Gamma density with shape ``a`` and scale ``b`` at ``x`` (mpmath value)."""
    value = mp.power(x, a - 1)
    value *= mp.exp(-x / b)
    value /= mp.power(b, a)
    return value / mp.gamma(a)
def gamma_cdf(x, a, b):
    """Gamma c.d.f. with shape ``a`` and scale ``b``: regularized P(a, x/b)."""
    z = x / b
    return mp.gammainc(a, 0, z, regularized=True)
class GammaConvolution:
    """Density / c.d.f. of a sum of independent Gamma r.v.s (Moschopoulos, 1985).

    ``alpha`` (shapes) and ``beta`` (scales) must be sequences of mpmath.mpf.
    The series coefficients delta_k are cached in ``self._delta`` and extended
    lazily by ``_get_delta``.
    """

    def __init__(self, alpha, beta):
        # alpha and beta must be provided as numpy arrays of mpmath.mpf objects
        n = len(alpha)
        if not len(beta) == n:
            raise ValueError('you should provide as much alphas and betas')
        if n == 1:
            raise ValueError('you should provide at least 2 gammas.')
        if not (type(alpha[0]) == mp.mpf and type(beta[0]) == mp.mpf):
            raise ValueError('you should provide alpha and beta in mp.mpf format.')
        alpha = np.array(alpha)
        beta = np.array(beta)
        # sanity check: drop non-positive shapes
        check = alpha > 0
        if not np.all(check):
            alpha = alpha[check]
            beta = beta[check]
            print('Some alphas were negatives. We discarded them. {} are remaining'.format(len(alpha)))
        self.signs = np.array([-1 if b < 0 else 1 for b in beta])
        self.alpha = alpha
        self.beta = 1 / beta * self.signs
        self.n = self.alpha.shape[0]
        # Moschopoulos parameters.
        # PERFORMANCE: all reductions below use builtin sum()/plain loops
        # instead of np.sum/np.prod.  numpy reductions over object-dtype (mpf)
        # arrays build temporary arrays on every call and were the dominant
        # cProfile hotspot inside _get_delta.
        self._beta1 = np.min(self.beta)
        ratios = [self._beta1 / b for b in self.beta]
        c = mp.mpf('1')
        for a, r in zip(self.alpha, ratios):
            c *= mp.power(r, a)
        self._c = c
        self._rho = sum(self.alpha)
        self._delta = [mp.mpf('1')]
        # _to_power[i] = 1 - beta1/beta_i ;  _lgam_mod[k] = sum_i alpha_i * _to_power[i]**(k+1)
        self._to_power = [1 - r for r in ratios]
        self._lgam_mod = [sum(a * t for a, t in zip(self.alpha, self._to_power))]

    def _get_delta(self, k):
        """Return delta_k, lazily extending the cached recurrence terms.

        Terms are appended one at a time: delta_j depends on *all* previous
        deltas, so the old bulk list-comprehension ``extend`` raised an
        IndexError whenever k jumped by more than one.
        """
        while len(self._delta) <= k:
            j = len(self._delta)
            self._lgam_mod.append(
                sum(a * mp.power(t, j + 1) for a, t in zip(self.alpha, self._to_power)))
            self._delta.append(
                sum(self._lgam_mod[i] * self._delta[j - 1 - i] for i in range(j)) / j)
        return self._delta[k]

    def coga(self, x, type='pdf'):
        """Evaluate the Moschopoulos series for the pdf or cdf at ``x``.

        type: 'pdf' or 'cdf' (invalid values now raise ValueError instead of
        the UnboundLocalError the original produced).
        """
        if x < 0:
            return 0
        if type == 'pdf':
            func = gamma_density
        elif type == 'cdf':
            func = gamma_cdf
        else:
            raise ValueError("type must be 'pdf' or 'cdf'")
        out = 0
        k = 0
        while True:
            step = self._get_delta(k) * func(x, self._rho + k, self._beta1)
            if mp.isinf(step) or mp.isnan(step):
                print('inf or nan happened, the algorithm did not converge')
                break
            out += step
            # Stop once the terms become numerically negligible.
            if mp.almosteq(step, 0):
                break
            k += 1
        out *= self._c
        return out

    def pdf(self, x):
        """Density of the convolution at x."""
        return self.coga(x, 'pdf')

    def cdf(self, x):
        """Cumulative distribution function of the convolution at x."""
        return self.coga(x, 'cdf')
if __name__ == "__main__":
    mp.mp.dps = 20
    # Example parameters that 'approximate' a lognormal.
    # FIX: pass the digits as *strings*.  mp.mpf(<float literal>) first rounds
    # the literal to a 53-bit double, silently destroying the extra precision
    # these values were computed with.
    alpha = np.array([mp.mpf(s) for s in [
        '28.51334751960197301778147509487793953959733053134799171090326516406790428180220147416519532643017308',
        '11.22775884868121894986129015315963173419663023710308189240288960305130268927466536233373438048018254',
        '6.031218085515218207945488717293490366342446718306869797877975835997607997369075273734516467130527887',
        '3.566976340452999300401949508136750700482567798832918933344685923750679570986611068640936818600783319',
        '2.11321149019108276673514744052403419069919543373601000373799419309581402519772983291547041971629247',
        '1.13846760415283260768713745745968197587694610126298554688258480795156541979045502458925706173497129',
        '0.4517330810577715647869261976064157403882011767087065171431053299996540080549312203533542184738086012',
        '0.07749235677493576352946436194914173772169589371740264101530548860132559560092370430079007024964728383',
        '0.002501284133093294545540492059111705453529784044424054717786717782803430937621102255478670439562804153',
        '0.000006144939533164067887819376779035687994761732668244591993428755735056093784306786937652351425833352728']])
    beta = np.array([mp.mpf(s) for s in [
        '391.6072818187915081052155152400534191999174250784251976117131780922742055385769343508047998043722828',
        '77.21898445771279675063405017644417196454232681648725486524482168571564310062495549360709158314560647',
        '31.76440960907061013049029007869346161467203121003911885547576503605957915202107379956314233702891246',
        '17.44293394293412500742344752991577287098138329678954573112349659319428017788092621396034528552305827',
        '11.23444737858955404891602233256282644042451542694693191750203254839756772074776087387688524524329672',
        '8.050341288822160015292746577166226701992193848793662515696372301333563919247892899484012106343185691',
        '6.255867387720061672816524328464895459610937299629691008594802004659480037331191722532939009895028284',
        '5.146993307537222489735861088512006440481952536952402160831342266591666243011939270854579656683779017',
        '4.285958039399903253267350243950743396496148339434605882255896571795493305652009075308916145669650666',
        '3.455673251219567018227405844933725014914508519853860295284610094904914286228770061300477809395436033']])
    dist = GammaConvolution(alpha, beta)
    # Reference value from the lognormal being approximated.
    print(sc_dist.lognorm(s=0.25).cdf(1))
    import cProfile
    pr = cProfile.Profile()
    pr.enable()
    print(dist.cdf(1))
    print(sc_dist.lognorm(s=0.25).cdf(1))
    pr.disable()
    # after your program ends
    import pstats
    pstats.Stats(pr).strip_dirs().sort_stats('cumtime').print_stats(20)
Can you help me make it faster? The problem is clearly in the _get_delta function.
In Mathematica I can convert multivariate moments into cumulants and back using MomentConvert:
MomentConvert[Cumulant[{2, 2,1}], "Moment"] // TraditionalForm
as one can try in wolframcloud.
I would like to do exactly the same in python. Is there any library in python capable of this?
At least one direction I have now programmed myself:
# from http://code.activestate.com/recipes/577211-generate-the-partitions-of-a-set-by-index/
from collections import defaultdict
class Partition:
    """Set partitions of S, indexable by rank.

    Partitions are encoded as restricted growth functions (RGFs); the class
    supports random access by index via RGF unranking.  The total count
    len(self) is the Bell number B_m.
    """
    def __init__(self, S):
        self.data = list(S)
        self.m = len(S)  # number of elements being partitioned
        self.table = self.rgf_table()  # D[i, j] counts RGF completions
    def __getitem__(self, i):
        # generates set partitions by index
        if i > len(self) - 1:
            raise IndexError
        L = self.unrank_rgf(i)
        result = self.as_set_partition(L)
        return result
    def __len__(self):
        # Total number of set partitions of m elements (Bell number).
        return self.table[self.m, 0]
    def as_set_partition(self, L):
        # Transform a restricted growth function into a partition
        # L is 1-indexed: L[i+1] is the (1-based) block of element i.
        n = max(L[1:] + [1])  # number of blocks used
        m = self.m
        data = self.data
        P = [[] for _ in range(n)]
        for i in range(m):
            P[L[i + 1] - 1].append(data[i])
        return P
    def rgf_table(self):
        # Compute the table values
        # D[i, j] = number of ways to extend an RGF with i symbols left,
        # given j distinct values used so far (defaults to 1 at i == 0).
        m = self.m
        D = defaultdict(lambda: 1)
        for i in range(1, m + 1):
            for j in range(0, m - i + 1):
                D[i, j] = j * D[i - 1, j] + D[i - 1, j + 1]
        return D
    def unrank_rgf(self, r):
        # Unrank a restricted growth function
        # Walks positions 2..m, deciding at each step whether rank r falls in
        # the "reuse an existing value" block (size j*v) or starts a new one.
        m = self.m
        L = [1 for _ in range(m + 1)]
        j = 1
        D = self.table
        for i in range(2, m + 1):
            v = D[m - i, j]
            cr = j * v
            if cr <= r:
                L[i] = j + 1
                r -= cr
                j += 1
            else:
                L[i] = r // v + 1
                r %= v
        return L
# S = set(range(4))
# P = Partition(S)
# for x in P:
# print (x)
# using https://en.wikipedia.org/wiki/Cumulant#Joint_cumulants
import math
def Cum2Mom(arr, state):
    """Express the joint cumulant of the variables in ``arr`` through moments.

    Implements the set-partition formula from
    https://en.wikipedia.org/wiki/Cumulant#Joint_cumulants :
        kappa(X_1..X_n) = sum over partitions pi of
            (|pi|-1)! * (-1)^(|pi|-1) * prod_{B in pi} E(prod_{i in B} X_i)
    Symbolic mode: if ``arr`` holds strings, a readable expression string is
    returned instead of a value.  Otherwise the entries are assumed to
    support multiplication (e.g. qutip operators) and expectations are taken
    against ``state``.  Exponents are represented by repeating a variable.
    """
    def E(op):
        # Expectation value against `state`.
        # NOTE(review): relies on the module-level `import qutip as qu`
        # executed *after* this definition — only valid once that import ran.
        return qu.expect(op, state)
    def Arr2str(arr, sep):
        # Join str() of the elements of arr with `sep` between them.
        r = ''
        for i, x in enumerate(arr):
            r += str(x)
            if i < len(arr) - 1:
                r += sep
        return r
    if isinstance(arr[0], str):
        # Symbolic mode: products/sums become joined strings, E(.) is textual.
        myprod = lambda x: Arr2str(x, '*')
        mysum = lambda x: Arr2str(x, '+')
        E = lambda x: 'E(' + str(x) + ')'
        myfloat = str
    else:
        # Numeric/operator mode.
        myfloat = lambda x: x
        myprod = np.prod
        mysum = sum
    S = set(range(len(arr)))
    P = Partition(S)
    # Outer sum over all set partitions pi of the index set; inner product of
    # the sign/factorial prefactor with the product of block expectations.
    return mysum([
        myprod([myfloat(math.factorial(len(pi) - 1) * (-1) ** (len(pi) - 1))
                , myprod([
                    E(myprod([
                        arr[i]
                        for i in B
                    ]))
                    for B in pi])])
        for pi in P])
# Sanity check in symbolic (string) mode; `state` is unused on this branch.
print(Cum2Mom(['a','b','c','d'],1) )
# qutip is only needed for the operator branch, so importing it here is fine.
import qutip as qu
print(Cum2Mom([qu.qeye(3) for i in range(3)],qu.qeye(3)) )
It's designed to work with qutip objects, and it also works with strings to verify the correct separation and prefactors.
Exponents of the variables can be represented by repeating the variable.
When I solve a linear programming problem, as in the following formulation, I want all entries of the result x to be of int type.
Consider the following problem:
Minimize: f = -1*x[0] + 4*x[1]
Subject to:
-3*x[0] + 1*x[1] <= 6
1*x[0] + 2*x[1] <= 4
x[1] >= -3
where: -inf <= x[0] <= inf
The Python code is as follows:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
... options={"disp": True})
>>> print(res)
Optimization terminated successfully.
Current function value: -11.428571
Iterations: 2
status: 0
success: True
fun: -11.428571428571429
x: array([-1.14285714, 2.57142857])
message: 'Optimization terminated successfully.'
nit: 2
From the docs:
method : str, optional Type of solver. At this time only ‘simplex’ is
supported.
Simplex cannot handle integrality constraints so you cannot solve integer programming problems with scipy.optimize.linprog yet. You can try other libraries like PuLP, Pyomo or CVXOPT.
Scipy added scipy.optimize.milp in version 1.9.0 just recently.
It also added the integrality= parameter to the existing linprog. So your code can be updated as follows
import scipy.optimize
import numpy as np
c = [-1, 4]
A = [[-3, 1.], [1, 2]]
b = [6, 4]
x0_bounds = (None, None)
x1_bounds = (-3.5, None)
res = scipy.optimize.linprog(
c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
integrality=[1, 1],
options={"disp": True})
res.x
array([10., -3.])
Where integrality=[1, 1] specifies that both variables x0 and x1 are to be integers.
(I changed a bound from -3 to -3.5 so that it actually has an interesting difference in solution between integers and reals.)
Following is a python module that includes a function LPmi(.) to solve mixed integer linear programs. It employs the Branch and Bound algorithm on top of scipy.optimize.linprog(.). It is this responder's creation; anyone is free to use or modify it. It also includes an example in the form of a test(.) function.
import numpy as np
from scipy.optimize import linprog
import copy
class LP:
    """One-shot wrapper around scipy.optimize.linprog.

    Maximization is handled by negating the cost vector on the way in and the
    objective value on the way out.  All state is kept on the instance — the
    original version declared everything as *class* attributes, which are
    shared across instances.
    """

    def __init__(self, c, minimise=True, Aub=None, bub=None, Aeq=None, beq=None,
                 bounds=None, method="revised simplex"):
        self.minimise = minimise
        # Internally always minimize: negate c for a maximization problem.
        self.c = c if minimise else -1 * c
        self.A_ub = Aub
        self.b_ub = bub
        self.A_eq = Aeq
        self.b_eq = beq
        self.bounds = bounds
        self.method = method
        # Result slots, filled by cal().
        self.fun = 0.
        self.x = None
        self.success = False

    def cal(self):
        """Solve the LP.  On failure, returns linprog's message string."""
        res = linprog(self.c, A_ub=self.A_ub, b_ub=self.b_ub,
                      A_eq=self.A_eq, b_eq=self.b_eq, bounds=self.bounds,
                      method=self.method)
        if not res["success"]:
            return res["message"]
        self.success = True
        # Undo the sign flip applied in __init__ for maximization.
        self.fun = res["fun"] if self.minimise else -1 * res["fun"]
        self.x = res["x"]

    def get_res(self):
        """Return the (x, objective value, success) triple."""
        return (self.x, self.fun, self.success)
class LP_Data:
    """Plain data container describing an LP, consumed by LPd.

    For maximization problems the cost vector is stored negated so the solver
    can always minimize.  Attributes are per-instance — the original used
    shared *class* attributes.
    """

    def __init__(self, c, minimise=True, Aub=None, bub=None, Aeq=None, beq=None,
                 bounds=None, method="revised simplex"):
        self.minimise = minimise
        self.c = c if minimise else -1 * c
        self.A_ub = Aub
        self.b_ub = bub
        self.A_eq = Aeq
        self.b_eq = beq
        self.bounds = bounds
        self.method = method

    def set_bounds(self, bounds_list):
        """Replace the per-variable (lower, upper) bounds list."""
        self.bounds = bounds_list
class LPd:
    """Solves the LP described by an LP_Data instance via linprog.

    The result tuple layout is (x, fun, success, data); on failure it is
    ([], nan, False, None).
    """

    def __init__(self, lp_data):
        self.data = lp_data
        # Row 0: lower bounds, row 1: upper bounds; used to clip the solution
        # back into the feasible box (guards tiny numerical violations).
        self.clip_bound = np.array(list(zip(*self.data.bounds)))
        # Per-instance result state (the original used shared class attributes).
        self.fun = 0.
        self.x = []
        self.success = False
        self.result = None

    def cal(self):
        """Run linprog and store the (x, fun, success, data) result tuple."""
        res = linprog(self.data.c, A_ub=self.data.A_ub, b_ub=self.data.b_ub,
                      A_eq=self.data.A_eq, b_eq=self.data.b_eq,
                      bounds=self.data.bounds,
                      method=self.data.method)
        if not res["success"]:
            # np.nan: np.NaN was removed in NumPy 2.0.
            self.result = ([], np.nan, False, None)
        else:
            self.success = True
            # Undo the sign flip LP_Data applied for maximization problems.
            self.fun = res["fun"] if self.data.minimise else -1 * res["fun"]
            self.x = res["x"].clip(self.clip_bound[0], self.clip_bound[1])
            self.result = (self.x, self.fun, self.success, self.data)

    def get_res(self):
        """Return the stored result tuple (None before cal() runs)."""
        return self.result
def level_iterator(level0, int_index):
    """Branch every node of the current level on its first fractional int var.

    level0: list of (x, fun, success, data) tuples; int_index holds 1-based
    positions of the integer-constrained variables.  Returns the child level.
    """
    level1 = []
    for k in range(len(level0)):
        for i in int_index:
            if level0[k][0][i - 1] != np.floor(level0[k][0][i - 1]):
                cur_bounds = level0[k][3].bounds[i - 1]
                # Left child: x_i <= floor(value).
                # BUG FIX: the original called bare `floor`, which is a
                # NameError — use np.floor like the rest of the module.
                lp = LPd(copy.deepcopy(level0[k][3]))
                lp.data.bounds[i - 1] = (cur_bounds[0],
                                         max(cur_bounds[0],
                                             np.floor(level0[k][0][i - 1])))
                lp.cal()
                output = lp.get_res()
                level1.append(output)
                # Right child: x_i >= ceil(value).
                lp = LPd(copy.deepcopy(level0[k][3]))
                lp.data.bounds[i - 1] = (min(np.ceil(level0[k][0][i - 1]),
                                             cur_bounds[1]), cur_bounds[1])
                lp.cal()
                output = lp.get_res()
                level1.append(output)
                # Only branch on the first fractional variable per node.
                break
    return level1
def intsol(solution, int_index):
    """True when every variable listed in int_index (1-based) is integral."""
    xs = solution[0]
    return all(xs[i - 1] == np.floor(xs[i - 1]) for i in int_index)
def feasiblelevl(level, solution, int_index):
    """Prune a level: drop failed nodes, adopt better integer solutions.

    Returns (surviving non-integer nodes worth branching, incumbent solution).
    """
    survivors = []
    for node in level:
        if len(node[0]) == 0:
            continue  # empty x: infeasible subproblem
        if node[2] == False:
            continue  # solver reported failure
        if intsol(node, int_index) and node[1] >= solution[1]:
            solution = node  # new (or equal) best integer solution
        elif node[1] > solution[1]:
            survivors.append(node)  # still promising, keep for branching
    return (survivors, solution)
def LPmi(data, int_index):
    """Mixed-integer LP by level-wise branch and bound.

    data: an LP_Data instance; int_index: 1-based positions of the
    integer-constrained variables.  Returns (x, fun, success).
    """
    level0 = []
    lp = LPd(data)
    lp.cal()
    level0.append(lp.get_res())
    # Incumbent starts at -inf objective so any feasible integer point wins.
    solution = [None, -np.inf, None, None]
    level0 = (level0, solution)
    level0 = feasiblelevl(level0[0], solution, int_index)
    if len(level0[0]) == 0:
        return level0[1][0:3]
    else:
        # NOTE(review): branching depth is hard-capped at 10 levels — deep
        # trees may be truncated before optimality; confirm this is intended.
        for k in range(10):
            level1 = level_iterator(level0[0], int_index)
            level1 = feasiblelevl(level1, solution, int_index)
            if len(level1[0]) == 0:
                break
            level0 = level1
        return level1[1][0:3]
def test():
    """Maximise 3x + 7y subject to 7x+8y <= 56 and x+3y <= 12, y integer."""
    objective = np.array([3., 7])
    ineq_matrix = np.array([[7., 8], [1, 3]])
    ineq_rhs = np.array([56, 12])
    problem = LP_Data(objective, False, ineq_matrix, ineq_rhs,
                      bounds=[(0, None), (0, None)])
    integer_positions = [2]  # or [1,2] or [] or [1]
    out = LPmi(problem, integer_positions)
    print(out)
if __name__ == "__main__":
np.set_printoptions(precision=3,suppress=True)
test()
Inspired by w-k-yeung, and having always wanted to do a little implementation of branch and bound, I cooked up my own version.
Do not use this. It's not well-tested and won't perform well. It won't find all possible solutions. There are too many good, free, OSS solutions to recommend this one. However....
If you just want something simple and easy to setup w/ your existing code, and it's a toy problem, and you don't care about the quality of results (or plan to manually check results for final usage), AND you're just doing this for fun, you may want to have a look at this.
I've implemented the branching as depth-first, so memory usage tends to stay roughly constant (but is a function of the search depth you choose and the size of your model.) I've tried to be good with memory where I could and implemented a few optimizations. It's still painfully slow. I'm playing with a 51-variable problem where each has a range of 0-4 or so, and it'll run for days. Plenty of FOSS packages should solve this in seconds or minutes.
Enjoy! This code is this responder's creation. Anyone is free to use or modify it. It has no warranty. I will not support it.
"""A Mixed-Integer solver based on scipy.optimize.linprog.
This code implements branch-and-bound on the linear relaxation of a given
mixed-integer program. It requires numpy and scipy.optimize.
Usage examples are given in the test() and test2() functions. Parameters of
MipModel are mostly as documented in scipy.optimize.linprog. The additional
parameter "int_vars" gives a sequence of indexes of the variables that should be
constrained to integer solutions.
Typical usage will follow the pattern of:
import mip
mip_model = mip.MipModel(C, minimize, Aub, bub, Aeq, beq, bounds, int_vars)
mip.set_debug_prints(True)
best_solution = mip.find_solutions(mip_model, depth_limit=C.shape[0] - 1)
print(f' soln: {best_solution.x}\n objective value: {best_solution.fun}\n'
      f'success: {best_solution.success}')
"""
import collections
from dataclasses import dataclass, field, replace
import datetime
import itertools
import numpy as np
from scipy.optimize import linprog
import sys
from typing import Generator, List, MutableSequence, Optional, Sequence, Tuple
# Module-level debug flag; toggled via set_debug_prints().
_DEBUG = True
def set_debug_prints(is_on):
    """Globally enable or disable progress printing for this module."""
    global _DEBUG
    _DEBUG = is_on
@dataclass(frozen=True)
class MipResult:
    """Immutable result of solving one LP relaxation.

    Note: the `@dataclass` decorator had been mangled to `#dataclass` (lost
    `@`), which disabled the generated __init__.  The ndarray default also
    needs a default_factory: a bare `np.array([])` default is a shared
    mutable default, rejected by dataclasses on Python 3.11+.
    """
    model: 'MipModel'
    fun: float = np.inf
    x: np.ndarray = field(default_factory=lambda: np.array([]))
    success: bool = False

    def is_int_soln(self) -> bool:
        """Returns True if the result has integer values for integer variables."""
        return np.all(self.x[self.model.int_vars] ==
                      np.floor(self.x[self.model.int_vars]))

    def vars_to_split(self) -> List[int]:
        """Returns the list of integer var indexes with non-integer solutions."""
        if self.success:
            return list(np.where(self.x[self.model.int_vars] !=
                                 np.floor(self.x[self.model.int_vars]))[0])
        else:
            return []
#dataclass(frozen=True)
class MipModel:
c: np.ndarray
minimize: bool = True
Aub: Optional[np.ndarray] = None
bub: Optional[np.ndarray] = None
Aeq: Optional[np.ndarray] = None
beq: Optional[np.ndarray] = None
bounds : Tuple = ()
int_vars: Sequence[int] = field(default_factory=list)
method: str = 'revised simplex'
clip_bound: np.ndarray = field(init=False)
def __post_init__(self):
if not self.minimize:
object.__setattr__(self, 'c', -1 * self.c)
if not self.bounds:
object.__setattr__(self, 'clip_bound',
np.tile(np.array([[0], [np.inf]]), self.c.shape[0]))
else:
lower, upper = zip(*self.bounds)
numeric_upper = [np.inf if u is None else u for u in upper]
object.__setattr__(self, 'clip_bound',
np.array((lower, numeric_upper), dtype=np.float64))
def solve(self) -> MipResult:
res = linprog(self.c,A_ub=self.Aub, b_ub=self.bub, A_eq=self.Aeq,
b_eq=self.beq, bounds=self.bounds, method=self.method)
if res["success"]:
result = MipResult(
self,
(int(self.minimize) * 2 - 1) * res['fun'],
res['x'].clip(self.clip_bound[0], self.clip_bound[1]),
res['success'])
else:
result = MipResult(self)
return result
def split_on_var(self, var_i: int, value: Optional[float] = None
) -> Generator['MipModel', None, None]:
"""Yields two new models with bound `var_i` split at `value` or middle"""
assert var_i in range(len(self.bounds)), 'Bad variable index for split'
bound_i = self.bounds[var_i]
if value is None:
if bound_i[1] is not None:
value = self.clip_bound[:, var_i].sum() / 2.0
else:
yield self
return
# We know where to split, have to treat None carefully.
elif bound_i[1] is None:
bound_i = (bound_i[0], np.inf)
# else bound and value are set numerically.
assert value >= bound_i[0] and value <= bound_i[1], 'Bad value in split.'
new_bounds = (*self.bounds[:var_i],
(bound_i[0], max(bound_i[0], np.floor(value))),
*self.bounds[var_i + 1:])
yield replace(self, bounds=new_bounds)
if np.isinf(bound_i[1]):
new_upper = (np.ceil(value), None)
else:
new_upper = (min(np.ceil(value), bound_i[1]), bound_i[1])
new_bounds = (*self.bounds[:var_i],
new_upper,
*self.bounds[var_i + 1:])
yield replace(self, bounds=new_bounds)
def filter_result(result: 'MipResult', best_soln: 'MipResult',
                  results: Optional[MutableSequence['MipResult']] = None
                  ) -> Tuple[MutableSequence['MipResult'], 'MipResult']:
    """Route one solve result: adopt as new best, queue for branching, or drop.

    FIX: `results` defaulted to a mutable `[]`, so appended entries leaked
    across calls that omitted the argument; a fresh list is now created per
    call.  (Annotations are quoted so the function also defines standalone.)
    """
    if results is None:
        results = []
    if result.success:
        if result.is_int_soln() and result.fun <= best_soln.fun:
            if _DEBUG:
                print('\n', f' {result.x}: {result.fun}')
                sys.stdout.flush()
            best_soln = result
        else:
            # Feasible but fractional (or worse than best): keep for branching.
            results.append(result)
    return results, best_soln
def walk_branches(solutions: Sequence[MipResult], best_soln: MipResult,
                  depth_limit: int) -> Tuple[MipResult, int]:
    """Does depth-first search w/ branch & bound on the models in `solutions`.
    This function keeps track of a best solution and branches on variables
    for each solution in `solutions` up to a depth of `depth_limit`. Intermediate
    best solutions are printed with objective function and timestamp.
    Args:
      solutions: Iterable of MipResult, assumes "not result.is_int_soln()"
      best_soln: MipResult of the best integer solution (by fun) so far.
      depth_limit: Integer depth limit of the search. This function will use up
        to (2**depth_limit + 1) * 8 bytes of memory to store hashes that
        identify previously solved instances. Shallow copies are used
        aggressively, but memory may be an issue as a function of this
        parameter. CPU time _certainly_ is. Remove use of "repeats" for constant
        memory utilization.
    Returns: The best MipResult obtained in the search (by result.fun) and a
      count of stopped branches.
    """
    def chirp(top_count):
        # Progress print: current stack depth and a timestamp.
        # NOTE(review): `top_count` is unused — possibly it was meant to be
        # printed instead of len(stack); confirm intent.
        if _DEBUG:
            now = datetime.datetime.now()
            print(f'{len(stack):3} {now.strftime("%m-%d %H:%M:%S")}')
            sys.stdout.flush()
    num_cut = 0
    repeats = set()
    # Put solutions in a stack paired with variable indexs that need splitting.
    # Pop an element, split it, and push any new branches back on, reducing vars
    # to split by one element. Repeat until stack_limit reached or stack is
    # empty.
    stack = [(s, s.vars_to_split()) for s in solutions]
    stack_limit = len(stack) + depth_limit + 1
    start_depth = len(stack)
    chirp(start_depth)
    while stack and (len(stack) <= stack_limit):
        # Shrinking below start_depth means a whole subtree finished; report.
        if len(stack) < start_depth:
            chirp(len(stack))
            start_depth = len(stack)
        cur_depth = len(stack) - start_depth
        soln, split_vars = stack.pop()
        var_i = split_vars.pop()
        # Re-push the node if it still has other variables to branch on.
        if split_vars:
            stack.append((soln, split_vars))
        for model in soln.model.split_on_var(var_i, soln.x[var_i]):
            # Skip any set of bounds we've (probably) already seen.
            bounds_hash = hash(model.bounds)
            if bounds_hash in repeats:
                continue
            # Filter out any infeasible or integer solutions to avoid further
            # processing.
            result = model.solve()
            if result.success:
                if result.is_int_soln() and result.fun <= best_soln.fun:
                    if _DEBUG:
                        now = datetime.datetime.now()
                        time = now.strftime("%H:%M:%S")
                        print(f'\n  {result.x}: {result.fun} (d={cur_depth}, {time})')
                        sys.stdout.flush()
                    best_soln = result
                elif len(stack) < stack_limit:
                    # Fractional but feasible: queue for deeper branching.
                    new_vars = result.vars_to_split()
                    if new_vars:
                        stack.append((result, new_vars))
            else:
                num_cut += 1
            # Save bounds for all but the last layer (which uses too much storage
            # for the compute it saves.)
            if cur_depth < depth_limit - 1:
                repeats.add(bounds_hash)
    return best_soln, num_cut
def find_solutions(mip_model: MipModel, depth_limit: int = 0) -> MipResult:
    """Searches for mixed integer solutions to mip_model with branch & bound.
    This function searches for mixed-integer solutions to the given model by
    branching on the linear relaxation of the model. Results will be returned
    early (if set_debug_prints(True) is called) by passing over results in
    depth_limit // npasses (currently 3), so depth_limit can be passed in large
    and the program terminated early if adequate results are found.
    The search does not (at the moment) branch on all variables, only those for
    which the linear relaxation gives non-integer solutions. This may limit the
    search space and miss some solutions. To fully exhaust the search space, set
    depth_limit to the number of variables you have.
    If set_debug_prints(True) is called before, the search will also print a
    countdown and timestamps to give a sense of how far the search is and over
    what runtime.
    Args:
      mip_model: The MipModel to search.
      depth_limit: The maximum depth of search tree branching. Default 0 means
        branch for each variable in the model (i.e. full search).
    Returns:
      MipResult of the best solution found.
    """
    best_soln = MipResult(mip_model)
    linear_soln = mip_model.solve()
    if not linear_soln.success:
        print('Model is infeasible even for linear relaxation.')
        return best_soln
    # No integer constraints: the relaxation already is the answer.
    if not len(mip_model.int_vars):
        return linear_soln
    # Start with some reasonable areas to search.
    solutions = []
    models = [mip_model] + list(itertools.chain.from_iterable(
        mip_model.split_on_var(v) for v in mip_model.int_vars))
    for result in map(MipModel.solve, models):
        solutions, best_soln = filter_result(result, best_soln, solutions)
    # Default depth_limit means branch on all the vars.
    if depth_limit == 0:
        depth_limit = mip_model.c.shape[0]
    # Since we did one split above, we've already fixed one var and done one
    # level, so depth_limit can be reduced.
    best_soln, num_cut = walk_branches(solutions, best_soln, depth_limit - 1)
    if _DEBUG:
        now = datetime.datetime.now()
        print('{}: {} branches un-explored.\n'.format(
            now.strftime("%m-%d %H:%M:%S"),
            num_cut))
    return best_soln
def test():
    """Example from the scipy.optimize.linprog docs, with one integer var."""
    set_debug_prints(False)
    cost = np.array([-1, 4])
    ineq_matrix = np.array([[-3, 1], [1, 2]])
    ineq_rhs = np.array([6, 4])
    integer_positions = [1]  # or [0, 1] or [] or [0]
    model = MipModel(cost, True, ineq_matrix, ineq_rhs,
                     bounds=((0, None), (-3, None)),
                     int_vars=integer_positions)
    out = find_solutions(model)
    print(f'test:\n soln: {out.x}\n objective value: {out.fun}\n success: '
          f'{out.success}')
def test2():
    """Implements the reference problem from https://xkcd.com/287/."""
    import math
    set_debug_prints(True)
    print('test2: (takes ~15 seconds)')
    target = 15.05
    prices = np.array([2.15, 2.75, 3.35, 3.55, 4.2, 5.8])
    # Each item count is capped by how many of it alone fit under the target.
    limits = tuple((0, math.floor(target / p)) for p in prices)
    model = MipModel(prices, False, None, None,
                     prices[np.newaxis, :], np.array([target]),
                     bounds=limits,
                     int_vars=np.arange(prices.shape[0]))
    out = find_solutions(model, depth_limit=prices.shape[0] - 1)
    print(f' soln: {out.x}\n objective value: {out.fun}\n success: '
          f'{out.success}')
if __name__ == "__main__":
np.set_printoptions(precision=3,suppress=True)
test()
test2()
scipy has now added the milp function in their upcoming 1.9.0 release (see the official documentation) so you can now also solve mixed-integer problems. It uses the performant open source HiGHS solver under the hood.
For your example:
# Side-by-side comparison: plain LP relaxation via linprog vs. a true MILP
# solve via scipy.optimize.milp (HiGHS backend, requires scipy >= 1.9).
import numpy as np
from scipy.optimize import linprog, milp, Bounds, LinearConstraint
# linprog
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
x0_bounds = (None, None)
x1_bounds = (-3, None)
bounds = (x0_bounds, x1_bounds)
options = {"disp": False}
res_linprog = linprog(
    c,
    A_ub=A_ub,
    b_ub=b_ub,
    bounds=bounds,
    options=options)
print(res_linprog)
# MILP
# integrality=1 marks the corresponding variable as integer-constrained.
integrality = [1, 1]
# milp takes a Bounds object rather than per-variable (lo, hi) tuples.
lb = [-np.inf, -3]
ub = [np.inf, np.inf]
variable_bounds = Bounds(lb, ub)
# A_ub @ x <= b_ub expressed as the two-sided -inf <= A_ub @ x <= b_ub.
constraints = LinearConstraint(A_ub, -np.inf, b_ub)
res_milp = milp(
    c,
    integrality=integrality,
    bounds=variable_bounds,
    constraints=constraints,
    options=options)
# milp() returns float values even for integer variables
# so we can cast to int
res_milp.x = [int(x_i) for x_i in res_milp.x]
print(res_milp)