Column integration in a function - python

I have code which returns the number of trucks required to pack the given items, based on the Weight and Volume of the items. The objective of this function is to minimize the cost of transportation.
Code:
import pulp
from pulp import *
import numpy as np

# `data` and `truck` are pandas DataFrames loaded elsewhere (from the data and trucks CSV files mentioned below)
# Item masses, volumes
item_mass = data["Weight"].tolist()
item_vol = data["Volume"].tolist()
n_items = len(item_vol)
set_items = range(n_items)

# Mass & volume capacities of trucks
truck_mass = truck["Weight"].tolist()
truck_vol = truck["Volume"].tolist()

# Cost of using each truck
truck_cost = truck["Price"].tolist()
n_trucks = len(truck_cost)
set_trucks = range(n_trucks)

y = pulp.LpVariable.dicts('truckUsed', set_trucks,
                          lowBound=0, upBound=1, cat=LpInteger)
x = pulp.LpVariable.dicts('itemInTruck', (set_items, set_trucks),
                          lowBound=0, upBound=1, cat=LpInteger)

# Model formulation
prob = LpProblem("Truck allocation problem", LpMinimize)

# Objective
prob += lpSum([truck_cost[i] * y[i] for i in set_trucks])

# Constraints
for j in set_items:
    # Every item must be taken in one truck
    prob += lpSum([x[j][i] for i in set_trucks]) == 1

for i in set_trucks:
    # Respect the mass constraint of trucks
    prob += lpSum([item_mass[j] * x[j][i] for j in set_items]) <= truck_mass[i] * y[i]
    # Respect the volume constraint of trucks
    prob += lpSum([item_vol[j] * x[j][i] for j in set_items]) <= truck_vol[i] * y[i]

# Ensure y variables have to be set to make use of x variables
# (note: the `prob +=` is needed, otherwise the constraint is never added to the model)
for j in set_items:
    for i in set_trucks:
        prob += x[j][i] <= y[i]

prob.solve()

x_soln = np.array([[x[i][j].varValue for i in set_items] for j in set_trucks])
y_soln = np.array([y[i].varValue for i in set_trucks])

print("Status:", LpStatus[prob.status])
print("Total Cost is: ", value(prob.objective))
print("Trucks used: " + str(sum([y_soln[i] for i in set_trucks])))

a = []
b = []
for i in set_items:
    for j in set_trucks:
        if x[i][j].value() == 1:
            print("Item " + str(i) + " is packed in vehicle " + str(j))
            a.append(str(j))
            b.append(str(i))

totalitemvol = sum(item_vol)
totaltruckvol = sum([y[i].value() * truck_vol[i] for i in set_trucks])
print("Volume of used trucks is " + str(totaltruckvol))
if totaltruckvol >= totalitemvol:
    print("Trucks are sufficient")
else:
    print("Items cannot fit")
This code returns the following output:
Status: Optimal
Total Cost is: 400000.0
Trucks used: 3.0
Item 0 is packed in vehicle 7
Item 1 is packed in vehicle 7
Item 2 is packed in vehicle 6
Item 3 is packed in vehicle 7
Item 4 is packed in vehicle 16
Item 5 is packed in vehicle 7
Item 6 is packed in vehicle 16
Item 7 is packed in vehicle 7
Item 8 is packed in vehicle 16
Item 9 is packed in vehicle 6
Item 10 is packed in vehicle 16
Volume of used trucks is 3436.0
Trucks are sufficient
Instead of printing the index of each item, can I replace "Item 0" with "Item (ProductID)", where ProductID is a series in the "data" DataFrame?
I am happy to share the data and trucks CSV files or the Colab link.

Instead of "Item " + str(i) + " is packed in vehicle " + str(j) and assuming that the order of the ProductID is the same as the order of set_trucks, you could do
import pandas as pd

s = pd.Series(['item0', 'item1', 'item2'])
for i in set_items:
    for j in set_trucks:
        print("Item " + str(s[i]) + " is packed in vehicle " + str(j))
Since you're using Python 3, you can do it faster by using string formatting such as
print(f"Item {s[i]} is packed in vehicle {j}")

Related

Including backtrace pointers with symbols in the Python output of a DP table for the min edit distance algorithm

I have a working min edit distance program for two words that also iterates and outputs a DP table to the console. However, I wish to add a backtrace to the program that also prints arrow pointer symbols in the DP table, to show the backtrace clearly in the output. I cannot figure out how to output these symbols correctly.
import re
import time

time1 = time.time()

def printTable(table, description):
    print(f'{description}\n')
    current_row = current_col = 0
    current_row_col = re.search("^row ([0-9]+) , col ([0-9]+)$", description)
    if current_row_col:
        current_row = int(current_row_col.group(1))
        current_col = int(current_row_col.group(2))
    row_counter = 0
    for row in table:
        row_counter += 1
        col_counter = 0
        for col in row:
            col_counter += 1
            # print(row_counter, row, current_col, col)
            if (row_counter == current_row) and (col_counter == current_col):
                formatting = '\033[1m' + '\033[91m'  # bold + red
            else:
                formatting = '\x1b[0m'  # reset formatting
            print(formatting + str(col).rjust(10, ' '), end=' ')  # rjust returns a 10-character long, right-justified version of the string
        print('\n\n')
    print('---------------------------------------------------------------------------------------------------------------')

# A DP-based solution for the edit distance problem
def editDistDP(x, y):
    leftarrow = "←"
    uparrow = "↑"
    diagarrow = ""
    dp = []  # Create an empty table to store results of subproblems
    # fill in the table with zeros
    for row in range(len(x) + 1):
        dp.append([0] * (len(y) + 1))
    # Alternatively, you can use a list comprehension to initialise the DP table in one line of code
    # dp = [[0 for column in range(len(y) + 1)] for row in range(len(x) + 1)]

    # Fill in the base case (easy) subproblems, i.e. the first row and column of the DP table
    # first row: base case subproblems for computing the cost of converting "" to y
    for i in range(len(y) + 1):
        # If x is empty then the only option is to insert all the characters of y
        # Minimum number of required operations (cost) is i insertions, where i = len(y)
        dp[0][i] = i
    # first column: base case subproblems for computing the cost of converting x to ""
    for i in range(len(x) + 1):
        # If y is empty then the only option is to delete all the characters of x
        # Minimum number of required operations (cost) is i deletions, where i = len(x)
        dp[i][0] = i
    printTable(dp, "DP table after the base case (easy) subproblems are solved")

    # Fill in the rest of the DP table in a BOTTOM-UP manner
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            horizontal_or_insertion_cost = dp[i][j-1] + 1
            vertical_or_deletion_cost = dp[i-1][j] + 1
            # Weighted minimum edit distance for substitution
            # (note the call parentheses: a bare x[i-1].isnumeric is always truthy)
            if x[i-1] != y[j-1] and x[i-1].isnumeric():
                delta = 3
            elif x[i-1] != y[j-1]:
                delta = 2
            else:
                delta = 0
            diagonal_or_substitution_cost = dp[i-1][j-1] + delta
            minValue = min(horizontal_or_insertion_cost, vertical_or_deletion_cost, diagonal_or_substitution_cost)
            dp[i][j] = minValue
            # printTable(dp, f'row {i+1} , col {j+1}')  # UNCOMMENT this line to see how the DP table is filled at each step
    printTable(dp, "Completed DP table after all the subproblems are solved")
    return dp[-1][-1]

str1, str2 = "intention", "execution"
print(f'edit distance between "{str1}" and "{str2}": {editDistDP(str1, str2)}')
time2 = time.time()
execTime = time2 - time1
execTime = str(execTime)
print("--- Executed in: " + execTime + " seconds ---")

Kernel keeps dying in Jupyter notebook with pulp solver

I've created an LP solver in a Jupyter notebook that is giving me some issues. Specifically, when I run the last line of code in the script below, I get the error message The kernel appears to have died. It will restart automatically.
Edit: the final dataframe, dfs_proj, is a 240-row, 5-column dataframe.
import pandas as pd
from pulp import *
from pulp import LpMaximize

dfs_proj = pd.read_csv("4for4_dfs_projections_120321.csv")
dfs_proj['count'] = 1
cols = ['Player', 'Pos', 'FFPts', 'DK ($)', 'count']
dfs_proj = dfs_proj[cols]
dfs_proj = dfs_proj[(dfs_proj['DK ($)'] >= 4000) | (dfs_proj['Pos'] == "DEF") | (dfs_proj['Pos'] == "TE")]
player_dict = dict(zip(dfs_proj['Player'], dfs_proj['count']))

# create a helper function to return the number of players assigned each position
def get_position_sum(player_vars, df, position):
    return pulp.lpSum([player_vars[i] * (position in df['Pos'].iloc[i]) for i in range(len(df))])

def get_optimals(site, data, num_lineups, optimize_on='FFPts'):
    """
    Generates x number of optimal lineups, based on the column to
    designate as the one to optimize on.
    :param str site: DK or FD. Used for salary constraints
    :param pd.DataFrame data: Pandas dataframe containing projections.
    :param int num_lineups: Number of lineups to generate.
    :param str optimize_on: Name of column in dataframe to use when optimizing
    """
    #global lineups
    lineups = []
    player_dict = dict(zip(data['Player'], data['count']))
    for i in range(1, num_lineups+1):
        prob = pulp.LpProblem('DK_NFL_weekly', pulp.const.LpMaximize)
        player_vars = []
        for row in data.itertuples():
            var = pulp.LpVariable(f'{row.Player}', cat='Binary')
            player_vars.append((row.Player, var))
        # total assigned players constraint
        prob += pulp.lpSum(player_var for player_var in player_vars) == 9
        # total salary constraint
        prob += pulp.lpSum(data['DK ($)'].iloc[i] * player_vars[i][1] for i in range(len(data))) <= 50000
        # for QB and DST, require 1 of each in the lineup
        prob += get_position_sum(player_vars, df, 'QB') == 1
        prob += get_position_sum(player_vars, df, 'DEF') == 1
        # to account for the FLEX position, we allow additional selections of the 3 FLEX-eligible positions: RB, WR, TE
        prob += get_position_sum(player_vars, df, 'RB') >= 2
        prob += get_position_sum(player_vars, df, 'WR') >= 3
        prob += get_position_sum(player_vars, df, 'TE') >= 1
        if i > 1:
            if optimize_on == 'Optimal Frequency':
                prob += pulp.lpSum([data['FFPts'].iloc[i] * player_vars[i][1] for i in range(len(data))]) <= (optimal - 0.001)
            else:
                prob += pulp.lpSum([data['FFPts'].iloc[i] * player_vars[i][1] for i in range(len(data))]) <= (optimal - 0.01)
        prob += pulp.lpSum([data['FFPts'].iloc[i] * player_vars[i][1] for i in range(len(data))])
        # solve and print the status
        prob.solve(PULP_CBC_CMD(msg=False))
        optimal = prob.objective.value()
        count = 1
        lineup = {}
        for i in range(len(data)):
            if player_vars[i][1].value() == 1:
                row = data.iloc[i]
                lineup[f'G{count}'] = row['Player']
                count += 1
        lineup['Total Points'] = optimal
        lineups.append(lineup)
        players = list(lineup.values())
        for i in range(0, len(players)):
            if type(players[i]) == str:
                player_dict[players[i]] += 1
                if player_dict[players[i]] == 45:
                    data = data[data['Player'] != players[i]]
    return lineups

lineups = get_optimals(dfs_proj, 20, 'FFPts')
I have tried reinstalling all the libraries that are used in the script and still get the same issue. Even running it in a normal Python script gives me the same error message. I think this might have to do with memory, but I'm not sure how to check for that or adjust for that, either.
Thanks in advance for any help!
You had a handful of typos here... Not sure if/how you got this running.
A couple of issues you had:
You co-mingled df and data variable names inside your function. So who knows what that was pulling in. (One of the hazards of working in a notebook.)
In several locations where you used player_vars, you were not indexing the tuple to get the variable piece. I'd suggest you use LpVariable.dicts() for these; it is easier to manage (see the short sketch after the code below).
Your function call doesn't account for site in the function params.
Other advice:
Do NOT turn off the messaging. You must check the solver output to see the status. First attempts came back as "infeasible", which is how I discovered the player_vars problem. If you do decide to turn off the message, figure out a way to assert(status == optimal), or risk junk results. I think it is doable in pulp, I just forgot how. Edit: here's how. This works when using the default CBC solver, after solving (obviously); for other solvers, I'm not sure:
status = LpStatus[prob.status]
assert(status=='Optimal')
Print out the problem a couple of times while building it, to see if it passes the giggle test. If you had done this, you would have seen some of the construction problems.
Anyhow, this is working fine for fake data and handles 1000+ players in a couple seconds for 20 lineups.
Buyer beware: I did not review all of the constraints too closely or the conditional constraint, so you should.
import pandas as pd
from pulp import *
# from pulp import LpMaximize
from random import randint, choice

num_players = 1000
positions = ['RB', 'WR', 'TE', 'DEF', 'QB']
players = [(i, choice(positions), randint(1, 100), randint(3000, 5000), 1) for i in range(num_players)]
cols = ['Player', 'Pos', 'FFPts', 'DK ($)', 'count']
dfs_proj = pd.DataFrame.from_records(players, columns=cols)
print(dfs_proj.head())

# dfs_proj = pd.read_csv("4for4_dfs_projections_120321.csv")
# dfs_proj['count'] = 1
# cols = ['Player', 'Pos', 'FFPts', 'DK ($)', 'count']
# dfs_proj = dfs_proj[cols]
dfs_proj = dfs_proj[(dfs_proj['DK ($)'] >= 4000) | (dfs_proj['Pos'] == "DEF") | (dfs_proj['Pos'] == "TE")]
# player_dict = dict(zip(dfs_proj['Player'], dfs_proj['count']))
print(dfs_proj.head())

# create a helper function to return the number of players assigned each position
def get_position_sum(player_vars, df, position):
    return pulp.lpSum([player_vars[i][1] * (position in df['Pos'].iloc[i]) for i in range(len(df))])  # player vars not indexed

# def get_optimals(site, data, num_lineups, optimize_on='FFPts'):  # site??? # data vs df ???
def get_optimals(data, num_lineups, optimize_on='FFPts'):
    """
    Generates x number of optimal lineups, based on the column to
    designate as the one to optimize on.
    :param str site: DK or FD. Used for salary constraints
    :param pd.DataFrame data: Pandas dataframe containing projections.
    :param int num_lineups: Number of lineups to generate.
    :param str optimize_on: Name of column in dataframe to use when optimizing
    """
    #global lineups
    lineups = []
    player_dict = dict(zip(data['Player'], data['count']))
    for i in range(1, num_lineups+1):
        prob = pulp.LpProblem('DK_NFL_weekly', pulp.const.LpMaximize)
        player_vars = []
        for row in data.itertuples():
            var = pulp.LpVariable(f'P{row.Player}', cat='Binary')  # added 'P' to player name for clarity
            player_vars.append((row.Player, var))
        # total assigned players constraint
        prob += pulp.lpSum(player_var[1] for player_var in player_vars) == 9  # player var not indexed
        # total salary constraint
        prob += pulp.lpSum(data['DK ($)'].iloc[i] * player_vars[i][1] for i in range(len(data))) <= 50000
        # for QB and DST, require 1 of each in the lineup
        # !!!! you had 'df' here, which who knows what it was pulling in... changed to data
        prob += get_position_sum(player_vars, data, 'QB') == 1
        prob += get_position_sum(player_vars, data, 'DEF') == 1
        # to account for the FLEX position, we allow additional selections of the 3 FLEX-eligible positions: RB, WR, TE
        prob += get_position_sum(player_vars, data, 'RB') >= 2
        prob += get_position_sum(player_vars, data, 'WR') >= 3
        prob += get_position_sum(player_vars, data, 'TE') >= 1
        if i > 1:
            if optimize_on == 'Optimal Frequency':
                prob += pulp.lpSum([data['FFPts'].iloc[i] * player_vars[i][1] for i in range(len(data))]) <= (optimal - 0.001)
            else:
                prob += pulp.lpSum([data['FFPts'].iloc[i] * player_vars[i][1] for i in range(len(data))]) <= (optimal - 0.01)
        prob += pulp.lpSum([data['FFPts'].iloc[i] * player_vars[i][1] for i in range(len(data))])
        print(prob)
        # solve and print the status
        prob.solve(PULP_CBC_CMD())
        optimal = prob.objective.value()
        count = 1
        lineup = {}
        for i in range(len(data)):
            if player_vars[i][1].value() == 1:
                row = data.iloc[i]
                lineup[f'G{count}'] = row['Player']
                count += 1
        lineup['Total Points'] = optimal
        lineups.append(lineup)
        players = list(lineup.values())
        for i in range(0, len(players)):
            if type(players[i]) == str:
                player_dict[players[i]] += 1
                if player_dict[players[i]] == 45:
                    data = data[data['Player'] != players[i]]
    return lineups

lineups = get_optimals(dfs_proj, 10, 'FFPts')
for lineup in lineups:
    print(lineup)
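For reference, here is a minimal sketch of the LpVariable.dicts() suggestion above, with made-up players (my example, not part of the script): variables are indexed by player name, so there are no tuples to unpack.

import pulp

players = ['Smith', 'Jones', 'Lee']                           # hypothetical data
points = {'Smith': 18.5, 'Jones': 12.0, 'Lee': 9.5}

prob = pulp.LpProblem('DK_NFL_weekly', pulp.LpMaximize)
pick = pulp.LpVariable.dicts('pick', players, cat='Binary')   # pick[name] is the binary variable

prob += pulp.lpSum(points[p] * pick[p] for p in players)      # objective
prob += pulp.lpSum(pick[p] for p in players) <= 2             # roster-size style constraint

prob.solve(pulp.PULP_CBC_CMD(msg=False))
assert pulp.LpStatus[prob.status] == 'Optimal'                # guard against junk results
print([p for p in players if pick[p].value() == 1])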

expectation and variance of future stock price under binary tree

Probably an over-simplified model for a stock price: on each day, the price goes up by a factor of 1.05 with probability 0.6, or goes down by a factor of 1/1.05 with probability 0.4. So this is a non-symmetrical binary tree. How can I analytically calculate the expectation and variance of this stock price on a future date, say day 100? Also, is there any module in Python to handle a binary tree model like this? I would appreciate code to implement this.
Best regards
import random as r

s = 100  # starting value
These are the initial conditions. Simulating one day on the stock market:
def day(stock_value):  # One day in the stock market
    k = r.uniform(0, 1)
    if k < 0.6:
        output = 1.05 * stock_value
    else:
        output = stock_value / 1.05
    return output
Simulating 100 days on the stock market:
for j in range(100):  # simulates 100 days in the stock market
    s = day(s)
print(s)
Simulating 100 days 1000 times:
data = []
for i in range(1000):
    s = [100]
    for j in range(100):
        s.append(day(s[j]))
    data.append(s)
Converting the data to only consider the last day:
def mnnm(mat):  # Transposes an m x n matrix into an n x m matrix
    out = []
    for j in range(len(mat[0])):
        out.append([])
    for j in range(len(mat[0])):
        for m in range(len(mat)):
            out[j].append(mat[m][j])
    return out

data = mnnm(data)
data = data[-1]
Taking a mean average:
def lst_avg(lst):  # Returns the average of a list
    output = 0
    for j in range(len(lst)):
        output += lst[j] / len(lst)
    return output

mean = lst_avg(data)
Variance:
import numpy as np

for h in range(len(data)):
    data[h] = data[h]**2
mean_square = lst_avg(data)
variance = np.fabs(mean_square - mean**2)
The theoretical value after 1 day is (assuming the value on day 0 is A)
A * 0.6 * 1.05 + A * 0.4/1.05
and, because the daily factors are independent and identically distributed, after 100 days it's
A * (0.63 + 0.380952...)**100
The second moment works the same way with the squared factors, E[S_n**2] = A**2 * (0.6*1.05**2 + 0.4/1.05**2)**n, and the variance is E[S_n**2] - E[S_n]**2. So...
(In the following I use 1 as the stock price on day 0.)
p1 = 0.6
p2 = 0.4
x1 = 1.05
x2 = 1/1.05
initial_value = 1
no_of_days = 100
# 1 day
expected_value_after_1_day = initial_value * ( p1*x1 + p2*x2)
print (expected_value_after_1_day, 'is the expected value of price after 1 day')
ex_squared_value_1_day = initial_value**2 * (p1*x1**2 + p2*x2**2)  # initial value is squared because this is E[price**2]
# variance can be calculated as follows
variance_day_1 = ex_squared_value_1_day - expected_value_after_1_day**2
# or an alternative calculation, summing the squares of the differences from the mean
alt_variance_day_1 = p1 * (x1 - expected_value_after_1_day) ** 2 + p2 * (x2 - expected_value_after_1_day) ** 2
print ('Variance after one day is', variance_day_1)
# 100 days
expected_value_n_days = initial_value * (p1*x1 + p2*x2) ** no_of_days
ex_squared_value_n_days = initial_value**2 * (p1*x1**2 + p2*x2**2) ** no_of_days
ex_value_n_days_squared = expected_value_n_days ** 2
variance_n_days = ex_squared_value_n_days - ex_value_n_days_squared
print(expected_value_n_days, 'is the expected value of price after {} days'.format(no_of_days))
print(ex_squared_value_n_days, 'is the expected value of the square of the price after {} days'.format(no_of_days))
print(ex_value_n_days_squared, 'is the square of the expected value of the price after {} days'.format(no_of_days))
print(variance_n_days, 'is the variance after {} days'.format(no_of_days))
It probably looks a bit old-school, hope you don't mind!
Output
1.0109523809523808 is the expected value of price after 1 day
Variance after one day is 0.0022870748299321786
2.972144657651404 is the expected value of price after 100 days
11.046309656223373 is the expected value of the square of the price after 100 days
8.833643866005783 is the square of the expected value of the price after 100 days
2.2126657902175904 is the variance after 100 days
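As a cross-check (my addition, not part of either answer): after n days the price is s0 * u**k * d**(n-k), where k, the number of up-days, is Binomial(n, p), so the moments can also be computed exactly by summing over the binomial distribution. The results agree with the closed-form values above.

from math import comb

def exact_moments(s0=1.0, u=1.05, d=1/1.05, p=0.6, n=100):
    mean = second_moment = 0.0
    for k in range(n + 1):                          # k = number of up-days
        prob = comb(n, k) * p**k * (1 - p)**(n - k)
        price = s0 * u**k * d**(n - k)
        mean += prob * price
        second_moment += prob * price**2
    return mean, second_moment - mean**2            # (expectation, variance)

print(exact_moments())   # ≈ (2.9721..., 2.2126...), matching the values above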

Issue with Nested If/While/If statement

As a D&D aficionado, I have written out a script to perform any attack and damage rolls for any specified die, including what happens in the event of a critical attack roll (20).
from random import randint

atk_roll = 20
hit_mod = 6
atk_hit = atk_roll + hit_mod
die = 'd10'
dmg_mod = 4
rolls = 1

def dice_roller(x, y, z):
    y1 = int(y.strip('d'))
    dict = {}
    results = []
    step = 1
    while step <= z:
        die = randint(1, y1)
        results.append(die)
        step += 1
    if atk_roll == 20:
        total = (sum(results) + dmg_mod) + (y1 * z)
        pct_dmg = total / (((y1 * z) + dmg_mod) + (y1 * z))
    else:
        total = sum(results) + dmg_mod
        pct_dmg = total / ((y1 * z) + dmg_mod)
    dict.update({'Attack Roll: ' + f'{atk_roll} + {hit_mod} = {x}':
                     {'Damage Roll: ' + f'{z}' + y:
                          {'Rolls': results, 'Total Damage': total, 'Pct_Damage': f'{pct_dmg:.0%}'}}})
    print(dict)

print(dice_roller(atk_hit, die, rolls))
{'Attack Roll: 20 + 6 = 26': {'Damage Roll: 1d10': {'Rolls': [4], 'Total Damage': 18, 'Pct_Damage': '75%'}}}
None
The issue arises when I introduce a new "if/else" statement to account for a missed attack. The only roll that does not work is a critical roll:
from random import randint

atk_roll = 20
hit_mod = 6
atk_hit = atk_roll + hit_mod
die = 'd10'
dmg_mod = 4
rolls = 1
ac = 15

def dice_roller(x, y, z):
    y1 = int(y.strip('d'))
    dict = {}
    results = []
    step = 1
    if x >= ac:
        while step <= z:
            die = randint(1, y1)
            results.append(die)
            step += 1
        if atk_roll == 20:
            total = (sum(results) + dmg_mod) + (y1 * z)
            pct_dmg = total / (((y1 * z) + dmg_mod) + (y1 * z))
        else:
            total = sum(results) + dmg_mod
            pct_dmg = total / ((y1 * z) + dmg_mod)
            dict.update({'Attack Roll: ' + f'{atk_roll} + {hit_mod} = {x}':
                             {'Damage Roll: ' + f'{z}' + y:
                                  {'Rolls': results, 'Total Damage': total, 'Pct_Damage': f'{pct_dmg:.0%}'}}})
        print(dict)
    else:
        print('Your attack missed')

print(dice_roller((atk_hit), die, rolls))
{}
None
When I change the value of "atk_roll" in the second script back to "randint(1,20)", the code works exactly as intended, noting when an attack misses or returning the dictionary of values if the attack hits. I cannot for the life of me figure out why the code is returning a blank dict only when "atk_roll" = 20. Does it have something to do with the nesting syntax of if/while/if? Thanks!
In the second code snippet you provided, the if branch of if atk_roll == 20: will always be taken, and therefore your dictionary will not be populated with values, because atk_roll is always 20 while the dict is only being populated in the following else.
This differs from your first piece of code, where dict is updated every time the function is called because the update is located outside of any conditional statements.
I would need more details about what your code does to provide a detailed solution, but I will attempt to give a high-level one. If you want to update the dictionary regardless of whether atk_roll is equal to 20, then the update should sit outside the nested else block of the conditional statement checking whether atk_roll is equal to 20. This code snippet is included below.
if atk_roll == 20:
    total = (sum(results) + dmg_mod) + (y1 * z)
    pct_dmg = total / (((y1 * z) + dmg_mod) + (y1 * z))
else:
    total = sum(results) + dmg_mod
    pct_dmg = total / ((y1 * z) + dmg_mod)
dict.update({'Attack Roll: ' + f'{atk_roll} + {hit_mod} = {x}':
                 {'Damage Roll: ' + f'{z}' + y:
                      {'Rolls': results, 'Total Damage': total, 'Pct_Damage': f'{pct_dmg:.0%}'}}})
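For completeness, here is a compact runnable sketch of that fix (my rewrite for illustration, not the asker's exact code): the result is built after the crit/normal if/else, only the miss branch skips it, and the function returns instead of printing, which also removes the trailing None in the output.

from random import randint

def dice_roller(atk_roll, hit_mod, die='d10', dmg_mod=4, rolls=1, ac=15):
    sides = int(die.strip('d'))
    results = [randint(1, sides) for _ in range(rolls)]
    if atk_roll + hit_mod < ac:
        return 'Your attack missed'
    if atk_roll == 20:                       # critical hit: add maximum damage once
        total = sum(results) + dmg_mod + sides * rolls
        pct_dmg = total / ((sides * rolls + dmg_mod) + sides * rolls)
    else:
        total = sum(results) + dmg_mod
        pct_dmg = total / (sides * rolls + dmg_mod)
    # this runs for every hit, critical or not
    return {'Rolls': results, 'Total Damage': total, 'Pct_Damage': f'{pct_dmg:.0%}'}

print(dice_roller(20, 6))   # a natural 20 now produces a populated result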

Use concentration of a blending as a constraint on a linear optimization

I have the following table of foods (reproduced as the data list below), from which I have to create a recipe with a specific value for protein and carbohydrates.
I am using or-tools to solve this problem; so far I have:
The formatted data:
data = [
    ['f1', 10, 15, 17, 10],
    ['f2', 2, 11, 12, 14],
    ['f3', 6.5, 17, 16, 13],
    ['f4', 8, 12, 8, 16]
]
The constraints for the nutrients:
nutrients = [
    ["protein", 15.5],
    ["carbohydrates", 12.3]]
The objective function, where the upper bound data[i][1] is the stock of that particular element:
food = [[]] * len(data)
# Objective: minimize the sum of (price-normalized) foods.
objective = solver.Objective()
for i in range(0, len(data)):
    food[i] = solver.NumVar(0.0, data[i][1], data[i][0])
    objective.SetCoefficient(food[i], 4)
objective.SetMinimization()
I also have, as constraints, the required value of each nutrient:
constraints = [0] * (len(nutrients))
for i in range(0, len(nutrients)):
    constraints[i] = solver.Constraint(nutrients[i][1], solver.infinity())
    for j in range(0, len(data)):
        constraints[i].SetCoefficient(food[j], data[j][i+3])
And finally the solver:
status = solver.Solve()
if status == solver.OPTIMAL:
    # Display the amounts (in dollars) to purchase of each food.
    price = 0
    num_nutrients = len(data[i]) - 3
    nutrients = [0] * (len(data[i]) - 3)
    for i in range(0, len(data)):
        price += food[i].solution_value()
        for nutrient in range(0, num_nutrients):
            nutrients[nutrient] += data[i][nutrient+3] * food[i].solution_value()
        if food[i].solution_value() > 0:
            print("%s = %f" % (data[i][0], food[i].solution_value()))
    print('Optimal price: $%.2f' % (price))
else:  # No optimal solution was found.
    if status == solver.FEASIBLE:
        print('A potentially suboptimal solution was found.')
    else:
        print('The solver could not solve the problem.')
Up to this point it is working fine; the result I get is the following:
f1 = 0.077049
f3 = 0.886885
Optimal price: $0.96
Now I also need to add a constraint on how many kg I will make, which has to satisfy the previous constraints as well.
My first guess was to add a multiplier to the nutrient requirements:
factor = 10
nutrients = [
    ["protein", 15.5 * factor],
    ["carbohydrates", 12.3 * factor]]
This way I will have 10 times more food, but then I realized that this is not correct, since what I need is a concentration, e.g.
I need 10 kg with 15.5 protein/kg and 12.3 carbohydrates/kg
The constraint I need is something like this:
(f1*W + f2*X + f3*Y + f4*Z)/(W+X+Y+Z) = 10kg with 15.5 protein/kg and 12.3 carbohydrates/kg
Where W, X, Y and Z are the kg of each food
How can I add this constraint to the solver?
(f1*W + f2*X + f3*Y + f4*Z)/(W+X+Y+Z) = 10
is the same as
f1*W + f2*X + f3*Y + f4*Z = 10*(W+X+Y+Z)
This is now linear.
And, in case we missed some math classes, we can write this as a standard LP constraint:
(f1-10)*W + (f2-10)*X + (f3-10)*Y + (f4-10)*Z = 0
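Concretely, a minimal sketch of how this could look with the variables defined earlier in the question (my wording; it assumes solver, data and food are as above, with the two nutrient concentrations in columns 3 and 4 of data, and a target batch of 10 kg):

total_kg = 10
targets = [("protein", 15.5), ("carbohydrates", 12.3)]   # required concentration per kg

# total mass: W + X + Y + Z == 10 kg
mass_ct = solver.Constraint(total_kg, total_kg)
for j in range(len(data)):
    mass_ct.SetCoefficient(food[j], 1.0)

# concentration for each nutrient: sum((c_j - target) * kg_j) == 0
for i, (name, target) in enumerate(targets):
    conc_ct = solver.Constraint(0.0, 0.0)
    for j in range(len(data)):
        conc_ct.SetCoefficient(food[j], data[j][i + 3] - target)

If 15.5 and 12.3 are minimum concentrations rather than exact targets, relax the upper bound of each concentration constraint to solver.infinity(); the same shifted coefficients then express an average concentration of at least the target.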
