"out of memory" issue with PETsc in Ubuntu - python
I am running an OpenMDAO code that uses 2 parallel groups. I have installed PETSc4py and mpi4py inside a virtual python environment. I am getting the following error while running my code. The error reads as follows: "Out of memory. Allocated: 0, Used by process: 236814336"
Here is the full error message:
File "PETSc/Scatter.pyx", line 42, in petsc4py.PETSc.Scatter.create
petsc4py.PETSc.Error: error code 55
[1] VecScatterCreate() line 282 in /tmp/pycharm-packaging/petsc/src/vec/vscat/interface/vscreate.c
[1] VecScatterSetUp() line 211 in /tmp/pycharm-packaging/petsc/src/vec/vscat/interface/vscatfce.c
[1] VecScatterSetUp_MPI1() line 2543 in /tmp/pycharm-packaging/petsc/src/vec/vscat/impls/mpi1/vpscat_mpi1.c
[1] VecScatterSetUp_vectype_private() line 865 in /tmp/pycharm-packaging/petsc/src/vec/vscat/impls/vscat.c
[1] VecScatterCreate_PtoP() line 746 in /tmp/pycharm-packaging/petsc/src/vec/vscat/impls/vscat.c
[1] VecScatterCreateLocal_PtoP_MPI1() line 2436 in /tmp/pycharm-packaging/petsc/src/vec/vscat/impls/mpi1/vpscat_mpi1.c
[1] PetscMallocA() line 390 in /tmp/pycharm-packaging/petsc/src/sys/memory/mal.c
[1] VecScatterCreateLocal_PtoP_MPI1() line 2436 in /tmp/pycharm-packaging/petsc/src/vec/vscat/impls/mpi1/vpscat_mpi1.c
[1] Out of memory. Allocated: 0, Used by process: 237649920
[1] Memory requested 18446744062962991104
-------------------------------------------------------
Primary job terminated normally, but 1 process returned
a non-zero exit code.. Per user-direction, the job has been aborted.
-------------------------------------------------------
--------------------------------------------------------------------------
mpirun detected that one or more processes exited with non-zero status, thus causing
the job to be terminated. The first process to do so was:
Process name: [[43240,1],1]
Exit code: 1
--------------------------------------------------------------------------
Note that the "Memory requested 18446744062962991104" line is just below 2^64 bytes, so it looks like an overflowed or negative size rather than a genuine allocation request. I launch the script with the following command:
mpirun -np 2 python ./parallel_processing.py
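(To rule out a broken petsc4py/mpi4py installation in the virtual environment, a minimal check along these lines can be run with the same command; check_petsc.py is just a placeholder name and is not part of my project.)
# check_petsc.py - hypothetical sanity check for petsc4py/mpi4py under mpirun
# Run with: mpirun -np 2 python check_petsc.py
from mpi4py import MPI
from petsc4py import PETSc

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Create a small distributed vector and fill each local block with the owning rank + 1
v = PETSc.Vec().createMPI(10, comm=comm)
start, end = v.getOwnershipRange()
for i in range(start, end):
    v.setValue(i, float(rank + 1))
v.assemblyBegin()
v.assemblyEnd()

# Every rank should print the same global sum if the MPI/PETSc stack works
print("rank", rank, "global sum:", v.sum())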
Here is the code for IDF optimization:
from __future__ import print_function
import pickle
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from openmdao.api import Problem, ScipyOptimizeDriver, SqliteRecorder
import time
import random
from openmdao.recorders.case_reader import CaseReader
from ssbj_vanaret_mda import SsbjMda
from ssbj_vanaret_idf2_mda import SsbjIdf2Mda
def idf_run2(nx, ny):
# make a counter for discipline calls
[str_count, aer_count, pro_count] = np.zeros(3)
a = ["str_count.p", "aer_count.p", "pro_count.p"]
for i in a:
with open(i, "wb") as f:
pickle.dump(0, f)
# Initialize an MDA to generate the starting point for IDF
prob_init = Problem() # initialize the optimization problem
prob_init.model = SsbjMda(nx_input=nx) # create the MDA
# Design variables
prob_init.model.add_design_var('z', lower=np.zeros(nx), upper=np.ones(nx))
prob_init.model.add_design_var('x1', lower=np.zeros(nx), upper=np.ones(nx))
prob_init.model.add_design_var('x2', lower=np.zeros(nx), upper=np.ones(nx))
prob_init.model.add_design_var('x3', lower=np.zeros(nx), upper=np.ones(nx))
# Objective function
prob_init.model.add_objective('range')
# Constraints
for i in range(nx):
prob_init.model.add_constraint('con_g1' + str(i + 1), upper=0)
prob_init.model.add_constraint('con_g2' + str(i + 1), upper=0)
prob_init.model.add_constraint('con_g3' + str(i + 1), upper=0)
prob_init.driver = ScipyOptimizeDriver(optimizer='SLSQP')
prob_init.driver.options['maxiter'] = 0
prob_init.setup(mode='fwd')
prob_init.set_solver_print(1)
prob_init.run_driver()
prob_init.cleanup()
y12_initial = prob_init['y12']
y23_initial = prob_init['y23']
y32_initial = prob_init['y32']
y21_initial = prob_init['y21']
y31_initial = prob_init['y31']
# Initialize the MDA for IDF
prob = Problem()
prob.model = SsbjIdf2Mda(nx, ny, y12_initial, y23_initial, y32_initial, y21_initial, y31_initial)
# create the MDA
# Design variables
prob.model.add_design_var('z', lower=np.zeros(nx), upper=np.ones(nx)) # shared variables
prob.model.add_design_var('x1', lower=np.zeros(nx), upper=np.ones(nx)) # local variable for structural discipline
prob.model.add_design_var('x2', lower=np.zeros(nx), upper=np.ones(nx)) # local variable for aerodynamic discipline
prob.model.add_design_var('x3', lower=np.zeros(nx), upper=np.ones(nx)) # local variable for propulsion discipline
# Coupling variables
prob.model.add_design_var('y31')
prob.model.add_design_var('y12')
prob.model.add_design_var('y32')
prob.model.add_design_var('y23')
prob.model.add_design_var('y21')
# Objective function
prob.model.add_objective('obj')
# Constraints
for i in range(nx):
prob.model.add_constraint('con_g1' + str(i + 1), upper=0)
prob.model.add_constraint('con_g2' + str(i + 1), upper=0)
prob.model.add_constraint('con_g3' + str(i + 1), upper=0)
epsilon = 1e-9
# Coupling constraints
for i in range(ny):
prob.model.add_constraint('con_y12' + str(i + 1), upper=epsilon)
prob.model.add_constraint('con_y21' + str(i + 1), upper=epsilon)
prob.model.add_constraint('con_y23' + str(i + 1), upper=epsilon)
prob.model.add_constraint('con_y32' + str(i + 1), upper=epsilon)
prob.model.add_constraint('con_y31' + str(i + 1), upper=epsilon)
# Optimizer options
prob.driver = ScipyOptimizeDriver()
prob.set_solver_print(2)
prob.driver.options['optimizer'] = 'SLSQP'
for tol in [1e-3]:
prob.driver.options['maxiter'] = random.randint(40, 50)
prob.driver.options['tol'] = tol
prob.driver.add_recorder(SqliteRecorder("cases_idf.sql"))
# Run optimization
start_time = time.time()
prob.setup(mode='fwd')
# view_model(prob, outfile='n2_mdfgs.html', show_browser=True)
prob.run_driver()
prob.run_model()
# prob.check_partials()
prob.cleanup()
end_time = time.time()
total_time = end_time - start_time
if prob.driver.options['tol'] == 1e-6:
iters = len(CaseReader('cases_idf.sql').get_cases())
cr = CaseReader('cases_idf.sql')
case_ids = cr.get_cases()
obj_list = ['obj']
z = []
[z.append(case.get_objectives(case)[obj_list[0]]) for case in case_ids]
with open("df_idf.p", "rb") as f:
df_idf = pickle.load(f).append(pd.DataFrame({'total iterations[IDF]': [iters],
'total time[IDF]': [total_time],
'final_objective[IDF]': z[-1]}))
with open("df_idf.p", "wb") as f:
pickle.dump(df_idf, f)
elif prob.driver.options['tol'] == 1e-3:
iters = len(CaseReader('cases_idf.sql').get_cases())
cr = CaseReader('cases_idf.sql')
case_ids = cr.get_cases()
obj_list = ['obj']
z = []
a = ["str_count.p", "aer_count.p", "pro_count.p"]
k = []
for i in a:
with open(i, "rb") as f:
k.append(pickle.load(f))
[z.append(case.get_objectives(case)[obj_list[0]]) for case in case_ids]
with open("df_idf_tol.p", "rb") as f:
df_idf = pickle.load(f).append(pd.DataFrame({'10.total iterations[IDF_tol]': [iters],
'11.total time[IDF_tol]': [total_time],
'12.final_objective[IDF_tol]': z[-1],
'13.str_count[IDF_tol]': k[0],
'14.aer_count[IDF_tol]': k[1],
'15.pro_count[IDF_tol]': k[2]
}))
with open("df_idf_tol.p", "wb") as f:
pickle.dump(df_idf, f)
Here is the MDA for the IDF optimization:
from openmdao.api import Group
import numpy as np
from openmdao.api import IndepVarComp, ExecComp, ParallelGroup
from ssbj_vanaret_discipline import StructureDisc
from ssbj_vanaret_discipline import AerodynamicsDisc
from ssbj_vanaret_discipline import PropulsionDisc
from ssbj_vanaret_discipline import PerformanceDisc
class SsbjIdf2Mda(Group):
"""
Analysis for IDF formulation where couplings are managed as additional constraints
on input/output variables of related disciplines.
"""
def __init__(self, nx_input, ny_input, y12_initial, y23_initial, y32_initial, y21_initial, y31_initial):
super(SsbjIdf2Mda, self).__init__()
self.nx = nx_input
self.ny = ny_input
self.y12 = y12_initial
self.y23 = y23_initial
self.y32 = y32_initial
self.y31 = y31_initial
self.y21 = y21_initial
def setup(self):
# Design variables
self.add_subsystem('z_ini', IndepVarComp('z', .5 * np.ones(self.nx)), promotes=['*'])
self.add_subsystem('x1_ini', IndepVarComp('x1', .5 * np.ones(self.nx)), promotes=['*'])
self.add_subsystem('x2_ini', IndepVarComp('x2', .5 * np.ones(self.nx)), promotes=['*'])
self.add_subsystem('x3_ini', IndepVarComp('x3', .5 * np.ones(self.nx)), promotes=['*'])
# Couplings
self.add_subsystem('y31_ini', IndepVarComp('y31', self.y31), promotes=['*'])
self.add_subsystem('y12_ini', IndepVarComp('y12', self.y12), promotes=['*'])
self.add_subsystem('y32_ini', IndepVarComp('y32', self.y32), promotes=['*'])
self.add_subsystem('y23_ini', IndepVarComp('y23', self.y23), promotes=['*'])
self.add_subsystem('y21_ini', IndepVarComp('y21', self.y21), promotes=['*'])
# Disciplines
parallel = self.add_subsystem('parallel', ParallelGroup())
parallel.add_subsystem('Structure', StructureDisc())
parallel.add_subsystem('Aerodynamics', AerodynamicsDisc())
self.add_subsystem('Propulsion', PropulsionDisc())
self.add_subsystem('Performance', PerformanceDisc())
# Shared variables z
self.connect('z', 'parallel.Structure.z')
self.connect('z', 'parallel.Aerodynamics.z')
self.connect('z', 'Propulsion.z')
self.connect('z', 'Performance.z')
# Local variables
self.connect('x1', 'parallel.Structure.x1')
self.connect('x2', 'parallel.Aerodynamics.x2')
self.connect('x3', 'Propulsion.x3')
self.connect('x1', 'Performance.x1')
self.connect('x2', 'Performance.x2')
self.connect('x3', 'Performance.x3')
# Coupling variables
self.connect('y21', 'parallel.Structure.y21')
self.connect('y31', 'parallel.Structure.y31')
self.connect('y32', 'parallel.Aerodynamics.y32')
self.connect('y12', 'parallel.Aerodynamics.y12')
self.connect('y23', 'Propulsion.y23')
# Objective function
self.add_subsystem('Obj', ExecComp('obj=range'), promotes=['obj'])
# Connections
self.connect('Performance.range', 'Obj.range')
# self.connect('Propulsion.y34', 'Performance.y34')
# self.connect('Aerodynamics.y24', 'Performance.y24')
# self.connect('Structure.y14', 'Performance.y14')
self.connect('parallel.Aerodynamics.y21', 'Performance.y21')
self.connect('Propulsion.y31', 'Performance.y31')
self.connect('Propulsion.y32', 'Performance.y32')
self.connect('parallel.Structure.y12', 'Performance.y12')
self.connect('parallel.Aerodynamics.y23', 'Performance.y23')
# Coupling constraints
for i in range(self.ny):
self.add_subsystem('con_Y12' + str(i + 1),
ExecComp('con_y12' + str(i + 1) + '=(y12[' + str(i) + '] - y12k[' + str(i) + ']) ** 2',
y12=self.y12,
y12k=self.y12
),
promotes=['con_y12' + str(i + 1)])
self.connect('parallel.Structure.y12', 'con_Y12' + str(i + 1) + '.y12')
self.connect('y12', 'con_Y12' + str(i + 1) + '.y12k')
for i in range(self.ny):
self.add_subsystem('con_Y21' + str(i + 1),
ExecComp('con_y21' + str(i + 1) + '=(y21[' + str(i) + '] - y21k[' + str(i) + ']) ** 2',
y21=self.y21,
y21k=self.y21
),
promotes=['con_y21' + str(i + 1)])
self.connect('parallel.Aerodynamics.y21', 'con_Y21' + str(i + 1) + '.y21')
self.connect('y21', 'con_Y21' + str(i + 1) + '.y21k')
for i in range(self.ny):
self.add_subsystem('con_Y32' + str(i + 1),
ExecComp('con_y32' + str(i + 1) + '=(y32[' + str(i) + '] - y32k[' + str(i) + ']) ** 2',
y32=self.y32,
y32k=self.y32
),
promotes=['con_y32' + str(i + 1)])
self.connect('Propulsion.y32', 'con_Y32' + str(i + 1) + '.y32')
self.connect('y32', 'con_Y32' + str(i + 1) + '.y32k')
for i in range(self.ny):
self.add_subsystem('con_Y23' + str(i + 1),
ExecComp('con_y23' + str(i + 1) + '=(y23[' + str(i) + '] - y23k[' + str(i) + ']) ** 2',
y23=self.y23,
y23k=self.y23
),
promotes=['con_y23' + str(i + 1)])
self.connect('parallel.Aerodynamics.y23', 'con_Y23' + str(i + 1) + '.y23')
self.connect('y23', 'con_Y23' + str(i + 1) + '.y23k')
for i in range(self.ny):
self.add_subsystem('con_Y31' + str(i + 1),
ExecComp('con_y31' + str(i + 1) + '=(y31[' + str(i) + '] - y31k[' + str(i) + ']) ** 2',
y31=self.y31,
y31k=self.y31
),
promotes=['con_y31' + str(i + 1)])
self.connect('Propulsion.y31', 'con_Y31' + str(i + 1) + '.y31')
self.connect('y31', 'con_Y31' + str(i + 1) + '.y31k')
# Local constraints
for i in range(self.nx):
self.add_subsystem('con_G1' + str(i + 1),
ExecComp('con_g1' + str(i + 1) + '=g1[' + str(i) + ']', g1=np.zeros(self.nx)),
promotes=['con_g1' + str(i + 1)])
self.connect('parallel.Structure.g1', 'con_G1' + str(i + 1) + '.g1')
for i in range(self.nx):
self.add_subsystem('con_G2' + str(i + 1),
ExecComp('con_g2' + str(i + 1) + '=g2[' + str(i) + ']', g2=np.zeros(self.nx)),
promotes=['con_g2' + str(i + 1)])
self.connect('parallel.Aerodynamics.g2', 'con_G2' + str(i + 1) + '.g2')
for i in range(self.nx):
self.add_subsystem('con_G3' + str(i + 1),
ExecComp('con_g3' + str(i + 1) + '=g3[' + str(i) + ']', g3=np.zeros(self.nx)),
promotes=['con_g3' + str(i + 1)])
self.connect('Propulsion.g3', 'con_G3' + str(i + 1) + '.g3')
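To check whether the PETSc data transfers for a ParallelGroup work at all (independently of the SSBJ disciplines), a stripped-down sketch like the one below can be run with the same mpirun command. The ExecComp expressions are placeholders and parallel_min.py is a hypothetical file name, not part of my project.
# parallel_min.py - minimal ParallelGroup sketch with placeholder components
# Run with: mpirun -np 2 python parallel_min.py
import numpy as np
from openmdao.api import Problem, Group, ParallelGroup, ExecComp, IndepVarComp

class MinimalParallelModel(Group):
    def setup(self):
        # Shared design variable, promoted the same way as 'z' in the real model
        self.add_subsystem('z_ini', IndepVarComp('z', 0.5 * np.ones(3)), promotes=['*'])
        # Two dummy disciplines inside a ParallelGroup (placeholders, not the SSBJ components)
        parallel = self.add_subsystem('parallel', ParallelGroup())
        parallel.add_subsystem('Structure', ExecComp('y12 = 2.0 * z', z=np.zeros(3), y12=np.zeros(3)))
        parallel.add_subsystem('Aerodynamics', ExecComp('y21 = 3.0 * z', z=np.zeros(3), y21=np.zeros(3)))
        # A serial component fed by both parallel branches, forcing PETSc transfers
        self.add_subsystem('Performance', ExecComp('y_tot = y12 + y21',
                                                   y12=np.zeros(3), y21=np.zeros(3), y_tot=np.zeros(3)))
        self.connect('z', 'parallel.Structure.z')
        self.connect('z', 'parallel.Aerodynamics.z')
        self.connect('parallel.Structure.y12', 'Performance.y12')
        self.connect('parallel.Aerodynamics.y21', 'Performance.y21')

if __name__ == '__main__':
    prob = Problem(model=MinimalParallelModel())
    prob.setup(mode='fwd')
    prob.run_model()
    print('y_tot =', prob['Performance.y_tot'])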