diff --git a/pyomo/contrib/mindtpy/algorithm_base_class.py b/pyomo/contrib/mindtpy/algorithm_base_class.py index 7fe0469df1f..7def1dcaab3 100644 --- a/pyomo/contrib/mindtpy/algorithm_base_class.py +++ b/pyomo/contrib/mindtpy/algorithm_base_class.py @@ -34,13 +34,6 @@ SolutionStatus, SolverStatus, ) -from pyomo.contrib.mindtpy.util import ( - generate_norm2sq_objective_function, - set_solver_options, - generate_norm_constraint, - fp_converged, - add_orthogonality_cuts, -) from pyomo.core import ( minimize, maximize, @@ -74,15 +67,19 @@ generate_norm2sq_objective_function, generate_norm_inf_objective_function, generate_lag_objective_function, - set_solver_options, GurobiPersistent4MindtPy, - MindtPySolveData, setup_results_object, get_integer_solution, - add_feas_slacks, + initialize_feas_subproblem, epigraph_reformulation, add_var_bound, copy_var_list_values_from_solution_pool, + generate_norm_constraint, + fp_converged, + add_orthogonality_cuts, + set_solver_mipgap, + set_solver_constraint_violation_tolerance, + update_solver_timelimit, ) single_tree, single_tree_available = attempt_import('pyomo.contrib.mindtpy.single_tree') @@ -141,6 +138,8 @@ def __init__(self, **kwds): self.stored_bound = {} self.num_no_good_cuts_added = {} self.last_iter_cuts = False + # Store the OA cuts generated in the mip_start_process. + self.mip_start_lazy_oa_cuts = [] # Support use as a context manager under current solver API def __enter__(self): @@ -186,13 +185,7 @@ def _log_solver_intro_message(self): ) def set_up_logger(self): - """Set up the formatter and handler for logger. - - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - """ + """Set up the formatter and handler for logger.""" self.config.logger.handlers.clear() self.config.logger.propagate = False ch = logging.StreamHandler() @@ -247,11 +240,6 @@ def create_utility_block(self, model, name): def model_is_valid(self): """Determines whether the model is solvable by MindtPy. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Returns ------- bool @@ -279,10 +267,10 @@ def model_is_valid(self): 'Your model is a NLP (nonlinear program). ' 'Using NLP solver %s to solve.' % config.nlp_solver ) - nlpopt = SolverFactory(config.nlp_solver) - # TODO: rewrite - set_solver_options(nlpopt, self.timing, config, solver_type='nlp') - nlpopt.solve( + update_solver_timelimit( + self.nlp_opt, config.nlp_solver, self.timing, config + ) + self.nlp_opt.solve( self.original_model, tee=config.nlp_solver_tee, **config.nlp_solver_args, @@ -293,11 +281,12 @@ def model_is_valid(self): 'Your model is an LP (linear program). ' 'Using LP solver %s to solve.' 
% config.mip_solver ) - mainopt = SolverFactory(config.mip_solver) - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(self.original_model) - set_solver_options(mainopt, self.timing, config, solver_type='mip') - results = mainopt.solve( + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(self.original_model) + update_solver_timelimit( + self.mip_opt, config.mip_solver, self.timing, config + ) + results = self.mip_opt.solve( self.original_model, tee=config.mip_solver_tee, load_solutions=False, @@ -329,46 +318,25 @@ def build_ordered_component_lists(self, model): """ util_block = getattr(model, self.util_block_name) var_set = ComponentSet() - setattr( - util_block, - 'constraint_list', - list( - model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block) - ) - ), + util_block.constraint_list = list( + model.component_data_objects( + ctype=Constraint, active=True, descend_into=(Block) + ) ) - setattr( - util_block, - 'linear_constraint_list', - list( - c - for c in model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block) - ) - if c.body.polynomial_degree() in self.mip_constraint_polynomial_degree - ), + util_block.linear_constraint_list = list( + c + for c in util_block.constraint_list + if c.body.polynomial_degree() in self.mip_constraint_polynomial_degree ) - setattr( - util_block, - 'nonlinear_constraint_list', - list( - c - for c in model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block) - ) - if c.body.polynomial_degree() - not in self.mip_constraint_polynomial_degree - ), + util_block.nonlinear_constraint_list = list( + c + for c in util_block.constraint_list + if c.body.polynomial_degree() not in self.mip_constraint_polynomial_degree ) - setattr( - util_block, - 'objective_list', - list( - model.component_data_objects( - ctype=Objective, active=True, descend_into=(Block) - ) - ), + util_block.objective_list = list( + model.component_data_objects( + ctype=Objective, active=True, descend_into=(Block) + ) ) # Identify the non-fixed variables in (potentially) active constraints and @@ -382,24 +350,17 @@ def build_ordered_component_lists(self, model): # We use component_data_objects rather than list(var_set) in order to # preserve a deterministic ordering. - var_list = list( + util_block.variable_list = list( v for v in model.component_data_objects(ctype=Var, descend_into=(Block)) if v in var_set ) - setattr(util_block, 'variable_list', var_list) - discrete_variable_list = list( - v - for v in model.component_data_objects(ctype=Var, descend_into=(Block)) - if v in var_set and v.is_integer() + util_block.discrete_variable_list = list( + v for v in util_block.variable_list if v in var_set and v.is_integer() ) - setattr(util_block, 'discrete_variable_list', discrete_variable_list) - continuous_variable_list = list( - v - for v in model.component_data_objects(ctype=Var, descend_into=(Block)) - if v in var_set and v.is_continuous() + util_block.continuous_variable_list = list( + v for v in util_block.variable_list if v in var_set and v.is_continuous() ) - setattr(util_block, 'continuous_variable_list', continuous_variable_list) def add_cuts_components(self, model): config = self.config @@ -423,12 +384,6 @@ def add_cuts_components(self, model): # characteristics, the user may wish to revisit NLP subproblems # (with a different initialization, for example). Therefore, these # cuts are not enabled by default. 
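# --- Illustrative sketch (not part of the patch): the linear/nonlinear split in
# build_ordered_component_lists above keys off Expression.polynomial_degree().
# The degree set {0, 1} below stands in for self.mip_constraint_polynomial_degree,
# which may also contain 2 when the MIP solver is allowed to handle quadratic
# constraints.
from pyomo.environ import ConcreteModel, Constraint, Var

toy = ConcreteModel()
toy.x = Var()
toy.y = Var()
toy.lin = Constraint(expr=2 * toy.x + toy.y <= 1)  # body degree 1 -> linear list
toy.nonlin = Constraint(expr=toy.x * toy.y >= 1)   # body degree 2 -> nonlinear list

degree_set = {0, 1}
linear_list = [
    c for c in (toy.lin, toy.nonlin) if c.body.polynomial_degree() in degree_set
]
nonlinear_list = [
    c for c in (toy.lin, toy.nonlin) if c.body.polynomial_degree() not in degree_set
]
# linear_list == [toy.lin]; nonlinear_list == [toy.nonlin]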
- # - # Note: these cuts will only exclude integer realizations that are - # not already in the primary no_good_cuts ConstraintList. - # TODO: this is not used. - lin.feasible_no_good_cuts = ConstraintList(doc='explored no-good cuts') - lin.feasible_no_good_cuts.deactivate() if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2': feas.nl_constraint_set = RangeSet( @@ -617,16 +572,7 @@ def update_primal_bound(self, bound_value): if self.primal_bound_improved: self.update_gap() - def process_objective( - self, - config, - move_objective=False, - use_mcpp=False, - update_var_con_list=True, - partition_nonlinear_terms=True, - obj_handleable_polynomial_degree={0, 1}, - constr_handleable_polynomial_degree={0, 1}, - ): + def process_objective(self, update_var_con_list=True): """Process model objective function. Check that the model has only 1 valid objective. @@ -635,22 +581,11 @@ def process_objective( Parameters ---------- - config : ConfigBlock - Solver configuration options - move_objective : bool, optional - Whether to move even linear objective functions to the constraints, by default False. - use_mcpp : bool, optional - Whether to use mcpp to tighten the bound of slack variables., by default False. update_var_con_list : bool, optional Whether to update the variable/constraint/objective lists, by default True. Currently, update_var_con_list will be set to False only when add_regularization is not None in MindtPy. - partition_nonlinear_terms : bool, optional - Whether to partition sum of nonlinear terms in the objective function, by default True. - obj_handleable_polynomial_degree : dict, optional - The polynomial degree of the objective function that will be regarded as linear, by default {0, 1}. - constr_handleable_polynomial_degree : dict, optional - The polynomial degree of the constraints that will be regarded as linear, by default {0, 1}. """ + config = self.config m = self.working_model util_block = getattr(m, self.util_block_name) # Handle missing or multiple objectives @@ -675,10 +610,11 @@ def process_objective( # Move the objective to the constraints if it is nonlinear or move_objective is True. if ( - main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree - or move_objective + main_obj.expr.polynomial_degree() + not in self.mip_objective_polynomial_degree + or config.move_objective ): - if move_objective: + if config.move_objective: config.logger.info("Moving objective to constraint set.") else: config.logger.info( @@ -688,12 +624,12 @@ def process_objective( util_block.objective_constr = ConstraintList() if ( main_obj.expr.polynomial_degree() - not in obj_handleable_polynomial_degree - and partition_nonlinear_terms + not in self.mip_objective_polynomial_degree + and config.partition_obj_nonlinear_terms and main_obj.expr.__class__ is EXPR.SumExpression ): repn = generate_standard_repn( - main_obj.expr, quadratic=2 in obj_handleable_polynomial_degree + main_obj.expr, quadratic=2 in self.mip_objective_polynomial_degree ) # the following code will also work if linear_subexpr is a constant. 
linear_subexpr = ( @@ -714,7 +650,7 @@ def process_objective( linear_subexpr, util_block.objective_value, util_block.objective_constr, - use_mcpp, + config.use_mcpp, main_obj.sense, ) nonlinear_subexpr = repn.nonlinear_expr @@ -724,7 +660,7 @@ def process_objective( subsubexpr, util_block.objective_value, util_block.objective_constr, - use_mcpp, + config.use_mcpp, main_obj.sense, ) else: @@ -732,7 +668,7 @@ def process_objective( nonlinear_subexpr, util_block.objective_value, util_block.objective_constr, - use_mcpp, + config.use_mcpp, main_obj.sense, ) else: @@ -740,7 +676,7 @@ def process_objective( main_obj.expr, util_block.objective_value, util_block.objective_constr, - use_mcpp, + config.use_mcpp, main_obj.sense, ) @@ -751,8 +687,8 @@ def process_objective( if ( main_obj.expr.polynomial_degree() - not in obj_handleable_polynomial_degree - or (move_objective and update_var_con_list) + not in self.mip_objective_polynomial_degree + or (config.move_objective and update_var_con_list) ): util_block.variable_list.extend(util_block.objective_value[:]) util_block.continuous_variable_list.extend( @@ -763,23 +699,21 @@ def process_objective( for constr in util_block.objective_constr[:]: if ( constr.body.polynomial_degree() - in constr_handleable_polynomial_degree + in self.mip_constraint_polynomial_degree ): util_block.linear_constraint_list.append(constr) else: util_block.nonlinear_constraint_list.append(constr) - def set_up_solve_data(self, model, config): + def set_up_solve_data(self, model): """Set up the solve data. Parameters ---------- model : Pyomo model The original model to be solved in MindtPy. - config : ConfigBlock - The specific configurations for MindtPy. - """ + config = self.config # if the objective function is a constant, dual bound constraint is not added. obj = next(model.component_data_objects(ctype=Objective, active=True)) if obj.expr.polynomial_degree() == 0: @@ -825,39 +759,40 @@ def set_up_solve_data(self, model, config): # ----------------------------------------------------------------------------------------- # initialization - def MindtPy_initialization(self, config): + def MindtPy_initialization(self): """Initializes the decomposition algorithm. This function initializes the decomposition algorithm, which includes generating the initial cuts required to build the main MIP. - - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. """ # Do the initialization + config = self.config if config.init_strategy == 'rNLP': - self.init_rNLP(config) + self.init_rNLP() elif config.init_strategy == 'max_binary': - self.init_max_binaries(config) + self.init_max_binaries() elif config.init_strategy == 'initial_binary': - self.curr_int_sol = get_integer_solution(self.working_model) + try: + self.curr_int_sol = get_integer_solution(self.working_model) + except TypeError as e: + config.logger.error(e) + raise ValueError( + 'The initial integer combination is not provided or not complete. ' + 'Please provide the complete integer combination or use other initialization strategy.' 
+ ) self.integer_list.append(self.curr_int_sol) - fixed_nlp, fixed_nlp_result = self.solve_subproblem(config) - self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, config) + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) elif config.init_strategy == 'FP': - self.init_rNLP(config) - self.fp_loop(config) + self.init_rNLP() + self.fp_loop() - def init_rNLP(self, config, add_oa_cuts=True): + def init_rNLP(self, add_oa_cuts=True): """Initialize the problem by solving the relaxed NLP and then store the optimal variable values obtained from solving the rNLP. Parameters ---------- - config : ConfigBlock - The specific configurations for MindtPy. add_oa_cuts : Bool Whether add OA cuts after solving the relaxed NLP problem. @@ -866,15 +801,15 @@ def init_rNLP(self, config, add_oa_cuts=True): ValueError MindtPy unable to handle the termination condition of the relaxed NLP. """ + config = self.config m = self.working_model.clone() config.logger.debug('Relaxed NLP: Solve relaxed integrality') MindtPy = m.MindtPy_utils TransformationFactory('core.relax_integer_vars').apply_to(m) nlp_args = dict(config.nlp_solver_args) - nlpopt = SolverFactory(config.nlp_solver) - set_solver_options(nlpopt, self.timing, config, solver_type='nlp') + update_solver_timelimit(self.nlp_opt, config.nlp_solver, self.timing, config) with SuppressInfeasibleWarning(): - results = nlpopt.solve( + results = self.nlp_opt.solve( m, tee=config.nlp_solver_tee, load_solutions=False, **nlp_args ) if len(results.solution) > 0: @@ -960,17 +895,12 @@ def init_rNLP(self, config, add_oa_cuts=True): % (subprob_terminate_cond, results.solver.message) ) - def init_max_binaries(self, config): + def init_max_binaries(self): """Modifies model by maximizing the number of activated binary variables. Note - The user would usually want to call solve_subproblem after an invocation of this function. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Raises ------ ValueError @@ -978,9 +908,10 @@ def init_max_binaries(self, config): ValueError MindtPy unable to handle the termination condition of the MILP main problem. """ + config = self.config m = self.working_model.clone() - if config.calculate_dual_at_solution: - m.dual.deactivate() + if hasattr(m, 'dual') and isinstance(m.dual, Suffix): + m.del_component('dual') MindtPy = m.MindtPy_utils self.mip_subiter += 1 config.logger.debug('Initialization: maximize value of binaries') @@ -1000,12 +931,11 @@ def init_max_binaries(self, config): getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate() getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate() - mipopt = SolverFactory(config.mip_solver) - if isinstance(mipopt, PersistentSolver): - mipopt.set_instance(m) + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(m) mip_args = dict(config.mip_solver_args) - set_solver_options(mipopt, self.timing, config, solver_type='mip') - results = mipopt.solve( + update_solver_timelimit(self.mip_opt, config.mip_solver, self.timing, config) + results = self.mip_opt.solve( m, tee=config.mip_solver_tee, load_solutions=False, **mip_args ) if len(results.solution) > 0: @@ -1031,7 +961,7 @@ def init_max_binaries(self, config): ) elif solve_terminate_cond is tc.infeasible: raise ValueError( - 'MILP main problem is infeasible. ' + 'MIP main problem is infeasible. ' 'Problem may have no more feasible ' 'binary configurations.' 
) @@ -1052,17 +982,12 @@ def init_max_binaries(self, config): ################################################################################################################################################################################################################## # nlp_solve.py - def solve_subproblem(self, config): + def solve_subproblem(self): """Solves the Fixed-NLP (with fixed integers). This function sets up the 'fixed_nlp' by fixing binaries, sets continuous variables to their initial var values, precomputes dual values, deactivates trivial constraints, and then solves NLP model. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Returns ------- fixed_nlp : Pyomo model @@ -1070,6 +995,7 @@ def solve_subproblem(self, config): results : SolverResults Results from solving the Fixed-NLP. """ + config = self.config MindtPy = self.fixed_nlp.MindtPy_utils self.nlp_iter += 1 @@ -1097,7 +1023,8 @@ def solve_subproblem(self, config): self.fixed_nlp.tmp_duals[c] = c_geq * max( 0, c_geq * (rhs - value(c.body)) ) - except (ValueError, OverflowError) as error: + except (ValueError, OverflowError) as e: + config.logger.error(e) self.fixed_nlp.tmp_duals[c] = None evaluation_error = True if evaluation_error: @@ -1113,22 +1040,19 @@ def solve_subproblem(self, config): ignore_infeasible=False, tolerance=config.constraint_tolerance, ) - except InfeasibleConstraintException: - config.logger.warning( - 'infeasibility detected in deactivate_trivial_constraints' + except InfeasibleConstraintException as e: + config.logger.error( + str(e) + '\nInfeasibility detected in deactivate_trivial_constraints.' ) results = SolverResults() results.solver.termination_condition = tc.infeasible return self.fixed_nlp, results # Solve the NLP - nlpopt = SolverFactory(config.nlp_solver) nlp_args = dict(config.nlp_solver_args) - # TODO: Can we move set_solver_options outside of this function? - # if not, we can define this function as a method - set_solver_options(nlpopt, self.timing, config, solver_type='nlp') + update_solver_timelimit(self.nlp_opt, config.nlp_solver, self.timing, config) with SuppressInfeasibleWarning(): with time_code(self.timing, 'fixed subproblem'): - results = nlpopt.solve( + results = self.nlp_opt.solve( self.fixed_nlp, tee=config.nlp_solver_tee, load_solutions=False, @@ -1141,7 +1065,7 @@ def solve_subproblem(self, config): ) return self.fixed_nlp, results - def handle_nlp_subproblem_tc(self, fixed_nlp, result, config, cb_opt=None): + def handle_nlp_subproblem_tc(self, fixed_nlp, result, cb_opt=None): """This function handles different terminaton conditions of the fixed-NLP subproblem. Parameters @@ -1150,8 +1074,6 @@ def handle_nlp_subproblem_tc(self, fixed_nlp, result, config, cb_opt=None): Integer-variable-fixed NLP model. result : SolverResults Results from solving the NLP subproblem. - config : ConfigBlock - The specific configurations for MindtPy. cb_opt : SolverFactory, optional The gurobi_persistent solver, by default None. 
""" @@ -1160,25 +1082,25 @@ def handle_nlp_subproblem_tc(self, fixed_nlp, result, config, cb_opt=None): tc.locallyOptimal, tc.feasible, }: - self.handle_subproblem_optimal(fixed_nlp, config, cb_opt) + self.handle_subproblem_optimal(fixed_nlp, cb_opt) elif result.solver.termination_condition in {tc.infeasible, tc.noSolution}: - self.handle_subproblem_infeasible(fixed_nlp, config, cb_opt) + self.handle_subproblem_infeasible(fixed_nlp, cb_opt) elif result.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( + self.config.logger.info( 'NLP subproblem failed to converge within the time limit.' ) self.results.solver.termination_condition = tc.maxTimeLimit self.should_terminate = True elif result.solver.termination_condition is tc.maxEvaluations: - config.logger.info('NLP subproblem failed due to maxEvaluations.') + self.config.logger.info('NLP subproblem failed due to maxEvaluations.') self.results.solver.termination_condition = tc.maxEvaluations self.should_terminate = True else: self.handle_subproblem_other_termination( - fixed_nlp, result.solver.termination_condition, config + fixed_nlp, result.solver.termination_condition, cb_opt ) - def handle_subproblem_optimal(self, fixed_nlp, config, cb_opt=None, fp=False): + def handle_subproblem_optimal(self, fixed_nlp, cb_opt=None, fp=False): """This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution. @@ -1187,8 +1109,6 @@ def handle_subproblem_optimal(self, fixed_nlp, config, cb_opt=None, fp=False): ---------- fixed_nlp : Pyomo model Integer-variable-fixed NLP model. - config : ConfigBlock - The specific configurations for MindtPy. cb_opt : SolverFactory, optional The gurobi_persistent solver, by default None. fp : bool, optional @@ -1196,6 +1116,7 @@ def handle_subproblem_optimal(self, fixed_nlp, config, cb_opt=None, fp=False): """ # TODO: check what is this copy_value function used for? # Warmstart? + config = self.config copy_var_list_values( fixed_nlp.MindtPy_utils.variable_list, self.working_model.MindtPy_utils.variable_list, @@ -1236,8 +1157,9 @@ def handle_subproblem_optimal(self, fixed_nlp, config, cb_opt=None, fp=False): var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) if config.add_no_good_cuts: - # TODO: fix - add_no_good_cuts(self.mip, var_values, config, self.timing) + add_no_good_cuts( + self.mip, var_values, config, self.timing, self.mip_iter, cb_opt + ) config.call_after_subproblem_feasible(fixed_nlp) @@ -1254,7 +1176,7 @@ def handle_subproblem_optimal(self, fixed_nlp, config, cb_opt=None, fp=False): ) ) - def handle_subproblem_infeasible(self, fixed_nlp, config, cb_opt=None): + def handle_subproblem_infeasible(self, fixed_nlp, cb_opt=None): """Solves feasibility problem and adds cut according to the specified strategy. This function handles the result of the latest iteration of solving the NLP subproblem given an infeasible @@ -1264,13 +1186,12 @@ def handle_subproblem_infeasible(self, fixed_nlp, config, cb_opt=None): ---------- fixed_nlp : Pyomo model Integer-variable-fixed NLP model. - config : ConfigBlock - The specific configurations for MindtPy. cb_opt : SolverFactory, optional The gurobi_persistent solver, by default None. """ # TODO try something else? Reinitialize with different initial # value? 
+ config = self.config config.logger.info('NLP subproblem was locally infeasible.') self.nlp_infeasible_counter += 1 if config.calculate_dual_at_solution: @@ -1294,9 +1215,7 @@ def handle_subproblem_infeasible(self, fixed_nlp, config, cb_opt=None): # fixed_nlp.ipopt_zU_out[var] = -1 config.logger.info('Solving feasibility problem') - feas_subproblem, feas_subproblem_results = self.solve_feasibility_subproblem( - config - ) + feas_subproblem, feas_subproblem_results = self.solve_feasibility_subproblem() # TODO: do we really need this? if self.should_terminate: return @@ -1315,10 +1234,12 @@ def handle_subproblem_infeasible(self, fixed_nlp, config, cb_opt=None): var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) if config.add_no_good_cuts: # excludes current discrete option - add_no_good_cuts(self.mip, var_values, config, self.timing) + add_no_good_cuts( + self.mip, var_values, config, self.timing, self.mip_iter, cb_opt + ) def handle_subproblem_other_termination( - self, fixed_nlp, termination_condition, config + self, fixed_nlp, termination_condition, cb_opt=None ): """Handles the result of the latest iteration of solving the fixed NLP subproblem given a solution that is neither optimal nor infeasible. @@ -1329,8 +1250,8 @@ def handle_subproblem_other_termination( Integer-variable-fixed NLP model. termination_condition : Pyomo TerminationCondition The termination condition of the fixed NLP subproblem. - config : ConfigBlock - The specific configurations for MindtPy. + cb_opt : SolverFactory, optional + The gurobi_persistent solver, by default None. Raises ------ @@ -1339,13 +1260,20 @@ def handle_subproblem_other_termination( """ if termination_condition is tc.maxIterations: # TODO try something else? Reinitialize with different initial value? - config.logger.info( + self.config.logger.info( 'NLP subproblem failed to converge within iteration limit.' ) var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - if config.add_no_good_cuts: + if self.config.add_no_good_cuts: # excludes current discrete option - add_no_good_cuts(self.mip, var_values, config, self.timing) + add_no_good_cuts( + self.mip, + var_values, + self.config, + self.timing, + self.mip_iter, + cb_opt, + ) else: raise ValueError( @@ -1353,14 +1281,9 @@ def handle_subproblem_other_termination( 'condition of {}'.format(termination_condition) ) - def solve_feasibility_subproblem(self, config): + def solve_feasibility_subproblem(self): """Solves a feasibility NLP if the fixed_nlp problem is infeasible. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Returns ------- feas_subproblem : Pyomo model @@ -1368,6 +1291,7 @@ def solve_feasibility_subproblem(self, config): feas_soln : SolverResults Results from solving the feasibility NLP. 
""" + config = self.config feas_subproblem = self.fixed_nlp MindtPy = feas_subproblem.MindtPy_utils MindtPy.feas_opt.activate() @@ -1382,25 +1306,15 @@ def solve_feasibility_subproblem(self, config): constr.deactivate() MindtPy.feas_opt.activate() - if config.feasibility_norm == 'L1': - MindtPy.feas_obj = Objective( - expr=sum(s for s in MindtPy.feas_opt.slack_var[...]), sense=minimize - ) - elif config.feasibility_norm == 'L2': - MindtPy.feas_obj = Objective( - expr=sum(s * s for s in MindtPy.feas_opt.slack_var[...]), sense=minimize - ) - else: - MindtPy.feas_obj = Objective( - expr=MindtPy.feas_opt.slack_var, sense=minimize - ) - nlpopt = SolverFactory(config.nlp_solver) + MindtPy.feas_obj.activate() nlp_args = dict(config.nlp_solver_args) - set_solver_options(nlpopt, self.timing, config, solver_type='nlp') + update_solver_timelimit( + self.feasibility_nlp_opt, config.nlp_solver, self.timing, config + ) with SuppressInfeasibleWarning(): try: with time_code(self.timing, 'feasibility subproblem'): - feas_soln = nlpopt.solve( + feas_soln = self.feasibility_nlp_opt.solve( feas_subproblem, tee=config.nlp_solver_tee, load_solutions=config.nlp_solver != 'appsi_ipopt', @@ -1408,14 +1322,15 @@ def solve_feasibility_subproblem(self, config): ) if len(feas_soln.solution) > 0: feas_subproblem.solutions.load_from(feas_soln) - except (ValueError, OverflowError) as error: + except (ValueError, OverflowError) as e: + config.logger.error(e) for nlp_var, orig_val in zip( MindtPy.variable_list, self.initial_var_values ): if not nlp_var.fixed and not nlp_var.is_binary(): nlp_var.set_value(orig_val, skip_validation=True) with time_code(self.timing, 'feasibility subproblem'): - feas_soln = nlpopt.solve( + feas_soln = self.feasibility_nlp_opt.solve( feas_subproblem, tee=config.nlp_solver_tee, load_solutions=config.nlp_solver != 'appsi_ipopt', @@ -1424,7 +1339,7 @@ def solve_feasibility_subproblem(self, config): if len(feas_soln.solution) > 0: feas_soln.solutions.load_from(feas_soln) self.handle_feasibility_subproblem_tc( - feas_soln.solver.termination_condition, MindtPy, config + feas_soln.solver.termination_condition, MindtPy ) MindtPy.feas_opt.deactivate() for constr in MindtPy.nonlinear_constraint_list: @@ -1433,7 +1348,7 @@ def solve_feasibility_subproblem(self, config): MindtPy.feas_obj.deactivate() return feas_subproblem, feas_soln - def handle_feasibility_subproblem_tc(self, subprob_terminate_cond, MindtPy, config): + def handle_feasibility_subproblem_tc(self, subprob_terminate_cond, MindtPy): """Handles the result of the latest iteration of solving the feasibility NLP subproblem. Parameters @@ -1442,9 +1357,8 @@ def handle_feasibility_subproblem_tc(self, subprob_terminate_cond, MindtPy, conf The termination condition of the feasibility NLP subproblem. MindtPy : Pyomo Block The MindtPy_utils block. - config : ConfigBlock - The specific configurations for MindtPy. """ + config = self.config if subprob_terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}: # TODO: check what is this copy_value used for? copy_var_list_values( @@ -1482,7 +1396,7 @@ def handle_feasibility_subproblem_tc(self, subprob_terminate_cond, MindtPy, conf ###################################################################################################################################################### # iterate.py - def algorithm_should_terminate(self, config, check_cycling): + def algorithm_should_terminate(self, check_cycling): """Checks if the algorithm should terminate at the given point. 
This function determines whether the algorithm should terminate based on the solver options and progress. @@ -1491,8 +1405,6 @@ def algorithm_should_terminate(self, config, check_cycling): Parameters ---------- - config : ConfigBlock - The specific configurations for MindtPy. check_cycling : bool Whether to check for a special case that causes the discrete variables to loop through the same values. @@ -1517,13 +1429,11 @@ def algorithm_should_terminate(self, config, check_cycling): or (check_cycling and self.iteration_cycling()) ) - def fix_dual_bound(self, config, last_iter_cuts): + def fix_dual_bound(self, last_iter_cuts): """Fix the dual bound when no-good cuts or tabu list is activated. Parameters ---------- - config : ConfigBlock - The specific configurations for MindtPy. last_iter_cuts : bool Whether the cuts in the last iteration have been added. """ @@ -1531,14 +1441,17 @@ def fix_dual_bound(self, config, last_iter_cuts): # Therefore, we need to correct it at the end. # In singletree implementation, the dual bound at one iteration before the optimal solution, is valid for the optimal solution. # So we will set the dual bound to it. + config = self.config if config.single_tree: config.logger.info( 'Fix the bound to the value of one iteration before optimal solution is found.' ) try: self.dual_bound = self.stored_bound[self.primal_bound] - except KeyError: - config.logger.info('No stored bound found. Bound fix failed.') + except KeyError as e: + config.logger.error( + str(e) + '\nNo stored bound found. Bound fix failed.' + ) else: config.logger.info( 'Solve the main problem without the last no_good cut to fix the bound.' @@ -1548,8 +1461,8 @@ def fix_dual_bound(self, config, last_iter_cuts): # Solve NLP subproblem # The constraint linearization happens in the handlers if not last_iter_cuts: - fixed_nlp, fixed_nlp_result = self.solve_subproblem(config) - self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, config) + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) MindtPy = self.mip.MindtPy_utils # deactivate the integer cuts generated after the best solution was found. @@ -1559,19 +1472,16 @@ def fix_dual_bound(self, config, last_iter_cuts): and MindtPy.component('mip_obj') is None ): MindtPy.objective_list[-1].activate() - mainopt = SolverFactory(config.mip_solver) # determine if persistent solver is called. - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(self.mip, symbolic_solver_labels=True) - if config.use_tabu_list: - self.set_up_tabulist_callback(mainopt) + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(self.mip, symbolic_solver_labels=True) mip_args = dict(config.mip_solver_args) - set_solver_options(mainopt, self.timing, config, solver_type='mip') - main_mip_results = mainopt.solve( + update_solver_timelimit( + self.mip_opt, config.mip_solver, self.timing, config + ) + main_mip_results = self.mip_opt.solve( self.mip, tee=config.mip_solver_tee, load_solutions=False, **mip_args ) - if config.use_tabu_list: - self.update_attributes() if len(main_mip_results.solution) > 0: self.mip.solutions.load_from(main_mip_results) @@ -1593,66 +1503,51 @@ def fix_dual_bound(self, config, last_iter_cuts): ): self.results.solver.termination_condition = tc.optimal - def set_up_tabulist_callback(self, mainopt): - """Set up the tabulist using IncumbentCallback - - Parameters - ---------- - mainopt : solver - The MIP solver. 
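# --- Reference sketch (not part of the patch): solve_feasibility_subproblem above
# now only activates a pre-built MindtPy.feas_obj (constructed once by
# initialize_feas_subproblem) instead of rebuilding it on every call. The three
# feasibility norms correspond to objectives of roughly this form, assuming
# feas_opt.slack_var is indexed for 'L1'/'L2' and a single scalar slack otherwise.
from pyomo.environ import Objective, minimize

def build_feas_obj_sketch(MindtPy, feasibility_norm):
    if feasibility_norm == 'L1':
        # minimize the sum of constraint-violation slacks
        return Objective(
            expr=sum(s for s in MindtPy.feas_opt.slack_var.values()), sense=minimize
        )
    elif feasibility_norm == 'L2':
        # minimize the sum of squared slacks
        return Objective(
            expr=sum(s * s for s in MindtPy.feas_opt.slack_var.values()),
            sense=minimize,
        )
    else:  # 'L_infinity': one scalar slack bounds every constraint violation
        return Objective(expr=MindtPy.feas_opt.slack_var, sense=minimize)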
+ def set_up_tabulist_callback(self): + """Sets up the tabulist using IncumbentCallback. + Currently only support CPLEX. """ - tabulist = mainopt._solver_model.register_callback( + tabulist = self.mip_opt._solver_model.register_callback( tabu_list.IncumbentCallback_cplex ) - self.solve_data = MindtPySolveData() - self.export_attributes() - tabulist.solve_data = self.solve_data - tabulist.opt = mainopt + tabulist.opt = self.mip_opt tabulist.config = self.config - mainopt._solver_model.parameters.preprocessing.reduce.set(1) + tabulist.mindtpy_solver = self + self.mip_opt.options['preprocessing_reduce'] = 1 # If the callback is used to reject incumbents, the user must set the # parameter c.parameters.preprocessing.reduce either to the value 1 (one) # to restrict presolve to primal reductions only or to 0 (zero) to disable all presolve reductions - mainopt._solver_model.set_warning_stream(None) - mainopt._solver_model.set_log_stream(None) - mainopt._solver_model.set_error_stream(None) - - def set_up_lazy_OA_callback(self, mainopt): - """Set up the lazy OA using LazyConstraintCallback + self.mip_opt._solver_model.set_warning_stream(None) + self.mip_opt._solver_model.set_log_stream(None) + self.mip_opt._solver_model.set_error_stream(None) - Parameters - ---------- - mainopt : solver - The MIP solver. + def set_up_lazy_OA_callback(self): + """Sets up the lazy OA using LazyConstraintCallback. + Currently only support CPLEX and Gurobi. """ if self.config.mip_solver == 'cplex_persistent': - lazyoa = mainopt._solver_model.register_callback( + lazyoa = self.mip_opt._solver_model.register_callback( single_tree.LazyOACallback_cplex ) # pass necessary data and parameters to lazyoa lazyoa.main_mip = self.mip - self.solve_data = MindtPySolveData() - self.export_attributes() - lazyoa.solve_data = self.solve_data lazyoa.config = self.config - lazyoa.opt = mainopt - mainopt._solver_model.set_warning_stream(None) - mainopt._solver_model.set_log_stream(None) - mainopt._solver_model.set_error_stream(None) + lazyoa.opt = self.mip_opt + lazyoa.mindtpy_solver = self + self.mip_opt._solver_model.set_warning_stream(None) + self.mip_opt._solver_model.set_log_stream(None) + self.mip_opt._solver_model.set_error_stream(None) if self.config.mip_solver == 'gurobi_persistent': - mainopt.set_callback(single_tree.LazyOACallback_gurobi) + self.mip_opt.set_callback(single_tree.LazyOACallback_gurobi) + self.mip_opt.mindtpy_solver = self + self.mip_opt.config = self.config ########################################################################################################################################## # mip_solve.py - def solve_main(self, config): + def solve_main(self): """This function solves the MIP main problem. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Returns ------- self.mip : Pyomo model @@ -1660,39 +1555,39 @@ def solve_main(self, config): main_mip_results : SolverResults Results from solving the main MIP. """ + config = self.config self.mip_iter += 1 # setup main problem - self.setup_main(config) - mainopt, mip_args = self.set_up_mip_solver(config) + self.setup_main() + mip_args = self.set_up_mip_solver() try: - main_mip_results = mainopt.solve( + main_mip_results = self.mip_opt.solve( self.mip, tee=config.mip_solver_tee, load_solutions=False, **mip_args ) # update_attributes should be before load_from(main_mip_results), since load_from(main_mip_results) may fail. 
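# --- Hedged sketch (an assumption, not the util.py implementation): the
# update_solver_timelimit(...) calls introduced throughout this patch (e.g. in
# solve_subproblem and fix_dual_bound above) shrink a subsolver's time limit to
# whatever remains of MindtPy's overall budget. The option names below are
# illustrative; the real helper in pyomo.contrib.mindtpy.util covers many more
# solver interfaces.
from pyomo.contrib.gdpopt.util import get_main_elapsed_time

def update_solver_timelimit_sketch(opt, solver_name, timing, config):
    remaining = max(config.time_limit - get_main_elapsed_time(timing), 1)
    if solver_name in {'cplex', 'cplex_persistent'}:
        opt.options['timelimit'] = remaining
    elif solver_name in {'gurobi', 'gurobi_persistent'}:
        opt.options['TimeLimit'] = remaining
    elif solver_name == 'ipopt':
        opt.options['max_cpu_time'] = remaining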
- if config.single_tree or config.use_tabu_list: - self.update_attributes() if len(main_mip_results.solution) > 0: self.mip.solutions.load_from(main_mip_results) - except (ValueError, AttributeError): + except (ValueError, AttributeError, RuntimeError) as e: + config.logger.error(e) if config.single_tree: config.logger.warning('Single tree terminate.') - if get_main_elapsed_time(self.timing) >= config.time_limit - 2: + if get_main_elapsed_time(self.timing) >= config.time_limit: config.logger.warning('due to the timelimit.') self.results.solver.termination_condition = tc.maxTimeLimit if config.strategy == 'GOA' or config.add_no_good_cuts: config.logger.warning( - 'ValueError: Cannot load a SolverResults object with bad status: error. ' + 'Error: Cannot load a SolverResults object with bad status: error. ' 'MIP solver failed. This usually happens in the single-tree GOA algorithm. ' "No-good cuts are added and GOA algorithm doesn't converge within the time limit. " - 'No integer solution is found, so the cplex solver will report an error status. ' + 'No integer solution is found, so the CPLEX solver will report an error status. ' ) return None, None if config.solution_pool: - main_mip_results._solver_model = mainopt._solver_model + main_mip_results._solver_model = self.mip_opt._solver_model main_mip_results._pyomo_var_to_solver_var_map = ( - mainopt._pyomo_var_to_solver_var_map + self.mip_opt._pyomo_var_to_solver_var_map ) if main_mip_results.solver.termination_condition is tc.optimal: if config.single_tree and not config.add_no_good_cuts: @@ -1706,14 +1601,9 @@ def solve_main(self, config): ) return self.mip, main_mip_results - def solve_fp_main(self, config): + def solve_fp_main(self): """This function solves the MIP main problem. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Returns ------- self.mip : Pyomo model @@ -1722,10 +1612,11 @@ def solve_fp_main(self, config): Results from solving the main MIP. """ # setup main problem - self.setup_fp_main(config) - mainopt, mip_args = self.set_up_mip_solver(config) + config = self.config + self.setup_fp_main() + mip_args = self.set_up_mip_solver() - main_mip_results = mainopt.solve( + main_mip_results = self.mip_opt.solve( self.mip, tee=config.mip_solver_tee, load_solutions=False, **mip_args ) # update_attributes should be before load_from(main_mip_results), since load_from(main_mip_results) may fail. @@ -1743,14 +1634,9 @@ def solve_fp_main(self, config): return self.mip, main_mip_results - def solve_regularization_main(self, config): + def solve_regularization_main(self): """This function solves the MIP main problem. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Returns ------- self.mip : Pyomo model @@ -1758,13 +1644,23 @@ def solve_regularization_main(self, config): main_mip_results : SolverResults Results from solving the main MIP. 
""" - + config = self.config # setup main problem - self.setup_regularization_main(config) - mainopt, mip_args = self.set_up_mip_solver(config, regularization_problem=True) + self.setup_regularization_main() - main_mip_results = mainopt.solve( - self.mip, tee=config.mip_solver_tee, load_solutions=False, **mip_args + if isinstance(self.regularization_mip_opt, PersistentSolver): + self.regularization_mip_opt.set_instance(self.mip) + update_solver_timelimit( + self.regularization_mip_opt, + config.mip_regularization_solver, + self.timing, + config, + ) + main_mip_results = self.regularization_mip_opt.solve( + self.mip, + tee=config.mip_solver_tee, + load_solutions=False, + **dict(config.mip_solver_args), ) if len(main_mip_results.solution) > 0: self.mip.solutions.load_from(main_mip_results) @@ -1799,52 +1695,22 @@ def solve_regularization_main(self, config): return self.mip, main_mip_results - def set_up_mip_solver(self, config, regularization_problem=False): + def set_up_mip_solver(self): """Set up the MIP solver. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - regularization_problem : bool - Whether it is solving a regularization problem. - Returns ------- mainopt : SolverFactory The customized MIP solver. """ - # Deactivate extraneous IMPORT/EXPORT suffixes - if config.nlp_solver in {'ipopt', 'cyipopt'}: - getattr(self.mip, 'ipopt_zL_out', _DoNothing()).deactivate() - getattr(self.mip, 'ipopt_zU_out', _DoNothing()).deactivate() - if regularization_problem: - mainopt = SolverFactory(config.mip_regularization_solver) - else: - if config.mip_solver == 'gurobi_persistent' and config.single_tree: - mainopt = GurobiPersistent4MindtPy() - self.solve_data = MindtPySolveData() - self.export_attributes() - mainopt.solve_data = self.solve_data - mainopt.config = config - else: - mainopt = SolverFactory(config.mip_solver) - # determine if persistent solver is called. - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(self.mip, symbolic_solver_labels=True) - if config.single_tree and not regularization_problem: - self.set_up_lazy_OA_callback(mainopt) + config = self.config + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(self.mip, symbolic_solver_labels=True) + if config.single_tree: + self.set_up_lazy_OA_callback() if config.use_tabu_list: - self.set_up_tabulist_callback(mainopt) - - set_solver_options( - mainopt, - self.timing, - config, - solver_type='mip', - regularization=regularization_problem, - ) + self.set_up_tabulist_callback() mip_args = dict(config.mip_solver_args) if config.mip_solver in { 'cplex', @@ -1853,11 +1719,11 @@ def set_up_mip_solver(self, config, regularization_problem=False): 'gurobi_persistent', }: mip_args['warmstart'] = True - return mainopt, mip_args + return mip_args # The following functions deal with handling the solution we get from the above MIP solver function - def handle_main_optimal(self, main_mip, config, update_bound=True): + def handle_main_optimal(self, main_mip, update_bound=True): """This function copies the results from 'solve_main' to the working model and updates the upper/lower bound. This function is called after an optimal solution is found for the main problem. @@ -1866,8 +1732,6 @@ def handle_main_optimal(self, main_mip, config, update_bound=True): ---------- main_mip : Pyomo model The MIP main problem. - config : ConfigBlock - The specific configurations for MindtPy. update_bound : bool, optional Whether to update the bound, by default True. 
Bound will not be updated when handling regularization problem. @@ -1877,7 +1741,7 @@ def handle_main_optimal(self, main_mip, config, update_bound=True): # check if the value of binary variable is valid for var in MindtPy.discrete_variable_list: if var.value is None: - config.logger.warning( + self.config.logger.warning( f"Integer variable {var.name} not initialized. " "Setting it to its lower bound" ) @@ -1887,13 +1751,13 @@ def handle_main_optimal(self, main_mip, config, update_bound=True): copy_var_list_values( main_mip.MindtPy_utils.variable_list, self.fixed_nlp.MindtPy_utils.variable_list, - config, + self.config, skip_fixed=False, ) if update_bound: self.update_dual_bound(value(MindtPy.mip_obj.expr)) - config.logger.info( + self.config.logger.info( self.log_formatter.format( self.mip_iter, 'MILP', @@ -1905,97 +1769,24 @@ def handle_main_optimal(self, main_mip, config, update_bound=True): ) ) - def handle_main_other_conditions(self, main_mip, main_mip_results, config): - """This function handles the result of the latest iteration of solving the MIP problem (given any of a few - edge conditions, such as if the solution is neither infeasible nor optimal). - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - main_mip_results : SolverResults - Results from solving the MIP problem. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle MILP main termination condition. - """ - if main_mip_results.solver.termination_condition is tc.infeasible: - self.handle_main_infeasible(main_mip, config) - elif main_mip_results.solver.termination_condition is tc.unbounded: - temp_results = self.handle_main_unbounded(main_mip, config) - elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: - temp_results = self.handle_main_unbounded(main_mip, config) - if temp_results.solver.termination_condition is tc.infeasible: - self.handle_main_infeasible(main_mip, config) - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - self.handle_main_max_timelimit(main_mip, main_mip_results, config) - self.results.solver.termination_condition = tc.maxTimeLimit - elif main_mip_results.solver.termination_condition is tc.feasible or ( - main_mip_results.solver.termination_condition is tc.other - and main_mip_results.solution.status is SolutionStatus.feasible - ): - # load the solution and suppress the warning message by setting - # solver status to ok. - MindtPy = main_mip.MindtPy_utils - config.logger.info( - 'MILP solver reported feasible solution, ' - 'but not guaranteed to be optimal.' - ) - copy_var_list_values( - main_mip.MindtPy_utils.variable_list, - self.fixed_nlp.MindtPy_utils.variable_list, - config, - skip_fixed=False, - ) - self.update_suboptimal_dual_bound(main_mip_results) - config.logger.info( - self.log_formatter.format( - self.mip_iter, - 'MILP', - value(MindtPy.mip_obj.expr), - self.primal_bound, - self.dual_bound, - self.rel_gap, - get_main_elapsed_time(self.timing), - ) - ) - else: - raise ValueError( - 'MindtPy unable to handle MILP main termination condition ' - 'of %s. Solver message: %s' - % ( - main_mip_results.solver.termination_condition, - main_mip_results.solver.message, - ) - ) - - def handle_main_infeasible(self, main_mip, config): + def handle_main_infeasible(self): """This function handles the result of the latest iteration of solving the MIP problem given an infeasible solution. 
- - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - config : ConfigBlock - The specific configurations for MindtPy. """ - config.logger.info( - 'MILP main problem is infeasible. ' + self.config.logger.info( + 'MIP main problem is infeasible. ' 'Problem may have no more feasible ' 'binary configurations.' ) if self.mip_iter == 1: - config.logger.warning( + self.config.logger.warning( 'MindtPy initialization may have generated poor quality cuts.' ) # TODO no-good cuts for single tree case # set optimistic bound to infinity - config.logger.info('MindtPy exiting due to MILP main problem infeasibility.') + self.config.logger.info( + 'MindtPy exiting due to MILP main problem infeasibility.' + ) if self.results.solver.termination_condition is None: if ( self.primal_bound == float('inf') and self.objective_sense == minimize @@ -2007,7 +1798,7 @@ def handle_main_infeasible(self, main_mip, config): else: self.results.solver.termination_condition = tc.feasible - def handle_main_max_timelimit(self, main_mip, main_mip_results, config): + def handle_main_max_timelimit(self, main_mip, main_mip_results): """This function handles the result of the latest iteration of solving the MIP problem given that solving the MIP takes too long. @@ -2017,12 +1808,10 @@ def handle_main_max_timelimit(self, main_mip, main_mip_results, config): The MIP main problem. main_mip_results : [type] Results from solving the MIP main subproblem. - config : ConfigBlock - The specific configurations for MindtPy. """ - # TODO if we have found a valid feasible solution, we take that, if not, we can at least use the dual bound + # If we have found a valid feasible solution, we take that. If not, we can at least use the dual bound. MindtPy = main_mip.MindtPy_utils - config.logger.info( + self.config.logger.info( 'Unable to optimize MILP main problem ' 'within time limit. ' 'Using current solver feasible solution.' @@ -2030,11 +1819,11 @@ def handle_main_max_timelimit(self, main_mip, main_mip_results, config): copy_var_list_values( main_mip.MindtPy_utils.variable_list, self.fixed_nlp.MindtPy_utils.variable_list, - config, + self.config, skip_fixed=False, ) self.update_suboptimal_dual_bound(main_mip_results) - config.logger.info( + self.config.logger.info( self.log_formatter.format( self.mip_iter, 'MILP', @@ -2046,7 +1835,7 @@ def handle_main_max_timelimit(self, main_mip, main_mip_results, config): ) ) - def handle_main_unbounded(self, main_mip, config): + def handle_main_unbounded(self, main_mip): """This function handles the result of the latest iteration of solving the MIP problem given an unbounded solution due to the relaxation. @@ -2054,8 +1843,6 @@ def handle_main_unbounded(self, main_mip, config): ---------- main_mip : Pyomo model The MIP main problem. - config : ConfigBlock - The specific configurations for MindtPy. Returns ------- @@ -2065,6 +1852,7 @@ def handle_main_unbounded(self, main_mip, config): # Solution is unbounded. Add an arbitrary bound to the objective and resolve. # This occurs when the objective is nonlinear. The nonlinear objective is moved # to the constraints, and deactivated for the linear main problem. + config = self.config MindtPy = main_mip.MindtPy_utils config.logger.warning( 'main MILP was unbounded. 
' @@ -2076,12 +1864,11 @@ def handle_main_unbounded(self, main_mip, config): MindtPy.objective_bound = Constraint( expr=(-config.obj_bound, MindtPy.mip_obj.expr, config.obj_bound) ) - mainopt = SolverFactory(config.mip_solver) - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(main_mip) - set_solver_options(mainopt, self.timing, config, solver_type='mip') + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(main_mip) + update_solver_timelimit(self.mip_opt, config.mip_solver, self.timing, config) with SuppressInfeasibleWarning(): - main_mip_results = mainopt.solve( + main_mip_results = self.mip_opt.solve( main_mip, tee=config.mip_solver_tee, load_solutions=False, @@ -2091,7 +1878,7 @@ def handle_main_unbounded(self, main_mip, config): self.mip.solutions.load_from(main_mip_results) return main_mip_results - def handle_regularization_main_tc(self, main_mip, main_mip_results, config): + def handle_regularization_main_tc(self, main_mip, main_mip_results): """Handles the result of the regularization main problem. Parameters @@ -2100,8 +1887,6 @@ def handle_regularization_main_tc(self, main_mip, main_mip_results, config): The MIP main problem. main_mip_results : SolverResults Results from solving the regularization main subproblem. - config : ConfigBlock - The specific configurations for MindtPy. Raises ------ @@ -2109,34 +1894,39 @@ def handle_regularization_main_tc(self, main_mip, main_mip_results, config): MindtPy unable to handle the regularization problem termination condition. """ if main_mip_results is None: - config.logger.info( + self.config.logger.info( 'Failed to solve the regularization problem.' 'The solution of the OA main problem will be adopted.' ) elif main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}: - self.handle_main_optimal(main_mip, config, update_bound=False) + self.handle_main_optimal(main_mip, update_bound=False) elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( + self.config.logger.info( 'Regularization problem failed to converge within the time limit.' ) self.results.solver.termination_condition = tc.maxTimeLimit # break elif main_mip_results.solver.termination_condition is tc.infeasible: - config.logger.info('Regularization problem infeasible.') + self.config.logger.info('Regularization problem infeasible.') elif main_mip_results.solver.termination_condition is tc.unbounded: - config.logger.info( - 'Regularization problem ubounded.' - 'Sometimes solving MIQP in cplex, unbounded means infeasible.' + self.config.logger.info( + 'Regularization problem unbounded.' + 'Sometimes solving MIQCP in CPLEX, unbounded means infeasible.' + ) + elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: + self.config.logger.info( + 'Regularization problem is infeasible or unbounded.' + 'It might happen when using CPLEX to solve MIQP.' ) elif main_mip_results.solver.termination_condition is tc.unknown: - config.logger.info( + self.config.logger.info( 'Termination condition of the regularization problem is unknown.' ) if main_mip_results.problem.lower_bound != float('-inf'): - config.logger.info('Solution limit has been reached.') - self.handle_main_optimal(main_mip, config, update_bound=False) + self.config.logger.info('Solution limit has been reached.') + self.handle_main_optimal(main_mip, update_bound=False) else: - config.logger.info( + self.config.logger.info( 'No solution obtained from the regularization subproblem.' 
'Please set mip_solver_tee to True for more information.' 'The solution of the OA main problem will be adopted.' @@ -2151,14 +1941,9 @@ def handle_regularization_main_tc(self, main_mip, main_mip_results, config): ) ) - def setup_main(self, config): - """Set up main problem/main regularization problem for OA, ECP, Feasibility Pump and ROA methods. - - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - """ + def setup_main(self): + """Set up main problem/main regularization problem for OA, ECP, Feasibility Pump and ROA methods.""" + config = self.config MindtPy = self.mip.MindtPy_utils for c in MindtPy.constraint_list: @@ -2178,7 +1963,7 @@ def setup_main(self, config): MindtPy.aug_penalty_expr = Expression( expr=sign_adjust * config.OA_penalty_factor - * sum(v for v in MindtPy.cuts.slack_vars[...]) + * sum(v for v in MindtPy.cuts.slack_vars.values()) ) main_objective = MindtPy.objective_list[-1] MindtPy.mip_obj = Objective( @@ -2206,14 +1991,8 @@ def setup_main(self, config): doc='Objective function expression should improve on the best found dual bound', ) - def setup_fp_main(self, config): - """Set up main problem/main regularization problem for OA, ECP, Feasibility Pump and ROA methods. - - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - """ + def setup_fp_main(self): + """Set up main problem for Feasibility Pump method.""" MindtPy = self.mip.MindtPy_utils for c in MindtPy.constraint_list: @@ -2223,27 +2002,22 @@ def setup_fp_main(self, config): MindtPy.cuts.activate() MindtPy.del_component('mip_obj') MindtPy.del_component('fp_mip_obj') - if config.fp_main_norm == 'L1': + if self.config.fp_main_norm == 'L1': MindtPy.fp_mip_obj = generate_norm1_objective_function( - self.mip, self.working_model, discrete_only=config.fp_discrete_only + self.mip, self.working_model, discrete_only=self.config.fp_discrete_only ) - elif config.fp_main_norm == 'L2': + elif self.config.fp_main_norm == 'L2': MindtPy.fp_mip_obj = generate_norm2sq_objective_function( - self.mip, self.working_model, discrete_only=config.fp_discrete_only + self.mip, self.working_model, discrete_only=self.config.fp_discrete_only ) - elif config.fp_main_norm == 'L_infinity': + elif self.config.fp_main_norm == 'L_infinity': MindtPy.fp_mip_obj = generate_norm_inf_objective_function( - self.mip, self.working_model, discrete_only=config.fp_discrete_only + self.mip, self.working_model, discrete_only=self.config.fp_discrete_only ) - def setup_regularization_main(self, config): - """Set up main problem/main regularization problem for OA, ECP, Feasibility Pump and ROA methods. - - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. 
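# --- Hedged sketch (an assumption, not the util.py implementation) of the L1
# feasibility-pump distance objective selected in setup_fp_main above: the MIP
# minimizes the distance between its discrete variables and their latest NLP
# values (ybar_i), with |y_i - ybar_i| linearized through nonnegative deviation
# variables. The real generate_norm1_objective_function in
# pyomo.contrib.mindtpy.util differs in detail.
from pyomo.environ import ConstraintList, NonNegativeReals, Objective, Var, minimize

def norm1_fp_objective_sketch(mip_block, discrete_vars, nlp_values):
    n = len(discrete_vars)
    mip_block.L1_dev = Var(range(n), domain=NonNegativeReals)
    mip_block.L1_dev_cons = ConstraintList()
    for i, (y, ybar) in enumerate(zip(discrete_vars, nlp_values)):
        mip_block.L1_dev_cons.add(mip_block.L1_dev[i] >= y - ybar)
        mip_block.L1_dev_cons.add(mip_block.L1_dev[i] >= ybar - y)
    return Objective(expr=sum(mip_block.L1_dev[i] for i in range(n)), sense=minimize)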
- """ + def setup_regularization_main(self): + """Set up main regularization problem for ROA method.""" + config = self.config MindtPy = self.mip.MindtPy_utils for c in MindtPy.constraint_list: @@ -2306,14 +2080,6 @@ def setup_regularization_main(self, config): + config.level_coef * self.dual_bound ) - def export_attributes(self): - for name, val in self.__dict__.items(): - setattr(self.solve_data, name, val) - - def update_attributes(self): - for name, val in self.solve_data.__dict__.items(): - self.__dict__[name] = val - def update_result(self): if self.objective_sense == minimize: self.results.problem.lower_bound = self.dual_bound @@ -2382,14 +2148,28 @@ def load_solution(self): working_model_variable_list, original_model_variable_list, config=config ) - def check_config(self): - """Checks if the configuration options make sense. + def check_subsolver_validity(self): + """Check if the subsolvers are available and licensed.""" + if not self.mip_opt.available(): + raise ValueError(self.config.mip_solver + ' is not available.') + if not self.mip_opt.license_is_valid(): + raise ValueError(self.config.mip_solver + ' is not licensed.') + if not self.nlp_opt.available(): + raise ValueError(self.config.nlp_solver + ' is not available.') + if not self.nlp_opt.license_is_valid(): + raise ValueError(self.config.nlp_solver + ' is not licensed.') + if self.config.add_regularization is not None: + if not self.regularization_mip_opt.available(): + raise ValueError( + self.config.mip_regularization_solver + ' is not available.' + ) + if not self.regularization_mip_opt.license_is_valid(): + raise ValueError( + self.config.mip_regularization_solver + ' is not licensed.' + ) - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - """ + def check_config(self): + """Checks if the configuration options make sense.""" config = self.config # configuration confirmation if config.init_strategy == 'FP': @@ -2419,41 +2199,16 @@ def check_config(self): if config.mip_solver in {'appsi_cplex', 'appsi_gurobi'}: config.logger.info("Solution pool does not support APPSI solver.") config.mip_solver = 'cplex_persistent' - if config.calculate_dual_at_solution: - if config.mip_solver == 'appsi_cplex': - config.logger.info( - "APPSI-Cplex cannot get duals for mixed-integer problems" - "mip_solver will be changed to Cplex." - ) - config.mip_solver = 'cplex' - if config.mip_regularization_solver == 'appsi_cplex': - config.logger.info( - "APPSI-Cplex cannot get duals for mixed-integer problems" - "mip_solver will be changed to Cplex." - ) - config.mip_regularization_solver = 'cplex' - if config.mip_solver in { - 'gurobi', - 'appsi_gurobi', - } or config.mip_regularization_solver in {'gurobi', 'appsi_gurobi'}: - raise ValueError( - "GUROBI can not provide duals for mixed-integer problems." - ) ################################################################################################################################ - # feasibility_pump.py + # Feasibility Pump - def solve_fp_subproblem(self, config): + def solve_fp_subproblem(self): """Solves the feasibility pump NLP subproblem. This function sets up the 'fp_nlp' by relax integer variables. precomputes dual values, deactivates trivial constraints, and then solves NLP model. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. 
- Returns ------- fp_nlp : Pyomo model @@ -2463,6 +2218,7 @@ def solve_fp_subproblem(self, config): """ fp_nlp = self.working_model.clone() MindtPy = fp_nlp.MindtPy_utils + config = self.config # Set up NLP fp_nlp.MindtPy_utils.objective_list[-1].deactivate() @@ -2494,27 +2250,26 @@ def solve_fp_subproblem(self, config): ignore_infeasible=False, tolerance=config.constraint_tolerance, ) - except InfeasibleConstraintException: - config.logger.warning( - 'infeasibility detected in deactivate_trivial_constraints' + except InfeasibleConstraintException as e: + config.logger.error( + str(e) + '\nInfeasibility detected in deactivate_trivial_constraints.' ) results = SolverResults() results.solver.termination_condition = tc.infeasible return fp_nlp, results # Solve the NLP - nlpopt = SolverFactory(config.nlp_solver) nlp_args = dict(config.nlp_solver_args) - set_solver_options(nlpopt, self.timing, config, solver_type='nlp') + update_solver_timelimit(self.nlp_opt, config.nlp_solver, self.timing, config) with SuppressInfeasibleWarning(): with time_code(self.timing, 'fp subproblem'): - results = nlpopt.solve( + results = self.nlp_opt.solve( fp_nlp, tee=config.nlp_solver_tee, load_solutions=False, **nlp_args ) if len(results.solution) > 0: fp_nlp.solutions.load_from(results) return fp_nlp, results - def handle_fp_subproblem_optimal(self, fp_nlp, config): + def handle_fp_subproblem_optimal(self, fp_nlp): """Copies the solution to the working model, updates bound, adds OA cuts / no-good cuts / increasing objective cut, calculates the duals and stores incumbent solution if it has been improved. @@ -2522,34 +2277,35 @@ def handle_fp_subproblem_optimal(self, fp_nlp, config): ---------- fp_nlp : Pyomo model The feasibility pump NLP subproblem. - config : ConfigBlock - The specific configurations for MindtPy. 
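The try/except around deactivate_trivial_constraints above follows the pattern sketched below: if the transformation proves a constraint trivially infeasible (here because the only variable is fixed), the subproblem is reported infeasible without ever calling the NLP solver. The toy model and tolerance value are illustrative.

from pyomo.environ import ConcreteModel, Var, Constraint, TransformationFactory
from pyomo.common.errors import InfeasibleConstraintException

m = ConcreteModel()
m.x = Var(bounds=(0, 1))
m.x.fix(0.0)
m.c = Constraint(expr=m.x >= 2)   # trivially infeasible once x is fixed

try:
    TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
        m, tmp=True, ignore_infeasible=False, tolerance=1e-6
    )
except InfeasibleConstraintException as e:
    print('subproblem declared infeasible during presolve:', e)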
""" copy_var_list_values( fp_nlp.MindtPy_utils.variable_list, self.working_model.MindtPy_utils.variable_list, - config, + self.config, ) - add_orthogonality_cuts(self.working_model, self.mip, config) + add_orthogonality_cuts(self.working_model, self.mip, self.config) # if OA-like or fp converged, update Upper bound, # add no_good cuts and increasing objective cuts (fp) if fp_converged( - self.working_model, self.mip, config, discrete_only=config.fp_discrete_only + self.working_model, + self.mip, + proj_zero_tolerance=self.config.fp_projzerotol, + discrete_only=self.config.fp_discrete_only, ): copy_var_list_values( self.mip.MindtPy_utils.variable_list, self.fixed_nlp.MindtPy_utils.variable_list, - config, + self.config, skip_fixed=False, ) - fixed_nlp, fixed_nlp_results = self.solve_subproblem(config) + fixed_nlp, fixed_nlp_results = self.solve_subproblem() if fixed_nlp_results.solver.termination_condition in { tc.optimal, tc.locallyOptimal, tc.feasible, }: - self.handle_subproblem_optimal(fixed_nlp, config) + self.handle_subproblem_optimal(fixed_nlp) if self.primal_bound_improved: self.mip.MindtPy_utils.cuts.del_component('improving_objective_cut') if self.objective_sense == minimize: @@ -2557,7 +2313,8 @@ def handle_fp_subproblem_optimal(self, fp_nlp, config): Constraint( expr=sum(self.mip.MindtPy_utils.objective_value[:]) <= self.primal_bound - - config.fp_cutoffdecr * max(1, abs(self.primal_bound)) + - self.config.fp_cutoffdecr + * max(1, abs(self.primal_bound)) ) ) else: @@ -2565,24 +2322,23 @@ def handle_fp_subproblem_optimal(self, fp_nlp, config): Constraint( expr=sum(self.mip.MindtPy_utils.objective_value[:]) >= self.primal_bound - + config.fp_cutoffdecr * max(1, abs(self.primal_bound)) + + self.config.fp_cutoffdecr + * max(1, abs(self.primal_bound)) ) ) else: - config.logger.error( + self.config.logger.error( 'Feasibility pump Fixed-NLP is infeasible, something might be wrong. ' 'There might be a problem with the precisions - the feasibility pump seems to have converged' ) - def handle_fp_main_tc(self, fp_main_results, config): + def handle_fp_main_tc(self, fp_main_results): """Handle the termination condition of the feasibility pump main problem. Parameters ---------- fp_main_results : SolverResults The results from solving the FP main problem. - config : ConfigBlock - The specific configurations for MindtPy. Returns ------- @@ -2590,7 +2346,7 @@ def handle_fp_main_tc(self, fp_main_results, config): True if FP loop should terminate, False otherwise. 
""" if fp_main_results.solver.termination_condition is tc.optimal: - config.logger.info( + self.config.logger.info( self.log_formatter.format( self.fp_iter, 'FP-MIP', @@ -2603,58 +2359,54 @@ def handle_fp_main_tc(self, fp_main_results, config): ) return False elif fp_main_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.warning('FP-MIP reaches max TimeLimit') + self.config.logger.warning('FP-MIP reaches max TimeLimit') self.results.solver.termination_condition = tc.maxTimeLimit return True elif fp_main_results.solver.termination_condition is tc.infeasible: - config.logger.warning('FP-MIP infeasible') + self.config.logger.warning('FP-MIP infeasible') no_good_cuts = self.mip.MindtPy_utils.cuts.no_good_cuts if no_good_cuts.__len__() > 0: no_good_cuts[no_good_cuts.__len__()].deactivate() return True elif fp_main_results.solver.termination_condition is tc.unbounded: - config.logger.warning('FP-MIP unbounded') + self.config.logger.warning('FP-MIP unbounded') return True elif ( fp_main_results.solver.termination_condition is tc.other and fp_main_results.solution.status is SolutionStatus.feasible ): - config.logger.warning( + self.config.logger.warning( 'MILP solver reported feasible solution of FP-MIP, ' 'but not guaranteed to be optimal.' ) return False else: - config.logger.warning('Unexpected result of FP-MIP') + self.config.logger.warning('Unexpected result of FP-MIP') return True - def fp_loop(self, config): + def fp_loop(self): """Feasibility pump loop. This is the outermost function for the Feasibility Pump algorithm in this package; this function - controls the progression of solving the model. - - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. + controls the progress of solving the model. Raises ------ ValueError MindtPy unable to handle the termination condition of the FP-NLP subproblem. 
""" + config = self.config while self.fp_iter < config.fp_iteration_limit: - # solve MILP main problem + # solve MIP main problem with time_code(self.timing, 'fp main'): - fp_main, fp_main_results = self.solve_fp_main(config) - fp_should_terminate = self.handle_fp_main_tc(fp_main_results, config) + fp_main, fp_main_results = self.solve_fp_main() + fp_should_terminate = self.handle_fp_main_tc(fp_main_results) if fp_should_terminate: break # Solve NLP subproblem # The constraint linearization happens in the handlers - fp_nlp, fp_nlp_result = self.solve_fp_subproblem(config) + fp_nlp, fp_nlp_result = self.solve_fp_subproblem() if fp_nlp_result.solver.termination_condition in { tc.optimal, @@ -2672,7 +2424,7 @@ def fp_loop(self, config): get_main_elapsed_time(self.timing), ) ) - self.handle_fp_subproblem_optimal(fp_nlp, config) + self.handle_fp_subproblem_optimal(fp_nlp) elif fp_nlp_result.solver.termination_condition in { tc.infeasible, tc.noSolution, @@ -2722,10 +2474,14 @@ def initialize_mip_problem(self): self.mip = self.working_model.clone() next(self.mip.component_data_objects(Objective, active=True)).deactivate() + if hasattr(self.mip, 'dual') and isinstance(self.mip.dual, Suffix): + self.mip.del_component('dual') + # Deactivate extraneous IMPORT/EXPORT suffixes + if config.nlp_solver in {'ipopt', 'cyipopt'}: + getattr(self.mip, 'ipopt_zL_out', _DoNothing()).deactivate() + getattr(self.mip, 'ipopt_zU_out', _DoNothing()).deactivate() MindtPy = self.mip.MindtPy_utils - if config.calculate_dual_at_solution: - self.mip.dual.deactivate() if config.init_strategy == 'FP': MindtPy.cuts.fp_orthogonality_cuts = ConstraintList( @@ -2738,7 +2494,132 @@ def initialize_mip_problem(self): self.fixed_nlp = self.working_model.clone() TransformationFactory('core.fix_integer_vars').apply_to(self.fixed_nlp) - add_feas_slacks(self.fixed_nlp, config) + initialize_feas_subproblem(self.fixed_nlp, config) + + def initialize_subsolvers(self): + """Initialize and set options for MIP and NLP subsolvers.""" + config = self.config + if config.mip_solver == 'gurobi_persistent' and config.single_tree: + self.mip_opt = GurobiPersistent4MindtPy() + else: + self.mip_opt = SolverFactory(config.mip_solver) + self.nlp_opt = SolverFactory(config.nlp_solver) + self.feasibility_nlp_opt = SolverFactory(config.nlp_solver) + if config.mip_regularization_solver is not None: + self.regularization_mip_opt = SolverFactory( + config.mip_regularization_solver + ) + + self.check_subsolver_validity() + if config.mip_solver == 'gams': + self.mip_opt.options['add_options'] = [] + if config.nlp_solver == 'gams': + self.nlp_opt.options['add_options'] = [] + self.feasibility_nlp_opt.options['add_options'] = [] + set_solver_mipgap(self.mip_opt, config.mip_solver, config) + + set_solver_constraint_violation_tolerance( + self.nlp_opt, config.nlp_solver, config + ) + set_solver_constraint_violation_tolerance( + self.feasibility_nlp_opt, config.nlp_solver, config + ) + + self.set_appsi_solver_update_config() + + if config.mip_solver == 'gurobi_persistent' and config.single_tree: + # PreCrush: Controls presolve reductions that affect user cuts + # You should consider setting this parameter to 1 if you are using callbacks to add your own cuts. 
+ self.mip_opt.options['PreCrush'] = 1 + self.mip_opt.options['LazyConstraints'] = 1 + + # set threads + if config.threads > 0: + self.mip_opt.options['threads'] = config.threads + # regularization solver + if config.mip_regularization_solver is not None: + set_solver_mipgap( + self.regularization_mip_opt, config.mip_regularization_solver, config + ) + if config.mip_regularization_solver == 'gams': + self.regularization_mip_opt.options['add_options'] = [] + if config.regularization_mip_threads > 0: + self.regularization_mip_opt.options[ + 'threads' + ] = config.regularization_mip_threads + else: + self.regularization_mip_opt.options['threads'] = config.threads + + if config.mip_regularization_solver in { + 'cplex', + 'appsi_cplex', + 'cplex_persistent', + }: + if config.solution_limit is not None: + self.regularization_mip_opt.options[ + 'mip_limits_solutions' + ] = config.solution_limit + # We don't need to solve the regularization problem to optimality. + # We will choose to perform aggressive node probing during presolve. + self.regularization_mip_opt.options['mip_strategy_presolvenode'] = 3 + # When using ROA method to solve convex MINLPs, the Hessian of the Lagrangean is always positive semidefinite, + # and the regularization subproblems are always convex. + # However, due to numerical accuracy, the regularization problem ended up nonconvex for a few cases, + # e.g., the smallest eigenvalue of the Hessian was slightly negative. + # Therefore, we set the optimalitytarget parameter to 3 to enable CPLEX to solve nonconvex MIQPs in the ROA-L2 and ROA-∇2L methods. + if config.add_regularization in {'hess_lag', 'hess_only_lag'}: + self.regularization_mip_opt.options['optimalitytarget'] = 3 + elif config.mip_regularization_solver == 'gurobi': + if config.solution_limit is not None: + self.regularization_mip_opt.options[ + 'SolutionLimit' + ] = config.solution_limit + # Same reason as mip_strategy_presolvenode. 
+ self.regularization_mip_opt.options['Presolve'] = 2 + + def set_appsi_solver_update_config(self): + """Set update config for APPSI solvers.""" + config = self.config + if config.mip_solver in {'appsi_cplex', 'appsi_gurobi', 'appsi_highs'}: + # mip main problem + self.mip_opt.update_config.check_for_new_or_removed_constraints = True + self.mip_opt.update_config.check_for_new_or_removed_vars = True + self.mip_opt.update_config.check_for_new_or_removed_params = False + self.mip_opt.update_config.check_for_new_objective = True + self.mip_opt.update_config.update_constraints = True + self.mip_opt.update_config.update_vars = True + self.mip_opt.update_config.update_params = False + self.mip_opt.update_config.update_named_expressions = False + self.mip_opt.update_config.update_objective = False + self.mip_opt.update_config.treat_fixed_vars_as_params = True + + if config.nlp_solver == 'appsi_ipopt': + # fixed-nlp + self.nlp_opt.update_config.check_for_new_or_removed_constraints = False + self.nlp_opt.update_config.check_for_new_or_removed_vars = False + self.nlp_opt.update_config.check_for_new_or_removed_params = False + self.nlp_opt.update_config.check_for_new_objective = False + self.nlp_opt.update_config.update_constraints = True + self.nlp_opt.update_config.update_vars = True + self.nlp_opt.update_config.update_params = False + self.nlp_opt.update_config.update_named_expressions = False + self.nlp_opt.update_config.update_objective = False + self.nlp_opt.update_config.treat_fixed_vars_as_params = False + + self.feasibility_nlp_opt.update_config.check_for_new_or_removed_constraints = ( + False + ) + self.feasibility_nlp_opt.update_config.check_for_new_or_removed_vars = False + self.feasibility_nlp_opt.update_config.check_for_new_or_removed_params = ( + False + ) + self.feasibility_nlp_opt.update_config.check_for_new_objective = False + self.feasibility_nlp_opt.update_config.update_constraints = False + self.feasibility_nlp_opt.update_config.update_vars = True + self.feasibility_nlp_opt.update_config.update_params = False + self.feasibility_nlp_opt.update_config.update_named_expressions = False + self.feasibility_nlp_opt.update_config.update_objective = False + self.feasibility_nlp_opt.update_config.treat_fixed_vars_as_params = False def solve(self, model, **kwds): """Solve the model. @@ -2762,7 +2643,7 @@ def solve(self, model, **kwds): with lower_logger_level_to(config.logger, new_logging_level): self.check_config() - self.set_up_solve_data(model, config) + self.set_up_solve_data(model) if config.integer_to_binary: TransformationFactory('contrib.integer_to_binary').apply_to( @@ -2774,6 +2655,7 @@ def solve(self, model, **kwds): config.logger, new_logging_level ): self._log_solver_intro_message() + self.initialize_subsolvers() # Validate the model to ensure that MindtPy is able to solve it. if not self.model_is_valid(): @@ -2797,11 +2679,11 @@ def solve(self, model, **kwds): # Initialization with time_code(self.timing, 'initialization'): - self.MindtPy_initialization(config) + self.MindtPy_initialization() # Algorithm main loop with time_code(self.timing, 'main loop'): - self.MindtPy_iteration_loop(config) + self.MindtPy_iteration_loop() # Load solution if self.best_solution_found is not None: @@ -2834,31 +2716,67 @@ def objective_reformulation(self): # In ROA and RLP/NLP, since the distance calculation does not include these epigraph slack variables, they should not be added to the variable list. 
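The update_config settings above control how much of the model an APPSI persistent interface re-checks before each solve. The sketch below shows the same idea for a stand-alone appsi_ipopt instance; the flag values mirror the fixed-NLP case above and are not meaningful outside that context.

from pyomo.opt import SolverFactory

nlp_opt = SolverFactory('appsi_ipopt')
# The structure of the fixed NLP never changes between solves, so the
# structural checks are disabled; variable values (the re-fixed integers)
# and constraint data are still refreshed before each solve.
nlp_opt.update_config.check_for_new_or_removed_constraints = False
nlp_opt.update_config.check_for_new_or_removed_vars = False
nlp_opt.update_config.check_for_new_or_removed_params = False
nlp_opt.update_config.check_for_new_objective = False
nlp_opt.update_config.update_constraints = True
nlp_opt.update_config.update_vars = True
nlp_opt.update_config.treat_fixed_vars_as_params = False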
(update_var_con_list = False) # In the process_objective function, once the objective function has been reformulated as epigraph constraint, the variable/constraint/objective lists will not be updated only if the MINLP has a linear objective function and regularization is activated at the same time. # This is because the epigraph constraint is very "flat" for branching rules. The original objective function will be used for the main problem and epigraph reformulation will be used for the projection problem. - # TODO: The logic here is too complicated, can we simplify it? - config = self.config - self.process_objective( - self.config, - move_objective=config.move_objective, - use_mcpp=config.use_mcpp, - update_var_con_list=True, - partition_nonlinear_terms=config.partition_obj_nonlinear_terms, - obj_handleable_polynomial_degree=self.mip_objective_polynomial_degree, - constr_handleable_polynomial_degree=self.mip_constraint_polynomial_degree, - ) + self.process_objective(update_var_con_list=True) def handle_main_mip_termination(self, main_mip, main_mip_results): should_terminate = False if main_mip_results is not None: if not self.config.single_tree: if main_mip_results.solver.termination_condition is tc.optimal: - self.handle_main_optimal(main_mip, self.config) + self.handle_main_optimal(main_mip) elif main_mip_results.solver.termination_condition is tc.infeasible: - self.handle_main_infeasible(main_mip, self.config) + self.handle_main_infeasible() self.last_iter_cuts = True should_terminate = True + elif main_mip_results.solver.termination_condition is tc.unbounded: + temp_results = self.handle_main_unbounded(main_mip) + elif ( + main_mip_results.solver.termination_condition + is tc.infeasibleOrUnbounded + ): + temp_results = self.handle_main_unbounded(main_mip) + if temp_results.solver.termination_condition is tc.infeasible: + self.handle_main_infeasible() + elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: + self.handle_main_max_timelimit(main_mip, main_mip_results) + self.results.solver.termination_condition = tc.maxTimeLimit + elif main_mip_results.solver.termination_condition is tc.feasible or ( + main_mip_results.solver.termination_condition is tc.other + and main_mip_results.solution.status is SolutionStatus.feasible + ): + # load the solution and suppress the warning message by setting + # solver status to ok. + MindtPy = main_mip.MindtPy_utils + self.config.logger.info( + 'MILP solver reported feasible solution, ' + 'but not guaranteed to be optimal.' + ) + copy_var_list_values( + main_mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + self.config, + skip_fixed=False, + ) + self.update_suboptimal_dual_bound(main_mip_results) + self.config.logger.info( + self.log_formatter.format( + self.mip_iter, + 'MILP', + value(MindtPy.mip_obj.expr), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) else: - self.handle_main_other_conditions( - main_mip, main_mip_results, self.config + raise ValueError( + 'MindtPy unable to handle MILP main termination condition ' + 'of %s. Solver message: %s' + % ( + main_mip_results.solver.termination_condition, + main_mip_results.solver.message, + ) ) else: self.config.logger.info('Algorithm should terminate here.') @@ -2867,37 +2785,48 @@ def handle_main_mip_termination(self, main_mip, main_mip_results): return should_terminate # iterate.py - def MindtPy_iteration_loop(self, config): + def MindtPy_iteration_loop(self): """Main loop for MindtPy Algorithms. 
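For readers unfamiliar with the epigraph reformulation discussed in the comments above, here is a generic sketch (not MindtPy's epigraph_reformulation helper): a nonlinear objective is replaced by an auxiliary variable bounded below by the original objective, so the nonlinearity moves into the constraint set handled by the OA cuts.

from pyomo.environ import ConcreteModel, Var, Constraint, Objective, minimize

m = ConcreteModel()
m.x = Var(bounds=(-2, 2), initialize=0.5)
m.t = Var()                                   # epigraph variable
m.epigraph = Constraint(expr=m.x**4 - m.x <= m.t)
m.obj = Objective(expr=m.t, sense=minimize)   # original objective was x**4 - x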
- This is the outermost function for the Outer Approximation algorithm in this package; this function controls the progression of + This is the outermost function for the Outer Approximation algorithm in this package; this function controls the progress of solving the model. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Raises ------ ValueError The strategy value is not correct or not included. """ + config = self.config while self.mip_iter < config.iteration_limit: - # solve MILP main problem + # solve MIP main problem with time_code(self.timing, 'main'): - main_mip, main_mip_results = self.solve_main(config) + main_mip, main_mip_results = self.solve_main() if self.handle_main_mip_termination(main_mip, main_mip_results): break - # Call the MILP post-solve callback + # Call the MIP post-solve callback with time_code(self.timing, 'Call after main solve'): config.call_after_main_solve(main_mip) # Regularization is activated after the first feasible solution is found. if config.add_regularization is not None: - self.add_regularization(main_mip) + if not config.single_tree: + self.add_regularization() + + # In R-LP/NLP, we might end up with an integer combination that hasn't been explored. + # Therefore, we need to solve fixed NLP subproblem one more time. + if config.single_tree: + self.curr_int_sol = get_integer_solution(self.mip, string_zero=True) + copy_var_list_values( + main_mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + config, + skip_fixed=False, + ) + if self.curr_int_sol not in set(self.integer_list): + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) - if self.algorithm_should_terminate(config, check_cycling=True): + if self.algorithm_should_terminate(check_cycling=True): self.last_iter_cuts = False break @@ -2905,14 +2834,14 @@ def MindtPy_iteration_loop(self, config): # Solve NLP subproblem # The constraint linearization happens in the handlers if not config.solution_pool: - fixed_nlp, fixed_nlp_result = self.solve_subproblem(config) - self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, config) + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) # Call the NLP post-solve callback with time_code(self.timing, 'Call after subproblem solve'): config.call_after_subproblem_solve(fixed_nlp) - if self.algorithm_should_terminate(config, check_cycling=False): + if self.algorithm_should_terminate(check_cycling=False): self.last_iter_cuts = True break else: @@ -2937,16 +2866,14 @@ def MindtPy_iteration_loop(self, config): continue else: self.integer_list.append(self.curr_int_sol) - fixed_nlp, fixed_nlp_result = self.solve_subproblem(config) - self.handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, config - ) + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) # Call the NLP post-solve callback with time_code(self.timing, 'Call after subproblem solve'): config.call_after_subproblem_solve(fixed_nlp) - if self.algorithm_should_terminate(config, check_cycling=False): + if self.algorithm_should_terminate(check_cycling=False): self.last_iter_cuts = True break # TODO: break two loops. 
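A condensed, schematic version of the iteration loop above, shown only to make the control flow explicit; this is not MindtPy's actual method. The MIP main problem and the fixed-NLP subproblem alternate until a termination or cycling check fires.

def oa_loop(solve_main, solve_subproblem, should_terminate, iteration_limit):
    """Schematic OA outer loop: MIP main problem, then fixed-NLP subproblem."""
    for _ in range(iteration_limit):
        main_ok = solve_main()                    # MIP main problem over current OA cuts
        if not main_ok:
            break                                 # infeasible, unbounded, or time limit
        if should_terminate(check_cycling=True):  # bounds closed or cycling detected
            break
        solve_subproblem()                        # fixed NLP at the new integer point; adds fresh cuts
        if should_terminate(check_cycling=False):
            break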
@@ -2957,27 +2884,26 @@ def MindtPy_iteration_loop(self, config): and not self.should_terminate and config.add_regularization is None ): - self.fix_dual_bound(config, self.last_iter_cuts) + self.fix_dual_bound(self.last_iter_cuts) config.logger.info( ' ===============================================================================================' ) def get_solution_name_obj(self, main_mip_results): - config = self.config - if config.mip_solver == 'cplex_persistent': + if self.config.mip_solver == 'cplex_persistent': solution_pool_names = ( main_mip_results._solver_model.solution.pool.get_names() ) - elif config.mip_solver == 'gurobi_persistent': + elif self.config.mip_solver == 'gurobi_persistent': solution_pool_names = list(range(main_mip_results._solver_model.SolCount)) # list to store the name and objective value of the solutions in the solution pool solution_name_obj = [] for name in solution_pool_names: - if config.mip_solver == 'cplex_persistent': + if self.config.mip_solver == 'cplex_persistent': obj = main_mip_results._solver_model.solution.pool.get_objective_value( name ) - elif config.mip_solver == 'gurobi_persistent': + elif self.config.mip_solver == 'gurobi_persistent': main_mip_results._solver_model.setParam( gurobipy.GRB.Param.SolutionNumber, name ) @@ -2986,55 +2912,40 @@ def get_solution_name_obj(self, main_mip_results): solution_name_obj.sort( key=itemgetter(1), reverse=self.objective_sense == maximize ) - solution_name_obj = solution_name_obj[: config.num_solution_iteration] + solution_name_obj = solution_name_obj[: self.config.num_solution_iteration] return solution_name_obj - def add_regularization(self, main_mip): - config = self.config - if self.best_solution_found is not None and not config.single_tree: + def add_regularization(self): + if self.best_solution_found is not None: # The main problem might be unbounded, regularization is activated only when a valid bound is provided. if self.dual_bound != self.dual_bound_progress[0]: with time_code(self.timing, 'regularization main'): ( regularization_main_mip, regularization_main_mip_results, - ) = self.solve_regularization_main(config) + ) = self.solve_regularization_main() self.handle_regularization_main_tc( - regularization_main_mip, regularization_main_mip_results, config + regularization_main_mip, regularization_main_mip_results ) - # TODO: add descriptions for the following code - if config.single_tree: - self.curr_int_sol = get_integer_solution(self.mip, string_zero=True) - copy_var_list_values( - main_mip.MindtPy_utils.variable_list, - self.fixed_nlp.MindtPy_utils.variable_list, - config, - skip_fixed=False, - ) - if self.curr_int_sol not in set(self.integer_list): - fixed_nlp, fixed_nlp_result = self.solve_subproblem(config) - self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, config) - def bounds_converged(self): # Check bound convergence - config = self.config - if self.abs_gap <= config.absolute_bound_tolerance: - config.logger.info( + if self.abs_gap <= self.config.absolute_bound_tolerance: + self.config.logger.info( 'MindtPy exiting on bound convergence. 
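The solution-pool bookkeeping in get_solution_name_obj above boils down to pairing pool entries with their objective values, sorting best-first for the objective sense, and truncating to num_solution_iteration. The data below are made up.

from operator import itemgetter

solution_name_obj = [('p3', 7.2), ('p1', 5.4), ('p2', 6.1)]
maximize_sense = False          # sort ascending for minimization
num_solution_iteration = 2      # illustrative value of config.num_solution_iteration

solution_name_obj.sort(key=itemgetter(1), reverse=maximize_sense)
solution_name_obj = solution_name_obj[:num_solution_iteration]
assert solution_name_obj == [('p1', 5.4), ('p2', 6.1)]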
' 'Absolute gap: {} <= absolute tolerance: {} \n'.format( - self.abs_gap, config.absolute_bound_tolerance + self.abs_gap, self.config.absolute_bound_tolerance ) ) self.results.solver.termination_condition = tc.optimal return True # Check relative bound convergence if self.best_solution_found is not None: - if self.rel_gap <= config.relative_bound_tolerance: - config.logger.info( + if self.rel_gap <= self.config.relative_bound_tolerance: + self.config.logger.info( 'MindtPy exiting on bound convergence. ' 'Relative gap : {} <= relative tolerance: {} \n'.format( - self.rel_gap, config.relative_bound_tolerance + self.rel_gap, self.config.relative_bound_tolerance ) ) self.results.solver.termination_condition = tc.optimal @@ -3043,18 +2954,17 @@ def bounds_converged(self): def reached_iteration_limit(self): # Check iteration limit - config = self.config - if self.mip_iter >= config.iteration_limit: - config.logger.info( + if self.mip_iter >= self.config.iteration_limit: + self.config.logger.info( 'MindtPy unable to converge bounds ' 'after {} main iterations.'.format(self.mip_iter) ) - config.logger.info( + self.config.logger.info( 'Final bound values: Primal Bound: {} Dual Bound: {}'.format( self.primal_bound, self.dual_bound ) ) - if config.single_tree: + if self.config.single_tree: self.results.solver.termination_condition = tc.feasible else: self.results.solver.termination_condition = tc.maxIterations @@ -3063,16 +2973,15 @@ def reached_iteration_limit(self): return False def reached_time_limit(self): - config = self.config - if get_main_elapsed_time(self.timing) >= config.time_limit: - config.logger.info( + if get_main_elapsed_time(self.timing) >= self.config.time_limit: + self.config.logger.info( 'MindtPy unable to converge bounds ' 'before time limit of {} seconds. 
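The bound-convergence test above reduces to the check sketched below. The relative-gap formula here is a guarded approximation and may differ slightly from MindtPy's internal rel_gap computation; tolerance values are illustrative.

def bounds_converged(primal_bound, dual_bound, abs_tol=1e-4, rel_tol=1e-3):
    abs_gap = abs(primal_bound - dual_bound)
    rel_gap = abs_gap / (abs(primal_bound) + 1e-10)  # guarded denominator
    return abs_gap <= abs_tol or rel_gap <= rel_tol

assert bounds_converged(10.0, 10.00005)      # absolute gap within tolerance
assert not bounds_converged(10.0, 9.0)       # both gaps too large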
' 'Elapsed: {} seconds'.format( - config.time_limit, get_main_elapsed_time(self.timing) + self.config.time_limit, get_main_elapsed_time(self.timing) ) ) - config.logger.info( + self.config.logger.info( 'Final bound values: Primal Bound: {} Dual Bound: {}'.format( self.primal_bound, self.dual_bound ) diff --git a/pyomo/contrib/mindtpy/config_options.py b/pyomo/contrib/mindtpy/config_options.py index d0dab292937..ed0c86baae9 100644 --- a/pyomo/contrib/mindtpy/config_options.py +++ b/pyomo/contrib/mindtpy/config_options.py @@ -538,6 +538,7 @@ def _add_subsolver_configs(CONFIG): 'cplex_persistent', 'appsi_cplex', 'appsi_gurobi', + # 'appsi_highs', TODO: feasibility pump now fails with appsi_highs #2951 ] ), description='MIP subsolver name', @@ -619,6 +620,7 @@ def _add_subsolver_configs(CONFIG): 'cplex_persistent', 'appsi_cplex', 'appsi_gurobi', + # 'appsi_highs', ] ), description='MIP subsolver for regularization problem', @@ -840,22 +842,6 @@ def _add_roa_configs(CONFIG): description='The solution limit for the regularization problem since it does not need to be solved to optimality.', ), ) - CONFIG.declare( - 'reduce_level_coef', - ConfigValue( - default=False, - description='Whether to reduce level coefficient in ROA single tree when regularization problem is infeasible.', - domain=bool, - ), - ) - CONFIG.declare( - 'use_bb_tree_incumbent', - ConfigValue( - default=False, - description='Whether to use the incumbent solution of branch & bound tree in ROA single tree when regularization problem is infeasible.', - domain=bool, - ), - ) CONFIG.declare( 'sqp_lag_scaling_coef', ConfigValue( diff --git a/pyomo/contrib/mindtpy/cut_generation.py b/pyomo/contrib/mindtpy/cut_generation.py index 542d5a7d699..c0449054baa 100644 --- a/pyomo/contrib/mindtpy/cut_generation.py +++ b/pyomo/contrib/mindtpy/cut_generation.py @@ -42,8 +42,14 @@ def add_oa_cuts( The relaxed linear model. dual_values : list The value of the duals for each constraint. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + jacobians : ComponentMap + Map nonlinear_constraint --> Map(variable --> jacobian of constraint w.r.t. variable). + objective_sense : Int + Objective sense of model. + mip_constraint_polynomial_degree : Set + The polynomial degrees of constraints that are regarded as linear. + mip_iter : Int + MIP iteration counter. config : ConfigBlock The specific configurations for MindtPy. cb_opt : SolverFactory, optional @@ -189,10 +195,12 @@ def add_ecp_cuts( ---------- target_model : Pyomo model The relaxed linear model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + jacobians : ComponentMap + Map nonlinear_constraint --> Map(variable --> jacobian of constraint w.r.t. variable) config : ConfigBlock The specific configurations for MindtPy. + timing : Timing + Timing. linearize_active : bool, optional Whether to linearize the active nonlinear constraints, by default True. linearize_violated : bool, optional @@ -213,9 +221,9 @@ def add_ecp_cuts( if constr.has_ub(): try: upper_slack = constr.uslack() - except (ValueError, OverflowError): - config.logger.warning( - 'constraint {} has caused either a ' + except (ValueError, OverflowError) as e: + config.logger.error( + str(e) + '\nConstraint {} has caused either a ' 'ValueError or OverflowError.' 
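The ECP (and OA) cuts whose slack handling appears above are first-order linearizations of violated nonlinear constraints. Below is a generic sketch on a toy constraint using Pyomo's differentiate(); it is not MindtPy's cut_generation code, and the model is illustrative.

from pyomo.environ import ConcreteModel, Var, Constraint, ConstraintList, value
from pyomo.core.expr.calculus.derivatives import differentiate

m = ConcreteModel()
m.x = Var(initialize=2.0)
m.g = Constraint(expr=m.x**2 - 1 <= 0)  # violated at the current point x = 2
m.cuts = ConstraintList()

# g(x*) + g'(x*) * (x - x*) <= 0, which at x* = 2 reads 4*x - 5 <= 0
grad = differentiate(m.g.body, wrt=m.x)   # derivative of g at the current point
m.cuts.add(value(m.g.body) + value(grad) * (m.x - value(m.x)) <= m.g.upper)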
'\n'.format(constr) ) @@ -242,9 +250,9 @@ def add_ecp_cuts( if constr.has_lb(): try: lower_slack = constr.lslack() - except (ValueError, OverflowError): - config.logger.warning( - 'constraint {} has caused either a ' + except (ValueError, OverflowError) as e: + config.logger.error( + str(e) + '\nConstraint {} has caused either a ' 'ValueError or OverflowError.' '\n'.format(constr) ) @@ -278,14 +286,16 @@ def add_no_good_cuts(target_model, var_values, config, timing, mip_iter=0, cb_op Parameters ---------- + target_model : Block + The model to add no-good cuts to. var_values : list Variable values of the current solution, used to generate the cut. - solve_data : MindtPySolveData - Data container that holds solve-instance data. config : ConfigBlock The specific configurations for MindtPy. - mip_iter: Int, optional - Mip iteration counter. + timing : Timing + Timing. + mip_iter : Int, optional + MIP iteration counter. cb_opt : SolverFactory, optional Gurobi_persistent solver, by default None. @@ -346,10 +356,10 @@ def add_affine_cuts(target_model, config, timing): Parameters ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. config : ConfigBlock The specific configurations for MindtPy. + timing : Timing + Timing. """ with time_code(timing, 'Affine cut generation'): m = target_model @@ -365,8 +375,8 @@ def add_affine_cuts(target_model, config, timing): try: mc_eqn = mc(constr.body) except MCPP_Error as e: - config.logger.debug( - 'Skipping constraint %s due to MCPP error %s' + config.logger.error( + '\nSkipping constraint %s due to MCPP error %s' % (constr.name, str(e)) ) continue # skip to the next constraint diff --git a/pyomo/contrib/mindtpy/extended_cutting_plane.py b/pyomo/contrib/mindtpy/extended_cutting_plane.py index a7b1336feb1..446304b1361 100644 --- a/pyomo/contrib/mindtpy/extended_cutting_plane.py +++ b/pyomo/contrib/mindtpy/extended_cutting_plane.py @@ -38,57 +38,41 @@ class MindtPy_ECP_Solver(_MindtPyAlgorithm): CONFIG = _get_MindtPy_ECP_config() - def MindtPy_iteration_loop(self, config): + def MindtPy_iteration_loop(self): """Main loop for MindtPy Algorithms. - This is the outermost function for the Extended Cutting Plane algorithm in this package; this function controls the progression of + This is the outermost function for the Extended Cutting Plane algorithm in this package; this function controls the progress of solving the model. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Raises ------ ValueError The strategy value is not correct or not included. 
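The no-good cuts referenced above exclude an already explored 0/1 assignment from future main problems. The sketch below mirrors the standard formulation on a toy model, not MindtPy's add_no_good_cuts internals.

from pyomo.environ import ConcreteModel, Var, Binary, ConstraintList

m = ConcreteModel()
m.y = Var([1, 2, 3], domain=Binary, initialize={1: 1, 2: 0, 3: 1})
m.no_good_cuts = ConstraintList()

ones = [i for i in m.y if m.y[i].value > 0.5]
zeros = [i for i in m.y if m.y[i].value <= 0.5]
# At least one binary must flip relative to the current assignment.
m.no_good_cuts.add(
    sum(1 - m.y[i] for i in ones) + sum(m.y[i] for i in zeros) >= 1
)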
""" - while self.mip_iter < config.iteration_limit: - # solve MILP main problem - main_mip, main_mip_results = self.solve_main(config) - if main_mip_results is not None: - if not config.single_tree: - if main_mip_results.solver.termination_condition is tc.optimal: - self.handle_main_optimal(main_mip, config) - elif main_mip_results.solver.termination_condition is tc.infeasible: - self.handle_main_infeasible(main_mip, config) - self.last_iter_cuts = True - break - else: - self.handle_main_other_conditions( - main_mip, main_mip_results, config - ) - # Call the MILP post-solve callback - with time_code(self.timing, 'Call after main solve'): - config.call_after_main_solve(main_mip) - else: - config.logger.info('Algorithm should terminate here.') + while self.mip_iter < self.config.iteration_limit: + # solve MIP main problem + main_mip, main_mip_results = self.solve_main() + + if self.handle_main_mip_termination(main_mip, main_mip_results): break - if self.algorithm_should_terminate(config): + # Call the MIP post-solve callback + with time_code(self.timing, 'Call after main solve'): + self.config.call_after_main_solve(main_mip) + + if self.algorithm_should_terminate(): self.last_iter_cuts = False break - add_ecp_cuts(self.mip, self.jacobians, config, self.timing) + add_ecp_cuts(self.mip, self.jacobians, self.config, self.timing) # if add_no_good_cuts is True, the bound obtained in the last iteration is no reliable. # we correct it after the iteration. if ( - config.add_no_good_cuts or config.use_tabu_list + self.config.add_no_good_cuts or self.config.use_tabu_list ) and not self.should_terminate: - self.fix_dual_bound(config, self.last_iter_cuts) - config.logger.info( + self.fix_dual_bound(self.last_iter_cuts) + self.config.logger.info( ' ===============================================================================================' ) @@ -107,34 +91,24 @@ def initialize_mip_problem(self): doc='Extended Cutting Planes' ) - def init_rNLP(self, config): + def init_rNLP(self): """Initialize the problem by solving the relaxed NLP and then store the optimal variable values obtained from solving the rNLP. - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Raises ------ ValueError MindtPy unable to handle the termination condition of the relaxed NLP. """ - super().init_rNLP(config, add_oa_cuts=False) + super().init_rNLP(add_oa_cuts=False) - def algorithm_should_terminate(self, config): + def algorithm_should_terminate(self): """Checks if the algorithm should terminate at the given point. This function determines whether the algorithm should terminate based on the solver options and progress. (Sets the self.results.solver.termination_condition to the appropriate condition, i.e. optimal, maxIterations, maxTimeLimit). - Parameters - ---------- - config : ConfigBlock - The specific configurations for MindtPy. - Returns ------- bool @@ -164,8 +138,9 @@ def all_nonlinear_constraint_satisfied(self): if nlc.has_lb(): try: lower_slack = nlc.lslack() - except (ValueError, OverflowError): + except (ValueError, OverflowError) as e: # Set lower_slack (upper_slack below) less than -config.ecp_tolerance in this case. 
+ config.logger.error(e) lower_slack = -10 * config.ecp_tolerance if lower_slack < -config.ecp_tolerance: config.logger.debug( @@ -177,7 +152,8 @@ def all_nonlinear_constraint_satisfied(self): if nlc.has_ub(): try: upper_slack = nlc.uslack() - except (ValueError, OverflowError): + except (ValueError, OverflowError) as e: + config.logger.error(e) upper_slack = -10 * config.ecp_tolerance if upper_slack < -config.ecp_tolerance: config.logger.debug( diff --git a/pyomo/contrib/mindtpy/feasibility_pump.py b/pyomo/contrib/mindtpy/feasibility_pump.py index bdff3d885fb..5716400598a 100644 --- a/pyomo/contrib/mindtpy/feasibility_pump.py +++ b/pyomo/contrib/mindtpy/feasibility_pump.py @@ -14,16 +14,9 @@ import logging from pyomo.contrib.mindtpy.config_options import _get_MindtPy_FP_config from pyomo.contrib.mindtpy.algorithm_base_class import _MindtPyAlgorithm -from pyomo.core import TransformationFactory, Objective, ConstraintList -from pyomo.contrib.mindtpy.util import ( - set_up_logger, - setup_results_object, - add_var_bound, - calc_jacobians, - add_feas_slacks, -) +from pyomo.core import ConstraintList +from pyomo.contrib.mindtpy.util import calc_jacobians from pyomo.opt import SolverFactory -from pyomo.contrib.gdpopt.util import time_code, lower_logger_level_to from pyomo.contrib.mindtpy.cut_generation import add_oa_cuts @@ -75,5 +68,5 @@ def add_cuts( linearize_violated, ) - def MindtPy_iteration_loop(self, config): + def MindtPy_iteration_loop(self): pass diff --git a/pyomo/contrib/mindtpy/global_outer_approximation.py b/pyomo/contrib/mindtpy/global_outer_approximation.py index 717531523f7..ee3ffb62f55 100644 --- a/pyomo/contrib/mindtpy/global_outer_approximation.py +++ b/pyomo/contrib/mindtpy/global_outer_approximation.py @@ -11,34 +11,12 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ -import logging -import math -from pyomo.contrib.gdpopt.util import ( - time_code, - lower_logger_level_to, - get_main_elapsed_time, -) -from pyomo.contrib.mindtpy.util import ( - set_up_logger, - setup_results_object, - get_integer_solution, - copy_var_list_values_from_solution_pool, - add_var_bound, - add_feas_slacks, -) -from pyomo.core import ( - TransformationFactory, - minimize, - maximize, - Objective, - ConstraintList, -) + +from pyomo.contrib.gdpopt.util import get_main_elapsed_time +from pyomo.core import ConstraintList from pyomo.opt import SolverFactory from pyomo.contrib.mindtpy.config_options import _get_MindtPy_GOA_config from pyomo.contrib.mindtpy.algorithm_base_class import _MindtPyAlgorithm -from pyomo.opt import TerminationCondition as tc -from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy -from operator import itemgetter from pyomo.contrib.mindtpy.cut_generation import add_affine_cuts @@ -128,5 +106,5 @@ def deactivate_no_good_cuts_when_fixing_bound(self, no_good_cuts): no_good_cuts[i].deactivate() if self.config.use_tabu_list: self.integer_list = self.integer_list[:valid_no_good_cuts_num] - except KeyError: - self.config.logger.info('No-good cut deactivate failed.') + except KeyError as e: + self.config.logger.error(str(e) + '\nDeactivating no-good cuts failed.') diff --git a/pyomo/contrib/mindtpy/mip_solve.py b/pyomo/contrib/mindtpy/mip_solve.py deleted file mode 100644 index 3f64733ccd2..00000000000 --- a/pyomo/contrib/mindtpy/mip_solve.py +++ /dev/null @@ -1,478 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -"""main problem functions.""" -from pyomo.core import Constraint, Expression, Objective, minimize, value, maximize -from pyomo.opt import TerminationCondition as tc -from pyomo.opt import SolverFactory -from pyomo.contrib.gdpopt.util import ( - copy_var_list_values, - _DoNothing, - get_main_elapsed_time, - time_code, -) -from pyomo.contrib.gdpopt.solve_discrete_problem import ( - distinguish_mip_infeasible_or_unbounded, -) -from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver -from pyomo.common.dependencies import attempt_import -from pyomo.contrib.mindtpy.util import ( - generate_norm1_objective_function, - generate_norm2sq_objective_function, - generate_norm_inf_objective_function, - generate_lag_objective_function, - set_solver_options, - GurobiPersistent4MindtPy, - update_dual_bound, - update_suboptimal_dual_bound, -) - - -single_tree, single_tree_available = attempt_import('pyomo.contrib.mindtpy.single_tree') -tabu_list, tabu_list_available = attempt_import('pyomo.contrib.mindtpy.tabu_list') - - -def solve_main(solve_data, config, fp=False, regularization_problem=False): - """This function solves the MIP main problem. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. 
- fp : bool, optional - Whether it is in the loop of feasibility pump, by default False. - regularization_problem : bool, optional - Whether it is solving a regularization problem, by default False. - - Returns - ------- - solve_data.mip : Pyomo model - The MIP stored in solve_data. - main_mip_results : SolverResults - Results from solving the main MIP. - """ - if not fp and not regularization_problem: - solve_data.mip_iter += 1 - - # setup main problem - setup_main(solve_data, config, fp, regularization_problem) - mainopt = set_up_mip_solver(solve_data, config, regularization_problem) - - mip_args = dict(config.mip_solver_args) - if config.mip_solver in { - 'cplex', - 'cplex_persistent', - 'gurobi', - 'gurobi_persistent', - }: - mip_args['warmstart'] = True - set_solver_options( - mainopt, - solve_data.timing, - config, - solver_type='mip', - regularization=regularization_problem, - ) - try: - with time_code( - solve_data.timing, - 'regularization main' - if regularization_problem - else ('fp main' if fp else 'main'), - ): - main_mip_results = mainopt.solve( - solve_data.mip, - tee=config.mip_solver_tee, - load_solutions=False, - **mip_args, - ) - if len(main_mip_results.solution) > 0: - solve_data.mip.solutions.load_from(main_mip_results) - except (ValueError, AttributeError): - if config.single_tree: - config.logger.warning('Single tree terminate.') - if get_main_elapsed_time(solve_data.timing) >= config.time_limit - 2: - config.logger.warning('due to the timelimit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - if config.strategy == 'GOA' or config.add_no_good_cuts: - config.logger.warning( - 'ValueError: Cannot load a SolverResults object with bad status: error. ' - 'MIP solver failed. This usually happens in the single-tree GOA algorithm. ' - "No-good cuts are added and GOA algorithm doesn't converge within the time limit. " - 'No integer solution is found, so the cplex solver will report an error status. ' - ) - return None, None - if config.solution_pool: - main_mip_results._solver_model = mainopt._solver_model - main_mip_results._pyomo_var_to_solver_var_map = ( - mainopt._pyomo_var_to_solver_var_map - ) - if main_mip_results.solver.termination_condition is tc.optimal: - if ( - config.single_tree - and not config.add_no_good_cuts - and not regularization_problem - ): - update_suboptimal_dual_bound(solve_data, main_mip_results) - if regularization_problem: - config.logger.info( - solve_data.log_formatter.format( - solve_data.mip_iter, - 'Reg ' + solve_data.regularization_mip_type, - value(solve_data.mip.MindtPy_utils.roa_proj_mip_obj), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing), - ) - ) - - elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: - # Linear solvers will sometimes tell me that it's infeasible or - # unbounded during presolve, but fails to distinguish. We need to - # resolve with a solver option flag on. 
- main_mip_results, _ = distinguish_mip_infeasible_or_unbounded( - solve_data.mip, config - ) - return solve_data.mip, main_mip_results - - if regularization_problem: - solve_data.mip.MindtPy_utils.objective_constr.deactivate() - solve_data.mip.MindtPy_utils.del_component('roa_proj_mip_obj') - solve_data.mip.MindtPy_utils.cuts.del_component('obj_reg_estimate') - if config.add_regularization == 'level_L1': - solve_data.mip.MindtPy_utils.del_component('L1_obj') - elif config.add_regularization == 'level_L_infinity': - solve_data.mip.MindtPy_utils.del_component('L_infinity_obj') - - return solve_data.mip, main_mip_results - - -def set_up_mip_solver(solve_data, config, regularization_problem): - """Set up the MIP solver. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - regularization_problem : bool - Whether it is solving a regularization problem. - - Returns - ------- - mainopt : SolverFactory - The customized MIP solver. - """ - # Deactivate extraneous IMPORT/EXPORT suffixes - if config.nlp_solver in {'ipopt', 'cyipopt'}: - getattr(solve_data.mip, 'ipopt_zL_out', _DoNothing()).deactivate() - getattr(solve_data.mip, 'ipopt_zU_out', _DoNothing()).deactivate() - if regularization_problem: - mainopt = SolverFactory(config.mip_regularization_solver) - else: - if config.mip_solver == 'gurobi_persistent' and config.single_tree: - mainopt = GurobiPersistent4MindtPy() - mainopt.solve_data = solve_data - mainopt.config = config - else: - mainopt = SolverFactory(config.mip_solver) - - # determine if persistent solver is called. - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(solve_data.mip, symbolic_solver_labels=True) - if config.single_tree and not regularization_problem: - # Configuration of cplex lazy callback - if config.mip_solver == 'cplex_persistent': - lazyoa = mainopt._solver_model.register_callback( - single_tree.LazyOACallback_cplex - ) - # pass necessary data and parameters to lazyoa - lazyoa.main_mip = solve_data.mip - lazyoa.solve_data = solve_data - lazyoa.config = config - lazyoa.opt = mainopt - mainopt._solver_model.set_warning_stream(None) - mainopt._solver_model.set_log_stream(None) - mainopt._solver_model.set_error_stream(None) - if config.mip_solver == 'gurobi_persistent': - mainopt.set_callback(single_tree.LazyOACallback_gurobi) - if config.use_tabu_list: - tabulist = mainopt._solver_model.register_callback( - tabu_list.IncumbentCallback_cplex - ) - tabulist.solve_data = solve_data - tabulist.opt = mainopt - tabulist.config = config - mainopt._solver_model.parameters.preprocessing.reduce.set(1) - # If the callback is used to reject incumbents, the user must set the - # parameter c.parameters.preprocessing.reduce either to the value 1 (one) - # to restrict presolve to primal reductions only or to 0 (zero) to disable all presolve reductions - mainopt._solver_model.set_warning_stream(None) - mainopt._solver_model.set_log_stream(None) - mainopt._solver_model.set_error_stream(None) - return mainopt - - -# The following functions deal with handling the solution we get from the above MIP solver function - - -def handle_main_optimal(main_mip, solve_data, config, update_bound=True): - """This function copies the results from 'solve_main' to the working model and updates - the upper/lower bound. This function is called after an optimal solution is found for - the main problem. 
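The removed set_up_mip_solver above (its logic now lives in initialize_subsolvers) builds on Pyomo's persistent-solver workflow: set the instance once, then add cuts incrementally between solves. Here is a minimal sketch of that workflow with gurobi_persistent; it assumes a working Gurobi installation, and the model and cut are illustrative.

from pyomo.environ import ConcreteModel, Var, Binary, Objective, Constraint, maximize
from pyomo.opt import SolverFactory

m = ConcreteModel()
m.y = Var(domain=Binary)
m.obj = Objective(expr=m.y, sense=maximize)

opt = SolverFactory('gurobi_persistent')
opt.set_instance(m, symbolic_solver_labels=True)
opt.solve(tee=False)

m.cut = Constraint(expr=m.y <= 0)  # stands in for a lazily added no-good/OA cut
opt.add_constraint(m.cut)          # register the new cut with the persistent model
opt.solve(tee=False)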
- - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - update_bound : bool, optional - Whether to update the bound, by default True. - Bound will not be updated when handling regularization problem. - """ - # proceed. Just need integer values - MindtPy = main_mip.MindtPy_utils - # check if the value of binary variable is valid - for var in MindtPy.discrete_variable_list: - if var.value is None: - config.logger.warning( - f"Integer variable {var.name} not initialized. " - "Setting it to its lower bound" - ) - var.set_value(var.lb, skip_validation=True) # nlp_var.bounds[0] - # warm start for the nlp subproblem - copy_var_list_values( - main_mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config, - ) - - if update_bound: - update_dual_bound(solve_data, value(MindtPy.mip_obj.expr)) - config.logger.info( - solve_data.log_formatter.format( - solve_data.mip_iter, - 'MILP', - value(MindtPy.mip_obj.expr), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing), - ) - ) - - -def handle_regularization_main_tc(main_mip, main_mip_results, solve_data, config): - """Handles the result of the regularization main problem. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - main_mip_results : SolverResults - Results from solving the regularization main subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle the regularization problem termination condition. - """ - if main_mip_results is None: - config.logger.info( - 'Failed to solve the regularization problem.' - 'The solution of the OA main problem will be adopted.' - ) - elif main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}: - handle_main_optimal(main_mip, solve_data, config, update_bound=False) - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( - 'Regularization problem failed to converge within the time limit.' - ) - solve_data.results.solver.termination_condition = tc.maxTimeLimit - # break - elif main_mip_results.solver.termination_condition is tc.infeasible: - config.logger.info('Regularization problem infeasible.') - elif main_mip_results.solver.termination_condition is tc.unbounded: - config.logger.info( - 'Regularization problem ubounded.' - 'Sometimes solving MIQP in cplex, unbounded means infeasible.' - ) - elif main_mip_results.solver.termination_condition is tc.unknown: - config.logger.info( - 'Termination condition of the regularization problem is unknown.' - ) - if main_mip_results.problem.lower_bound != float('-inf'): - config.logger.info('Solution limit has been reached.') - handle_main_optimal(main_mip, solve_data, config, update_bound=False) - else: - config.logger.info( - 'No solution obtained from the regularization subproblem.' - 'Please set mip_solver_tee to True for more information.' - 'The solution of the OA main problem will be adopted.' - ) - else: - raise ValueError( - 'MindtPy unable to handle regularization problem termination condition ' - 'of %s. 
Solver message: %s' - % ( - main_mip_results.solver.termination_condition, - main_mip_results.solver.message, - ) - ) - - -def setup_main(solve_data, config, fp, regularization_problem): - """Set up main problem/main regularization problem for OA, ECP, Feasibility Pump and ROA methods. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - fp : bool - Whether it is in the loop of feasibility pump. - regularization_problem : bool - Whether it is solving a regularization problem. - """ - MindtPy = solve_data.mip.MindtPy_utils - - for c in MindtPy.constraint_list: - if ( - c.body.polynomial_degree() - not in solve_data.mip_constraint_polynomial_degree - ): - c.deactivate() - - MindtPy.cuts.activate() - - sign_adjust = 1 if solve_data.objective_sense == minimize else -1 - MindtPy.del_component('mip_obj') - if regularization_problem and config.single_tree: - MindtPy.del_component('roa_proj_mip_obj') - MindtPy.cuts.del_component('obj_reg_estimate') - if config.add_regularization is not None and config.add_no_good_cuts: - if regularization_problem: - MindtPy.cuts.no_good_cuts.activate() - else: - MindtPy.cuts.no_good_cuts.deactivate() - - if fp: - MindtPy.del_component('fp_mip_obj') - if config.fp_main_norm == 'L1': - MindtPy.fp_mip_obj = generate_norm1_objective_function( - solve_data.mip, - solve_data.working_model, - discrete_only=config.fp_discrete_only, - ) - elif config.fp_main_norm == 'L2': - MindtPy.fp_mip_obj = generate_norm2sq_objective_function( - solve_data.mip, - solve_data.working_model, - discrete_only=config.fp_discrete_only, - ) - elif config.fp_main_norm == 'L_infinity': - MindtPy.fp_mip_obj = generate_norm_inf_objective_function( - solve_data.mip, - solve_data.working_model, - discrete_only=config.fp_discrete_only, - ) - elif regularization_problem: - # The epigraph constraint is very "flat" for branching rules. - # In ROA, if the objective function is linear(or quadratic when quadratic_strategy = 1 or 2), the original objective function is used in the MIP problem. - # In the MIP projection problem, we need to reactivate the epigraph constraint(objective_constr). 
- if ( - MindtPy.objective_list[0].polynomial_degree() - in solve_data.mip_objective_polynomial_degree - ): - MindtPy.objective_constr.activate() - if config.add_regularization == 'level_L1': - MindtPy.roa_proj_mip_obj = generate_norm1_objective_function( - solve_data.mip, solve_data.best_solution_found, discrete_only=False - ) - elif config.add_regularization == 'level_L2': - MindtPy.roa_proj_mip_obj = generate_norm2sq_objective_function( - solve_data.mip, solve_data.best_solution_found, discrete_only=False - ) - elif config.add_regularization == 'level_L_infinity': - MindtPy.roa_proj_mip_obj = generate_norm_inf_objective_function( - solve_data.mip, solve_data.best_solution_found, discrete_only=False - ) - elif config.add_regularization in { - 'grad_lag', - 'hess_lag', - 'hess_only_lag', - 'sqp_lag', - }: - MindtPy.roa_proj_mip_obj = generate_lag_objective_function( - solve_data.mip, - solve_data.best_solution_found, - config, - solve_data.timing, - discrete_only=False, - ) - if solve_data.objective_sense == minimize: - MindtPy.cuts.obj_reg_estimate = Constraint( - expr=sum(MindtPy.objective_value[:]) - <= (1 - config.level_coef) * solve_data.primal_bound - + config.level_coef * solve_data.dual_bound - ) - else: - MindtPy.cuts.obj_reg_estimate = Constraint( - expr=sum(MindtPy.objective_value[:]) - >= (1 - config.level_coef) * solve_data.primal_bound - + config.level_coef * solve_data.dual_bound - ) - else: - if config.add_slack: - MindtPy.del_component('aug_penalty_expr') - - MindtPy.aug_penalty_expr = Expression( - expr=sign_adjust - * config.OA_penalty_factor - * sum(v for v in MindtPy.cuts.slack_vars[...]) - ) - main_objective = MindtPy.objective_list[-1] - MindtPy.mip_obj = Objective( - expr=main_objective.expr - + (MindtPy.aug_penalty_expr if config.add_slack else 0), - sense=solve_data.objective_sense, - ) - - if config.use_dual_bound: - # Delete previously added dual bound constraint - MindtPy.cuts.del_component('dual_bound') - if solve_data.dual_bound not in {float('inf'), float('-inf')}: - if solve_data.objective_sense == minimize: - MindtPy.cuts.dual_bound = Constraint( - expr=main_objective.expr - + (MindtPy.aug_penalty_expr if config.add_slack else 0) - >= solve_data.dual_bound, - doc='Objective function expression should improve on the best found dual bound', - ) - else: - MindtPy.cuts.dual_bound = Constraint( - expr=main_objective.expr - + (MindtPy.aug_penalty_expr if config.add_slack else 0) - <= solve_data.dual_bound, - doc='Objective function expression should improve on the best found dual bound', - ) diff --git a/pyomo/contrib/mindtpy/nlp_solve.py b/pyomo/contrib/mindtpy/nlp_solve.py deleted file mode 100644 index 6674b220e80..00000000000 --- a/pyomo/contrib/mindtpy/nlp_solve.py +++ /dev/null @@ -1,519 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. 
-# ___________________________________________________________________________ - -"""Solution of NLP subproblems.""" -from pyomo.common.collections import ComponentMap -from pyomo.common.errors import InfeasibleConstraintException -from pyomo.contrib.mindtpy.cut_generation import ( - add_oa_cuts, - add_no_good_cuts, - add_affine_cuts, -) -from pyomo.contrib.mindtpy.util import ( - add_feas_slacks, - set_solver_options, - update_primal_bound, -) -from pyomo.contrib.gdpopt.util import ( - copy_var_list_values, - get_main_elapsed_time, - time_code, - SuppressInfeasibleWarning, -) -from pyomo.core import Constraint, Objective, TransformationFactory, minimize, value -from pyomo.opt import TerminationCondition as tc -from pyomo.opt import SolverFactory, SolverResults, SolverStatus - - -def solve_subproblem(solve_data, config): - """Solves the Fixed-NLP (with fixed integers). - - This function sets up the 'fixed_nlp' by fixing binaries, sets continuous variables to their initial var values, - precomputes dual values, deactivates trivial constraints, and then solves NLP model. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - results : SolverResults - Results from solving the Fixed-NLP. - """ - fixed_nlp = solve_data.working_model.clone() - MindtPy = fixed_nlp.MindtPy_utils - solve_data.nlp_iter += 1 - - # Set up NLP - TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp) - - MindtPy.cuts.deactivate() - if config.calculate_dual_at_solution: - fixed_nlp.tmp_duals = ComponentMap() - # tmp_duals are the value of the dual variables stored before using deactivate trivial constraints - # The values of the duals are computed as follows: (Complementary Slackness) - # - # | constraint | c_geq | status at x1 | tmp_dual (violation) | - # |------------|-------|--------------|----------------------| - # | g(x) <= b | -1 | g(x1) <= b | 0 | - # | g(x) <= b | -1 | g(x1) > b | g(x1) - b | - # | g(x) >= b | +1 | g(x1) >= b | 0 | - # | g(x) >= b | +1 | g(x1) < b | b - g(x1) | - evaluation_error = False - for c in fixed_nlp.MindtPy_utils.constraint_list: - # We prefer to include the upper bound as the right hand side since we are - # considering c by default a (hopefully) convex function, which would make - # c >= lb a nonconvex inequality which we wouldn't like to add linearizations - # if we don't have to - rhs = value(c.upper) if c.has_ub() else value(c.lower) - c_geq = -1 if c.has_ub() else 1 - try: - fixed_nlp.tmp_duals[c] = c_geq * max(0, c_geq * (rhs - value(c.body))) - except (ValueError, OverflowError) as error: - fixed_nlp.tmp_duals[c] = None - evaluation_error = True - if evaluation_error: - for nlp_var, orig_val in zip( - MindtPy.variable_list, solve_data.initial_var_values - ): - if not nlp_var.fixed and not nlp_var.is_binary(): - nlp_var.set_value(orig_val, skip_validation=True) - try: - TransformationFactory('contrib.deactivate_trivial_constraints').apply_to( - fixed_nlp, - tmp=True, - ignore_infeasible=False, - tolerance=config.constraint_tolerance, - ) - except InfeasibleConstraintException: - config.logger.warning( - 'infeasibility detected in deactivate_trivial_constraints' - ) - results = SolverResults() - results.solver.termination_condition = tc.infeasible - return fixed_nlp, results - # Solve the NLP - nlpopt = SolverFactory(config.nlp_solver) - nlp_args = 
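The complementary-slackness table above (from the removed nlp_solve.py, now handled in the algorithm base class) encodes a bound-violation fallback for the duals. Only the arithmetic is reproduced below, with illustrative numbers.

def tmp_dual(body_value, rhs, has_ub):
    """Bound-violation fallback used when real duals are unavailable."""
    c_geq = -1 if has_ub else 1
    return c_geq * max(0, c_geq * (rhs - body_value))

assert abs(tmp_dual(3.0, 2.0, has_ub=True)) == 1.0   # g(x) <= 2 violated by 1
assert tmp_dual(1.5, 2.0, has_ub=True) == 0.0        # satisfied upper bound
assert tmp_dual(1.0, 2.0, has_ub=False) == 1.0       # g(x) >= 2 violated by 1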
dict(config.nlp_solver_args) - set_solver_options(nlpopt, solve_data.timing, config, solver_type='nlp') - with SuppressInfeasibleWarning(): - with time_code(solve_data.timing, 'fixed subproblem'): - results = nlpopt.solve( - fixed_nlp, tee=config.nlp_solver_tee, load_solutions=False, **nlp_args - ) - if len(results.solution) > 0: - fixed_nlp.solutions.load_from(results) - return fixed_nlp, results - - -def handle_nlp_subproblem_tc(fixed_nlp, result, solve_data, config, cb_opt=None): - """This function handles different terminaton conditions of the fixed-NLP subproblem. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - result : SolverResults - Results from solving the NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - cb_opt : SolverFactory, optional - The gurobi_persistent solver, by default None. - """ - if result.solver.termination_condition in { - tc.optimal, - tc.locallyOptimal, - tc.feasible, - }: - handle_subproblem_optimal(fixed_nlp, solve_data, config, cb_opt) - elif result.solver.termination_condition in {tc.infeasible, tc.noSolution}: - handle_subproblem_infeasible(fixed_nlp, solve_data, config, cb_opt) - elif result.solver.termination_condition is tc.maxTimeLimit: - config.logger.info('NLP subproblem failed to converge within the time limit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - solve_data.should_terminate = True - elif result.solver.termination_condition is tc.maxEvaluations: - config.logger.info('NLP subproblem failed due to maxEvaluations.') - solve_data.results.solver.termination_condition = tc.maxEvaluations - solve_data.should_terminate = True - else: - handle_subproblem_other_termination( - fixed_nlp, result.solver.termination_condition, solve_data, config, cb_opt - ) - - -# The next few functions deal with handling the solution we get from the above NLP solver function - - -def handle_subproblem_optimal(fixed_nlp, solve_data, config, cb_opt=None): - """This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates - the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This - function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - cb_opt : SolverFactory, optional - The gurobi_persistent solver, by default None. - """ - copy_var_list_values( - fixed_nlp.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config, - ) - if config.calculate_dual_at_solution: - for c in fixed_nlp.tmp_duals: - if fixed_nlp.dual.get(c, None) is None: - fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c] - elif ( - config.nlp_solver == 'cyipopt' - and solve_data.objective_sense == minimize - ): - # TODO: recover the opposite dual when cyipopt issue #2831 is solved. 
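# --- Editorial sketch; illustration only, not part of the patch ---------------
# The tmp_duals fallback documented in solve_subproblem() above estimates a dual
# from complementary slackness: zero when the constraint holds at the current
# point, and proportional to the violation otherwise, with the sign given by
# c_geq. The one-constraint model below is a made-up toy, not MindtPy internals.
from pyomo.environ import ConcreteModel, Constraint, Var, value

m = ConcreteModel()
m.x = Var(initialize=4.0)
m.g = Constraint(expr=m.x <= 3.0)  # g(x) <= b with b = 3, violated at x = 4

rhs = value(m.g.upper) if m.g.has_ub() else value(m.g.lower)
c_geq = -1 if m.g.has_ub() else 1
tmp_dual = c_geq * max(0, c_geq * (rhs - value(m.g.body)))
print(tmp_dual)  # -1.0: magnitude equals the violation of 1, sign follows c_geq
# ------------------------------------------------------------------------------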
- fixed_nlp.dual[c] = -fixed_nlp.dual[c] - dual_values = list( - fixed_nlp.dual[c] for c in fixed_nlp.MindtPy_utils.constraint_list - ) - else: - dual_values = None - main_objective = fixed_nlp.MindtPy_utils.objective_list[-1] - update_primal_bound(solve_data, value(main_objective.expr)) - if solve_data.primal_bound_improved: - solve_data.best_solution_found = fixed_nlp.clone() - solve_data.best_solution_found_time = get_main_elapsed_time(solve_data.timing) - if config.strategy == 'GOA': - solve_data.num_no_good_cuts_added.update( - { - solve_data.primal_bound: len( - solve_data.mip.MindtPy_utils.cuts.no_good_cuts - ) - } - ) - # Add the linear cut - if config.strategy == 'OA': - copy_var_list_values( - fixed_nlp.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config, - ) - add_oa_cuts( - solve_data.mip, - dual_values, - solve_data.jacobians, - solve_data.objective_sense, - solve_data.mip_constraint_polynomial_degree, - solve_data.mip_iter, - config, - solve_data.timing, - cb_opt=cb_opt, - ) - elif config.strategy == 'GOA': - copy_var_list_values( - fixed_nlp.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config, - ) - add_affine_cuts(solve_data.mip, config, solve_data.timing) - # elif config.strategy == 'PSC': - # # !!THIS SEEMS LIKE A BUG!! - mrmundt # - # add_psc_cut(solve_data, config) - # elif config.strategy == 'GBD': - # # !!THIS SEEMS LIKE A BUG!! - mrmundt # - # add_gbd_cut(solve_data, config) - - var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - if config.add_no_good_cuts: - add_no_good_cuts( - solve_data.mip, - var_values, - config, - solve_data.timing, - solve_data.mip_iter, - cb_opt, - ) - - config.call_after_subproblem_feasible(fixed_nlp, solve_data) - - config.logger.info( - solve_data.fixed_nlp_log_formatter.format( - '*' if solve_data.primal_bound_improved else ' ', - solve_data.nlp_iter, - 'Fixed NLP', - value(main_objective.expr), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing), - ) - ) - - -def handle_subproblem_infeasible(fixed_nlp, solve_data, config, cb_opt=None): - """Solves feasibility problem and adds cut according to the specified strategy. - - This function handles the result of the latest iteration of solving the NLP subproblem given an infeasible - solution and copies the solution of the feasibility problem to the working model. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - cb_opt : SolverFactory, optional - The gurobi_persistent solver, by default None. - """ - # TODO try something else? Reinitialize with different initial - # value? 
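# --- Editorial sketch; illustration only, not part of the patch ---------------
# The OA cuts added above (add_oa_cuts) linearize each nonlinear constraint
# g(x) <= 0 at the fixed-NLP solution x_bar:
#     g(x_bar) + g'(x_bar) * (x - x_bar) <= 0.
# The toy constraint, linearization point and hand-computed gradient below are
# assumptions for illustration; MindtPy obtains the gradients from the stored
# Jacobians instead.
from pyomo.environ import ConcreteModel, Constraint, Var, value

m = ConcreteModel()
m.x = Var(initialize=3.0)               # pretend x_bar = 3 is the fixed-NLP solution
m.g = Constraint(expr=m.x**2 - 4 <= 0)  # nonlinear constraint to linearize

x_bar = value(m.x)
g_val = value(m.g.body)                 # g(x_bar) = 5
grad = 2 * x_bar                        # dg/dx at x_bar = 6 (computed by hand here)
m.oa_cut = Constraint(expr=g_val + grad * (m.x - x_bar) <= 0)  # i.e. 6*x - 13 <= 0
# ------------------------------------------------------------------------------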
- config.logger.info('NLP subproblem was locally infeasible.') - solve_data.nlp_infeasible_counter += 1 - if config.calculate_dual_at_solution: - for c in fixed_nlp.MindtPy_utils.constraint_list: - rhs = value(c.upper) if c.has_ub() else value(c.lower) - c_geq = -1 if c.has_ub() else 1 - fixed_nlp.dual[c] = c_geq * max(0, c_geq * (rhs - value(c.body))) - dual_values = list( - fixed_nlp.dual[c] for c in fixed_nlp.MindtPy_utils.constraint_list - ) - else: - dual_values = None - - # if config.strategy == 'PSC' or config.strategy == 'GBD': - # for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True): - # fixed_nlp.ipopt_zL_out[var] = 0 - # fixed_nlp.ipopt_zU_out[var] = 0 - # if var.has_ub() and abs(var.ub - value(var)) < config.absolute_bound_tolerance: - # fixed_nlp.ipopt_zL_out[var] = 1 - # elif var.has_lb() and abs(value(var) - var.lb) < config.absolute_bound_tolerance: - # fixed_nlp.ipopt_zU_out[var] = -1 - - if config.strategy in {'OA', 'GOA'}: - config.logger.info('Solving feasibility problem') - feas_subproblem, feas_subproblem_results = solve_feasibility_subproblem( - solve_data, config - ) - # TODO: do we really need this? - if solve_data.should_terminate: - return - copy_var_list_values( - feas_subproblem.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config, - ) - if config.strategy == 'OA': - add_oa_cuts( - solve_data.mip, - dual_values, - solve_data.jacobians, - solve_data.objective_sense, - solve_data.mip_constraint_polynomial_degree, - solve_data.mip_iter, - config, - solve_data.timing, - cb_opt=cb_opt, - ) - elif config.strategy == 'GOA': - add_affine_cuts(solve_data.mip, config, solve_data.timing) - # Add a no-good cut to exclude this discrete option - var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - if config.add_no_good_cuts: - # excludes current discrete option - add_no_good_cuts( - solve_data.mip, - var_values, - config, - solve_data.timing, - solve_data.mip_iter, - cb_opt, - ) - - -def handle_subproblem_other_termination( - fixed_nlp, termination_condition, solve_data, config, cb_opt -): - """Handles the result of the latest iteration of solving the fixed NLP subproblem given - a solution that is neither optimal nor infeasible. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - termination_condition : Pyomo TerminationCondition - The termination condition of the fixed NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle the NLP subproblem termination condition. - """ - if termination_condition is tc.maxIterations: - # TODO try something else? Reinitialize with different initial value? - config.logger.info('NLP subproblem failed to converge within iteration limit.') - var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - if config.add_no_good_cuts: - # excludes current discrete option - add_no_good_cuts( - solve_data.mip, - var_values, - config, - solve_data.timing, - solve_data.mip_iter, - cb_opt, - ) - else: - raise ValueError( - 'MindtPy unable to handle NLP subproblem termination ' - 'condition of {}'.format(termination_condition) - ) - - -def solve_feasibility_subproblem(solve_data, config): - """Solves a feasibility NLP if the fixed_nlp problem is infeasible. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. 
- config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - feas_subproblem : Pyomo model - Feasibility NLP from the model. - feas_soln : SolverResults - Results from solving the feasibility NLP. - """ - feas_subproblem = solve_data.working_model.clone() - add_feas_slacks(feas_subproblem, config) - - MindtPy = feas_subproblem.MindtPy_utils - if MindtPy.component('objective_value') is not None: - MindtPy.objective_value[:].set_value(0, skip_validation=True) - - next(feas_subproblem.component_data_objects(Objective, active=True)).deactivate() - for constr in feas_subproblem.MindtPy_utils.nonlinear_constraint_list: - constr.deactivate() - - MindtPy.feas_opt.activate() - if config.feasibility_norm == 'L1': - MindtPy.feas_obj = Objective( - expr=sum(s for s in MindtPy.feas_opt.slack_var[...]), sense=minimize - ) - elif config.feasibility_norm == 'L2': - MindtPy.feas_obj = Objective( - expr=sum(s * s for s in MindtPy.feas_opt.slack_var[...]), sense=minimize - ) - else: - MindtPy.feas_obj = Objective(expr=MindtPy.feas_opt.slack_var, sense=minimize) - TransformationFactory('core.fix_integer_vars').apply_to(feas_subproblem) - nlpopt = SolverFactory(config.nlp_solver) - nlp_args = dict(config.nlp_solver_args) - set_solver_options(nlpopt, solve_data.timing, config, solver_type='nlp') - with SuppressInfeasibleWarning(): - try: - with time_code(solve_data.timing, 'feasibility subproblem'): - feas_soln = nlpopt.solve( - feas_subproblem, - tee=config.nlp_solver_tee, - load_solutions=config.nlp_solver != 'appsi_ipopt', - **nlp_args - ) - if len(feas_soln.solution) > 0: - feas_subproblem.solutions.load_from(feas_soln) - except (ValueError, OverflowError) as error: - for nlp_var, orig_val in zip( - MindtPy.variable_list, solve_data.initial_var_values - ): - if not nlp_var.fixed and not nlp_var.is_binary(): - nlp_var.set_value(orig_val, skip_validation=True) - with time_code(solve_data.timing, 'feasibility subproblem'): - feas_soln = nlpopt.solve( - feas_subproblem, - tee=config.nlp_solver_tee, - load_solutions=config.nlp_solver != 'appsi_ipopt', - **nlp_args - ) - if len(feas_soln.solution) > 0: - feas_soln.solutions.load_from(feas_soln) - handle_feasibility_subproblem_tc( - feas_soln.solver.termination_condition, MindtPy, solve_data, config - ) - return feas_subproblem, feas_soln - - -def handle_feasibility_subproblem_tc( - subprob_terminate_cond, MindtPy, solve_data, config -): - """Handles the result of the latest iteration of solving the feasibility NLP subproblem given - a solution that is neither optimal nor infeasible. - - Parameters - ---------- - subprob_terminate_cond : Pyomo TerminationCondition - The termination condition of the feasibility NLP subproblem. - MindtPy : Pyomo Block - The MindtPy_utils block. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - """ - if subprob_terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}: - copy_var_list_values( - MindtPy.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config, - ) - if value(MindtPy.feas_obj.expr) <= config.zero_tolerance: - config.logger.warning( - 'The objective value %.4E of feasibility problem is less than zero_tolerance. ' - 'This indicates that the nlp subproblem is feasible, although it is found infeasible in the previous step. 
' - 'Check the nlp solver output' % value(MindtPy.feas_obj.expr) - ) - elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}: - config.logger.error( - 'Feasibility subproblem infeasible. This should never happen.' - ) - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error - elif subprob_terminate_cond is tc.maxIterations: - config.logger.error( - 'Subsolver reached its maximum number of iterations without converging, ' - 'consider increasing the iterations limit of the subsolver or reviewing your formulation.' - ) - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error - else: - config.logger.error( - 'MindtPy unable to handle feasibility subproblem termination condition ' - 'of {}'.format(subprob_terminate_cond) - ) - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error diff --git a/pyomo/contrib/mindtpy/outer_approximation.py b/pyomo/contrib/mindtpy/outer_approximation.py index 454736d3c81..99d9cea1bd4 100644 --- a/pyomo/contrib/mindtpy/outer_approximation.py +++ b/pyomo/contrib/mindtpy/outer_approximation.py @@ -54,9 +54,6 @@ def check_config(self): config.logger.info('Set regularization_mip_threads equal to threads') if config.single_tree: config.add_cuts_at_incumbent = True - # if no method is activated by users, we will use use_bb_tree_incumbent by default - if not (config.reduce_level_coef or config.use_bb_tree_incumbent): - config.use_bb_tree_incumbent = True if config.mip_regularization_solver is None: config.mip_regularization_solver = config.mip_solver if config.single_tree: @@ -138,18 +135,9 @@ def objective_reformulation(self): # In ROA and RLP/NLP, since the distance calculation does not include these epigraph slack variables, they should not be added to the variable list. (update_var_con_list = False) # In the process_objective function, once the objective function has been reformulated as epigraph constraint, the variable/constraint/objective lists will not be updated only if the MINLP has a linear objective function and regularization is activated at the same time. # This is because the epigraph constraint is very "flat" for branching rules. The original objective function will be used for the main problem and epigraph reformulation will be used for the projection problem. - # TODO: The logic here is too complicated, can we simplify it? MindtPy = self.working_model.MindtPy_utils config = self.config - self.process_objective( - self.config, - move_objective=config.move_objective, - use_mcpp=config.use_mcpp, - update_var_con_list=config.add_regularization is None, - partition_nonlinear_terms=config.partition_obj_nonlinear_terms, - obj_handleable_polynomial_degree=self.mip_objective_polynomial_degree, - constr_handleable_polynomial_degree=self.mip_constraint_polynomial_degree, - ) + self.process_objective(update_var_con_list=config.add_regularization is None) # The epigraph constraint is very "flat" for branching rules. # If ROA/RLP-NLP is activated and the original objective function is linear, we will use the original objective for the main mip. 
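# --- Editorial sketch; illustration only, not part of the patch ---------------
# The epigraph reformulation referred to above replaces a nonlinear objective
# min f(x) by a linear objective over an auxiliary variable plus an epigraph
# constraint: min t subject to t >= f(x). The toy model and names below are
# made up for illustration; in MindtPy this reformulation happens inside
# process_objective().
from pyomo.environ import ConcreteModel, Constraint, Objective, Var, minimize

m = ConcreteModel()
m.x = Var(bounds=(0, 10), initialize=1.0)
m.t = Var()                                          # epigraph (objective) variable
m.epigraph = Constraint(expr=m.t >= (m.x - 2) ** 2)  # t >= f(x)
m.obj = Objective(expr=m.t, sense=minimize)          # objective is now linear in t
# Because the epigraph constraint is "flat" near the optimum, ROA/RLP-NLP keeps
# the original (linear) objective for the main MIP and only uses the epigraph
# form for the projection problem, as the comment above explains.
# ------------------------------------------------------------------------------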
if ( diff --git a/pyomo/contrib/mindtpy/single_tree.py b/pyomo/contrib/mindtpy/single_tree.py index 3eef3e41597..9776920f434 100644 --- a/pyomo/contrib/mindtpy/single_tree.py +++ b/pyomo/contrib/mindtpy/single_tree.py @@ -12,30 +12,16 @@ from pyomo.common.dependencies import attempt_import from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy from pyomo.contrib.mindtpy.cut_generation import add_oa_cuts, add_no_good_cuts -from pyomo.contrib.mindtpy.mip_solve import ( - handle_main_optimal, - solve_main, - handle_regularization_main_tc, -) from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error from pyomo.repn import generate_standard_repn import pyomo.core.expr as EXPR from math import copysign -from pyomo.contrib.mindtpy.util import ( - get_integer_solution, - update_dual_bound, - update_primal_bound, -) +from pyomo.contrib.mindtpy.util import get_integer_solution from pyomo.contrib.gdpopt.util import ( copy_var_list_values, get_main_elapsed_time, time_code, ) -from pyomo.contrib.mindtpy.nlp_solve import ( - solve_subproblem, - solve_feasibility_subproblem, - handle_nlp_subproblem_tc, -) from pyomo.opt import TerminationCondition as tc from pyomo.core import minimize, value from pyomo.core.expr import identify_variables @@ -46,7 +32,7 @@ class LazyOACallback_cplex( cplex.callbacks.LazyConstraintCallback if cplex_available else object ): - """Inherent class in Cplex to call Lazy callback.""" + """Inherent class in CPLEX to call Lazy callback.""" def copy_lazy_var_list_values( self, @@ -98,8 +84,9 @@ def copy_lazy_var_list_values( # will always succeed and the ValueError should never be # raised. v_to.set_value(v_val, skip_validation=True) - except ValueError: + except ValueError as e: # Snap the value to the bounds + config.logger.error(e) if ( v_to.has_lb() and v_val < v_to.lb @@ -127,13 +114,13 @@ def add_lazy_oa_cuts( self, target_model, dual_values, - solve_data, + mindtpy_solver, config, opt, linearize_active=True, linearize_violated=True, ): - """Linearizes nonlinear constraints; add the OA cuts through Cplex inherent function self.add() + """Linearizes nonlinear constraints; add the OA cuts through CPLEX inherent function self.add() For nonconvex problems, turn on 'config.add_slack'. Slack variables will always be used for nonlinear equality constraints. @@ -143,8 +130,8 @@ def add_lazy_oa_cuts( The MIP main problem. dual_values : list The value of the duals for each constraint. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -155,16 +142,16 @@ def add_lazy_oa_cuts( Whether to linearize the violated nonlinear constraints, by default True. 
""" config.logger.debug('Adding OA cuts') - with time_code(solve_data.timing, 'OA cut generation'): + with time_code(mindtpy_solver.timing, 'OA cut generation'): for index, constr in enumerate(target_model.MindtPy_utils.constraint_list): if ( constr.body.polynomial_degree() - in solve_data.mip_constraint_polynomial_degree + in mindtpy_solver.mip_constraint_polynomial_degree ): continue constr_vars = list(identify_variables(constr.body)) - jacs = solve_data.jacobians + jacs = mindtpy_solver.jacobians # Equality constraint (makes the problem nonconvex) if ( @@ -172,10 +159,13 @@ def add_lazy_oa_cuts( and constr.has_lb() and value(constr.lower) == value(constr.upper) ): - sign_adjust = -1 if solve_data.objective_sense == minimize else 1 + sign_adjust = ( + -1 if mindtpy_solver.objective_sense == minimize else 1 + ) rhs = constr.lower - # since the cplex requires the lazy cuts in cplex type, we need to transform the pyomo expression into cplex expression + # Since CPLEX requires the lazy cuts in CPLEX type, + # we need to transform the pyomo expression into CPLEX expression. pyomo_expr = copysign(1, sign_adjust * dual_values[index]) * ( sum( value(jacs[constr][var]) * (var - value(var)) @@ -193,6 +183,20 @@ def add_lazy_oa_cuts( sense='L', rhs=cplex_rhs, ) + if ( + self.get_solution_source() + == cplex.callbacks.SolutionSource.mipstart_solution + ): + mindtpy_solver.mip_start_lazy_oa_cuts.append( + [ + cplex.SparsePair( + ind=cplex_expr.variables, + val=cplex_expr.coefficients, + ), + 'L', + cplex_rhs, + ] + ) else: # Inequality constraint (possibly two-sided) if ( constr.has_ub() @@ -219,6 +223,20 @@ def add_lazy_oa_cuts( sense='L', rhs=value(constr.upper) + cplex_rhs, ) + if ( + self.get_solution_source() + == cplex.callbacks.SolutionSource.mipstart_solution + ): + mindtpy_solver.mip_start_lazy_oa_cuts.append( + [ + cplex.SparsePair( + ind=cplex_expr.variables, + val=cplex_expr.coefficients, + ), + 'L', + value(constr.upper) + cplex_rhs, + ] + ) if ( constr.has_lb() and ( @@ -248,23 +266,37 @@ def add_lazy_oa_cuts( sense='G', rhs=value(constr.lower) + cplex_rhs, ) + if ( + self.get_solution_source() + == cplex.callbacks.SolutionSource.mipstart_solution + ): + mindtpy_solver.mip_start_lazy_oa_cuts.append( + [ + cplex.SparsePair( + ind=cplex_expr.variables, + val=cplex_expr.coefficients, + ), + 'G', + value(constr.lower) + cplex_rhs, + ] + ) - def add_lazy_affine_cuts(self, solve_data, config, opt): + def add_lazy_affine_cuts(self, mindtpy_solver, config, opt): """Adds affine cuts using MCPP. - Add affine cuts through Cplex inherent function self.add(). + Add affine cuts through CPLEX inherent function self.add(). Parameters ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory The cplex_persistent solver. """ - with time_code(solve_data.timing, 'Affine cut generation'): - m = solve_data.mip + with time_code(mindtpy_solver.timing, 'Affine cut generation'): + m = mindtpy_solver.mip config.logger.debug('Adding affine cuts') counter = 0 @@ -369,7 +401,7 @@ def add_lazy_affine_cuts(self, solve_data, config, opt): config.logger.debug('Added %s affine cuts' % counter) def add_lazy_no_good_cuts( - self, var_values, solve_data, config, opt, feasible=False + self, var_values, mindtpy_solver, config, opt, feasible=False ): """Adds no-good cuts. 
@@ -379,8 +411,8 @@ def add_lazy_no_good_cuts( ---------- var_values : list The variable values of the incumbent solution, used to generate the cut. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -397,8 +429,8 @@ def add_lazy_no_good_cuts( return config.logger.debug('Adding no-good cuts') - with time_code(solve_data.timing, 'No-good cut generation'): - m = solve_data.mip + with time_code(mindtpy_solver.timing, 'No-good cut generation'): + m = mindtpy_solver.mip MindtPy = m.MindtPy_utils int_tol = config.integer_tolerance @@ -439,7 +471,7 @@ def add_lazy_no_good_cuts( rhs=1 - cplex_no_good_rhs, ) - def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config, opt): + def handle_lazy_main_feasible_solution(self, main_mip, mindtpy_solver, config, opt): """This function is called during the branch and bound of main mip, more exactly when a feasible solution is found and LazyCallback is activated. Copy the result to working model and update upper or lower bound. @@ -449,8 +481,8 @@ def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config, opt): ---------- main_mip : Pyomo model The MIP main problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -462,23 +494,24 @@ def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config, opt): self.copy_lazy_var_list_values( opt, main_mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, + mindtpy_solver.fixed_nlp.MindtPy_utils.variable_list, config, + skip_fixed=False, ) - update_dual_bound(solve_data, self.get_best_objective_value()) + mindtpy_solver.update_dual_bound(self.get_best_objective_value()) config.logger.info( - solve_data.log_formatter.format( - solve_data.mip_iter, + mindtpy_solver.log_formatter.format( + mindtpy_solver.mip_iter, 'restrLP', self.get_objective_value(), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing), + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + mindtpy_solver.rel_gap, + get_main_elapsed_time(mindtpy_solver.timing), ) ) - def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): + def handle_lazy_subproblem_optimal(self, fixed_nlp, mindtpy_solver, config, opt): """This function copies the optimal solution of the fixed NLP subproblem to the MIP main problem(explanation see below), updates bound, adds OA and no-good cuts, stores incumbent solution if it has been improved. @@ -487,8 +520,8 @@ def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): ---------- fixed_nlp : Pyomo model Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -500,7 +533,7 @@ def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c] elif ( config.nlp_solver == 'cyipopt' - and solve_data.objective_sense == minimize + and mindtpy_solver.objective_sense == minimize ): # TODO: recover the opposite dual when cyipopt issue #2831 is solved. 
fixed_nlp.dual[c] = -fixed_nlp.dual[c] @@ -510,26 +543,26 @@ def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): else: dual_values = None main_objective = fixed_nlp.MindtPy_utils.objective_list[-1] - update_primal_bound(solve_data, value(main_objective.expr)) - if solve_data.primal_bound_improved: - solve_data.best_solution_found = fixed_nlp.clone() - solve_data.best_solution_found_time = get_main_elapsed_time( - solve_data.timing + mindtpy_solver.update_primal_bound(value(main_objective.expr)) + if mindtpy_solver.primal_bound_improved: + mindtpy_solver.best_solution_found = fixed_nlp.clone() + mindtpy_solver.best_solution_found_time = get_main_elapsed_time( + mindtpy_solver.timing ) if config.add_no_good_cuts or config.use_tabu_list: - solve_data.stored_bound.update( - {solve_data.primal_bound: solve_data.dual_bound} + mindtpy_solver.stored_bound.update( + {mindtpy_solver.primal_bound: mindtpy_solver.dual_bound} ) config.logger.info( - solve_data.fixed_nlp_log_formatter.format( - '*' if solve_data.primal_bound_improved else ' ', - solve_data.nlp_iter, + mindtpy_solver.fixed_nlp_log_formatter.format( + '*' if mindtpy_solver.primal_bound_improved else ' ', + mindtpy_solver.nlp_iter, 'Fixed NLP', value(main_objective.expr), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing), + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + mindtpy_solver.rel_gap, + get_main_elapsed_time(mindtpy_solver.timing), ) ) @@ -538,37 +571,39 @@ def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): # since value(constr.body), value(jacs[constr][var]), value(var) are used in self.add_lazy_oa_cuts() copy_var_list_values( fixed_nlp.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, config, ) if config.strategy == 'OA': - self.add_lazy_oa_cuts(solve_data.mip, dual_values, solve_data, config, opt) + self.add_lazy_oa_cuts( + mindtpy_solver.mip, dual_values, mindtpy_solver, config, opt + ) if config.add_regularization is not None: add_oa_cuts( - solve_data.mip, + mindtpy_solver.mip, dual_values, - solve_data.jacobians, - solve_data.objective_sense, - solve_data.mip_constraint_polynomial_degree, - solve_data.mip_iter, + mindtpy_solver.jacobians, + mindtpy_solver.objective_sense, + mindtpy_solver.mip_constraint_polynomial_degree, + mindtpy_solver.mip_iter, config, - solve_data.timing, + mindtpy_solver.timing, ) elif config.strategy == 'GOA': - self.add_lazy_affine_cuts(solve_data, config, opt) + self.add_lazy_affine_cuts(mindtpy_solver, config, opt) if config.add_no_good_cuts: var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - self.add_lazy_no_good_cuts(var_values, solve_data, config, opt) + self.add_lazy_no_good_cuts(var_values, mindtpy_solver, config, opt) - def handle_lazy_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt): + def handle_lazy_subproblem_infeasible(self, fixed_nlp, mindtpy_solver, config, opt): """Solves feasibility NLP subproblem and adds cuts according to the specified strategy. Parameters ---------- fixed_nlp : Pyomo model Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. 
opt : SolverFactory @@ -577,7 +612,7 @@ def handle_lazy_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt): # TODO try something else? Reinitialize with different initial # value? config.logger.info('NLP subproblem was locally infeasible.') - solve_data.nlp_infeasible_counter += 1 + mindtpy_solver.nlp_infeasible_counter += 1 if config.calculate_dual_at_solution: for c in fixed_nlp.MindtPy_utils.constraint_list: rhs = (0 if c.upper is None else c.upper) + ( @@ -594,37 +629,40 @@ def handle_lazy_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt): dual_values = None config.logger.info('Solving feasibility problem') - feas_subproblem, feas_subproblem_results = solve_feasibility_subproblem( - solve_data, config - ) + ( + feas_subproblem, + feas_subproblem_results, + ) = mindtpy_solver.solve_feasibility_subproblem() # In OA algorithm, OA cuts are generated based on the solution of the subproblem # We need to first copy the value of variables from the subproblem and then add cuts copy_var_list_values( feas_subproblem.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, config, ) if config.strategy == 'OA': - self.add_lazy_oa_cuts(solve_data.mip, dual_values, solve_data, config, opt) + self.add_lazy_oa_cuts( + mindtpy_solver.mip, dual_values, mindtpy_solver, config, opt + ) if config.add_regularization is not None: add_oa_cuts( - solve_data.mip, + mindtpy_solver.mip, dual_values, - solve_data.jacobians, - solve_data.objective_sense, - solve_data.mip_constraint_polynomial_degree, - solve_data.mip_iter, + mindtpy_solver.jacobians, + mindtpy_solver.objective_sense, + mindtpy_solver.mip_constraint_polynomial_degree, + mindtpy_solver.mip_iter, config, - solve_data.timing, + mindtpy_solver.timing, ) elif config.strategy == 'GOA': - self.add_lazy_affine_cuts(solve_data, config, opt) + self.add_lazy_affine_cuts(mindtpy_solver, config, opt) if config.add_no_good_cuts: var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - self.add_lazy_no_good_cuts(var_values, solve_data, config, opt) + self.add_lazy_no_good_cuts(var_values, mindtpy_solver, config, opt) def handle_lazy_subproblem_other_termination( - self, fixed_nlp, termination_condition, solve_data, config + self, fixed_nlp, termination_condition, mindtpy_solver, config ): """Handles the result of the latest iteration of solving the NLP subproblem given a solution that is neither optimal nor infeasible. @@ -635,8 +673,8 @@ def handle_lazy_subproblem_other_termination( Integer-variable-fixed NLP model. termination_condition : Pyomo TerminationCondition The termination condition of the fixed NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. @@ -657,248 +695,157 @@ def handle_lazy_subproblem_other_termination( 'condition of {}'.format(termination_condition) ) - def handle_lazy_regularization_problem( - self, main_mip, main_mip_results, solve_data, config - ): - """Handles the termination condition of the regularization main problem in RLP/NLP. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - main_mip_results : SolverResults - Results from solving the regularization MIP problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. 
- - Raises - ------ - ValueError - MindtPy unable to handle the termination condition of the regularization problem. - ValueError - MindtPy unable to handle the termination condition of the regularization problem. - """ - if main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}: - handle_main_optimal(main_mip, solve_data, config, update_bound=False) - elif main_mip_results.solver.termination_condition in { - tc.infeasible, - tc.infeasibleOrUnbounded, - }: - config.logger.info( - solve_data.log_note_formatter.format( - solve_data.mip_iter, - 'Reg ' + solve_data.regularization_mip_type, - 'infeasible', - ) - ) - if config.reduce_level_coef: - config.level_coef = config.level_coef / 2 - main_mip, main_mip_results = solve_main( - solve_data, config, regularization_problem=True - ) - if main_mip_results.solver.termination_condition in { - tc.optimal, - tc.feasible, - }: - handle_main_optimal( - main_mip, solve_data, config, update_bound=False - ) - elif main_mip_results.solver.termination_condition is tc.infeasible: - config.logger.info( - 'regularization problem still infeasible with reduced level_coef. ' - 'NLP subproblem is generated based on the incumbent solution of the main problem.' - ) - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( - 'Regularization problem failed to converge within the time limit.' - ) - solve_data.results.solver.termination_condition = tc.maxTimeLimit - elif main_mip_results.solver.termination_condition is tc.unbounded: - config.logger.info( - 'Regularization problem ubounded.' - 'Sometimes solving MIQP using cplex, unbounded means infeasible.' - ) - elif main_mip_results.solver.termination_condition is tc.unknown: - config.logger.info( - 'Termination condition of the regularization problem is unknown.' - ) - if main_mip_results.problem.lower_bound != float('-inf'): - config.logger.info('Solution limit has been reached.') - handle_main_optimal( - main_mip, solve_data, config, update_bound=False - ) - else: - config.logger.info( - 'No solution obtained from the regularization subproblem.' - 'Please set mip_solver_tee to True for more information.' - 'The solution of the OA main problem will be adopted.' - ) - else: - raise ValueError( - 'MindtPy unable to handle regularization problem termination condition ' - 'of %s. Solver message: %s' - % ( - main_mip_results.solver.termination_condition, - main_mip_results.solver.message, - ) - ) - elif config.use_bb_tree_incumbent: - config.logger.debug( - 'Fixed subproblem will be generated based on the incumbent solution of the main problem.' - ) - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( - 'Regularization problem failed to converge within the time limit.' - ) - solve_data.results.solver.termination_condition = tc.maxTimeLimit - elif main_mip_results.solver.termination_condition is tc.unbounded: - config.logger.info( - 'Regularization problem ubounded.' - 'Sometimes solving MIQP using cplex, unbounded means infeasible.' - ) - elif main_mip_results.solver.termination_condition is tc.unknown: - config.logger.info( - 'Termination condition of the regularization problem is unknown.' - ) - if main_mip_results.problem.lower_bound != float('-inf'): - config.logger.info('Solution limit has been reached.') - handle_main_optimal(main_mip, solve_data, config, update_bound=False) - else: - raise ValueError( - 'MindtPy unable to handle regularization problem termination condition ' - 'of %s. 
Solver message: %s' - % ( - main_mip_results.solver.termination_condition, - main_mip_results.solver.message, - ) - ) - def __call__(self): - """This is an inherent function in LazyConstraintCallback in cplex. + """This is an inherent function in LazyConstraintCallback in CPLEX. This function is called whenever an integer solution is found during the branch and bound process. """ - solve_data = self.solve_data + mindtpy_solver = self.mindtpy_solver config = self.config opt = self.opt main_mip = self.main_mip + mindtpy_solver = self.mindtpy_solver - if solve_data.should_terminate: - self.abort() - return + # Reference: https://www.ibm.com/docs/en/icos/22.1.1?topic=SSSA5P_22.1.1/ilog.odms.cplex.help/refpythoncplex/html/cplex.callbacks.SolutionSource-class.htm + # Another solution source is user_solution = 118, but it will not be encountered in LazyConstraintCallback. + config.logger.debug( + "Solution source: %s (111 node_solution, 117 heuristic_solution, 119 mipstart_solution)".format( + self.get_solution_source() + ) + ) - self.handle_lazy_main_feasible_solution(main_mip, solve_data, config, opt) + # The solution found in MIP start process might be revisited in branch and bound. + # Lazy constraints separated when processing a MIP start will be discarded after that MIP start has been processed. + # This means that the callback may have to separate the same constraint again for the next MIP start or for a solution that is found later in the solution process. + # https://www.ibm.com/docs/en/icos/22.1.1?topic=SSSA5P_22.1.1/ilog.odms.cplex.help/refpythoncplex/html/cplex.callbacks.LazyConstraintCallback-class.htm + if ( + self.get_solution_source() + != cplex.callbacks.SolutionSource.mipstart_solution + and len(mindtpy_solver.mip_start_lazy_oa_cuts) > 0 + ): + for constraint, sense, rhs in mindtpy_solver.mip_start_lazy_oa_cuts: + self.add(constraint, sense, rhs) + mindtpy_solver.mip_start_lazy_oa_cuts = [] + if mindtpy_solver.should_terminate: + self.abort() + return + self.handle_lazy_main_feasible_solution(main_mip, mindtpy_solver, config, opt) if config.add_cuts_at_incumbent: self.copy_lazy_var_list_values( opt, main_mip.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, config, ) if config.strategy == 'OA': - self.add_lazy_oa_cuts(solve_data.mip, None, solve_data, config, opt) + # The solution obtained from mip start might be infeasible and even introduce a math domain error, like log(-1). + try: + self.add_lazy_oa_cuts( + mindtpy_solver.mip, None, mindtpy_solver, config, opt + ) + except ValueError as e: + config.logger.error( + str(e) + + "\nUsually this error is caused by the MIP start solution causing a math domain error. " + "We will skip it." + ) + return # regularization is activated after the first feasible solution is found. if ( config.add_regularization is not None - and solve_data.best_solution_found is not None + and mindtpy_solver.best_solution_found is not None ): # The main problem might be unbounded, regularization is activated only when a valid bound is provided. if ( - not solve_data.dual_bound_improved - and not solve_data.primal_bound_improved + not mindtpy_solver.dual_bound_improved + and not mindtpy_solver.primal_bound_improved ): config.logger.debug( 'The bound and the best found solution have neither been improved.' 
'We will skip solving the regularization problem and the Fixed-NLP subproblem' ) - solve_data.primal_bound_improved = False + mindtpy_solver.primal_bound_improved = False return - if solve_data.dual_bound != solve_data.dual_bound_progress[0]: - main_mip, main_mip_results = solve_main( - solve_data, config, regularization_problem=True - ) - self.handle_lazy_regularization_problem( - main_mip, main_mip_results, solve_data, config - ) + if mindtpy_solver.dual_bound != mindtpy_solver.dual_bound_progress[0]: + mindtpy_solver.add_regularization() if ( - abs(solve_data.primal_bound - solve_data.dual_bound) + abs(mindtpy_solver.primal_bound - mindtpy_solver.dual_bound) <= config.absolute_bound_tolerance ): config.logger.info( 'MindtPy exiting on bound convergence. ' '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format( - solve_data.primal_bound, - solve_data.dual_bound, + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, config.absolute_bound_tolerance, ) ) - solve_data.results.solver.termination_condition = tc.optimal + mindtpy_solver.results.solver.termination_condition = tc.optimal self.abort() return # check if the same integer combination is obtained. - solve_data.curr_int_sol = get_integer_solution( - solve_data.working_model, string_zero=True + mindtpy_solver.curr_int_sol = get_integer_solution( + mindtpy_solver.fixed_nlp, string_zero=True ) - if solve_data.curr_int_sol in set(solve_data.integer_list): + if mindtpy_solver.curr_int_sol in set(mindtpy_solver.integer_list): config.logger.debug( 'This integer combination has been explored. ' 'We will skip solving the Fixed-NLP subproblem.' ) - solve_data.primal_bound_improved = False + mindtpy_solver.primal_bound_improved = False if config.strategy == 'GOA': if config.add_no_good_cuts: var_values = list( v.value - for v in solve_data.working_model.MindtPy_utils.variable_list + for v in mindtpy_solver.working_model.MindtPy_utils.variable_list ) - self.add_lazy_no_good_cuts(var_values, solve_data, config, opt) + self.add_lazy_no_good_cuts(var_values, mindtpy_solver, config, opt) return elif config.strategy == 'OA': return else: - solve_data.integer_list.append(solve_data.curr_int_sol) + mindtpy_solver.integer_list.append(mindtpy_solver.curr_int_sol) # solve subproblem # The constraint linearization happens in the handlers - fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config) - + fixed_nlp, fixed_nlp_result = mindtpy_solver.solve_subproblem() # add oa cuts if fixed_nlp_result.solver.termination_condition in { tc.optimal, tc.locallyOptimal, tc.feasible, }: - self.handle_lazy_subproblem_optimal(fixed_nlp, solve_data, config, opt) + self.handle_lazy_subproblem_optimal(fixed_nlp, mindtpy_solver, config, opt) if ( - abs(solve_data.primal_bound - solve_data.dual_bound) + abs(mindtpy_solver.primal_bound - mindtpy_solver.dual_bound) <= config.absolute_bound_tolerance ): config.logger.info( 'MindtPy exiting on bound convergence. 
' '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format( - solve_data.primal_bound, - solve_data.dual_bound, + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, config.absolute_bound_tolerance, ) ) - solve_data.results.solver.termination_condition = tc.optimal + mindtpy_solver.results.solver.termination_condition = tc.optimal return elif fixed_nlp_result.solver.termination_condition in { tc.infeasible, tc.noSolution, }: - self.handle_lazy_subproblem_infeasible(fixed_nlp, solve_data, config, opt) + self.handle_lazy_subproblem_infeasible( + fixed_nlp, mindtpy_solver, config, opt + ) else: self.handle_lazy_subproblem_other_termination( fixed_nlp, fixed_nlp_result.solver.termination_condition, - solve_data, + mindtpy_solver, config, ) @@ -906,8 +853,8 @@ def __call__(self): # Gurobi -def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config): - """This is a GUROBI callback function defined for LP/NLP based B&B algorithm. +def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, mindtpy_solver, config): + """This is a Gurobi callback function defined for LP/NLP based B&B algorithm. Parameters ---------- @@ -917,114 +864,107 @@ def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config): The gurobi_persistent solver. cb_where : int An enum member of gurobipy.GRB.Callback. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. """ if cb_where == gurobipy.GRB.Callback.MIPSOL: # gurobipy.GRB.Callback.MIPSOL means that an integer solution is found during the branch and bound process - if solve_data.should_terminate: + if mindtpy_solver.should_terminate: cb_opt._solver_model.terminate() return cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list) - handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config) + handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, mindtpy_solver, config) if config.add_cuts_at_incumbent: if config.strategy == 'OA': add_oa_cuts( - solve_data.mip, + mindtpy_solver.mip, None, - solve_data.jacobians, - solve_data.objective_sense, - solve_data.mip_constraint_polynomial_degree, - solve_data.mip_iter, + mindtpy_solver.jacobians, + mindtpy_solver.objective_sense, + mindtpy_solver.mip_constraint_polynomial_degree, + mindtpy_solver.mip_iter, config, - solve_data.timing, + mindtpy_solver.timing, cb_opt=cb_opt, ) # Regularization is activated after the first feasible solution is found. if ( config.add_regularization is not None - and solve_data.best_solution_found is not None + and mindtpy_solver.best_solution_found is not None ): # The main problem might be unbounded, regularization is activated only when a valid bound is provided. if ( - not solve_data.dual_bound_improved - and not solve_data.primal_bound_improved + not mindtpy_solver.dual_bound_improved + and not mindtpy_solver.primal_bound_improved ): config.logger.debug( 'The bound and the best found solution have neither been improved.' 
'We will skip solving the regularization problem and the Fixed-NLP subproblem' ) - solve_data.primal_bound_improved = False + mindtpy_solver.primal_bound_improved = False return - if solve_data.dual_bound != solve_data.dual_bound_progress[0]: - main_mip, main_mip_results = solve_main( - solve_data, config, regularization_problem=True - ) - handle_regularization_main_tc( - main_mip, main_mip_results, solve_data, config - ) + if mindtpy_solver.dual_bound != mindtpy_solver.dual_bound_progress[0]: + mindtpy_solver.add_regularization() if ( - abs(solve_data.primal_bound - solve_data.dual_bound) + abs(mindtpy_solver.primal_bound - mindtpy_solver.dual_bound) <= config.absolute_bound_tolerance ): config.logger.info( 'MindtPy exiting on bound convergence. ' '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format( - solve_data.primal_bound, - solve_data.dual_bound, + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, config.absolute_bound_tolerance, ) ) - solve_data.results.solver.termination_condition = tc.optimal + mindtpy_solver.results.solver.termination_condition = tc.optimal cb_opt._solver_model.terminate() return - # # check if the same integer combination is obtained. - solve_data.curr_int_sol = get_integer_solution( - solve_data.working_model, string_zero=True + # check if the same integer combination is obtained. + mindtpy_solver.curr_int_sol = get_integer_solution( + mindtpy_solver.fixed_nlp, string_zero=True ) - if solve_data.curr_int_sol in set(solve_data.integer_list): + if mindtpy_solver.curr_int_sol in set(mindtpy_solver.integer_list): config.logger.debug( 'This integer combination has been explored. ' 'We will skip solving the Fixed-NLP subproblem.' ) - solve_data.primal_bound_improved = False + mindtpy_solver.primal_bound_improved = False if config.strategy == 'GOA': if config.add_no_good_cuts: var_values = list( v.value - for v in solve_data.working_model.MindtPy_utils.variable_list + for v in mindtpy_solver.fixed_nlp.MindtPy_utils.variable_list ) add_no_good_cuts( - solve_data.mip, + mindtpy_solver.mip, var_values, config, - solve_data.timing, - mip_iter=solve_data.mip_iter, + mindtpy_solver.timing, + mip_iter=mindtpy_solver.mip_iter, cb_opt=cb_opt, ) return elif config.strategy == 'OA': return else: - solve_data.integer_list.append(solve_data.curr_int_sol) + mindtpy_solver.integer_list.append(mindtpy_solver.curr_int_sol) # solve subproblem # The constraint linearization happens in the handlers - fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config) + fixed_nlp, fixed_nlp_result = mindtpy_solver.solve_subproblem() - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt - ) + mindtpy_solver.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, cb_opt) -def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config): +def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, mindtpy_solver, config): """This function is called during the branch and bound of main MIP problem, more exactly when a feasible solution is found and LazyCallback is activated. @@ -1037,8 +977,8 @@ def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config): The MIP main problem. cb_opt : SolverFactory The gurobi_persistent solver. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. 
""" @@ -1047,18 +987,24 @@ def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config): # this value copy is useful since we need to fix subproblem based on the solution of the main problem copy_var_list_values( cb_m.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, + mindtpy_solver.fixed_nlp.MindtPy_utils.variable_list, + config, + skip_fixed=False, + ) + copy_var_list_values( + cb_m.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, config, ) - update_dual_bound(solve_data, cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJBND)) + mindtpy_solver.update_dual_bound(cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJBND)) config.logger.info( - solve_data.log_formatter.format( - solve_data.mip_iter, + mindtpy_solver.log_formatter.format( + mindtpy_solver.mip_iter, 'restrLP', cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJ), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing), + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + mindtpy_solver.rel_gap, + get_main_elapsed_time(mindtpy_solver.timing), ) ) diff --git a/pyomo/contrib/mindtpy/tabu_list.py b/pyomo/contrib/mindtpy/tabu_list.py index c2908339f74..313bd6f6271 100644 --- a/pyomo/contrib/mindtpy/tabu_list.py +++ b/pyomo/contrib/mindtpy/tabu_list.py @@ -22,24 +22,23 @@ class IncumbentCallback_cplex( def __call__(self): """ - This is an inherent function in LazyConstraintCallback in cplex. + This is an inherent function in LazyConstraintCallback in CPLEX. This callback will be used after each new potential incumbent is found. https://www.ibm.com/support/knowledgecenter/SSSA5P_12.10.0/ilog.odms.cplex.help/refpythoncplex/html/cplex.callbacks.IncumbentCallback-class.html IncumbentCallback will be activated after Lazyconstraint callback, when the potential incumbent solution is satisfies the lazyconstraints. 
TODO: need to handle GOA same integer combination check in lazyconstraint callback in single_tree.py - TODO: integer_var_value_tuple can be replaced by solve_data.curr_int_sol """ - solve_data = self.solve_data + mindtpy_solver = self.mindtpy_solver opt = self.opt config = self.config if config.single_tree: self.reject() else: temp = [] - for var in solve_data.mip.MindtPy_utils.discrete_variable_list: + for var in mindtpy_solver.mip.MindtPy_utils.discrete_variable_list: value = self.get_values(opt._pyomo_var_to_solver_var_map[var]) temp.append(int(round(value))) - integer_var_value = tuple(temp) + mindtpy_solver.curr_int_sol = tuple(temp) - if integer_var_value in set(solve_data.integer_list): + if mindtpy_solver.curr_int_sol in set(mindtpy_solver.integer_list): self.reject() diff --git a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py index d7936ae0280..10da243d332 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py @@ -54,7 +54,7 @@ class SimpleMINLP(ConcreteModel): def __init__(self, *args, **kwargs): """Create the problem.""" - kwargs.setdefault('name', 'DuranEx1') + kwargs.setdefault('name', 'SimpleMINLP2') super(SimpleMINLP, self).__init__(*args, **kwargs) m = self diff --git a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py index bf98a99152b..f387b0e26a1 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py @@ -47,7 +47,7 @@ class SimpleMINLP(ConcreteModel): def __init__(self, *args, **kwargs): """Create the problem.""" - kwargs.setdefault('name', 'DuranEx1') + kwargs.setdefault('name', 'SimpleMINLP3') super(SimpleMINLP, self).__init__(*args, **kwargs) m = self @@ -64,7 +64,7 @@ def __init__(self, *args, **kwargs): # DISCRETE VARIABLES Y = m.Y = Var(J, domain=Binary, initialize=initY) # CONTINUOUS VARIABLES - X = m.X = Var(I, domain=Reals, initialize=initX, bounds=(-1, 50)) + X = m.X = Var(I, domain=Reals, initialize=initX, bounds=(-0.9, 50)) """Constraint definitions""" # CONSTRAINTS diff --git a/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py index 7c35587bd4e..6038f9a74eb 100644 --- a/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py +++ b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py @@ -14,12 +14,8 @@ Binary, ConcreteModel, Constraint, - Reals, Objective, - Param, - RangeSet, Var, - exp, minimize, log, ) @@ -33,7 +29,7 @@ def __init__(self, *args, **kwargs): super(ConstraintQualificationExample, self).__init__(*args, **kwargs) m = self m.x = Var(bounds=(1.0, 10.0), initialize=5.0) - m.y = Var(within=Binary) + m.y = Var(within=Binary, initialize=1.0) m.c1 = Constraint(expr=(m.x - 3.0) ** 2 <= 50.0 * (1 - m.y)) m.c2 = Constraint(expr=m.x * log(m.x) + 5.0 <= 50.0 * (m.y)) m.objective = Objective(expr=m.x, sense=minimize) diff --git a/pyomo/contrib/mindtpy/tests/feasibility_pump1.py b/pyomo/contrib/mindtpy/tests/feasibility_pump1.py index cc52e2bc49f..e0a611c1ed2 100644 --- a/pyomo/contrib/mindtpy/tests/feasibility_pump1.py +++ b/pyomo/contrib/mindtpy/tests/feasibility_pump1.py @@ -23,13 +23,13 @@ from pyomo.common.collections import ComponentMap -class Feasibility_Pump1(ConcreteModel): - """Feasibility_Pump1 example""" +class FeasPump1(ConcreteModel): + """Feasibility Pump example 1""" def __init__(self, *args, **kwargs): """Create the problem.""" - 
kwargs.setdefault('name', 'Feasibility_Pump1') - super(Feasibility_Pump1, self).__init__(*args, **kwargs) + kwargs.setdefault('name', 'Feasibility Pump 1') + super(FeasPump1, self).__init__(*args, **kwargs) m = self m.x = Var(within=Binary) diff --git a/pyomo/contrib/mindtpy/tests/feasibility_pump2.py b/pyomo/contrib/mindtpy/tests/feasibility_pump2.py index 9d80702dfa4..48b98dc5800 100644 --- a/pyomo/contrib/mindtpy/tests/feasibility_pump2.py +++ b/pyomo/contrib/mindtpy/tests/feasibility_pump2.py @@ -25,13 +25,13 @@ from pyomo.common.collections import ComponentMap -class Feasibility_Pump2(ConcreteModel): - """Feasibility_Pump2 example""" +class FeasPump2(ConcreteModel): + """Feasibility Pump example 2""" def __init__(self, *args, **kwargs): """Create the problem.""" - kwargs.setdefault('name', 'Feasibility_Pump2') - super(Feasibility_Pump2, self).__init__(*args, **kwargs) + kwargs.setdefault('name', 'Feasibility Pump 2') + super(FeasPump2, self).__init__(*args, **kwargs) m = self m.x = Var(within=Binary) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy.py b/pyomo/contrib/mindtpy/tests/test_mindtpy.py index ddadb7d666a..e872eccc670 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy.py @@ -57,7 +57,7 @@ extreme_model_list = [LP_model.model, QCP_model.model] required_solvers = ('ipopt', 'glpk') -if all(SolverFactory(s).available() for s in required_solvers): +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False @@ -83,6 +83,7 @@ def test_OA_rNLP(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -104,6 +105,7 @@ def test_OA_extreme_model(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in extreme_model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -116,6 +118,7 @@ def test_OA_L2_norm(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -138,6 +141,7 @@ def test_OA_L_infinity_norm(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -160,6 +164,7 @@ def test_OA_max_binary(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -182,6 +187,7 @@ def test_OA_sympy(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -203,6 +209,7 @@ def test_OA_initial_binary(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -224,6 +231,7 @@ def test_OA_no_good_cuts(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -243,12 +251,12 @@ def 
test_OA_no_good_cuts(self): @unittest.skipUnless( SolverFactory('cplex').available() or SolverFactory('gurobi').available(), - "CPLEX or GUROBI not available.", + "CPLEX or Gurobi not available.", ) def test_OA_quadratic_strategy(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: - model = ProposalModel() + model = ProposalModel().clone() if SolverFactory('cplex').available(): mip_solver = 'cplex' elif SolverFactory('gurobi').available(): @@ -279,6 +287,7 @@ def test_OA_APPSI_solver(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -302,6 +311,7 @@ def test_OA_APPSI_ipopt(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -325,6 +335,7 @@ def test_OA_cyipopt(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in nonconvex_model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -345,6 +356,7 @@ def test_OA_integer_to_binary(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -366,6 +378,7 @@ def test_OA_partition_obj_nonlinear_terms(self): """Test the outer approximation decomposition algorithm (partition_obj_nonlinear_terms).""" with SolverFactory('mindtpy') as opt: for model in obj_nonlinear_sum_model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -387,6 +400,7 @@ def test_OA_add_slack(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -427,6 +441,7 @@ def test_OA_nonconvex(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in nonconvex_model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -470,7 +485,7 @@ def test_time_limit(self): def test_maximize_obj(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: - model = ProposalModel() + model = ProposalModel().clone() model.objective.sense = maximize opt.solve( model, @@ -483,7 +498,7 @@ def test_maximize_obj(self): def test_infeasible_model(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: - model = SimpleMINLP() + model = SimpleMINLP().clone() model.X[1].fix(0) model.Y[1].fix(0) results = opt.solve( diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py index 7e012a262f9..b5bfbe62553 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py @@ -13,8 +13,7 @@ from pyomo.opt import TerminationCondition required_solvers = ('ipopt', 'glpk') -# required_solvers = ('gams', 'gams') -if all(SolverFactory(s).available() for s in required_solvers): +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False @@ -46,6 +45,7 @@ def test_ECP(self): """Test the extended cutting plane decomposition 
algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='ECP', @@ -67,6 +67,7 @@ def test_ECP_add_slack(self): """Test the extended cutting plane decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='ECP', diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py index fb06781f009..697a63d17c8 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py @@ -14,11 +14,12 @@ from pyomo.opt import TerminationCondition from pyomo.contrib.gdpopt.util import is_feasible from pyomo.util.infeasible import log_infeasible_constraints -from pyomo.contrib.mindtpy.tests.feasibility_pump1 import Feasibility_Pump1 -from pyomo.contrib.mindtpy.tests.feasibility_pump2 import Feasibility_Pump2 +from pyomo.contrib.mindtpy.tests.feasibility_pump1 import FeasPump1 +from pyomo.contrib.mindtpy.tests.feasibility_pump2 import FeasPump2 -required_solvers = ('ipopt', 'glpk', 'cplex') -if all(SolverFactory(s).available() for s in required_solvers): +required_solvers = ('ipopt', 'cplex') +# TODO: 'appsi_highs' will fail here. +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False @@ -26,8 +27,8 @@ model_list = [ EightProcessFlowsheet(convex=True), ConstraintQualificationExample(), - Feasibility_Pump1(), - Feasibility_Pump2(), + FeasPump1(), + FeasPump2(), SimpleMINLP(), SimpleMINLP2(), SimpleMINLP3(), @@ -57,6 +58,7 @@ def test_FP(self): """Test the feasibility pump algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='FP', @@ -71,6 +73,7 @@ def test_FP_OA_8PP(self): """Test the FP-OA algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py index 87667781ba4..0fa19b30d9c 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py @@ -11,7 +11,7 @@ from pyomo.opt import TerminationCondition required_solvers = ('baron', 'cplex_persistent') -if not all(SolverFactory(s).available(False) for s in required_solvers): +if not all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = False elif not SolverFactory('baron').license_is_valid(): subsolvers_available = False @@ -47,6 +47,7 @@ def test_GOA(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='GOA', @@ -67,6 +68,7 @@ def test_GOA_tabu_list(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='GOA', diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py index fb179e45e9e..259cfe9dd7c 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py @@ -11,7 
+11,7 @@ from pyomo.contrib.mcpp import pyomo_mcpp required_solvers = ('baron', 'cplex_persistent') -if not all(SolverFactory(s).available(False) for s in required_solvers): +if not all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = False elif not SolverFactory('baron').license_is_valid(): subsolvers_available = False @@ -46,6 +46,7 @@ def test_GOA(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='GOA', @@ -67,6 +68,7 @@ def test_GOA_tabu_list(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='GOA', @@ -90,10 +92,11 @@ def test_GOA_tabu_list(self): and SolverFactory('gurobi_direct').available(), 'gurobi_persistent and gurobi_direct solver is not available', ) - def test_GOA_GUROBI(self): + def test_GOA_Gurobi(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='GOA', diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py index 953365129ce..2662a0e6f56 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py @@ -14,6 +14,7 @@ import pyomo.common.unittest as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP +from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ( ConstraintQualificationExample, ) @@ -26,7 +27,10 @@ s for s in required_mip_solvers if SolverFactory(s).available(False) ] -if SolverFactory(required_nlp_solvers).available(False) and available_mip_solvers: +if ( + SolverFactory(required_nlp_solvers).available(exception_flag=False) + and available_mip_solvers +): subsolvers_available = True else: subsolvers_available = False @@ -36,6 +40,7 @@ EightProcessFlowsheet(convex=True), ConstraintQualificationExample(), SimpleMINLP(), + SimpleMINLP3(), ] @@ -76,6 +81,7 @@ def test_LPNLP_CPLEX(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -97,10 +103,11 @@ def test_LPNLP_CPLEX(self): 'gurobi_persistent' in available_mip_solvers, 'gurobi_persistent solver is not available', ) - def test_LPNLP_GUROBI(self): + def test_LPNLP_Gurobi(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -122,6 +129,7 @@ def test_RLPNLP_L1(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: results = opt.solve( model, @@ -145,6 +153,7 @@ def test_RLPNLP_L2(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue 
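Note on the recurring `+ model = model.clone()` additions in these test hunks: the test modules construct their example models once at module level and reuse them across test cases, so each solve is now performed on a clone to keep the shared instances unmodified between tests. A minimal sketch of the pattern, not part of the patch, assuming the ipopt and glpk subsolvers used in test_mindtpy.py are available and using the SimpleMINLP test model imported by these test files:

    from pyomo.environ import SolverFactory
    from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP

    with SolverFactory('mindtpy') as opt:
        # Clone so the solve never mutates the shared module-level instance.
        model = SimpleMINLP().clone()
        results = opt.solve(
            model, strategy='OA', mip_solver='glpk', nlp_solver='ipopt'
        )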
@@ -170,6 +179,7 @@ def test_RLPNLP_Linf(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: results = opt.solve( model, @@ -193,6 +203,7 @@ def test_RLPNLP_grad_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: results = opt.solve( model, @@ -216,6 +227,7 @@ def test_RLPNLP_hess_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue @@ -241,6 +253,7 @@ def test_RLPNLP_hess_only_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue @@ -266,6 +279,7 @@ def test_RLPNLP_sqp_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py index 8a36be7e9b0..4c2ae4d1220 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py @@ -10,7 +10,7 @@ required_solvers = ('ipopt', 'cplex') # required_solvers = ('gams', 'gams') -if all(SolverFactory(s).available() for s in required_solvers): +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False @@ -36,6 +36,7 @@ def test_ROA_L1(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -57,6 +58,7 @@ def test_ROA_L2(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -78,6 +80,7 @@ def test_ROA_Linf(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -99,6 +102,7 @@ def test_ROA_grad_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -120,6 +124,7 @@ def test_ROA_hess_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -141,6 +146,7 @@ def test_ROA_hess_only_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -162,6 +168,7 @@ def test_ROA_sqp_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -183,6 +190,7 @@ def 
test_ROA_sqp_lag_equality_relaxation(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -205,6 +213,7 @@ def test_ROA_sqp_lag_add_no_good_cuts(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -228,6 +237,7 @@ def test_ROA_sqp_lag_level_coef(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py index f9a3686a160..e8ad85ad9bc 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py @@ -54,6 +54,7 @@ def test_OA_solution_pool_cplex(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -79,6 +80,7 @@ def test_OA_solution_pool_gurobi(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', @@ -105,6 +107,7 @@ def test_OA_solution_pool_coverage1(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() results = opt.solve( model, strategy='OA', diff --git a/pyomo/contrib/mindtpy/util.py b/pyomo/contrib/mindtpy/util.py index 378df6355dd..e336715cc8f 100644 --- a/pyomo/contrib/mindtpy/util.py +++ b/pyomo/contrib/mindtpy/util.py @@ -40,17 +40,11 @@ numpy = attempt_import('numpy')[0] -class MindtPySolveData(object): - """Data container to hold solve-instance data.""" - - pass - - def calc_jacobians(model, config): """Generates a map of jacobians for the variables in the model. This function generates a map of jacobians corresponding to the variables in the - model and adds this ComponentMap to solve_data. + model. Parameters ---------- @@ -60,7 +54,7 @@ def calc_jacobians(model, config): The specific configurations for MindtPy. """ # Map nonlinear_constraint --> Map( - # variable --> jacobian of constraint wrt. variable) + # variable --> jacobian of constraint w.r.t. variable) jacobians = ComponentMap() if config.differentiate_mode == 'reverse_symbolic': mode = EXPR.differentiate.Modes.reverse_symbolic @@ -75,8 +69,9 @@ def calc_jacobians(model, config): return jacobians -def add_feas_slacks(m, config): +def initialize_feas_subproblem(m, config): """Adds feasibility slack variables according to config.feasibility_norm (given an infeasible problem). + Defines the objective function of the feasibility subproblem. Parameters ---------- @@ -106,6 +101,18 @@ def add_feas_slacks(m, config): MindtPy.feas_opt.feas_constraints.add( constr.body - constr.lower >= -MindtPy.feas_opt.slack_var ) + # Setup objective function for the feasibility subproblem. 
+ if config.feasibility_norm == 'L1': + MindtPy.feas_obj = Objective( + expr=sum(s for s in MindtPy.feas_opt.slack_var.values()), sense=minimize + ) + elif config.feasibility_norm == 'L2': + MindtPy.feas_obj = Objective( + expr=sum(s * s for s in MindtPy.feas_opt.slack_var.values()), sense=minimize + ) + else: + MindtPy.feas_obj = Objective(expr=MindtPy.feas_opt.slack_var, sense=minimize) + MindtPy.feas_obj.deactivate() def add_var_bound(model, config): @@ -353,8 +360,7 @@ def generate_lag_objective_function( jac_lag[nlp.get_primal_indices([var])[0]] = 0 nlp_var = set([i.name for i in nlp.get_pyomo_variables()]) first_order_term = sum( - float(jac_lag[nlp.get_primal_indices([temp_var])[0]]) - * (var - temp_var.value) + jac_lag[nlp.get_primal_indices([temp_var])[0]][0] * (var - temp_var.value) for var, temp_var in zip( model.MindtPy_utils.variable_list, temp_model.MindtPy_utils.variable_list, @@ -492,140 +498,130 @@ def generate_norm1_norm_constraint(model, setpoint_model, config, discrete_only= ) -def set_solver_options(opt, timing, config, solver_type, regularization=False): - """Set options for MIP/NLP solvers. +def update_solver_timelimit(opt, solver_name, timing, config): + """Updates the time limit for subsolvers. Parameters ---------- - opt : SolverFactory - The MIP/NLP solver. + opt : Solvers + The solver object. + solver_name : String + The name of solver. timing : Timing - Timing. + Timing config : ConfigBlock The specific configurations for MindtPy. - solver_type : str - The type of the solver, i.e. mip or nlp. - regularization : bool, optional - Whether the solver is used to solve the regularization problem, by default False. """ - # TODO: integrate nlp_args here - # nlp_args = dict(config.nlp_solver_args) elapsed = get_main_elapsed_time(timing) - remaining = int(max(config.time_limit - elapsed, 1)) - if solver_type == 'mip': - if regularization: - solver_name = config.mip_regularization_solver - if config.regularization_mip_threads > 0: - opt.options['threads'] = config.regularization_mip_threads - else: - solver_name = config.mip_solver - if config.threads > 0: - opt.options['threads'] = config.threads - elif solver_type == 'nlp': - solver_name = config.nlp_solver - # TODO: opt.name doesn't work for GAMS - if solver_name in {'cplex', 'gurobi', 'gurobi_persistent', 'appsi_gurobi'}: - opt.options['timelimit'] = remaining - opt.options['mipgap'] = config.mip_solver_mipgap - if solver_name == 'gurobi_persistent' and config.single_tree: - # PreCrush: Controls presolve reductions that affect user cuts - # You should consider setting this parameter to 1 if you are using callbacks to add your own cuts. - opt.options['PreCrush'] = 1 - opt.options['LazyConstraints'] = 1 - if regularization == True: - if solver_name == 'cplex': - if config.solution_limit is not None: - opt.options['mip limits solutions'] = config.solution_limit - opt.options['mip strategy presolvenode'] = 3 - # TODO: need to discuss if this option should be added. 
- if config.add_regularization in {'hess_lag', 'hess_only_lag'}: - opt.options['optimalitytarget'] = 3 - elif solver_name == 'gurobi': - if config.solution_limit is not None: - opt.options['SolutionLimit'] = config.solution_limit - opt.options['Presolve'] = 2 - elif solver_name == 'cplex_persistent': + remaining = math.ceil(max(config.time_limit - elapsed, 1)) + if solver_name in { + 'cplex', + 'appsi_cplex', + 'cplex_persistent', + 'gurobi', + 'gurobi_persistent', + 'appsi_gurobi', + }: opt.options['timelimit'] = remaining - opt._solver_model.parameters.mip.tolerances.mipgap.set(config.mip_solver_mipgap) - if regularization is True: - if config.solution_limit is not None: - opt._solver_model.parameters.mip.limits.solutions.set( - config.solution_limit - ) - opt._solver_model.parameters.mip.strategy.presolvenode.set(3) - if config.add_regularization in {'hess_lag', 'hess_only_lag'}: - opt._solver_model.parameters.optimalitytarget.set(3) - elif solver_name == 'appsi_cplex': - opt.options['timelimit'] = remaining - opt.options['mip_tolerances_mipgap'] = config.mip_solver_mipgap - if regularization is True: - if config.solution_limit is not None: - opt.options['mip_limits_solutions'] = config.solution_limit - opt.options['mip_strategy_presolvenode'] = 3 - if config.add_regularization in {'hess_lag', 'hess_only_lag'}: - opt.options['optimalitytarget'] = 3 + elif solver_name == 'appsi_highs': + opt.config.time_limit = remaining + elif solver_name == 'cyipopt': + opt.config.options['max_cpu_time'] = float(remaining) elif solver_name == 'glpk': opt.options['tmlim'] = remaining - opt.options['mipgap'] = config.mip_solver_mipgap elif solver_name == 'baron': opt.options['MaxTime'] = remaining - opt.options['AbsConFeasTol'] = config.zero_tolerance elif solver_name in {'ipopt', 'appsi_ipopt'}: opt.options['max_cpu_time'] = remaining + elif solver_name == 'gams': + opt.options['add_options'].append('option Reslim=%s;' % remaining) + + +def set_solver_mipgap(opt, solver_name, config): + """Set mipgap for subsolvers. + + Parameters + ---------- + opt : Solvers + The solver object. + solver_name : String + The name of solver. + config : ConfigBlock + The specific configurations for MindtPy. + """ + if solver_name in { + 'cplex', + 'cplex_persistent', + 'gurobi', + 'gurobi_persistent', + 'appsi_gurobi', + 'glpk', + }: + opt.options['mipgap'] = config.mip_solver_mipgap + elif solver_name == 'appsi_cplex': + opt.options['mip_tolerances_mipgap'] = config.mip_solver_mipgap + elif solver_name == 'appsi_highs': + opt.config.mip_gap = config.mip_solver_mipgap + elif solver_name == 'gams': + opt.options['add_options'].append('option optcr=%s;' % config.mip_solver_mipgap) + + +def set_solver_constraint_violation_tolerance(opt, solver_name, config): + """Set constraint violation tolerance for solvers. + + Parameters + ---------- + opt : Solvers + The solver object. + solver_name : String + The name of solver. + config : ConfigBlock + The specific configurations for MindtPy. 
+ """ + if solver_name == 'baron': + opt.options['AbsConFeasTol'] = config.zero_tolerance + elif solver_name in {'ipopt', 'appsi_ipopt'}: opt.options['constr_viol_tol'] = config.zero_tolerance elif solver_name == 'cyipopt': - opt.config.options['max_cpu_time'] = float(remaining) opt.config.options['constr_viol_tol'] = config.zero_tolerance elif solver_name == 'gams': - if solver_type == 'mip': - opt.options['add_options'] = [ - 'option optcr=%s;' % config.mip_solver_mipgap, - 'option reslim=%s;' % remaining, - ] - elif solver_type == 'nlp': - opt.options['add_options'] = ['option reslim=%s;' % remaining] - if config.nlp_solver_args.__contains__('solver'): - if config.nlp_solver_args['solver'] in { - 'ipopt', - 'ipopth', - 'msnlp', - 'conopt', - 'baron', - }: - if config.nlp_solver_args['solver'] == 'ipopt': - opt.options['add_options'].append('$onecho > ipopt.opt') - opt.options['add_options'].append( - 'constr_viol_tol ' + str(config.zero_tolerance) - ) - elif config.nlp_solver_args['solver'] == 'ipopth': - opt.options['add_options'].append('$onecho > ipopth.opt') - opt.options['add_options'].append( - 'constr_viol_tol ' + str(config.zero_tolerance) - ) - # TODO: Ipopt warmstart option - # opt.options['add_options'].append('warm_start_init_point yes\n' - # 'warm_start_bound_push 1e-9\n' - # 'warm_start_bound_frac 1e-9\n' - # 'warm_start_slack_bound_frac 1e-9\n' - # 'warm_start_slack_bound_push 1e-9\n' - # 'warm_start_mult_bound_push 1e-9\n') - elif config.nlp_solver_args['solver'] == 'conopt': - opt.options['add_options'].append('$onecho > conopt.opt') - opt.options['add_options'].append( - 'RTNWMA ' + str(config.zero_tolerance) - ) - elif config.nlp_solver_args['solver'] == 'msnlp': - opt.options['add_options'].append('$onecho > msnlp.opt') - opt.options['add_options'].append( - 'feasibility_tolerance ' + str(config.zero_tolerance) - ) - elif config.nlp_solver_args['solver'] == 'baron': - opt.options['add_options'].append('$onecho > baron.opt') - opt.options['add_options'].append( - 'AbsConFeasTol ' + str(config.zero_tolerance) - ) - opt.options['add_options'].append('$offecho') - opt.options['add_options'].append('GAMS_MODEL.optfile=1') + if config.nlp_solver_args['solver'] in { + 'ipopt', + 'ipopth', + 'msnlp', + 'conopt', + 'baron', + }: + opt.options['add_options'].append('GAMS_MODEL.optfile=1') + opt.options['add_options'].append( + '$onecho > ' + config.nlp_solver_args['solver'] + '.opt' + ) + if config.nlp_solver_args['solver'] in {'ipopt', 'ipopth'}: + opt.options['add_options'].append( + 'constr_viol_tol ' + str(config.zero_tolerance) + ) + # Ipopt warmstart options + opt.options['add_options'].append( + 'warm_start_init_point yes\n' + 'warm_start_bound_push 1e-9\n' + 'warm_start_bound_frac 1e-9\n' + 'warm_start_slack_bound_frac 1e-9\n' + 'warm_start_slack_bound_push 1e-9\n' + 'warm_start_mult_bound_push 1e-9\n' + ) + elif config.nlp_solver_args['solver'] == 'conopt': + opt.options['add_options'].append( + 'RTNWMA ' + str(config.zero_tolerance) + ) + elif config.nlp_solver_args['solver'] == 'msnlp': + opt.options['add_options'].append( + 'feasibility_tolerance ' + str(config.zero_tolerance) + ) + elif config.nlp_solver_args['solver'] == 'baron': + opt.options['add_options'].append( + 'AbsConFeasTol ' + str(config.zero_tolerance) + ) + opt.options['add_options'].append('$offecho') def get_integer_solution(model, string_zero=False): @@ -647,7 +643,8 @@ def get_integer_solution(model, string_zero=False): for var in model.MindtPy_utils.discrete_variable_list: if string_zero: if 
var.value == 0: - # In cplex, negative zero is different from zero, so we use string to denote this(Only in singletree) + # In CPLEX, negative zero is different from zero, + # so we use string to denote this (Only in singletree). temp.append(str(var.value)) else: temp.append(int(round(var.value))) @@ -703,8 +700,8 @@ def copy_var_list_values_from_solution_pool( # instead log warnings). This means that the following will # always succeed and the ValueError should never be raised. v_to.set_value(var_val, skip_validation=True) - except ValueError as err: - err_msg = getattr(err, 'message', str(err)) + except ValueError as e: + config.logger.error(e) rounded_val = int(round(var_val)) # Check to see if this is just a tolerance issue if ignore_integrality and v_to.is_integer(): @@ -734,114 +731,16 @@ def f(gurobi_model, where): """Callback function for Gurobi. Args: - gurobi_model (gurobi model): the gurobi model derived from pyomo model. + gurobi_model (Gurobi model): the Gurobi model derived from pyomo model. where (int): an enum member of gurobipy.GRB.Callback. """ self._callback_func( - self._pyomo_model, self, where, self.solve_data, self.config + self._pyomo_model, self, where, self.mindtpy_solver, self.config ) return f -def update_gap(solve_data): - """Update the relative gap and the absolute gap. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - """ - if solve_data.objective_sense == minimize: - solve_data.abs_gap = solve_data.primal_bound - solve_data.dual_bound - else: - solve_data.abs_gap = solve_data.dual_bound - solve_data.primal_bound - solve_data.rel_gap = solve_data.abs_gap / (abs(solve_data.primal_bound) + 1e-10) - - -def update_dual_bound(solve_data, bound_value): - """Update the dual bound. - - Call after solving relaxed problem, including relaxed NLP and MIP main problem. - Use the optimal primal bound of the relaxed problem to update the dual bound. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - bound_value : float - The input value used to update the dual bound. - """ - if math.isnan(bound_value): - return - if solve_data.objective_sense == minimize: - solve_data.dual_bound = max(bound_value, solve_data.dual_bound) - solve_data.dual_bound_improved = ( - solve_data.dual_bound > solve_data.dual_bound_progress[-1] - ) - else: - solve_data.dual_bound = min(bound_value, solve_data.dual_bound) - solve_data.dual_bound_improved = ( - solve_data.dual_bound < solve_data.dual_bound_progress[-1] - ) - solve_data.dual_bound_progress.append(solve_data.dual_bound) - solve_data.dual_bound_progress_time.append(get_main_elapsed_time(solve_data.timing)) - if solve_data.dual_bound_improved: - update_gap(solve_data) - - -def update_suboptimal_dual_bound(solve_data, results): - """If the relaxed problem is not solved to optimality, the dual bound is updated - according to the dual bound of relaxed problem. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - results : SolverResults - Results from solving the relaxed problem. - The dual bound of the relaxed problem can only be obtained from the result object. - """ - if solve_data.objective_sense == minimize: - bound_value = results.problem.lower_bound - else: - bound_value = results.problem.upper_bound - update_dual_bound(solve_data, bound_value) - - -def update_primal_bound(solve_data, bound_value): - """Update the primal bound. 
- - Call after solve fixed NLP subproblem. - Use the optimal primal bound of the relaxed problem to update the dual bound. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - bound_value : float - The input value used to update the primal bound. - """ - if math.isnan(bound_value): - return - if solve_data.objective_sense == minimize: - solve_data.primal_bound = min(bound_value, solve_data.primal_bound) - solve_data.primal_bound_improved = ( - solve_data.primal_bound < solve_data.primal_bound_progress[-1] - ) - else: - solve_data.primal_bound = max(bound_value, solve_data.primal_bound) - solve_data.primal_bound_improved = ( - solve_data.primal_bound > solve_data.primal_bound_progress[-1] - ) - solve_data.primal_bound_progress.append(solve_data.primal_bound) - solve_data.primal_bound_progress_time.append( - get_main_elapsed_time(solve_data.timing) - ) - if solve_data.primal_bound_improved: - update_gap(solve_data) - - def set_up_logger(config): """Set up the formatter and handler for logger. @@ -861,103 +760,6 @@ def set_up_logger(config): config.logger.addHandler(ch) -def get_dual_integral(solve_data, config): - """Calculate the dual integral. - Ref: The confined primal integral. [http://www.optimization-online.org/DB_FILE/2020/07/7910.pdf] - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - - Returns - ------- - float - The dual integral. - """ - dual_integral = 0 - dual_bound_progress = solve_data.dual_bound_progress.copy() - # Initial dual bound is set to inf or -inf. To calculate dual integral, we set - # initial_dual_bound to 10% greater or smaller than the first_found_dual_bound. - # TODO: check if the calculation of initial_dual_bound needs to be modified. - for dual_bound in dual_bound_progress: - if dual_bound != dual_bound_progress[0]: - break - for i in range(len(dual_bound_progress)): - if dual_bound_progress[i] == solve_data.dual_bound_progress[0]: - dual_bound_progress[i] = dual_bound * ( - 1 - - config.initial_bound_coef - * solve_data.objective_sense - * math.copysign(1, dual_bound) - ) - else: - break - for i in range(len(dual_bound_progress)): - if i == 0: - dual_integral += abs(dual_bound_progress[i] - solve_data.dual_bound) * ( - solve_data.dual_bound_progress_time[i] - ) - else: - dual_integral += abs(dual_bound_progress[i] - solve_data.dual_bound) * ( - solve_data.dual_bound_progress_time[i] - - solve_data.dual_bound_progress_time[i - 1] - ) - config.logger.info(' {:<25}: {:>7.4f} '.format('Dual integral', dual_integral)) - return dual_integral - - -def get_primal_integral(solve_data, config): - """Calculate the primal integral. - Ref: The confined primal integral. [http://www.optimization-online.org/DB_FILE/2020/07/7910.pdf] - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - - Returns - ------- - float - The primal integral. - """ - primal_integral = 0 - primal_bound_progress = solve_data.primal_bound_progress.copy() - # Initial primal bound is set to inf or -inf. To calculate primal integral, we set - # initial_primal_bound to 10% greater or smaller than the first_found_primal_bound. - # TODO: check if the calculation of initial_primal_bound needs to be modified. 
- for primal_bound in primal_bound_progress: - if primal_bound != primal_bound_progress[0]: - break - for i in range(len(primal_bound_progress)): - if primal_bound_progress[i] == solve_data.primal_bound_progress[0]: - primal_bound_progress[i] = primal_bound * ( - 1 - + config.initial_bound_coef - * solve_data.objective_sense - * math.copysign(1, primal_bound) - ) - else: - break - for i in range(len(primal_bound_progress)): - if i == 0: - primal_integral += abs( - primal_bound_progress[i] - solve_data.primal_bound - ) * (solve_data.primal_bound_progress_time[i]) - else: - primal_integral += abs( - primal_bound_progress[i] - solve_data.primal_bound - ) * ( - solve_data.primal_bound_progress_time[i] - - solve_data.primal_bound_progress_time[i - 1] - ) - - config.logger.info( - ' {:<25}: {:>7.4f} '.format('Primal integral', primal_integral) - ) - return primal_integral - - def epigraph_reformulation(exp, slack_var_list, constraint_list, use_mcpp, sense): """Epigraph reformulation. @@ -1053,143 +855,7 @@ def setup_results_object(results, model, config): ) -def process_objective( - solve_data, - config, - move_objective=False, - use_mcpp=False, - update_var_con_list=True, - partition_nonlinear_terms=True, - obj_handleable_polynomial_degree={0, 1}, - constr_handleable_polynomial_degree={0, 1}, -): - """Process model objective function. - Check that the model has only 1 valid objective. - If the objective is nonlinear, move it into the constraints. - If no objective function exists, emit a warning and create a dummy - objective. - Parameters - ---------- - solve_data (MindtPySolveData): solver environment data class - config (ConfigBlock): solver configuration options - move_objective (bool): if True, move even linear - objective functions to the constraints - update_var_con_list (bool): if True, the variable/constraint/objective lists will not be updated. - This arg is set to True by default. Currently, update_var_con_list will be set to False only when - add_regularization is not None in MindtPy. - partition_nonlinear_terms (bool): if True, partition sum of nonlinear terms in the objective function. - """ - m = solve_data.working_model - util_block = getattr(m, solve_data.util_block_name) - # Handle missing or multiple objectives - active_objectives = list( - m.component_data_objects(ctype=Objective, active=True, descend_into=True) - ) - solve_data.results.problem.number_of_objectives = len(active_objectives) - if len(active_objectives) == 0: - config.logger.warning('Model has no active objectives. Adding dummy objective.') - util_block.dummy_objective = Objective(expr=1) - main_obj = util_block.dummy_objective - elif len(active_objectives) > 1: - raise ValueError('Model has multiple active objectives.') - else: - main_obj = active_objectives[0] - solve_data.results.problem.sense = ( - ProblemSense.minimize if main_obj.sense == 1 else ProblemSense.maximize - ) - solve_data.objective_sense = main_obj.sense - - # Move the objective to the constraints if it is nonlinear or move_objective is True. - if ( - main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree - or move_objective - ): - if move_objective: - config.logger.info("Moving objective to constraint set.") - else: - config.logger.info("Objective is nonlinear. 
Moving it to constraint set.") - util_block.objective_value = VarList(domain=Reals, initialize=0) - util_block.objective_constr = ConstraintList() - if ( - main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree - and partition_nonlinear_terms - and main_obj.expr.__class__ is EXPR.SumExpression - ): - repn = generate_standard_repn( - main_obj.expr, quadratic=2 in obj_handleable_polynomial_degree - ) - # the following code will also work if linear_subexpr is a constant. - linear_subexpr = ( - repn.constant - + sum( - coef * var for coef, var in zip(repn.linear_coefs, repn.linear_vars) - ) - + sum( - coef * var1 * var2 - for coef, (var1, var2) in zip( - repn.quadratic_coefs, repn.quadratic_vars - ) - ) - ) - # only need to generate one epigraph constraint for the sum of all linear terms and constant - epigraph_reformulation( - linear_subexpr, - util_block.objective_value, - util_block.objective_constr, - use_mcpp, - main_obj.sense, - ) - nonlinear_subexpr = repn.nonlinear_expr - if nonlinear_subexpr.__class__ is EXPR.SumExpression: - for subsubexpr in nonlinear_subexpr.args: - epigraph_reformulation( - subsubexpr, - util_block.objective_value, - util_block.objective_constr, - use_mcpp, - main_obj.sense, - ) - else: - epigraph_reformulation( - nonlinear_subexpr, - util_block.objective_value, - util_block.objective_constr, - use_mcpp, - main_obj.sense, - ) - else: - epigraph_reformulation( - main_obj.expr, - util_block.objective_value, - util_block.objective_constr, - use_mcpp, - main_obj.sense, - ) - - main_obj.deactivate() - util_block.objective = Objective( - expr=sum(util_block.objective_value[:]), sense=main_obj.sense - ) - - if ( - main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree - or (move_objective and update_var_con_list) - ): - util_block.variable_list.extend(util_block.objective_value[:]) - util_block.continuous_variable_list.extend(util_block.objective_value[:]) - util_block.constraint_list.extend(util_block.objective_constr[:]) - util_block.objective_list.append(util_block.objective) - for constr in util_block.objective_constr[:]: - if ( - constr.body.polynomial_degree() - in constr_handleable_polynomial_degree - ): - util_block.linear_constraint_list.append(constr) - else: - util_block.nonlinear_constraint_list.append(constr) - - -def fp_converged(working_model, mip_model, config, discrete_only=True): +def fp_converged(working_model, mip_model, proj_zero_tolerance, discrete_only=True): """Calculates the euclidean norm between the discrete variables in the MIP and NLP models. Parameters @@ -1198,8 +864,8 @@ def fp_converged(working_model, mip_model, config, discrete_only=True): The working model(original model). mip_model : Pyomo model The mip model. - config : ConfigBlock - The specific configurations for MindtPy. + proj_zero_tolerance : Float + The projection zero tolerance of Feasibility Pump. discrete_only : bool, optional Whether to only optimize on distance between the discrete variables, by default True. @@ -1216,7 +882,7 @@ def fp_converged(working_model, mip_model, config, discrete_only=True): ) if (not discrete_only) or milp_var.is_integer() ) - return distance <= config.fp_projzerotol + return distance <= proj_zero_tolerance def add_orthogonality_cuts(working_model, mip_model, config):