prop_nlobbt.h
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*                                                                           */
/*                  This file is part of the program and library             */
/*         SCIP --- Solving Constraint Integer Programs                      */
/*                                                                           */
/*  Copyright (c) 2002-2023 Zuse Institute Berlin (ZIB)                      */
/*                                                                           */
/*  Licensed under the Apache License, Version 2.0 (the "License");          */
/*  you may not use this file except in compliance with the License.         */
/*  You may obtain a copy of the License at                                  */
/*                                                                           */
/*      http://www.apache.org/licenses/LICENSE-2.0                           */
/*                                                                           */
/*  Unless required by applicable law or agreed to in writing, software      */
/*  distributed under the License is distributed on an "AS IS" BASIS,        */
/*  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/*  See the License for the specific language governing permissions and      */
/*  limitations under the License.                                           */
/*                                                                           */
/*  You should have received a copy of the Apache-2.0 license                */
/*  along with SCIP; see the file LICENSE. If not visit scipopt.org.         */
/*                                                                           */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/**@file   prop_nlobbt.h
 * @ingroup PROPAGATORS
 * @brief   nonlinear OBBT propagator
 * @author  Benjamin Mueller
 *
 * In Nonlinear Optimization-Based Bound Tightening (NLOBBT), we solve auxiliary NLPs of the form
 * \f[
 *    \begin{aligned}
 *       \min / \max \, & x_i \\
 *       \textrm{s.t.} \; & g_j(x) \le 0 \quad \forall j = 1, \ldots, m \\
 *       & c'x \le \mathcal{U} \\
 *       & x \in [\ell,u]
 *    \end{aligned}
 * \f]
 *
 * where each \f$ g_j \f$ is a convex function and \f$ \mathcal{U} \f$ is the solution value of the current
 * incumbent. Clearly, the optimal objective value of this nonlinear program provides a valid lower/upper bound on
 * variable \f$ x_i \f$.
 *
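 * As a small illustration (an instance assumed here for exposition, not one taken from the propagator), take
 * \f$ x = (x_1, x_2) \f$ with bounds \f$ x \in [-10,10]^2 \f$, a single convex constraint
 * \f$ g_1(x) = x_1^2 + x_2^2 - 1 \le 0 \f$, and no objective cutoff. Minimizing and maximizing \f$ x_1 \f$ over this
 * set gives \f$ -1 \f$ and \f$ 1 \f$, so the bounds of \f$ x_1 \f$ can be tightened from \f$ [-10,10] \f$ to
 * \f$ [-1,1] \f$; analogous NLPs are solved for \f$ x_2 \f$.
 *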
 * The propagator sorts all variables w.r.t. their occurrences in convex nonlinear constraints and solves the convex
 * NLPs sequentially. Variables which could be successfully tightened by the propagator will be prioritized in the
 * next call at a new node of the branch-and-bound tree. By default, the propagator requires at least one nonconvex
 * constraint to be present in order to be executed; for purely convex problems, the benefit of having tighter bounds
 * is negligible.
 *
 * By default, NLOBBT is only applied to non-binary variables. A reason for this can be found
 * <a href="http://dx.doi.org/10.1007/s10898-016-0450-4">here</a>. Variables which do not appear nonlinearly in the
 * nonlinear constraints will not be considered even though they might lead to additional tightenings.
 *
 * After solving the NLP to optimize \f$ x_i \f$, we try to exploit the dual information to generate a globally valid
 * inequality, called a Generalized Variable Bound (see @ref prop_genvbounds.h). Let \f$ \lambda_j \f$, \f$ \mu \f$,
 * \f$ \alpha \f$, and \f$ \beta \f$ be the dual multipliers for the constraints of the NLP, where \f$ \alpha \f$ and
 * \f$ \beta \f$ correspond to the variable bound constraints. Because of the convexity of \f$ g_j \f$ we know that
 *
 * \f[
 * g_j(x) \ge g_j(x^*) + \nabla g_j(x^*)(x-x^*)
 * \f]
 *
 * holds for every \f$ x \in [\ell,u] \f$. Let \f$ x^* \f$ be the optimal solution after solving the NLP for the case
 * of minimizing \f$ x_i \f$ (similar for the case of maximizing \f$ x_i \f$). Since the NLP is convex, we know that
 * the KKT conditions
 *
 * \f[
 * e_i + \lambda' \nabla g(x^*) + \mu' c + \alpha - \beta = 0
 * \f]
 * \f[
 * \lambda_j g_j(x^*) = 0
 * \f]
 *
 * hold. Aggregating the inequalities \f$ x_i \ge x_i \f$ and \f$ \lambda_j g_j(x) \le 0 \f$ (valid because
 * \f$ \lambda_j \ge 0 \f$ and \f$ g_j(x) \le 0 \f$ for every feasible \f$ x \f$) leads to the inequality
 *
 * \f[
 * x_i \ge x_i + \sum_{j} \lambda_j g_j(x).
 * \f]
 *
 * Instead of calling the (expensive) propagator during the tree search, we can use this inequality to derive further
 * reductions on \f$ x_i \f$. Multiplying the first KKT condition by \f$ (x - x^*) \f$ and using the fact that each
 * \f$ g_j \f$ is convex, we can rewrite the previous inequality as
 *
 * \f[
 * x_i \ge (\beta - \alpha)' x + (e_i + \alpha - \beta)' x^* + \mu (c' x^* - \mathcal{U}),
 * \f]
 *
 * which is passed to the genvbounds propagator. Note that if \f$ \alpha_i \neq \beta_i \f$, the bound of \f$ x_i \f$
 * itself already certifies optimality and thus no useful genvbound can be found.
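 *
 * In more detail (a sketch of the algebra, using only the notation introduced above): multiplying the first KKT
 * condition by \f$ (x - x^*) \f$ gives
 *
 * \f[
 * x_i - x^*_i = -\lambda' \nabla g(x^*)(x - x^*) - \mu c'(x - x^*) - (\alpha - \beta)'(x - x^*).
 * \f]
 *
 * By convexity of the \f$ g_j \f$, nonnegativity of \f$ \lambda \f$, and complementary slackness,
 * \f$ -\lambda' \nabla g(x^*)(x - x^*) \ge -\lambda' g(x) \ge 0 \f$ for every feasible \f$ x \f$, and the cutoff
 * constraint together with \f$ \mu \ge 0 \f$ gives \f$ -\mu c'x \ge -\mu \mathcal{U} \f$; collecting terms yields
 * the generalized variable bound stated above.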
 */

/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/

#ifndef __SCIP_PROP_NLOBBT_H__
#define __SCIP_PROP_NLOBBT_H__

#include "scip/def.h"
#include "scip/type_retcode.h"
#include "scip/type_scip.h"

#ifdef __cplusplus
extern "C" {
#endif

/** creates the nlobbt propagator and includes it in SCIP
 *
 *  @ingroup PropagatorIncludes
 */
SCIP_EXPORT
SCIP_RETCODE SCIPincludePropNlobbt(
   SCIP*                 scip                /**< SCIP data structure */
   );
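
/* Minimal usage sketch (illustrative only; the surrounding application code is assumed and not part of this header):
 * applications built on SCIPincludeDefaultPlugins() already contain this propagator, so an explicit call to
 * SCIPincludePropNlobbt() is only needed when plugins are registered individually. The helper name runWithNlobbt()
 * is hypothetical.
 *
 * @code
 * #include "scip/scip.h"
 * #include "scip/prop_nlobbt.h"
 *
 * static SCIP_RETCODE runWithNlobbt(void)
 * {
 *    SCIP* scip = NULL;
 *
 *    SCIP_CALL( SCIPcreate(&scip) );
 *    SCIP_CALL( SCIPincludePropNlobbt(scip) );
 *
 *    // frequency parameter available for every SCIP propagator: -1 disables, 0 runs only in the root node
 *    SCIP_CALL( SCIPsetIntParam(scip, "propagating/nlobbt/freq", 0) );
 *
 *    // ... create the problem and add variables and (convex) nonlinear constraints here ...
 *
 *    SCIP_CALL( SCIPsolve(scip) );
 *    SCIP_CALL( SCIPfree(&scip) );
 *
 *    return SCIP_OKAY;
 * }
 * @endcode
 */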

#ifdef __cplusplus
}
#endif

#endif