exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright 2002-2022 Zuse Institute Berlin */
7 /* */
8 /* Licensed under the Apache License, Version 2.0 (the "License"); */
9 /* you may not use this file except in compliance with the License. */
10 /* You may obtain a copy of the License at */
11 /* */
12 /* http://www.apache.org/licenses/LICENSE-2.0 */
13 /* */
14 /* Unless required by applicable law or agreed to in writing, software */
15 /* distributed under the License is distributed on an "AS IS" BASIS, */
16 /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
17 /* See the License for the specific language governing permissions and */
18 /* limitations under the License. */
19 /* */
20 /* You should have received a copy of the Apache-2.0 license */
21 /* along with SCIP; see the file LICENSE. If not visit scipopt.org. */
22 /* */
23 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
24 
25 /**@file exprinterpret_cppad.cpp
26  * @brief methods to interpret (evaluate) an expression "fast" using CppAD
27  * @ingroup DEFPLUGINS_EXPRINT
28  * @author Stefan Vigerske
29  */
30 
31 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
32 
33 #include "scip/exprinterpret.h"
34 #include "scip/def.h"
35 #include "scip/intervalarith.h"
36 #include "scip/pub_expr.h"
37 #include "scip/scip_expr.h"
38 #include "scip/expr_pow.h"
39 #include "scip/expr_exp.h"
40 #include "scip/expr_log.h"
41 #include "scip/expr_varidx.h"
42 
43 #include <cmath>
44 #include <cstring>
45 #include <algorithm>
46 #include <vector>
47 using std::vector;
48 
49 /* Turn off lint warnings "747: Significant prototype coercion" and "732: Loss of sign".
50  * The first warning is generated for expressions like t[0], where t is a vector, since 0 is an integer constant, but a
51  * size_t is expected (usually long unsigned). The second is generated for expressions like t[n], where n is an
52  * integer. Both code pieces are likely to be correct. It seems to be impossible to inhibit these messages for
53  * vector<*>::operator[] only. */
54 /*lint --e{747,732}*/
55 
56 /* defining EVAL_USE_EXPRHDLR_ALWAYS uses the exprhdlr methods for the evaluation and differentiation of all expressions (except for var and value)
57  * instead of only those that are not recognized in this code.
58  * This is incomplete (no Hessians) and slow (only forward-mode gradients), but can be useful for testing.
59  */
60 // #define EVAL_USE_EXPRHDLR_ALWAYS
61 
62 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
63  * via CppAD's user-atomic function feature
64  * our customized implementation should give better results (tighter intervals) for the interval data type
65  * but we don't use interval types here anymore and the atomic operator does not look threadsafe (might be better in newer CppAD version)
66  */
67 #define NO_CPPAD_USER_ATOMIC
68 
69 /* fallback to non-thread-safe version if C++ is too old to have std::atomic */
70 #if __cplusplus < 201103L && defined(SCIP_THREADSAFE)
71 #undef SCIP_THREADSAFE
72 #endif
73 
74 /* CppAD needs to know a fixed upper bound on the number of threads at compile time.
75  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult-to-debug errors.
76  */
77 #ifndef CPPAD_MAX_NUM_THREADS
78 #ifdef SCIP_THREADSAFE
79 #define CPPAD_MAX_NUM_THREADS 64
80 #else
81 #define CPPAD_MAX_NUM_THREADS 1
82 #endif
83 #endif
84 
85 /* disable -Wshadow warnings for upcoming includes of CppAD if using some old GCC
86  * -Wshadow was too strict with some versions of GCC 4 (https://stackoverflow.com/questions/2958457/gcc-wshadow-is-too-strict)
87  * disable -Wimplicit-fallthrough as I don't want to maintain extra comments in CppAD code to suppress these
88  */
89 #ifdef __GNUC__
90 #if __GNUC__ == 4
91 #pragma GCC diagnostic ignored "-Wshadow"
92 #endif
93 #if __GNUC__ >= 7
94 #pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
95 #endif
96 #endif
97 
98 // MS compiler doesn't have log2() - define an expensive workaround
99 #ifdef _MSC_VER
100 #define log2(x) (log((double)x) / log(2.0))
101 #endif
102 
103 #include <cppad/cppad.hpp>
104 #include <cppad/utility/error_handler.hpp>
105 
106 /* CppAD is not thread-safe by itself, but uses some static data structures.
107  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
108  * This allocator needs to know the number of threads and a thread number for each thread.
109  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
110  */
111 #ifdef SCIP_THREADSAFE
112 
113 #include <atomic>
114 
115 /** currently registered number of threads */
116 static std::atomic_size_t ncurthreads{0};
117 static thread_local int thread_number{-1};
118 
119 /** CppAD callback function that indicates whether we are running in parallel mode */
120 static
121 bool in_parallel(void)
122 {
123  return ncurthreads > 1;
124 }
125 
126 /** CppAD callback function that returns the number of the current thread
127  *
128  * assigns a new number to the thread if new
129  */
130 static
131 size_t thread_num(void)
132 {
133  size_t threadnum;
134 
135  /* if no thread_number for this thread yet, then assign a new thread number to the current thread
136  */
137  if( thread_number == -1 )
138  {
139  thread_number = static_cast<int>(ncurthreads.fetch_add(1, std::memory_order_relaxed));
140  }
141 
142  threadnum = static_cast<size_t>(thread_number);
143 
144  return threadnum;
145 }
146 
147 /** sets up CppAD's datastructures for running in multithreading mode
148  *
149  * It must be called once before multithreading is started.
150  * For GCC-compatible compilers, this will happen automatically.
151  */
152 extern "C" SCIP_EXPORT char SCIPexprintCppADInitParallel(void);
153 #ifdef __GNUC__
154 __attribute__((constructor))
155 #endif
156 char SCIPexprintCppADInitParallel(void)
157 {
158  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
159  CppAD::parallel_ad<double>();
160 
161  return 0;
162 }
163 
164 #if !defined(__GNUC__)
165 /** a dummy variable that is initialized to the result of SCIPexprintCppADInitParallel()
166  *
167  * The purpose is to make sure that SCIPexprintCppADInitParallel() is called before any multithreading is started.
168  */
169 static char init_parallel_return = SCIPexprintCppADInitParallel();
170 #endif
171 
172 #endif // SCIP_THREADSAFE
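/* Illustration (a sketch, not part of the original source): the parallel setup above has to run in the
 * main thread before any other thread uses CppAD. With GCC-compatible compilers the constructor attribute
 * takes care of this, otherwise the static initializer above does; an embedding application could also
 * trigger it explicitly before spawning its threads, e.g.
 *
 *    SCIPexprintCppADInitParallel();   // installs in_parallel() and thread_num() in CppAD's allocator
 *    // ... start worker threads; each thread receives its slot in 0..CPPAD_MAX_NUM_THREADS-1
 *    //     from thread_num() on its first use of CppAD
 */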
173 
174 using CppAD::AD;
175 
176 class atomic_userexpr;
177 
178 /** expression specific interpreter data */
179 struct SCIP_ExprIntData
180 {
181 public:
182  /** constructor */
183  SCIP_ExprIntData()
184  : val(0.0),
185  need_retape(true),
186  need_retape_always(false),
187  userevalcapability(SCIP_EXPRINTCAPABILITY_ALL),
188  hesrowidxs(NULL),
189  hescolidxs(NULL),
190  hesnnz(0),
191  hesconstant(false)
192  { }
193 
194  /** destructor */
195  ~SCIP_ExprIntData()
196  { }/*lint --e{1540}*/
197 
198  /** gives position of index in varidxs vector */
199  int getVarPos(
200  int varidx /**< variable index to look for */
201  ) const
202  {
203  // varidxs is sorted, so can use binary search functions
204  assert(std::binary_search(varidxs.begin(), varidxs.end(), varidx));
205  return std::lower_bound(varidxs.begin(), varidxs.end(), varidx) - varidxs.begin();
206  }
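   /* Example (illustration only): if the expression uses the problem variables with indices 2, 5, and 7,
    * then varidxs = {2, 5, 7} and
    *
    *    getVarPos(5) == 1
    *
    * so x[1] and X[1] hold the current value resp. the AD variable belonging to variable index 5.
    */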
207 
208  vector< int > varidxs; /**< variable indices used in expression (unique and sorted) */
209  vector< AD<double> > X; /**< vector of dependent variables (same size as varidxs) */
210  vector< AD<double> > Y; /**< result vector (size 1) */
211  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
212 
213  vector<double> x; /**< current values of dependent variables (same size as varidxs) */
214  double val; /**< current function value */
215  bool need_retape; /**< will retaping be required for the next point evaluation? */
216  bool need_retape_always; /**< will retaping be always required? */
217 
218  vector<atomic_userexpr*> userexprs; /**< vector of atomic_userexpr that are created during eval() and need to be kept as long as the tape exists */
219  SCIP_EXPRINTCAPABILITY userevalcapability;/**< (intersection of) capabilities of evaluation routines of user expressions */
220 
221  int* hesrowidxs; /**< row indices of Hessian sparsity: indices are the variables used in expression */
222  int* hescolidxs; /**< column indices of Hessian sparsity: indices are the variables used in expression */
223  vector<SCIP_Real> hesvalues; /**< values of Hessian (a vector<> so we can pass it to CppAD) */
224  int hesnnz; /**< number of nonzeros in Hessian */
225  SCIP_Bool hesconstant; /**< whether Hessian is constant (because expr is at most quadratic) */
226 
227  // Hessian data in CppAD style: indices are 0..n-1 and elements in both the lower and upper triangle of the matrix are considered
228  CppAD::local::internal_sparsity<bool>::pattern_type hessparsity_pattern; /**< packed sparsity pattern of Hessian in CppAD-internal form */
229  CppAD::vector<size_t> hessparsity_row; /**< row indices of sparsity pattern of Hessian in CppAD-internal form */
230  CppAD::vector<size_t> hessparsity_col; /**< column indices of sparsity pattern of Hessian in CppAD-internal form */
231  CppAD::sparse_hessian_work heswork; /**< work memory of CppAD for sparse Hessians */
232 };
233 
234 #ifndef NO_CPPAD_USER_ATOMIC
235 
236 /** computes sparsity of jacobian for a univariate function during a forward sweep
237  *
238  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
239  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
240  */
241 static
242 bool univariate_for_sparse_jac(
243  size_t q, /**< number of columns in R */
244  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
245  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
246  )
247 {
248  assert(r.size() == q);
249  assert(s.size() == q);
250 
251  s = r;
252 
253  return true;
254 }
255 
256 /** Computes sparsity of jacobian during a reverse sweep
257  *
258  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
259  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
260  */
261 static
262 bool univariate_rev_sparse_jac(
263  size_t q, /**< number of rows in R */
264  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
265  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
266  )
267 {
268  assert(r.size() == q);
269  assert(s.size() == q);
270 
271  s = r;
272 
273  return true;
274 }
275 
276 /** computes sparsity of hessian during a reverse sweep
277  *
278  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
279  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
280  */ /*lint -e715*/
281 static
282 bool univariate_rev_sparse_hes(
283  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
284  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
285  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
286  size_t q, /**< number of columns in R, U, and V */
287  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
288  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
289  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
290  )
291 { /*lint --e{439,715}*/ /* @todo take vx into account */
292  assert(r.size() == q);
293  assert(s.size() == 1);
294  assert(t.size() == 1);
295  assert(u.size() == q);
296  assert(v.size() == q);
297 
298  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
299  t[0] = s[0];
300 
301  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
302  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
303  v = u;
304  if( s[0] )
305  for( size_t j = 0; j < q; ++j )
306  if( r[j] )
307  v[j] = true;
308 
309  return true;
310 }
311 
312 
313 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
314  *
315  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
316  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetics.
317  *
318  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
319  */
320 template<class Type>
321 class atomic_posintpower : public CppAD::atomic_base<Type>
322 {
323 public:
324  atomic_posintpower()
325  : CppAD::atomic_base<Type>("posintpower"),
326  exponent(0)
327  {
328  /* indicate that we want to use bool-based sparsity pattern */
329  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
330  }
331 
332 private:
333  /** exponent value for next call to forward or reverse */
334  int exponent;
335 
336  /** stores exponent value corresponding to next call to forward or reverse
337  *
338  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
339  * TODO using this function is deprecated, do as with atomic_userexpr instead
340  */
341  virtual void set_old(size_t id)
342  {
343  exponent = (int) id;
344  }
345 
346  /** forward sweep of positive integer power
347  *
348  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
349  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
350  * in the taylor expansion of f(x) = x^p.
351  * Thus, y = x^p
352  * = tx[0]^p,
353  * y' = p * x^(p-1) * x'
354  * = p * tx[0]^(p-1) * tx[1],
355  * y'' = 1/2 * p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
356  * = 1/2 * p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
357  */
358  bool forward(
359  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
360  size_t p, /**< highest order Taylor coefficient that we are evaluating */
361  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
362  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
363  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
364  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
365  )
366  {
367  assert(exponent > 1);
368  assert(tx.size() >= p+1);
369  assert(ty.size() >= p+1);
370  assert(q <= p);
371 
372  if( vx.size() > 0 )
373  {
374  assert(vx.size() == 1);
375  assert(vy.size() == 1);
376  assert(p == 0);
377 
378  vy[0] = vx[0];
379  }
380 
381  if( q == 0 /* q <= 0 && 0 <= p */ )
382  {
383  ty[0] = CppAD::pow(tx[0], exponent);
384  }
385 
386  if( q <= 1 && 1 <= p )
387  {
388  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
389  ty[1] *= double(exponent);
390  }
391 
392  if( q <= 2 && 2 <= p )
393  {
394  if( exponent > 2 )
395  {
396  // ty[2] = 1/2 * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
397  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
398  ty[2] *= (exponent-1) / 2.0;
399  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
400  ty[2] *= exponent;
401  }
402  else
403  {
404  assert(exponent == 2);
405  // ty[2] = 1/2 * exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
406  ty[2] = tx[1] * tx[1] + 2.0 * tx[0] * tx[2];
407  }
408  }
409 
410  /* higher order derivatives not implemented */
411  if( p > 2 )
412  return false;
413 
414  return true;
415  }
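   /* Worked example (illustration only): for exponent = 3 and tx = (2, 1, 0), i.e., x(t) = 2 + t,
    * the code above produces the Taylor coefficients of (2 + t)^3 = 8 + 12 t + 6 t^2 + t^3:
    *
    *    ty[0] = 2^3                                   = 8
    *    ty[1] = 3 * 2^2 * 1                           = 12
    *    ty[2] = 1/2 * 3 * 2 * 2^1 * 1^2 + 3 * 2^2 * 0 = 6
    */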
416 
417  /** reverse sweep of positive integer power
418  *
419  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
420  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
421  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
422  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
423  * That is, we have to compute
424  *\f$
425  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
426  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
427  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
428  * \f$
429  *
430  * For k = 0, this means
431  *\f$
432  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
433  * = py[0] * (\partial x^p / \partial x)
434  * = py[0] * p * tx[0]^(p-1)
435  *\f$
436  *
437  * For k = 1, this means
438  * \f$
439  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
440  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
441  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
442  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
443  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
444  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
445  * \f$
446  */ /*lint -e715*/
447  bool reverse(
448  size_t p, /**< highest order Taylor coefficient that we are evaluating */
449  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
450  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
451  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
452  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
453  )
454  { /*lint --e{715}*/
455  assert(exponent > 1);
456  assert(px.size() >= p+1);
457  assert(py.size() >= p+1);
458  assert(tx.size() >= p+1);
459 
460  switch( p )
461  {
462  case 0:
463  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
464  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
465  px[0] *= exponent;
466  break;
467 
468  case 1:
469  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
470  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
471  px[0] *= exponent-1;
472  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
473  px[0] *= exponent;
474  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
475  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
476  px[1] *= exponent;
477  break;
478 
479  default:
480  return false;
481  }
482 
483  return true;
484  }
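   /* Worked example (illustration only): for exponent = 3, tx = (2) and py = (1), the p = 0 case above gives
    *
    *    px[0] = 1 * 3 * 2^2 = 12,
    *
    * which is d(x^3)/dx at x = 2, i.e., plain reverse-mode differentiation of the power function.
    */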
485 
486  using CppAD::atomic_base<Type>::for_sparse_jac;
487 
488  /** computes sparsity of jacobian during a forward sweep
489  *
490  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
491  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
492  */
493  bool for_sparse_jac(
494  size_t q, /**< number of columns in R */
495  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
496  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
497  )
498  {
499  return univariate_for_sparse_jac(q, r, s);
500  }
501 
502  using CppAD::atomic_base<Type>::rev_sparse_jac;
503 
504  /** computes sparsity of jacobian during a reverse sweep
505  *
506  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
507  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
508  */
509  bool rev_sparse_jac(
510  size_t q, /**< number of rows in R */
511  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
512  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
513  )
514  {
515  return univariate_rev_sparse_jac(q, r, s);
516  }
517 
518  using CppAD::atomic_base<Type>::rev_sparse_hes;
519 
520  /** computes sparsity of hessian during a reverse sweep
521  *
522  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
523  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
524  */
525  bool rev_sparse_hes(
526  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
527  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
528  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
529  size_t q, /**< number of columns in R, U, and V */
530  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
531  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
532  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
533  )
534  {
535  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
536  }
537 };
538 
539 /** power function with natural exponents */
540 template<class Type>
541 static
542 void posintpower(
543  const vector<Type>& in, /**< vector whose first element is the base */
544  vector<Type>& out, /**< vector where to store the result in the first element */
545  size_t exponent /**< exponent */
546  )
547 {
548  static atomic_posintpower<typename Type::value_type> pip;
549  pip(in, out, exponent);
550 }
551 
552 #else
553 
554 /** power function with natural exponents */
555 template<class Type>
556 void posintpower(
557  const vector<Type>& in, /**< vector whose first element is the base */
558  vector<Type>& out, /**< vector where to store the result in the first element */
559  size_t exponent /**< exponent */
560  )
561 {
562  out[0] = pow(in[0], (int)exponent);
563 }
564 
565 #endif
566 
567 
568 #ifndef NO_CPPAD_USER_ATOMIC
569 
570 /** sign of a value (-1 or +1)
571  *
572  * 0.0 has sign +1
573  */
574 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
575 
576 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
577  *
578  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
579  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows to provide
580  * a continuously differentiable function.
581  *
582  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
583  */
584 template<class Type>
585 class atomic_signpower : public CppAD::atomic_base<Type>
586 {
587 public:
588  atomic_signpower()
589  : CppAD::atomic_base<Type>("signpower"),
590  exponent(0.0)
591  {
592  /* indicate that we want to use bool-based sparsity pattern */
593  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
594  }
595 
596 private:
597  /** exponent for use in next call to forward or reverse */
598  SCIP_Real exponent;
599 
600  /** stores exponent corresponding to next call to forward or reverse
601  *
602  * How is this supposed to be threadsafe? (we use only one global instantiation of this class)
603  * TODO using this function is deprecated, do as with atomic_userexpr instead
604  */
605  virtual void set_old(size_t id)
606  {
607  exponent = SCIPgetExponentExprPow((SCIP_EXPR*)(void*)id);
608  }
609 
610  /** forward sweep of signpower
611  *
612  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
613  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
614  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
615  * Thus, y = sign(x)abs(x)^p
616  * = sign(tx[0])abs(tx[0])^p,
617  * y' = p * abs(x)^(p-1) * x'
618  * = p * abs(tx[0])^(p-1) * tx[1],
619  * y'' = 1/2 * p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
620  * = 1/2 * p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
621  */
622  bool forward(
623  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
624  size_t p, /**< highest order Taylor coefficient that we are evaluating */
625  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
626  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
627  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
628  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
629  )
630  {
631  assert(exponent > 0.0);
632  assert(tx.size() >= p+1);
633  assert(ty.size() >= p+1);
634  assert(q <= p);
635 
636  if( vx.size() > 0 )
637  {
638  assert(vx.size() == 1);
639  assert(vy.size() == 1);
640  assert(p == 0);
641 
642  vy[0] = vx[0];
643  }
644 
645  if( q == 0 /* q <= 0 && 0 <= p */ )
646  {
647  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
648  }
649 
650  if( q <= 1 && 1 <= p )
651  {
652  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
653  ty[1] *= exponent;
654  }
655 
656  if( q <= 2 && 2 <= p )
657  {
658  if( exponent != 2.0 )
659  {
660  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
661  ty[2] *= (exponent - 1.0) / 2.0;
662  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
663  ty[2] *= exponent;
664  }
665  else
666  {
667  // y'' = 2 (1/2 * sign(x) * x'^2 + |x|*x'') = sign(tx[0]) * tx[1]^2 + 2 * abs(tx[0]) * tx[2]
668  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
669  ty[2] += 2.0 * REALABS(tx[0]) * tx[2];
670  }
671  }
672 
673  /* higher order derivatives not implemented */
674  if( p > 2 )
675  return false;
676 
677  return true;
678  }
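   /* Worked example (illustration only): for exponent = 2.0 and tx = (-3, 1, 0), i.e., x(t) = -3 + t,
    * the code above produces the Taylor coefficients of sign(x)|x|^2 = -(-3 + t)^2 = -9 + 6 t - t^2:
    *
    *    ty[0] = sign(-3) * 3^2               = -9
    *    ty[1] = 2 * 3^1 * 1                  =  6
    *    ty[2] = sign(-3) * 1^2 + 2 * 3 * 0   = -1
    */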
679 
680  /** reverse sweep of signpower
681  *
682  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
683  * y(x) = [ f(x), f'(x), f''(x), ... ].
684  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
685  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
686  * That is, we have to compute
687  *\f$
688  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
689  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
690  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
691  *\f$
692  *
693  * For k = 0, this means
694  *\f$
695  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
696  * = py[0] * (\partial f(x) / \partial x)
697  * = py[0] * p * abs(tx[0])^(p-1)
698  * \f$
699  *
700  * For k = 1, this means
701  *\f$
702  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
703  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
704  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
705  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
706  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
707  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
708  * \f$
709  */
710  bool reverse(
711  size_t p, /**< highest order Taylor coefficient that we are evaluating */
712  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
713  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
714  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
715  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
716  )
717  { /*lint --e{715}*/
718  assert(exponent > 1);
719  assert(px.size() >= p+1);
720  assert(py.size() >= p+1);
721  assert(tx.size() >= p+1);
722 
723  switch( p )
724  {
725  case 0:
726  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
727  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
728  px[0] *= exponent;
729  break;
730 
731  case 1:
732  if( exponent != 2.0 )
733  {
734  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
735  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
736  px[0] *= exponent - 1.0;
737  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
738  px[0] *= exponent;
739  // px[1] = py[1] * p * abs(tx[0])^(p-1)
740  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
741  px[1] *= exponent;
742  }
743  else
744  {
745  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
746  px[0] = py[1] * tx[1] * SIGN(tx[0]);
747  px[0] += py[0] * REALABS(tx[0]);
748  px[0] *= 2.0;
749  // px[1] = py[1] * 2.0 * abs(tx[0])
750  px[1] = py[1] * REALABS(tx[0]);
751  px[1] *= 2.0;
752  }
753  break;
754 
755  default:
756  return false;
757  }
758 
759  return true;
760  }
761 
762  using CppAD::atomic_base<Type>::for_sparse_jac;
763 
764  /** computes sparsity of jacobian during a forward sweep
765  *
766  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
767  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
768  */
769  bool for_sparse_jac(
770  size_t q, /**< number of columns in R */
771  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
772  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
773  )
774  {
775  return univariate_for_sparse_jac(q, r, s);
776  }
777 
778  using CppAD::atomic_base<Type>::rev_sparse_jac;
779 
780  /** computes sparsity of jacobian during a reverse sweep
781  *
782  * For a q x 1 matrix R, we have to return the sparsity pattern of the q x 1 matrix S(x) = R * f'(x).
783  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
784  */
785  bool rev_sparse_jac(
786  size_t q, /**< number of rows in R */
787  const CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
788  CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
789  )
790  {
791  return univariate_rev_sparse_jac(q, r, s);
792  }
793 
794  using CppAD::atomic_base<Type>::rev_sparse_hes;
795 
796  /** computes sparsity of hessian during a reverse sweep
797  *
798  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
799  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
800  */
801  bool rev_sparse_hes(
802  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
803  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
804  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
805  size_t q, /**< number of columns in S and R */
806  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
807  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
808  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
809  )
810  {
811  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
812  }
813 
814 };
815 
816 /** template for evaluation for signpower operator */
817 template<class Type>
818 static
819 void evalSignPower(
820  Type& resultant, /**< resultant */
821  const Type& arg, /**< operand */
822  SCIP_EXPR* expr /**< expression that holds the exponent */
823  )
824 {
825  vector<Type> in(1, arg);
826  vector<Type> out(1);
827 
828  static atomic_signpower<typename Type::value_type> sp;
829  sp(in, out, (size_t)(void*)expr);
830 
831  resultant = out[0];
832  return;
833 }
834 
835 #else
836 
837 /** specialization of signpower evaluation for real numbers */
838 template<class Type>
839 static
840 void evalSignPower(
841  CppAD::AD<Type>& resultant, /**< resultant */
842  const CppAD::AD<Type>& arg, /**< operand */
843  SCIP_EXPR* expr /**< expression that holds the exponent */
844  )
845 {
846  AD<Type> adzero(0.);
847  SCIP_Real exponent;
848 
849  exponent = SCIPgetExponentExprPow(expr);
850  assert(exponent >= 1.0);
851 
852  if( EPSISINT(exponent, 0.0) )
853  {
854  resultant = CppAD::CondExpGe(arg, adzero, pow(arg, (int)exponent), -pow(-arg, (int)exponent));
855  }
856  else
857  {
858  * pow(0,fractional>1) is not differentiable in the CppAD world (https://github.com/coin-or/CppAD/discussions/93)
859  * we work around this by evaluating pow(arg+eps,exponent)-pow(eps,exponent) in this case
860  */
861  resultant = CppAD::CondExpEq(arg, adzero, pow(arg+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
862  CppAD::CondExpGe(arg, adzero, pow(arg, exponent), -pow(-arg, exponent)));
863  }
864 }
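/* Note on the workaround above (illustration only): at arg == 0 the CondExpEq branch evaluates to
 *
 *    pow(0 + eps, exponent) - pow(eps, exponent) == 0,
 *
 * so the taped value at 0 remains exact, while the expression that CppAD differentiates,
 * pow(arg + eps, exponent), is smooth at arg == 0.
 */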
865 #endif
866 
867 /** Automatic differentiation of user expression as CppAD user-atomic function.
868  *
869  * This class implements forward and reverse operations for a function given by a user expression for use within CppAD.
870  */
871 class atomic_userexpr : public CppAD::atomic_base<SCIP_Real>
872 {
873 public:
874  atomic_userexpr(
875  SCIP* scip_, /**< SCIP data structure */
876  SCIP_EXPR* expr_ /**< expression to use */
877  )
878  : CppAD::atomic_base<SCIP_Real>(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr_)), CppAD::atomic_base<SCIP_Real>::bool_sparsity_enum),
879  scip(scip_),
880  expr(expr_)
881  { }
882 
883 private:
884  /** SCIP data structure */
885  SCIP* scip;
886 
887  /** expression */
888  SCIP_EXPR* expr;
889 
890  /** forward sweep of userexpr
891  *
892  * We follow http://www.coin-or.org/CppAD/Doc/atomic_forward.xml
893  * Note, that p and q are interchanged!
894  *
895  * For a scalar variable t, let
896  * Y(t) = f(X(t))
897  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
898  * where for x^i the i is an index, while for t^i the i is an exponent.
899  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
900  *
901  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
902  * y^0 = Y^(0)(0) = Y(0) = f(X(0)) = f(x^0)
903  * y^1 = Y^(1)(0) = Y'(0) = f'(X(0)) * X'(0) = f'(x^0) * x^1
904  * y^2 = 1/2 Y^(2)(0) = 1/2 Y''(0) = 1/2 X'(0) * f''(X(0)) X'(0) + 1/2 * f'(X(0)) * X''(0) = 1/2 x^1 * f''(x^0) * x^1 + f'(x^0) * x^2
905  *
906  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k], we get
907  * ty[0] = y^0 = f(x^0) = f(tx[{1..n}*(p+1)])
908  * ty[1] = y^1 = f'(x^0) * tx[{1..n}*(p+1)+1] = sum(i=1..n, grad[i] * tx[i*(p+1)+1]), where grad = f'(x^0)
909  * ty[2] = 1/2 sum(i,j=1..n, x[i*(p+1)+1] * x[j*(p+1)+1] * hessian[i,j]) + sum(i=1..n, grad[i] * x[i*(p+1)+2])
910  */
911  bool forward(
912  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
913  size_t p, /**< highest order Taylor coefficient that we are evaluating */
914  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
915  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
916  const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
917  CppAD::vector<SCIP_Real>& ty /**< vector to store taylor coefficients of y */
918  )
919  {
920  assert(scip != NULL);
921  assert(expr != NULL);
922  assert(ty.size() == p+1);
923  assert(q <= p);
924 
925  size_t n = tx.size() / (p+1);
926  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
927  assert(n >= 1);
928 
929  SCIPdebugMsg(scip, "expr_%s:forward, q=%zd, p=%zd\n", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), q, p);
930 
931  if( vx.size() > 0 )
932  {
933  assert(vx.size() == n);
934  assert(vy.size() == 1);
935  assert(p == 0);
936 
937  /* y_0 is a variable if at least one of the x_i is a variable */
938  vy[0] = false;
939  for( size_t i = 0; i < n; ++i )
940  if( vx[i] )
941  {
942  vy[0] = true;
943  break;
944  }
945  }
946 
947  if( p == 0 )
948  {
949  /* only eval requested
950  * arguments are in tx[i * (p+1) + 0], i == 0..n-1, that is tx[0]..tx[n-1]
951  */
952  if( SCIPcallExprEval(scip, expr, const_cast<double*>(tx.data()), &ty[0]) != SCIP_OKAY )
953  return false;
954 
955  if( ty[0] == SCIP_INVALID )
956  ty[0] = std::numeric_limits<double>::infinity();
957 
958  return true;
959  }
960 
961  if( p == 1 )
962  {
963  /* eval and forward-diff requested
964  *
965  * point to eval is in tx[i * (p+1) + 0], i == 0..n-1
966  * direction is in tx[i * (p+1) + 1], i == 0..n-1
967  */
968  SCIP_Real* x = new SCIP_Real[n];
969  SCIP_Real* dir = new SCIP_Real[n];
970  for( size_t i = 0; i < n; ++i )
971  {
972  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
973  dir[i] = tx[i * (p+1) + 1]; /*lint !e835*/
974  }
975 
976  SCIP_RETCODE rc = SCIPcallExprEvalFwdiff(scip, expr, x, dir, &ty[0], &ty[1]);
977 
978  if( ty[0] == SCIP_INVALID )
979  ty[0] = std::numeric_limits<double>::infinity();
980  if( ty[1] == SCIP_INVALID )
981  ty[1] = std::numeric_limits<double>::infinity();
982 
983  delete[] dir;
984  delete[] x;
985 
986  return rc == SCIP_OKAY;
987  }
988 
989  /* higher order derivatives not implemented yet (TODO) */
990  SCIPABORT();
991  return false;
992 
993 #if SCIP_DISABLED_CODE
994  SCIP_Real* gradient = NULL;
995  SCIP_Real* hessian = NULL;
996 
997  if( q <= 2 && 1 <= p )
998  gradient = new SCIP_Real[n];
999  if( q <= 2 && 2 <= p )
1000  hessian = new SCIP_Real[n*n];
1001 
1002  if( exprEvalUser(expr, x, ty[0], gradient, hessian) != SCIP_OKAY )
1003  {
1004  delete[] x;
1005  delete[] gradient;
1006  delete[] hessian;
1007  return false;
1008  }
1009 
1010  if( gradient != NULL )
1011  {
1012  ty[1] = 0.0;
1013  for( size_t i = 0; i < n; ++i )
1014  ty[1] += gradient[i] * tx[i * (p+1) + 1];
1015  }
1016 
1017  if( hessian != NULL )
1018  {
1019  assert(gradient != NULL);
1020 
1021  ty[2] = 0.0;
1022  for( size_t i = 0; i < n; ++i )
1023  {
1024  for( size_t j = 0; j < n; ++j )
1025  ty[2] += 0.5 * hessian[i*n+j] * tx[i * (p+1) + 1] * tx[j * (p+1) + 1];
1026 
1027  ty[2] += gradient[i] * tx[i * (p+1) + 2];
1028  }
1029  }
1030 
1031  delete[] x;
1032  delete[] gradient;
1033  delete[] hessian;
1034 
1035  /* higher order derivatives not implemented */
1036  if( p > 2 )
1037  return false;
1038 
1039  return true;
1040 #endif
1041  }
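   /* Example of the Taylor-coefficient layout used above (illustration only): for an expression with
    * n = 2 children and order p = 1, CppAD passes tx of size n*(p+1) = 4, arranged as
    *
    *    tx = ( x_0^0, x_0^1,  x_1^0, x_1^1 )
    *
    * so tx[i*(p+1)+0] is the evaluation point and tx[i*(p+1)+1] the direction for child i;
    * ty has size p+1 = 2 with ty[0] the function value and ty[1] the directional derivative
    * as filled in by SCIPcallExprEvalFwdiff().
    */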
1042 
1043  /** reverse sweep of userexpr
1044  *
1045  * We follow http://www.coin-or.org/CppAD/Doc/atomic_reverse.xml
1046  * Note, that there q is our p.
1047  *
1048  * For a scalar variable t, let
1049  * Y(t) = f(X(t))
1050  * X(t) = x^0 + x^1 t^1 + ... + x^p t^p
1051  * where for x^i the i is an index, while for t^i the i is an exponent.
1052  * Thus, x^k = 1/k! X^(k) (0), where X^(k)(.) denotes the k-th derivative.
1053  *
1054  * Next, let y^k = 1/k! Y^(k)(0) be the k'th taylor coefficient of Y. Thus,
1055  * Y(t) = y^0 + y^1 t^1 + y^2 t^2 + ...
1056  * y^0, y^1, ... are the taylor coefficients of f(x).
1057  *
1058  * Further, let F(x^0,..,x^p) by given as F^k(x) = y^k. Thus,
1059  * F^0(x) = y^0 = Y^(0)(0) = f(x^0)
1060  * F^1(x) = y^1 = Y^(1)(0) = f'(x^0) * x^1
1061  * F^2(x) = y^2 = 1/2 Y''(0) = 1/2 x^1 f''(x^0) x^1 + f'(x^0) x^2
1062  *
1063  * Given functions G: R^(p+1) -> R and H: R^(n*(p+1)) -> R, where H(x^0, x^1, .., x^p) = G(F(x^0,..,x^p)),
1064  * we have to return the value of \f$\partial H / \partial x^l, l = 0..p,\f$ in px. Therefore,
1065  * \f$
1066  * px^l = \partial H / \partial x^l
1067  * = sum(k=0..p, (\partial G / \partial y^k) * (\partial y^k / \partial x^l)
1068  * = sum(k=0..p, py[k] * (\partial F^k / \partial x^l)
1069  * \f$
1070  *
1071  * For p = 0, this means
1072  * \f$
1073  * px^0 = py[0] * \partial F^0 / \partial x^0
1074  * = py[0] * \partial f(x^0) / \partial x^0
1075  * = py[0] * f'(x^0)
1076  * \f$
1077  *
1078  * For p = 1, this means (for l = 0):
1079  * \f[
1080  * px^0 = py[0] * \partial F^0 / \partial x^0 + py[1] * \partial F^1 / \partial x^0
1081  * = py[0] * \partial f(x^0) / \partial x^0 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1082  * = py[0] * f'(x^0) + py[1] * f''(x^0) * x^1
1083  * \f]
1084  * and (for l=1):
1085  * \f[
1086  * px^1 = py[0] * \partial F^0 / \partial x^1 + py[1] * \partial F^1 / \partial x^1
1087  * = py[0] * \partial f(x^0) / \partial x^1 + py[1] * \partial (f'(x^0) * x^1) / \partial x^0
1088  * = py[0] * 0 + py[1] * f'(x^0)
1089  * \f]
1090  *
1091  * As x^k = (tx[k], tx[(p+1)+k], tx[2*(p+1)+k], ..., tx[n*(p+1)+k] and
1092  * px^k = (px[k], px[(p+1)+k], px[2*(p+1)+k], ..., px[n*(p+1)+k], we get
1093  * for p = 0:
1094  * px[i] = (px^0)_i = py[0] * grad[i]
1095  * for p = 1:
1096  * px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1097  * px[i*2+1] = (px^1)_i = py[1] * grad[i]
1098  */
1099  bool reverse(
1100  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1101  const CppAD::vector<SCIP_Real>& tx, /**< values for taylor coefficients of x */
1102  const CppAD::vector<SCIP_Real>& ty, /**< values for taylor coefficients of y */
1103  CppAD::vector<SCIP_Real>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1104  const CppAD::vector<SCIP_Real>& py /**< values for partial derivatives of g(x) w.r.t. y */
1105  )
1106  {
1107  assert(expr != NULL);
1108  assert(px.size() == tx.size());
1109  assert(py.size() == p+1);
1110 
1111  // TODO implement this again and then remove check for userexprs in SCIPexprintGrad
1112  SCIPABORT();
1113  return false;
1114 
1115 #ifdef SCIP_DISABLED_CODE
1116  size_t n = tx.size() / (p+1);
1117  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1118  assert(n >= 1);
1119 
1120  SCIP_Real* x = new SCIP_Real[n];
1121  SCIP_Real funcval;
1122  SCIP_Real* gradient = new SCIP_Real[n];
1123  SCIP_Real* hessian = NULL;
1124 
1125  if( p == 1 )
1126  hessian = new SCIP_Real[n*n];
1127 
1128  for( size_t i = 0; i < n; ++i )
1129  x[i] = tx[i * (p+1) + 0]; /*lint !e835*/
1130 
1131  if( exprEvalUser(expr, x, funcval, gradient, hessian) != SCIP_OKAY )
1132  {
1133  delete[] x;
1134  delete[] gradient;
1135  delete[] hessian;
1136  return false;
1137  }
1138 
1139  switch( p )
1140  {
1141  case 0:
1142  // px[j] = (px^0)_j = py[0] * grad[j]
1143  for( size_t i = 0; i < n; ++i )
1144  px[i] = py[0] * gradient[i];
1145  break;
1146 
1147  case 1:
1148  // px[i*2+0] = (px^0)_i = py[0] * grad[i] + py[1] * sum(j, hessian[j,i] * tx[j*2+1])
1149  // px[i*2+1] = (px^1)_i = py[1] * grad[i]
1150  assert(hessian != NULL);
1151  for( size_t i = 0; i < n; ++i )
1152  {
1153  px[i*2+0] = py[0] * gradient[i]; /*lint !e835*/
1154  for( size_t j = 0; j < n; ++j )
1155  px[i*2+0] += py[1] * hessian[i+n*j] * tx[j*2+1]; /*lint !e835*/
1156 
1157  px[i*2+1] = py[1] * gradient[i];
1158  }
1159  break;
1160 
1161  default:
1162  return false;
1163  }
1164 
1165  return true;
1166 #endif
1167  } /*lint !e715*/
1168 
1169  using CppAD::atomic_base<SCIP_Real>::for_sparse_jac;
1170 
1171  /** computes sparsity of jacobian during a forward sweep
1172  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1173  * Since we assume f'(x) to be dense, the sparsity of S will be the sparsity of R.
1174  */
1175  bool for_sparse_jac(
1176  size_t q, /**< number of columns in R */
1177  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1178  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1179  )
1180  {
1181  assert(expr != NULL);
1182  assert(s.size() == q);
1183 
1184  size_t n = r.size() / q;
1185  assert(n == (size_t)SCIPexprGetNChildren(expr)); /*lint !e571*/
1186 
1187  // sparsity for S(x) = f'(x) * R
1188  for( size_t j = 0; j < q; j++ )
1189  {
1190  s[j] = false;
1191  for( size_t i = 0; i < n; i++ )
1192  s[j] |= (bool)r[i * q + j]; /*lint !e1786*/
1193  }
1194 
1195  return true;
1196  }
1197 
1198  using CppAD::atomic_base<SCIP_Real>::rev_sparse_jac;
1199 
1200  /** computes sparsity of jacobian during a reverse sweep
1201  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1202  * Since we assume f'(x) to be dense, the sparsity of R will be the sparsity of S.
1203  */
1204  bool rev_sparse_jac(
1205  size_t q, /**< number of rows in R */
1206  const CppAD::vector<bool>& rt, /**< sparsity of R, rowwise */
1207  CppAD::vector<bool>& st /**< vector to store sparsity of S, rowwise */
1208  )
1209  {
1210  assert(expr != NULL);
1211  assert(rt.size() == q);
1212 
1213  size_t n = st.size() / q;
1214  assert(n == (size_t)SCIPexprGetNChildren(expr));
1215 
1216  // sparsity for S(x)^T = f'(x)^T * R^T
1217  for( size_t j = 0; j < q; j++ )
1218  for( size_t i = 0; i < n; i++ )
1219  st[i * q + j] = rt[j];
1220 
1221  return true;
1222  }
1223 
1224  using CppAD::atomic_base<SCIP_Real>::rev_sparse_hes;
1225 
1226  /** computes sparsity of hessian during a reverse sweep
1227  * Assume V(x) = (g(f(x)))'' R for a function g:R->R and a matrix R.
1228  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1229  */
1230  bool rev_sparse_hes(
1231  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1232  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1233  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1234  size_t q, /**< number of columns in S and R */
1235  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1236  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1237  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1238  )
1239  {
1240  assert(expr != NULL);
1241  size_t n = vx.size();
1242  assert((size_t)SCIPexprGetNChildren(expr) == n);
1243  assert(s.size() == 1);
1244  assert(t.size() == n);
1245  assert(r.size() == n * q);
1246  assert(u.size() == q);
1247  assert(v.size() == n * q);
1248 
1249  size_t i, j, k;
1250 
1251  // sparsity for T(x) = S(x) * f'(x)
1252  for( i = 0; i < n; ++i )
1253  t[i] = s[0];
1254 
1255  // V(x) = f'(x)^T * g''(y) * f'(x) * R + g'(y) * f''(x) * R
1256  // U(x) = g''(y) * f'(x) * R
1257  // S(x) = g'(y)
1258 
1259  // back propagate the sparsity for U
1260  for( j = 0; j < q; j++ )
1261  for( i = 0; i < n; i++ )
1262  v[ i * q + j] = u[j];
1263 
1264  // include forward Jacobian sparsity in Hessian sparsity
1265  // sparsity for g'(y) * f''(x) * R (Note f''(x) is assumed to be dense)
1266  if( s[0] )
1267  for( j = 0; j < q; j++ )
1268  for( i = 0; i < n; i++ )
1269  for( k = 0; k < n; ++k )
1270  v[ i * q + j] |= (bool) r[ k * q + j];
1271 
1272  return true;
1273  }
1274 };
1275 
1276 
1277 /** integer power operation for arbitrary integer exponents */
1278 template<class Type>
1279 static
1280 void evalIntPower(
1281  Type& resultant, /**< resultant */
1282  const Type& arg, /**< operand */
1283  const int exponent /**< exponent */
1284  )
1285 {
1286  if( exponent > 1 )
1287  {
1288  vector<Type> in(1, arg);
1289  vector<Type> out(1);
1290 
1291  posintpower(in, out, exponent);
1292 
1293  resultant = out[0];
1294  return;
1295  }
1296 
1297  if( exponent < -1 )
1298  {
1299  vector<Type> in(1, arg);
1300  vector<Type> out(1);
1301 
1302  posintpower(in, out, -exponent);
1303 
1304  resultant = Type(1.0)/out[0];
1305  return;
1306  }
1307 
1308  if( exponent == 1 )
1309  {
1310  resultant = arg;
1311  return;
1312  }
1313 
1314  if( exponent == 0 )
1315  {
1316  resultant = Type(1.0);
1317  return;
1318  }
1319 
1320  assert(exponent == -1);
1321  resultant = Type(1.0)/arg;
1322 }
1323 
1324 /** CppAD compatible evaluation of an expression for given arguments */
1325 template<class Type>
1326 static
1327 SCIP_RETCODE eval(
1328  SCIP* scip, /**< SCIP data structure */
1329  SCIP_EXPR* expr, /**< expression */
1330  SCIP_EXPRINTDATA* exprintdata, /**< interpreter data for root expression */
1331  const vector<Type>& x, /**< values of variables */
1332  Type& val /**< buffer to store expression value */
1333  )
1334 {
1335  Type* buf = NULL;
1336 
1337  assert(expr != NULL);
1338 
1339  // TODO this should iterate instead of using recursion
1340  // but the iterdata wouldn't work to hold Type at the moment
1341  // they could hold Type*, but then we need to alloc small portions all the time
1342  // or we have a big Type-array outside and point to it in iterdata
1343 
1344  if( SCIPisExprVaridx(scip, expr) )
1345  {
1346  val = x[exprintdata->getVarPos(SCIPgetIndexExprVaridx(expr))];
1347  return SCIP_OKAY;
1348  }
1349  if( SCIPisExprValue(scip, expr) )
1350  {
1351  val = SCIPgetValueExprValue(expr);
1352  return SCIP_OKAY;
1353  }
1354 
1355  SCIP_CALL( SCIPallocBufferArray(scip, &buf, SCIPexprGetNChildren(expr)) );
1356  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1357  {
1358  SCIP_CALL( eval(scip, SCIPexprGetChildren(expr)[i], exprintdata, x, buf[i]) );
1359  }
1360 
1361 #ifndef EVAL_USE_EXPRHDLR_ALWAYS
1362  if( SCIPisExprSum(scip, expr) )
1363  {
1364  val = SCIPgetConstantExprSum(expr);
1365  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1366  val += SCIPgetCoefsExprSum(expr)[i] * buf[i];
1367  }
1368  else if( SCIPisExprProduct(scip, expr) )
1369  {
1370  val = SCIPgetCoefExprProduct(expr);
1371  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1372  val *= buf[i];
1373  }
1374  else if( SCIPisExprPower(scip, expr) )
1375  {
1376  SCIP_Real exponent = SCIPgetExponentExprPow(expr);
1377  if( EPSISINT(exponent, 0.0) )
1378  evalIntPower(val, buf[0], (int)SCIPgetExponentExprPow(expr));
1379  else if( exponent == 0.5 )
1380  val = sqrt(buf[0]);
1381  else if( exponent < 1.0 )
1382  val = CppAD::pow(buf[0], SCIPgetExponentExprPow(expr));
1383  else
1384  {
1385  // workaround bug where CppAD claims pow(x,fractional>0) is nondiff at x=0
1386  // https://github.com/coin-or/CppAD/discussions/93#discussioncomment-327876
1387  AD<double> adzero(0.);
1388  val = CppAD::CondExpEq(buf[0], adzero, pow(buf[0]+std::numeric_limits<SCIP_Real>::epsilon(), exponent)-pow(std::numeric_limits<SCIP_Real>::epsilon(), exponent),
1389  pow(buf[0], exponent));
1390  }
1391  }
1392  else if( SCIPisExprSignpower(scip, expr) )
1393  {
1394  evalSignPower(val, buf[0], expr);
1395  }
1396  else if( SCIPisExprExp(scip, expr) )
1397  {
1398  val = exp(buf[0]);
1399  }
1400  else if( SCIPisExprLog(scip, expr) )
1401  {
1402  val = log(buf[0]);
1403  }
1404  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "sin") == 0 )
1405  {
1406  val = sin(buf[0]);
1407  }
1408  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "cos") == 0 )
1409  {
1410  val = cos(buf[0]);
1411  }
1412  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "erf") == 0 )
1413  {
1414  val = erf(buf[0]);
1415  }
1416  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "abs") == 0 )
1417  {
1418  val = abs(buf[0]);
1419  }
1420  else if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "entropy") == 0 )
1421  {
1422  /* -x*log(x) if x > 0, else 0
1423  * https://coin-or.github.io/CppAD/doc/cond_exp.cpp.htm suggests using 0 for the x=0 case
1424  * but then derivatives at 0 are 0, while they are actually infinite (see expr_entropy.c:bwdiff)
1425  * so we use -sqrt(x) for the x=0 case, as this also has value 0 and first derivative -inf at 0
1426  */
1427  val = CppAD::CondExpGt(buf[0], AD<double>(0.), -buf[0] * log(buf[0]), -sqrt(buf[0]));
1428  }
1429  else
1430 #endif
1431  {
1432  //SCIPerrorMessage("using derivative methods of exprhdlr %s in CppAD is not implemented yet", SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)));
1433  //CppAD::ErrorHandler::Call(true, __LINE__, __FILE__, "evalUser()", "Error: user expressions in CppAD not possible without CppAD user atomic facility");
1434  vector<Type> in(buf, buf + SCIPexprGetNChildren(expr));
1435  vector<Type> out(1);
1436 
1437  exprintdata->userexprs.push_back(new atomic_userexpr(scip, expr));
1438  (*exprintdata->userexprs.back())(in, out);
1439 
1440  val = out[0];
1441  }
1442 
1443  SCIPfreeBufferArray(scip, &buf);
1444 
1445  return SCIP_OKAY;
1446 }
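/* Sketch of how eval() is typically driven when (re)taping (illustration only, simplified, error handling
 * omitted; names refer to the SCIP_ExprIntData members defined above):
 *
 *    CppAD::Independent(exprintdata->X);                                     // start recording on X
 *    eval(scip, rootexpr, exprintdata, exprintdata->X, exprintdata->Y[0]);   // record the expression
 *    exprintdata->f.Dependent(exprintdata->X, exprintdata->Y);               // stop recording, build ADFun
 *    exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];        // zero-order forward = value
 */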
1447 
1448 /** replacement for CppAD's default error handler
1449  *
1450  * In debug mode, CppAD gives an error when an evaluation contains a nan.
1451  * We do not want to stop execution in such a case, since the calling routine should check for nan's and decide what to do.
1452  * Since we cannot ignore only this particular error, we ignore all of them.
1453  * @todo find a way to check whether the error corresponds to a nan and communicate this back
1454  */
1455 static
1456 void cppaderrorcallback(
1457  bool known, /**< is the error from a known source? */
1458  int line, /**< line where error occurred */
1459  const char* file, /**< file where error occurred */
1460  const char* cond, /**< error condition */
1461  const char* msg /**< error message */
1462  )
1463 {
1464  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, cond);
1465 }
1466 
1467 /* install our error handler */
1468 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
1469 
1470 /** gets name and version of expression interpreter */
1471 const char* SCIPexprintGetName(void)
1472 {
1473  return CPPAD_PACKAGE_STRING;
1474 }
1475 
1476 /** gets descriptive text of expression interpreter */
1477 const char* SCIPexprintGetDesc(void)
1478 {
1479  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (github.com/coin-or/CppAD)";
1480 }
1481 
1482 /** gets capabilities of expression interpreter (using bitflags) */
1483 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
1484  void
1485  )
1486 {
1487  return SCIP_EXPRINTCAPABILITY_ALL;
1488 }
1489 
1490 /** creates an expression interpreter object */
1491 SCIP_RETCODE SCIPexprintCreate(
1492  SCIP* scip, /**< SCIP data structure */
1493  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
1494  )
1495 {
1496  assert(exprint != NULL);
1497 
1498  *exprint = (SCIP_EXPRINT*)1u; /* some code checks that a non-NULL pointer is returned here, even though it may not point anywhere */
1499 
1500  return SCIP_OKAY;
1501 }
1502 
1503 /** frees an expression interpreter object */
1504 SCIP_RETCODE SCIPexprintFree(
1505  SCIP* scip, /**< SCIP data structure */
1506  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
1507  )
1508 {
1509  assert(exprint != NULL);
1510 
1511  *exprint = NULL;
1512 
1513  return SCIP_OKAY;
1514 }
1515 
1516 /** compiles an expression and returns interpreter-specific data for expression
1517  *
1518  * can be called again with existing exprintdata if expression has been changed
1519  *
1520  * @attention *exprintdata needs to be initialized to NULL at first call
1521  * @attention the expression is assumed to use varidx expressions instead of var expressions
1522  */
1523 SCIP_RETCODE SCIPexprintCompile(
1524  SCIP* scip, /**< SCIP data structure */
1525  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1526  SCIP_EXPR* rootexpr, /**< expression */
1527  SCIP_EXPRINTDATA** exprintdata /**< buffer to store pointer to compiled data */
1528  )
1529 {
1530  SCIP_EXPRITER* it;
1531  SCIP_EXPR* expr;
1532 
1533  assert(rootexpr != NULL);
1534  assert(exprintdata != NULL);
1535 
1536  if( *exprintdata == NULL )
1537  {
1538  *exprintdata = new SCIP_EXPRINTDATA();
1539  assert(*exprintdata != NULL);
1540  }
1541  else
1542  {
1543  (*exprintdata)->need_retape = true;
1544  (*exprintdata)->need_retape_always = false;
1545  (*exprintdata)->hesconstant = false;
1546  }
1547 
1548  SCIP_CALL( SCIPcreateExpriter(scip, &it) );
1549  SCIP_CALL( SCIPexpriterInit(it, rootexpr, SCIP_EXPRITER_DFS, FALSE) );
1550 
1551  std::set<int> varidxs;
1552  for( expr = SCIPexpriterGetCurrent(it); !SCIPexpriterIsEnd(it); expr = SCIPexpriterGetNext(it) )
1553  {
1554  /* cannot handle var-expressions in exprint so far, should be varidx expressions */
1555  assert(!SCIPisExprVar(scip, expr));
1556 
1557  if( SCIPisExprVaridx(scip, expr) )
1558  {
1559  varidxs.insert(SCIPgetIndexExprVaridx(expr));
1560  continue;
1561  }
1562 
1563  /* check whether expr is of a type we don't recognize */
1564 #ifndef EVAL_USE_EXPRHDLR_ALWAYS
1565  if( !SCIPisExprSum(scip, expr) &&
1566  !SCIPisExprProduct(scip, expr) &&
1567  !SCIPisExprPower(scip, expr) &&
1568  !SCIPisExprSignpower(scip, expr) &&
1569  !SCIPisExprExp(scip, expr) &&
1570  !SCIPisExprLog(scip, expr) &&
1571  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "abs") != 0 &&
1572  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "sin") != 0 &&
1573  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "cos") != 0 &&
1574  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "entropy") != 0 &&
1575  strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "erf") != 0 &&
1576  !SCIPisExprValue(scip, expr) )
1577 #endif
1578  {
1579  /* an expression for which we have no taping implemented in eval()
1580  * so we will try to use CppAD's atomic operand and the expression handler methods
1581  * however, only atomic_userexpr:forward() is implemented for p=0,1 at the moment, so we cannot do Hessians
1582  * also the exprhdlr needs to implement the fwdiff callback for derivatives
1583  */
1584  if( SCIPexprhdlrHasFwdiff(SCIPexprGetHdlr(expr)) )
1585  (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_GRADIENT;
1586  else
1587  (*exprintdata)->userevalcapability &= SCIP_EXPRINTCAPABILITY_FUNCVALUE;
1588  }
1589 
1590  //if( strcmp(SCIPexprhdlrGetName(SCIPexprGetHdlr(expr)), "xyz") == 0 )
1591  // (*exprintdata)->need_retape_always = true;
1592  }
1593 
1594  SCIPfreeExpriter(&it);
1595 
1596  (*exprintdata)->varidxs.reserve(varidxs.size());
1597  (*exprintdata)->varidxs.insert((*exprintdata)->varidxs.begin(), varidxs.begin(), varidxs.end());
1598 
1599  size_t n = (*exprintdata)->varidxs.size();
1600  (*exprintdata)->X.resize(n);
1601  (*exprintdata)->x.resize(n);
1602  (*exprintdata)->Y.resize(1);
1603 
1604  // check whether the expression is quadratic (or linear), so we know the Hessian is constant and can save time when evaluating it;
1605  // this assumes a simplified expression and only recognizes x^2 and x*y terms
1606  // not using SCIPcheckExprQuadratic(), because we don't need the quadratic form
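  // for example, a simplified sum like 3*x + 2*x^2 + 5*x*y passes all checks below, so its Hessian is constant,
  // while a child like x^3 or x*y*z resets hesconstant to false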
1607  if( SCIPisExprSum(scip, rootexpr) )
1608  {
1609  (*exprintdata)->hesconstant = true;
1610  for( int i = 0; i < SCIPexprGetNChildren(rootexpr); ++i )
1611  {
1612  SCIP_EXPR* child;
1613  child = SCIPexprGetChildren(rootexpr)[i];
1614  /* linear term is ok */
1615  if( SCIPisExprVaridx(scip, child) )
1616  continue;
1617  /* square term is ok */
1618  if( SCIPisExprPower(scip, child) && SCIPgetExponentExprPow(child) == 2.0 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) )
1619  continue;
1620  /* bilinear term is ok */
1621  if( SCIPisExprProduct(scip, child) && SCIPexprGetNChildren(child) == 2 && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[0]) && SCIPisExprVaridx(scip, SCIPexprGetChildren(child)[1]) )
1622  continue;
1623  /* everything else means not quadratic (or not simplified) */
1624  (*exprintdata)->hesconstant = false;
1625  break;
1626  }
1627  SCIPdebugMsg(scip, "Hessian found %sconstant\n", (*exprintdata)->hesconstant ? "" : "not ");
1628  }
1629 
1630  return SCIP_OKAY;
1631 }
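/* a minimal usage sketch (myexpr and myvals are hypothetical), assuming myexpr already uses varidx
 * expressions and myvals[i] holds the value of the variable with index i:
 *
 *    SCIP_EXPRINT* exprint;
 *    SCIP_EXPRINTDATA* exprintdata = NULL;    // must be NULL before the first compile call
 *    SCIP_Real val;
 *    SCIP_CALL( SCIPexprintCreate(scip, &exprint) );
 *    SCIP_CALL( SCIPexprintCompile(scip, exprint, myexpr, &exprintdata) );
 *    SCIP_CALL( SCIPexprintEval(scip, exprint, myexpr, exprintdata, myvals, &val) );
 *    SCIP_CALL( SCIPexprintFreeData(scip, exprint, myexpr, &exprintdata) );
 *    SCIP_CALL( SCIPexprintFree(scip, &exprint) );
 */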
1632 
1633 /** frees interpreter data for expression */
1634 SCIP_RETCODE SCIPexprintFreeData(
1635  SCIP* scip, /**< SCIP data structure */
1636  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1637  SCIP_EXPR* expr, /**< expression */
1638  SCIP_EXPRINTDATA** exprintdata /**< pointer to pointer to compiled data to be freed */
1639  )
1640 {
1641  assert( exprintdata != NULL);
1642  assert(*exprintdata != NULL);
1643 
1644  SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hesrowidxs, (*exprintdata)->hesnnz);
1645  SCIPfreeBlockMemoryArrayNull(scip, &(*exprintdata)->hescolidxs, (*exprintdata)->hesnnz);
1646 
1647  delete *exprintdata;
1648  *exprintdata = NULL;
1649 
1650  return SCIP_OKAY;
1651 }
1652 
1653 /** gives the capability to evaluate an expression by the expression interpreter
1654  *
1655  * In cases of user-given expressions, higher order derivatives may not be available for the user-expression,
1656  * even if the expression interpreter could handle these. This method makes it possible to recognize that, e.g., the
1657  * Hessian for an expression is not available because it contains a user expression that does not provide
1658  * Hessians.
1659  */
1660 SCIP_EXPRINTCAPABILITY SCIPexprintGetExprCapability(
1661  SCIP* scip, /**< SCIP data structure */
1662  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1663  SCIP_EXPR* expr, /**< expression */
1664  SCIP_EXPRINTDATA* exprintdata /**< interpreter-specific data for expression */
1665  )
1666 {
1667  assert(exprintdata != NULL);
1668 
1669  return exprintdata->userevalcapability;
1670 }/*lint !e715*/
1671 
1672 /** evaluates an expression */
1673 SCIP_RETCODE SCIPexprintEval(
1674  SCIP* scip, /**< SCIP data structure */
1675  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1676  SCIP_EXPR* expr, /**< expression */
1677  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1678  SCIP_Real* varvals, /**< values of variables */
1679  SCIP_Real* val /**< buffer to store value of expression */
1680  )
1681 {
1682  assert(expr != NULL);
1683  assert(exprintdata != NULL);
1684  assert(varvals != NULL);
1685  assert(val != NULL);
1686 
1687  size_t n = exprintdata->varidxs.size();
1688 
1689  if( n == 0 )
1690  {
1691  SCIP_CALL( SCIPevalExpr(scip, expr, NULL, 0L) );
1692  exprintdata->val = *val = SCIPexprGetEvalValue(expr);
1693  return SCIP_OKAY;
1694  }
1695 
1696  if( exprintdata->need_retape_always || exprintdata->need_retape )
1697  {
1698  /* free old Hessian data, if any */
1699  SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz);
1700  SCIPfreeBlockMemoryArrayNull(scip, &exprintdata->hescolidxs, exprintdata->hesnnz);
1701  exprintdata->hesvalues.clear();
1702  exprintdata->hesnnz = 0;
1703 
1704  for( size_t i = 0; i < n; ++i )
1705  {
1706  int idx = exprintdata->varidxs[i];
1707  exprintdata->X[i] = varvals[idx];
1708  exprintdata->x[i] = varvals[idx]; /* need this for a following grad or hessian eval with new_x = false */
1709  }
1710 
1711  // delete old atomic_userexprs before we start collecting new ones
1712  for( vector<atomic_userexpr*>::iterator it(exprintdata->userexprs.begin()); it != exprintdata->userexprs.end(); ++it )
1713  delete *it;
1714  exprintdata->userexprs.clear();
1715 
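  // CppAD::Independent starts taping all operations performed on X; the Dependent call below stops taping
  // and stores the recorded function in exprintdata->f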
1716  CppAD::Independent(exprintdata->X);
1717 
1718  SCIP_CALL( eval(scip, expr, exprintdata, exprintdata->X, exprintdata->Y[0]) );
1719 
1720  exprintdata->f.Dependent(exprintdata->X, exprintdata->Y);
1721 
1722  exprintdata->val = Value(exprintdata->Y[0]);
1723  SCIPdebugMessage("Eval retaped and computed value %g\n", exprintdata->val);
1724 
1725  // the following is required if the gradient shall be computed by a reverse sweep later
1726  // exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1727 
1728  // https://coin-or.github.io/CppAD/doc/optimize.htm
1729  exprintdata->f.optimize();
1730 
1731  exprintdata->need_retape = false;
1732  }
1733  else
1734  {
1735  assert(exprintdata->x.size() >= n);
1736  for( size_t i = 0; i < n; ++i )
1737  exprintdata->x[i] = varvals[exprintdata->varidxs[i]];
1738 
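  // zero-order forward sweep: reuse the existing tape to evaluate the function at the new point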
1739  exprintdata->val = exprintdata->f.Forward(0, exprintdata->x)[0];
1740  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", exprintdata->val);
1741  }
1742 
1743  *val = exprintdata->val;
1744 
1745  return SCIP_OKAY;
1746 }
1747 
1748 /** computes value and gradient of an expression */
1749 SCIP_RETCODE SCIPexprintGrad(
1750  SCIP* scip, /**< SCIP data structure */
1751  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1752  SCIP_EXPR* expr, /**< expression */
1753  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1754  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
1755  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
1756  SCIP_Real* val, /**< buffer to store expression value */
1757  SCIP_Real* gradient /**< buffer to store expression gradient */
1758  )
1759 {
1760  assert(expr != NULL);
1761  assert(exprintdata != NULL);
1762  assert(varvals != NULL || new_varvals == FALSE);
1763  assert(val != NULL);
1764  assert(gradient != NULL);
1765 
1766  if( new_varvals )
1767  {
1768  SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
1769  }
1770  else
1771  *val = exprintdata->val;
1772 
1773  size_t n = exprintdata->varidxs.size();
1774 
1775  if( n == 0 )
1776  return SCIP_OKAY;
1777 
1778  vector<double> jac;
1779  if( exprintdata->userexprs.empty() )
1780  jac = exprintdata->f.Jacobian(exprintdata->x);
1781  else
1782  {
1783  // atomic_userexpr::reverse not implemented yet (even for p=0)
1784  // so force use of forward-eval for gradient (though that seems pretty inefficient)
1785  exprintdata->f.Forward(0, exprintdata->x);
1786  jac.resize(n);
1787  CppAD::JacobianFor(exprintdata->f, exprintdata->x, jac);
1788  }
1789 
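  // scatter: jac is ordered by position in varidxs, whereas gradient is indexed by the variable indices themselves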
1790  for( size_t i = 0; i < n; ++i )
1791  gradient[exprintdata->varidxs[i]] = jac[i];
1792 
1793 #ifdef SCIP_DEBUG
1794  SCIPinfoMessage(scip, NULL, "Grad for ");
1795  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1796  SCIPinfoMessage(scip, NULL, "\nx = ");
1797  for( size_t i = 0; i < n; ++i )
1798  {
1799  SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
1800  }
1801  SCIPinfoMessage(scip, NULL, "\ngrad = ");
1802  for( size_t i = 0; i < n; ++i )
1803  {
1804  SCIPinfoMessage(scip, NULL, "\t %g", jac[i]);
1805  }
1806  SCIPinfoMessage(scip, NULL, "\n");
1807 #endif
1808 
1809  return SCIP_OKAY;
1810 }
1811 
1812 /** gives sparsity pattern of lower-triangular part of Hessian
1813  *
1814  * Since the AD code might need to do a forward sweep, variable values need to be passed in here.
1815  *
1816  * Result will have `(*colidxs)[i] <= (*rowidxs)[i]` for `i=0..*nnz`.
1817  */
1818 SCIP_RETCODE SCIPexprintHessianSparsity(
1819  SCIP* scip, /**< SCIP data structure */
1820  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1821  SCIP_EXPR* expr, /**< expression */
1822  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1823  SCIP_Real* varvals, /**< values of variables */
1824  int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
1825  int** colidxs, /**< buffer to return array with column indices of Hessian elements */
1826  int* nnz /**< buffer to return length of arrays */
1827  )
1828 {
1829  assert(expr != NULL);
1830  assert(exprintdata != NULL);
1831  assert(varvals != NULL);
1832  assert(rowidxs != NULL);
1833  assert(colidxs != NULL);
1834  assert(nnz != NULL);
1835 
1836  if( exprintdata->hesrowidxs == NULL )
1837  {
1838  assert(exprintdata->hescolidxs == NULL);
1839  assert(exprintdata->hesvalues.size() == 0);
1840  assert(exprintdata->hesnnz == 0);
1841 
1842  size_t n = exprintdata->varidxs.size();
1843  if( n == 0 )
1844  {
1845  *nnz = 0;
1846  return SCIP_OKAY;
1847  }
1848 
1849  size_t nn = n*n;
1850 
1851  if( exprintdata->need_retape_always )
1852  {
1853  // pretend dense
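  // (since the function is retaped at every point, the sparsity pattern may differ from point to point,
  // so report a full lower triangle here)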
1854  exprintdata->hesnnz = (n * (n+1))/2;
1855  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1856  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1857 
1858  int k = 0;
1859  for( size_t i = 0; i < n; ++i )
1860  for( size_t j = 0; j <= i; ++j )
1861  {
1862  exprintdata->hesrowidxs[k] = exprintdata->varidxs[i];
1863  exprintdata->hescolidxs[k] = exprintdata->varidxs[j];
1864  k++;
1865  }
1866 
1867 #ifdef SCIP_DEBUG
1868  SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1869  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1870  SCIPinfoMessage(scip, NULL, ": all elements, due to discontinuities\n");
1871 #endif
1872 
1873  *rowidxs = exprintdata->hesrowidxs;
1874  *colidxs = exprintdata->hescolidxs;
1875  *nnz = exprintdata->hesnnz;
1876 
1877  return SCIP_OKAY;
1878  }
1879 
1880  if( exprintdata->need_retape )
1881  {
1882  SCIP_Real val;
1883  SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, &val) );
1884  } /*lint !e438*/
1885 
1886  // following https://github.com/coin-or/CppAD/blob/20180000.0/cppad_ipopt/src/vec_fun_pattern.cpp
1887 
1888  SCIPdebugMessage("calling ForSparseJac\n");
1889 
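  // seed ForSparseJac with the n x n identity matrix (stored row-wise in r), so Jacobian sparsity is tracked w.r.t. every variable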
1890  vector<bool> r(nn, false);
1891  for( size_t i = 0; i < n; ++i )
1892  r[i*n+i] = true;
1893  (void) exprintdata->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
1894 
1895  SCIPdebugMessage("calling RevSparseHes\n");
1896 
1897  // this was originally
1898  // vector<bool> s(1, true);
1899  // hessparsity = exprintdata->f.RevSparseHes(n, s);
1900  // RevSparseHes is just calling RevSparseHesCase
1901  // to avoid copying hessparsity, call RevSparseHesCase directly
1902  vector<bool> hessparsity;
1903  exprintdata->f.RevSparseHesCase(true, false, n, vector<bool>(1, true), hessparsity);
1904 
1905  // count number of hessian elements and setup hessparsity_pattern
1906  exprintdata->hessparsity_pattern.resize(0, 0); // clear old data, if any
1907  exprintdata->hessparsity_pattern.resize(n, n);
1908  size_t hesnnz_full = 0; // number of nonzeros in full matrix, that is, not only lower-diagonal
1909  for( size_t i = 0; i < nn; ++i )
1910  if( hessparsity[i] )
1911  {
1912  size_t row = i / n;
1913  size_t col = i % n;
1914 
1915  ++hesnnz_full;
1916 
1917  if( col > row )
1918  continue;
1919  ++exprintdata->hesnnz;
1920  }
1921 
1922  // hessian sparsity in sparse form
1923  // - hesrowidxs,hescolidxs are nonzero entries in the lower-diagonal of the Hessian and are returned to the caller; indices are variable indices as in expression
1924  // - hessparsity_row,hessparsity_col are nonzero entries in the full Hessian and are used in SCIPexprintHessian(); indices are 0..n-1 (n=dimension of f)
1925  // - hessparsity_pattern is the same information as hessparsity_row,hessparsity_col, but stored in different form
1926  // originally we were calling CppAD::local::sparsity_user2internal(exprintdata->hessparsity_pattern, exprintdata->hessparsity, n, n, false, ""),
1927  // which was again looping over hessparsity, so this is included into one loop here
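  // example: if f depends on the variables with indices {4,7} (n=2) and the Hessian is dense, the loop below gives
  //   hesrowidxs = {4,7,7}, hescolidxs = {4,4,7}                 (lower triangle, in variable indices)
  //   hessparsity_row = {0,1,1,0}, hessparsity_col = {0,0,1,1}   (lower triangle first, above-diagonal entry (0,1) appended at the end)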
1928  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hesrowidxs, exprintdata->hesnnz) );
1929  SCIP_CALL( SCIPallocBlockMemoryArray(scip, &exprintdata->hescolidxs, exprintdata->hesnnz) );
1930 
1931  exprintdata->hessparsity_row.resize(hesnnz_full);
1932  exprintdata->hessparsity_col.resize(hesnnz_full);
1933 
1934  for( size_t i = 0, j = 0, k = 0; i < nn; ++i )
1935  if( hessparsity[i] )
1936  {
1937  size_t row = i / n;
1938  size_t col = i % n;
1939 
1940  if( (size_t)exprintdata->hesnnz <= nn/4 )
1941  {
1942  // we need hessparsity_pattern only if SCIPexprintHessian is doing a sparse hessian eval; nn/4 is the threshold for that
1943  exprintdata->hessparsity_pattern.add_element(row, col);
1944  }
1945 
1946  if( col > row )
1947  {
1948  // add above-diagonal elements into end part of hessparsity_row/col, so we have a 1:1 correspondence
1949  // between hessparsity_row/col and hesrowidxs/hescolidxs
1950  assert(exprintdata->hesnnz + k < hesnnz_full);
1951  exprintdata->hessparsity_row[exprintdata->hesnnz + k] = row;
1952  exprintdata->hessparsity_col[exprintdata->hesnnz + k] = col;
1953  ++k;
1954  continue;
1955  }
1956 
1957  exprintdata->hessparsity_row[j] = row;
1958  exprintdata->hessparsity_col[j] = col;
1959 
1960  assert(j < (size_t)exprintdata->hesnnz);
1961  exprintdata->hesrowidxs[j] = exprintdata->varidxs[row];
1962  exprintdata->hescolidxs[j] = exprintdata->varidxs[col];
1963  ++j;
1964  }
1965 
1966 #ifdef SCIP_DEBUG
1967  SCIPinfoMessage(scip, NULL, "HessianSparsity for ");
1968  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
1969  SCIPinfoMessage(scip, NULL, ":");
1970  for( int i = 0; i < exprintdata->hesnnz; ++i )
1971  {
1972  SCIPinfoMessage(scip, NULL, " (%d,%d)", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i]);
1973  }
1974  SCIPinfoMessage(scip, NULL, "\n");
1975 #endif
1976  }
1977 
1978  *rowidxs = exprintdata->hesrowidxs;
1979  *colidxs = exprintdata->hescolidxs;
1980  *nnz = exprintdata->hesnnz;
1981 
1982  return SCIP_OKAY;
1983 }
1984 
1985 /** computes value and Hessian of an expression
1986  *
1987  * Returned arrays `rowidxs` and `colidxs` and number of elements `nnz` are the same as given by SCIPexprintHessianSparsity().
1988  * Returned array `hessianvals` will contain the corresponding Hessian elements.
1989  */
1990 SCIP_RETCODE SCIPexprintHessian(
1991  SCIP* scip, /**< SCIP data structure */
1992  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1993  SCIP_EXPR* expr, /**< expression */
1994  SCIP_EXPRINTDATA* exprintdata, /**< interpreter-specific data for expression */
1995  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
1996  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
1997  SCIP_Real* val, /**< buffer to store function value */
1998  int** rowidxs, /**< buffer to return array with row indices of Hessian elements */
1999  int** colidxs, /**< buffer to return array with column indices of Hessian elements */
2000  SCIP_Real** hessianvals, /**< buffer to return array with Hessian elements */
2001  int* nnz /**< buffer to return length of arrays */
2002  )
2003 {
2004  assert(expr != NULL);
2005  assert(exprintdata != NULL);
2006 
2007  if( exprintdata->hesrowidxs == NULL )
2008  {
2009  /* setup sparsity if not done yet */
2010  int dummy1;
2011  int* dummy2;
2012 
2013  assert(exprintdata->hescolidxs == NULL);
2014  assert(exprintdata->hesvalues.size() == 0);
2015  assert(exprintdata->hesnnz == 0);
2016 
2017  SCIP_CALL( SCIPexprintHessianSparsity(scip, exprint, expr, exprintdata, varvals, &dummy2, &dummy2, &dummy1) );
2018 
2019  new_varvals = FALSE;
2020  }
2021 
2022  if( new_varvals )
2023  {
2024  SCIP_CALL( SCIPexprintEval(scip, exprint, expr, exprintdata, varvals, val) );
2025  }
2026  else
2027  *val = exprintdata->val;
2028 
2029  size_t n = exprintdata->varidxs.size();
2030 
2031  // eval hessian; if constant, then only if not evaluated yet (hesvalues has size 0, but hesnnz > 0)
2032  if( n > 0 && (!exprintdata->hesconstant || exprintdata->hesvalues.size() < (size_t)exprintdata->hesnnz) )
2033  {
2034  size_t nn = n*n;
2035 
2036  if( exprintdata->hesvalues.size() == 0 )
2037  {
2038  // using here hessparsity_row.size() instead of hesnnz as we want to pass hesvalues to CppAD and it prefers to calculate a full Hessian
2039  exprintdata->hesvalues.resize(exprintdata->hessparsity_row.size());
2040  }
2041 
2042  // these use reverse mode
2043  if( (size_t)exprintdata->hesnnz > nn/4 )
2044  {
2045  // use a dense Hessian if there are many nonzero elements (more than nn/4, i.e., roughly more than half of the lower-triangular entries),
2046  // because it seems faster than the sparse Hessian if there isn't actually much sparsity
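  // e.g., for n = 10 (nn = 100), the dense evaluation below is used as soon as more than 25 of the 55 lower-triangular entries are nonzero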
2047  vector<double> hess = exprintdata->f.Hessian(exprintdata->x, 0);
2048  for( int i = 0; i < exprintdata->hesnnz; ++i )
2049  exprintdata->hesvalues[i] = hess[exprintdata->hessparsity_row[i] * n + exprintdata->hessparsity_col[i]];
2050  }
2051  else
2052  {
2053  // originally, this was hess = exprintdata->f.SparseHessian(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity),
2054  // where hess was a dense nxn matrix as in the case above
2055  // to reuse the coloring of the sparsity pattern and use also a sparse matrix for the Hessian values, we now call SparseHessianCompute directly
2056  exprintdata->f.SparseHessianCompute(exprintdata->x, vector<double>(1, 1.0), exprintdata->hessparsity_pattern, exprintdata->hessparsity_row, exprintdata->hessparsity_col, exprintdata->hesvalues, exprintdata->heswork);
2057 
2058 #ifdef NDEBUG
2059  // after the coloring has been computed in the first call, hessparsity_row, hessparsity_col, and hessparsity_pattern are no longer used by SparseHessianCompute (except in some asserts), so free this memory in optimized builds
2060  exprintdata->hessparsity_row.clear();
2061  exprintdata->hessparsity_col.clear();
2062  exprintdata->hessparsity_pattern.resize(0,0);
2063 #endif
2064  }
2065  }
2066 
2067 #ifdef SCIP_DEBUG
2068  SCIPinfoMessage(scip, NULL, "Hessian for ");
2069  SCIP_CALL( SCIPprintExpr(scip, expr, NULL) );
2070  SCIPinfoMessage(scip, NULL, "\nat x = ");
2071  for( size_t i = 0; i < n; ++i )
2072  {
2073  SCIPinfoMessage(scip, NULL, "\t %g", exprintdata->x[i]);
2074  }
2075  SCIPinfoMessage(scip, NULL, "\nis ");
2076  for( int i = 0; i < exprintdata->hesnnz; ++i )
2077  {
2078  SCIPinfoMessage(scip, NULL, " (%d,%d)=%g", exprintdata->hesrowidxs[i], exprintdata->hescolidxs[i], exprintdata->hesvalues[i]);
2079  }
2080  SCIPinfoMessage(scip, NULL, "\n");
2081 #endif
2082 
2083  *rowidxs = exprintdata->hesrowidxs;
2084  *colidxs = exprintdata->hescolidxs;
2085  *hessianvals = exprintdata->hesvalues.data();
2086  *nnz = exprintdata->hesnnz;
2087 
2088  return SCIP_OKAY;
2089 }
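/* a minimal Hessian-evaluation sketch (myexpr, myvals, exprintdata as in the compile/eval sketch above);
 * the sparsity pattern is set up internally on the first call:
 *
 *    int* rowidxs;
 *    int* colidxs;
 *    SCIP_Real* hessianvals;
 *    int nnz;
 *    SCIP_Real val;
 *    SCIP_CALL( SCIPexprintHessian(scip, exprint, myexpr, exprintdata, myvals, TRUE, &val, &rowidxs, &colidxs, &hessianvals, &nnz) );
 *    // rowidxs[k], colidxs[k], hessianvals[k] for k = 0..nnz-1 give the lower-triangular Hessian entries
 */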