exprinterpret_cppad.cpp
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2 /* */
3 /* This file is part of the program and library */
4 /* SCIP --- Solving Constraint Integer Programs */
5 /* */
6 /* Copyright (C) 2002-2014 Konrad-Zuse-Zentrum */
7 /* fuer Informationstechnik Berlin */
8 /* */
9 /* SCIP is distributed under the terms of the ZIB Academic License. */
10 /* */
11 /* You should have received a copy of the ZIB Academic License */
12 /* along with SCIP; see the file COPYING. If not email to scip@zib.de. */
13 /* */
14 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
15 
16 /**@file exprinterpret_cppad.cpp
17  * @brief methods to interpret (evaluate) an expression tree "fast" using CppAD
18  * @ingroup EXPRINTS
19  * @author Stefan Vigerske
20  */
21 
22 /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
23 
24 #include "scip/def.h"
25 #include "blockmemshell/memory.h"
26 #include "nlpi/pub_expr.h"
27 #include "nlpi/exprinterpret.h"
28 
29 #include <cmath>
30 #include <vector>
31 using std::vector;
32 
33 /* defining NO_CPPAD_USER_ATOMIC disables the use of our own implementation of derivatives of power operators
34  * via CppAD's user-atomic function feature
35  * our customized implementation should give better results (tighter intervals) for the interval data type
36  */
37 /* #define NO_CPPAD_USER_ATOMIC */
38 
39 /** sign of a value (-1 or +1)
40  *
41  * 0.0 has sign +1
42  */
43 #define SIGN(x) ((x) >= 0.0 ? 1.0 : -1.0)
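/* Illustrative examples (not part of the original source):
 *   SIGN( 5.0) == 1.0,  SIGN(0.0) == 1.0,  SIGN(-3.2) == -1.0
 */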
44 
45 /* in order to use intervals as operands in CppAD,
46  * we need to include the intervalarithext.h very early and require the interval operations to be in the CppAD namespace */
47 #define SCIPInterval_NAMESPACE CppAD
48 #include "nlpi/intervalarithext.h"
49 
50 SCIP_Real CppAD::SCIPInterval::infinity = SCIP_DEFAULT_INFINITY;
51 using CppAD::SCIPInterval;
52 
53 /** CppAD needs to know a fixed upper bound on the number of threads at compile time.
54  * It is wise to set it to a power of 2, so that if the tape id overflows, it is likely to start at 0 again, which avoids difficult to debug errors.
55  */
56 #ifndef CPPAD_MAX_NUM_THREADS
57 #ifndef NPARASCIP
58 #define CPPAD_MAX_NUM_THREADS 64
59 #else
60 #define CPPAD_MAX_NUM_THREADS 1
61 #endif
62 #endif
63 
64 #include <cppad/cppad.hpp>
65 #include <cppad/error_handler.hpp>
66 
67 /* CppAD is not thread-safe by itself, but uses some static data structures.
68  * To run it in a multithreading environment, a special CppAD memory allocator that is aware of the multiple threads has to be used.
69  * This allocator requires to know the number of threads and a thread number for each thread.
70  * To implement this, we follow the team_pthread example of CppAD, which uses pthread's thread-specific data management.
71  */
72 #ifndef NPARASCIP
73 #include <pthread.h>
74 
75 /* workaround error message regarding missing implementation of tanh during initialization of static variables (see cppad/local/erf.hpp) */
76 namespace CppAD
77 {
78 template <> SCIPInterval erf_template(
79  const SCIPInterval &x
80 )
81 {
82  CPPAD_ASSERT_FIRST_CALL_NOT_PARALLEL;
83  return SCIPInterval();
84 }
85 template <> AD<SCIPInterval> erf_template(
86  const AD<SCIPInterval> &x
87 )
88 {
89  CPPAD_ASSERT_FIRST_CALL_NOT_PARALLEL;
90  return AD<SCIPInterval>();
91 }
92 }
93 
94 /** mutex for locking in pthread case */
95 static pthread_mutex_t cppadmutex = PTHREAD_MUTEX_INITIALIZER;
96 
97 /** key for accessing thread specific information */
98 static pthread_key_t thread_specific_key;
99 
100 /** currently registered number of threads */
101 static size_t ncurthreads = 0;
102 
103 /** CppAD callback function that indicates whether we are running in parallel mode */
104 static
105 bool in_parallel(void)
106 {
107  return ncurthreads > 1;
108 }
109 
110 /** CppAD callback function that returns the number of the current thread
111  * assigns a new number to the thread if new
112  */
113 static
114 size_t thread_num(void)
115 {
116  size_t threadnum;
117  void* specific;
118 
119  specific = pthread_getspecific(thread_specific_key);
120 
121  /* if no data for this thread yet, then assign a new thread number to the current thread
122  * we store the thread number incremented by one, to distinguish the absence of data (=0) from existing data
123  */
124  if( specific == NULL )
125  {
126  pthread_mutex_lock(&cppadmutex);
127 
128  SCIPdebugMessage("Assigning thread number %lu to thread %p.\n", (long unsigned int)ncurthreads, (void*)pthread_self());
129 
130  pthread_setspecific(thread_specific_key, (void*)(ncurthreads + 1));
131 
132  threadnum = ncurthreads;
133 
134  ++ncurthreads;
135 
136  pthread_mutex_unlock(&cppadmutex);
137 
138  assert(pthread_getspecific(thread_specific_key) != NULL);
139  assert((size_t)pthread_getspecific(thread_specific_key) == threadnum + 1);
140  }
141  else
142  {
143  threadnum = (size_t)(specific) - 1;
144  }
145 
146  assert(threadnum < ncurthreads);
147 
148  return threadnum;
149 }
150 
151 /** sets up CppAD's data structures for running in multithreading mode
152  * it must be called once before multithreading is started
153  */
154 static
155 char init_parallel(void)
156 {
157  pthread_key_create(&thread_specific_key, NULL);
158 
159  CppAD::thread_alloc::parallel_setup(CPPAD_MAX_NUM_THREADS, in_parallel, thread_num);
160  CppAD::parallel_ad<double>();
161  CppAD::parallel_ad<SCIPInterval>();
162 
163  return 0;
164 }
165 
166 /** a dummy variable that is initialized to the result of init_parallel()
167  * the purpose is to make sure that init_parallel() is called before any multithreading is started
168  */
169 static char init_parallel_return = init_parallel();
170 
171 #endif // NPARASCIP
172 
173 /** definition of CondExpOp for SCIPInterval (required by CppAD) */
174 inline
175 SCIPInterval CondExpOp(
176  enum CppAD::CompareOp cop,
177  const SCIPInterval& left,
178  const SCIPInterval& right,
179  const SCIPInterval& trueCase,
180  const SCIPInterval& falseCase)
181 {
182  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
183  "SCIPInterval CondExpOp(...)",
184  "Error: cannot use CondExp with an interval type"
185  );
186 
187  return SCIPInterval();
188 }
189 
190 /** another function that returns whether two intervals are the same (required by CppAD) */
191 inline
192 bool EqualOpSeq(
193  const SCIPInterval& x, /**< first operand */
194  const SCIPInterval& y /**< second operand */
195  )
196 {
197  return x == y;
198 }
199 
200 /** another function required by CppAD */
201 inline
202 bool IdenticalPar(
203  const SCIPInterval& x /**< operand */
204  )
205 {
206  return true;
207 }
208 
209 /** returns whether the interval equals [0,0] */
210 inline
211 bool IdenticalZero(
212  const SCIPInterval& x /**< operand */
213  )
214 {
215  return (x == 0.0);
216 }
217 
218 /** returns whether the interval equals [1,1] */
219 inline
220 bool IdenticalOne(
221  const SCIPInterval& x /**< operand */
222  )
223 {
224  return (x == 1.0);
225 }
226 
227 /** yet another function that checks whether two intervals are equal */
228 inline
229 bool IdenticalEqualPar(
230  const SCIPInterval& x, /**< first operand */
231  const SCIPInterval& y /**< second operand */
232  )
233 {
234  return (x == y);
235 }
236 
237 /** greater than zero not defined for intervals */
238 inline
239 bool GreaterThanZero(
240  const SCIPInterval& x /**< operand */
241  )
242 {
243  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
244  "GreaterThanZero(x)",
245  "Error: cannot use GreaterThanZero with interval"
246  );
247 
248  return false;
249 }
250 
251 /** greater than or equal zero not defined for intervals */
252 inline
253 bool GreaterThanOrZero(
254  const SCIPInterval& x /**< operand */
255  )
256 {
257  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__ ,
258  "GreaterThanOrZero(x)",
259  "Error: cannot use GreaterThanOrZero with interval"
260  );
261 
262  return false;
263 }
264 
265 /** less than zero not defined for intervals */
266 inline
267 bool LessThanZero(
268  const SCIPInterval& x /**< operand */
269  )
270 {
271  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
272  "LessThanZero(x)",
273  "Error: cannot use LessThanZero with interval"
274  );
275 
276  return false;
277 }
278 
279 /** less than or equal zero not defined for intervals */
280 inline
281 bool LessThanOrZero(
282  const SCIPInterval& x /**< operand */
283  )
284 {
285  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
286  "LessThanOrZero(x)",
287  "Error: cannot use LessThanOrZero with interval"
288  );
289 
290  return false;
291 }
292 
293 /** conversion to integers not defined for intervals */
294 inline
295 int Integer(
296  const SCIPInterval& x /**< operand */
297  )
298 {
299  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
300  "Integer(x)",
301  "Error: cannot use Integer with interval"
302  );
303 
304  return 0;
305 }
306 
307 /** printing of an interval (required by CppAD) */
308 inline
309 std::ostream& operator<<(std::ostream& out, const SCIP_INTERVAL& x)
310 {
311  out << '[' << x.inf << ',' << x.sup << ']';
312  return out;
313 }
314 
315 using CppAD::AD;
316 
317 /** expression interpreter */
318 struct SCIP_ExprInt
319 {
320  BMS_BLKMEM* blkmem; /**< block memory data structure */
321 };
322 
323 /** expression specific interpreter data */
324 struct SCIP_ExprIntData
325 {
326 public:
327  /* constructor */
328  SCIP_ExprIntData()
329  : need_retape(true), int_need_retape(true), need_retape_always(false), blkmem(NULL), root(NULL)
330  { }
331 
332  /* destructor */
333  ~SCIP_ExprIntData()
334  { }
335 
336  vector< AD<double> > X; /**< vector of dependent variables */
337  vector< AD<double> > Y; /**< result vector */
338  CppAD::ADFun<double> f; /**< the function to evaluate as CppAD object */
339 
340  vector<double> x; /**< current values of dependent variables */
341  double val; /**< current function value */
342  bool need_retape; /**< will retaping be required for the next point evaluation? */
343 
344  vector< AD<SCIPInterval> > int_X; /**< interval vector of dependent variables */
345  vector< AD<SCIPInterval> > int_Y; /**< interval result vector */
346  CppAD::ADFun<SCIPInterval> int_f; /**< the function to evaluate on intervals as CppAD object */
347 
348  vector<SCIPInterval> int_x; /**< current interval values of dependent variables */
349  SCIPInterval int_val; /**< current interval function value */
350  bool int_need_retape; /**< will retaping be required for the next interval evaluation? */
351 
352  bool need_retape_always; /**< will retaping be always required? */
353 
354  BMS_BLKMEM* blkmem; /**< block memory used to allocate expression tree */
355  SCIP_EXPR* root; /**< copy of expression tree; @todo we should not need to make a copy */
356 };
357 
358 
359 #ifndef NO_CPPAD_USER_ATOMIC
360 
361 /** computes sparsity of jacobian for a univariate function during a forward sweep
362  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
363  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
364  */
365 static
366 bool univariate_for_sparse_jac(
367  size_t q, /**< number of columns in R */
368  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
369  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
370 )
371 {
372  assert(r.size() == q);
373  assert(s.size() == q);
374 
375  s = r;
376 
377  return true;
378 }
379 
380 /** computes sparsity of jacobian during a reverse sweep
381  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
382  * Since f'(x) is dense, the sparsity of R will be the sparsity of S.
383  */
384 static
385 bool univariate_rev_sparse_jac(
386  size_t q, /**< number of rows in R */
387  CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
388  const CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
389 )
390 {
391  assert(r.size() == q);
392  assert(s.size() == q);
393 
394  r = s;
395 
396  return true;
397 }
398 
399 /** computes sparsity of hessian during a reverse sweep
400  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
401  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
402  */
403 static
404 bool univariate_rev_sparse_hes(
405  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
406  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
407  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
408  size_t q, /**< number of columns in R, U, and V */
409  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
410  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
411  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
412 )
413 {
414  assert(r.size() == q);
415  assert(s.size() == 1);
416  assert(t.size() == 1);
417  assert(u.size() == q);
418  assert(v.size() == q);
419 
420  // T(x) = g'(f(x)) * f'(x) = S * f'(x), and f' is not identically 0
421  t[0] = s[0];
422 
423  // V(x) = g''(f(x)) f'(x) f'(x) R + g'(f(x)) f''(x) R
424  // = f'(x) U + S f''(x) R, with f'(x) and f''(x) not identically 0
425  v = u;
426  if( s[0] )
427  for( size_t j = 0; j < q; ++j )
428  v[j] |= r[j];
429 
430  return true;
431 }
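/* Worked example (illustrative, values chosen only for this sketch): with q = 3,
 * r = (1,0,0), u = (0,1,0), and s = (1), the routine above sets t = (1) and
 * v = u | r = (1,1,0), i.e. the hessian sparsity picks up the second-order
 * contribution from u as well as the first-order contribution from r.
 */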
432 
433 
434 /** Automatic differentiation of x -> x^p, p>=2 integer, as CppAD user-atomic function.
435  *
436  * This class implements forward and reverse operations for the function x -> x^p for use within CppAD.
437  * While CppAD would implement integer powers as a recursion of multiplications, we still use pow functions as they allow us to avoid overestimation in interval arithmetic.
438  *
439  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
440  */
441 template<class Type>
442 class atomic_posintpower : public CppAD::atomic_base<Type>
443 {
444 public:
445  atomic_posintpower(void)
446  : CppAD::atomic_base<Type>("posintpower"),
447  exponent(0)
448  {
449  /* indicate that we want to use bool-based sparsity pattern */
450  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
451  }
452 
453 private:
454  /** exponent value for next call to forward or reverse */
455  int exponent;
456 
457  /** stores exponent value corresponding to next call to forward or reverse
458  *
459  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
460  */
461  virtual void set_id(size_t id)
462  {
463  exponent = (int) id;
464  }
465 
466  /** forward sweep of positive integer power
467  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
468  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
469  * in the taylor expansion of f(x) = x^p.
470  * Thus, y = x^p
471  * = tx[0]^p,
472  * y' = p * x^(p-1) * x'
473  * = p * tx[0]^(p-1) * tx[1],
474  * y'' = p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x''
475  * = p * (p-1) * tx[0]^(p-2) * tx[1]^2 + p * tx[0]^(p-1) * tx[2]
476  */
477  bool forward(
478  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
479  size_t p, /**< highest order Taylor coefficient that we are evaluating */
480  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
481  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
482  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
483  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
484  )
485  {
486  assert(exponent > 1);
487  assert(tx.size() >= p+1);
488  assert(ty.size() >= p+1);
489  assert(q <= p);
490 
491  if( vx.size() > 0 )
492  {
493  assert(vx.size() == 1);
494  assert(vy.size() == 1);
495  assert(p == 0);
496 
497  vy[0] = vx[0];
498  }
499 
500  if( q == 0 /* q <= 0 && 0 <= p */ )
501  {
502  ty[0] = CppAD::pow(tx[0], exponent);
503  }
504 
505  if( q <= 1 && 1 <= p )
506  {
507  ty[1] = CppAD::pow(tx[0], exponent-1) * tx[1];
508  ty[1] *= double(exponent);
509  }
510 
511  if( q <= 2 && 2 <= p )
512  {
513  if( exponent > 2 )
514  {
515  // ty[2] = exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1] * tx[1] + exponent * pow(tx[0], exponent-1) * tx[2];
516  ty[2] = CppAD::pow(tx[0], exponent-2) * tx[1] * tx[1];
517  ty[2] *= exponent-1;
518  ty[2] += CppAD::pow(tx[0], exponent-1) * tx[2];
519  ty[2] *= exponent;
520  }
521  else
522  {
523  assert(exponent == 2);
524  // ty[2] = exponent * tx[1] * tx[1] + exponent * tx[0] * tx[2];
525  ty[2] = tx[1] * tx[1] + tx[0] * tx[2];
526  ty[2] *= exponent;
527  }
528  }
529 
530  /* higher order derivatives not implemented */
531  if( p > 2 )
532  return false;
533 
534  return true;
535  }
536 
537  /** reverse sweep of positive integer power
538  * Assume y(x) is a function of the taylor coefficients of f(x) = x^p for x, i.e.,
539  * y(x) = [ x^p, p * x^(p-1) * x', p * (p-1) * x^(p-2) * x'^2 + p * x^(p-1) * x'', ... ].
540  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
541  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
542  * That is, we have to compute
543  *\f$
544  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
545  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
546  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
547  * \f$
548  *
549  * For k = 0, this means
550  *\f$
551  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
552  * = py[0] * (\partial x^p / \partial x)
553  * = py[0] * p * tx[0]^(p-1)
554  *\f$
555  *
556  * For k = 1, this means
557  * \f$
558  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
559  * = py[0] * (\partial x^p / \partial x) + py[1] * (\partial (p * x^(p-1) * x') / \partial x)
560  * = py[0] * p * tx[0]^(p-1) + py[1] * p * (p-1) * tx[0]^(p-2) * tx[1]
561  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
562  * = py[0] * (\partial x^p / \partial x') + py[1] * (\partial (p * x^(p-1) x') / \partial x')
563  * = py[0] * 0 + py[1] * p * tx[0]^(p-1)
564  * \f$
565  */
566  bool reverse(
567  size_t p, /**< highest order Taylor coefficient that we are evaluating */
568  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
569  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
570  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
571  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
572  )
573  {
574  assert(exponent > 1);
575  assert(px.size() >= p+1);
576  assert(py.size() >= p+1);
577  assert(tx.size() >= p+1);
578 
579  switch( p )
580  {
581  case 0:
582  // px[0] = py[0] * exponent * pow(tx[0], exponent-1);
583  px[0] = py[0] * CppAD::pow(tx[0], exponent-1);
584  px[0] *= exponent;
585  break;
586 
587  case 1:
588  // px[0] = py[0] * exponent * pow(tx[0], exponent-1) + py[1] * exponent * (exponent-1) * pow(tx[0], exponent-2) * tx[1];
589  px[0] = py[1] * tx[1] * CppAD::pow(tx[0], exponent-2);
590  px[0] *= exponent-1;
591  px[0] += py[0] * CppAD::pow(tx[0], exponent-1);
592  px[0] *= exponent;
593  // px[1] = py[1] * exponent * pow(tx[0], exponent-1);
594  px[1] = py[1] * CppAD::pow(tx[0], exponent-1);
595  px[1] *= exponent;
596  break;
597 
598  default:
599  return false;
600  }
601 
602  return true;
603  }
604 
605  using CppAD::atomic_base<Type>::for_sparse_jac;
606 
607  /** computes sparsity of jacobian during a forward sweep
608  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
609  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
610  */
611  bool for_sparse_jac(
612  size_t q, /**< number of columns in R */
613  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
614  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
615  )
616  {
617  return univariate_for_sparse_jac(q, r, s);
618  }
619 
620  using CppAD::atomic_base<Type>::rev_sparse_jac;
621 
622  /** computes sparsity of jacobian during a reverse sweep
623  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
624  * Since f'(x) is dense, the sparsity of R will be the sparsity of S.
625  */
626  bool rev_sparse_jac(
627  size_t q, /**< number of rows in R */
628  CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
629  const CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
630  )
631  {
632  return univariate_rev_sparse_jac(q, r, s);
633  }
634 
635  using CppAD::atomic_base<Type>::rev_sparse_hes;
636 
637  /** computes sparsity of hessian during a reverse sweep
638  * Assume V(x) = (g(f(x)))'' R with f(x) = x^p for a function g:R->R and a matrix R.
639  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
640  */
641  bool rev_sparse_hes(
642  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
643  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
644  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
645  size_t q, /**< number of columns in R, U, and V */
646  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
647  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
648  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
649  )
650  {
651  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
652  }
653 };
654 
655 /** power function with natural exponents */
656 template<class Type>
657 static
658 void posintpower(
659  vector<Type>& in, /**< vector which first argument is base */
660  vector<Type>& out, /**< vector where to store result in first argument */
661  size_t exponent /**< exponent */
662 )
663 {
664  static atomic_posintpower<Type> pip;
665  pip(in, out, exponent);
666 }
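/* Usage sketch (illustrative, not part of the original source; names X, Y, f and the
 * exponent 3 are chosen only for this example): how the user-atomic posintpower
 * operation is recorded on a CppAD tape and differentiated.
 *
 *   vector< AD<double> > X(1), Y(1);
 *   X[0] = 2.0;
 *   CppAD::Independent(X);
 *   vector< AD<double> > in(1, X[0]), out(1);
 *   posintpower(in, out, 3);             // records y = x^3 via the atomic operation
 *   Y[0] = out[0];
 *   CppAD::ADFun<double> f(X, Y);
 *   vector<double> x(1, 2.0);
 *   vector<double> dy = f.Jacobian(x);   // dy[0] == 3 * 2^2 == 12
 */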
667 
668 #else
669 
670 /** power function with natural exponents */
671 template<class Type>
672 void posintpower(
673  vector<Type>& in, /**< vector which first argument is base */
674  vector<Type>& out, /**< vector where to store result in first argument */
675  size_t exponent /**< exponent */
676 )
677 {
678  out[0] = pow(in[0], (int)exponent);
679 }
680 
681 #endif
682 
683 
684 #ifndef NO_CPPAD_USER_ATOMIC
685 
686 /** Automatic differentiation of x -> sign(x)abs(x)^p, p>=1, as CppAD user-atomic function.
687  *
688  * This class implements forward and reverse operations for the function x -> sign(x)abs(x)^p for use within CppAD.
689  * While we otherwise would have to use discontinuous sign and abs functions, our own implementation allows to provide
690  * a continuously differentiable function.
691  *
692  * @todo treat the exponent as a (variable) argument to the function, with the assumption that we never differentiate w.r.t. it (this should make the approach threadsafe again)
693  */
694 template<class Type>
695 class atomic_signpower : public CppAD::atomic_base<Type>
696 {
697 public:
698  atomic_signpower(void)
699  : CppAD::atomic_base<Type>("signpower"),
700  exponent(0.0)
701  {
702  /* indicate that we want to use bool-based sparsity pattern */
703  this->option(CppAD::atomic_base<Type>::bool_sparsity_enum);
704  }
705 
706 private:
707  /** exponent for use in next call to forward or reverse */
708  SCIP_Real exponent;
709 
710  /** stores exponent corresponding to next call to forward or reverse
711  *
712  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
713  */
714  virtual void set_id(size_t id)
715  {
716  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
717  }
718 
719  /** forward sweep of signpower
720  * Given the taylor coefficients for x, we have to compute the taylor coefficients for f(x),
721  * that is, given tx = (x, x', x'', ...), we compute the coefficients ty = (y, y', y'', ...)
722  * in the taylor expansion of f(x) = sign(x)abs(x)^p.
723  * Thus, y = sign(x)abs(x)^p
724  * = sign(tx[0])abs(tx[0])^p,
725  * y' = p * abs(x)^(p-1) * x'
726  * = p * abs(tx[0])^(p-1) * tx[1],
727  * y'' = p * (p-1) * sign(x) * abs(x)^(p-2) * x'^2 + p * abs(x)^(p-1) * x''
728  * = p * (p-1) * sign(tx[0]) * abs(tx[0])^(p-2) * tx[1]^2 + p * abs(tx[0])^(p-1) * tx[2]
729  */
730  bool forward(
731  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
732  size_t p, /**< highest order Taylor coefficient that we are evaluating */
733  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
734  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
735  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
736  CppAD::vector<Type>& ty /**< vector to store taylor coefficients of y */
737  )
738  {
739  assert(exponent > 0.0);
740  assert(tx.size() >= p+1);
741  assert(ty.size() >= p+1);
742  assert(q <= p);
743 
744  if( vx.size() > 0 )
745  {
746  assert(vx.size() == 1);
747  assert(vy.size() == 1);
748  assert(p == 0);
749 
750  vy[0] = vx[0];
751  }
752 
753  if( q == 0 /* q <= 0 && 0 <= p */ )
754  {
755  ty[0] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent);
756  }
757 
758  if( q <= 1 && 1 <= p )
759  {
760  ty[1] = pow(REALABS(tx[0]), exponent - 1.0) * tx[1];
761  ty[1] *= exponent;
762  }
763 
764  if( q <= 2 && 2 <= p )
765  {
766  if( exponent != 2.0 )
767  {
768  ty[2] = SIGN(tx[0]) * pow(REALABS(tx[0]), exponent - 2.0) * tx[1] * tx[1];
769  ty[2] *= exponent - 1.0;
770  ty[2] += pow(REALABS(tx[0]), exponent - 1.0) * tx[2];
771  ty[2] *= exponent;
772  }
773  else
774  {
775  // y'' = 2 (sign(x) * x'^2 + |x|*x'') = 2 (sign(tx[0]) * tx[1]^2 + abs(tx[0]) * tx[2])
776  ty[2] = SIGN(tx[0]) * tx[1] * tx[1];
777  ty[2] += REALABS(tx[0]) * tx[2];
778  ty[2] *= exponent;
779  }
780  }
781 
782  /* higher order derivatives not implemented */
783  if( p > 2 )
784  return false;
785 
786  return true;
787  }
788 
789  /** reverse sweep of signpower
790  * Assume y(x) is a function of the taylor coefficients of f(x) = sign(x)|x|^p for x, i.e.,
791  * y(x) = [ f(x), f'(x), f''(x), ... ].
792  * Then in the reverse sweep we have to compute the elements of \f$\partial h / \partial x^[l], l = 0, ..., k,\f$
793  * where x^[l] is the l'th taylor coefficient (x, x', x'', ...) and h(x) = g(y(x)) for some function g:R^k -> R.
794  * That is, we have to compute
795  *\f$
796  * px[l] = \partial h / \partial x^[l] = (\partial g / \partial y) * (\partial y / \partial x^[l])
797  * = \sum_{i=0}^k (\partial g / \partial y_i) * (\partial y_i / \partial x^[l])
798  * = \sum_{i=0}^k py[i] * (\partial y_i / \partial x^[l])
799  *\f$
800  *
801  * For k = 0, this means
802  *\f$
803  * px[0] = py[0] * (\partial y_0 / \partial x^[0])
804  * = py[0] * (\partial f(x) / \partial x)
805  * = py[0] * p * abs(tx[0])^(p-1)
806  * \f$
807  *
808  * For k = 1, this means
809  *\f$
810  * px[0] = py[0] * (\partial y_0 / \partial x^[0]) + py[1] * (\partial y_1 / \partial x^[0])
811  * = py[0] * (\partial f(x) / \partial x) + py[1] * (\partial f'(x) / \partial x)
812  * = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
813  * px[1] = py[0] * (\partial y_0 / \partial x^[1]) + py[1] * (\partial y_1 / \partial x^[1])
814  * = py[0] * (\partial f(x) / \partial x') + py[1] * (\partial f'(x) / \partial x')
815  * = py[0] * 0 + py[1] * p * abs(tx[0])^(p-1)
816  * \f$
817  */
818  bool reverse(
819  size_t p, /**< highest order Taylor coefficient that we are evaluating */
820  const CppAD::vector<Type>& tx, /**< values for taylor coefficients of x */
821  const CppAD::vector<Type>& ty, /**< values for taylor coefficients of y */
822  CppAD::vector<Type>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
823  const CppAD::vector<Type>& py /**< values for partial derivatives of g(x) w.r.t. y */
824  )
825  {
826  assert(exponent > 1);
827  assert(px.size() >= p+1);
828  assert(py.size() >= p+1);
829  assert(tx.size() >= p+1);
830 
831  switch( p )
832  {
833  case 0:
834  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
835  px[0] = py[0] * pow(REALABS(tx[0]), exponent - 1.0);
836  px[0] *= exponent;
837  break;
838 
839  case 1:
840  if( exponent != 2.0 )
841  {
842  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
843  px[0] = py[1] * tx[1] * pow(REALABS(tx[0]), exponent - 2.0) * SIGN(tx[0]);
844  px[0] *= exponent - 1.0;
845  px[0] += py[0] * pow(REALABS(tx[0]), exponent - 1.0);
846  px[0] *= exponent;
847  // px[1] = py[1] * p * abs(tx[0])^(p-1)
848  px[1] = py[1] * pow(REALABS(tx[0]), exponent - 1.0);
849  px[1] *= exponent;
850  }
851  else
852  {
853  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
854  px[0] = py[1] * tx[1] * SIGN(tx[0]);
855  px[0] += py[0] * REALABS(tx[0]);
856  px[0] *= 2.0;
857  // px[1] = py[1] * 2.0 * abs(tx[0])
858  px[1] = py[1] * REALABS(tx[0]);
859  px[1] *= 2.0;
860  }
861  break;
862 
863  default:
864  return false;
865  }
866 
867  return true;
868  }
869 
870  using CppAD::atomic_base<Type>::for_sparse_jac;
871 
872  /** computes sparsity of jacobian during a forward sweep
873  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
874  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
875  */
876  bool for_sparse_jac(
877  size_t q, /**< number of columns in R */
878  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
879  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
880  )
881  {
882  return univariate_for_sparse_jac(q, r, s);
883  }
884 
885  using CppAD::atomic_base<Type>::rev_sparse_jac;
886 
887  /** computes sparsity of jacobian during a reverse sweep
888  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
889  * Since f'(x) is dense, the sparsity of R will be the sparsity of S.
890  */
891  bool rev_sparse_jac(
892  size_t q, /**< number of rows in R */
893  CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
894  const CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
895  )
896  {
897  return univariate_rev_sparse_jac(q, r, s);
898  }
899 
900  using CppAD::atomic_base<Type>::rev_sparse_hes;
901 
902  /** computes sparsity of hessian during a reverse sweep
903  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
904  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
905  */
906  bool rev_sparse_hes(
907  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
908  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
909  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
910  size_t q, /**< number of columns in S and R */
911  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
912  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
913  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
914  )
915  {
916  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
917  }
918 
919 };
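/* Worked example (illustrative): for exponent p = 1.5, f(x) = sign(x)|x|^1.5 gives
 * f'(x) = 1.5*|x|^0.5 and f''(x) = 0.75*sign(x)/|x|^0.5, so at x = -4:
 *   f(-4) = -8,  f'(-4) = 3,  f''(-4) = -0.375.
 * In particular f' is continuous at 0, which is what this atomic implementation
 * provides in contrast to composing the discontinuous sign and abs operations.
 */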
920 
921 /** Specialization of atomic_signpower template for intervals */
922 template<>
923 class atomic_signpower<SCIPInterval> : public CppAD::atomic_base<SCIPInterval>
924 {
925 public:
926  atomic_signpower(void)
927  : CppAD::atomic_base<SCIPInterval>("signpowerint"),
928  exponent(0.0)
929  {
930  /* indicate that we want to use bool-based sparsity pattern */
931  this->option(CppAD::atomic_base<SCIPInterval>::bool_sparsity_enum);
932  }
933 
934 private:
935  /** exponent for use in next call to forward or reverse */
936  SCIP_Real exponent;
937 
938  /** stores exponent corresponding to next call to forward or reverse
939  *
940  * how is this supposed to be threadsafe? (we use only one global instantiation of this class)
941  */
942  virtual void set_id(size_t id)
943  {
944  exponent = SCIPexprGetSignPowerExponent((SCIP_EXPR*)(void*)id);
945  }
946 
947  /** specialization of atomic_signpower::forward template for SCIPInterval
948  *
949  * @todo try to compute tighter resultants
950  */
951  bool forward(
952  size_t q, /**< lowest order Taylor coefficient that we are evaluating */
953  size_t p, /**< highest order Taylor coefficient that we are evaluating */
954  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
955  CppAD::vector<bool>& vy, /**< vector to store which function values depend on variables, or empty vector */
956  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
957  CppAD::vector<SCIPInterval>& ty /**< vector to store taylor coefficients of y */
958  )
959  {
960  assert(exponent > 0.0);
961  assert(tx.size() >= p+1);
962  assert(ty.size() >= p+1);
963  assert(q <= p);
964 
965  if( vx.size() > 0 )
966  {
967  assert(vx.size() == 1);
968  assert(vy.size() == 1);
969  assert(p == 0);
970 
971  vy[0] = vx[0];
972  }
973 
974  if( q == 0 /* q <= 0 && 0 <= p */ )
975  {
976  ty[0] = CppAD::signpow(tx[0], exponent);
977  }
978 
979  if( q <= 1 && 1 <= p )
980  {
981  ty[1] = CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[1];
982  ty[1] *= exponent;
983  }
984 
985  if( q <= 2 && 2 <= p )
986  {
987  if( exponent != 2.0 )
988  {
989  ty[2] = CppAD::signpow(tx[0], exponent - 2.0) * CppAD::square(tx[1]);
990  ty[2] *= exponent - 1.0;
991  ty[2] += CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0) * tx[2];
992  ty[2] *= exponent;
993  }
994  else
995  {
996  // y'' = 2 (sign(x) * x'^2 + |x|*x'') = 2 (sign(tx[0]) * tx[1]^2 + abs(tx[0]) * tx[2])
997  ty[2] = CppAD::sign(tx[0]) * CppAD::square(tx[1]);
998  ty[2] += CppAD::abs(tx[0]) * tx[2];
999  ty[2] *= exponent;
1000  }
1001  }
1002 
1003  /* higher order derivatives not implemented */
1004  if( p > 2 )
1005  return false;
1006 
1007  return true;
1008  }
1009 
1010  /** specialization of atomic_signpower::reverse template for SCIPInterval
1011  *
1012  * @todo try to compute tighter resultants
1013  */
1014  bool reverse(
1015  size_t p, /**< highest order Taylor coefficient that we are evaluating */
1016  const CppAD::vector<SCIPInterval>& tx, /**< values for taylor coefficients of x */
1017  const CppAD::vector<SCIPInterval>& ty, /**< values for taylor coefficients of y */
1018  CppAD::vector<SCIPInterval>& px, /**< vector to store partial derivatives of h(x) = g(y(x)) w.r.t. x */
1019  const CppAD::vector<SCIPInterval>& py /**< values for partial derivatives of g(x) w.r.t. y */
1020  )
1021  {
1022  assert(exponent > 1);
1023  assert(px.size() >= p+1);
1024  assert(py.size() >= p+1);
1025  assert(tx.size() >= p+1);
1026 
1027  switch( p )
1028  {
1029  case 0:
1030  // px[0] = py[0] * p * pow(abs(tx[0]), p-1);
1031  px[0] = py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1032  px[0] *= exponent;
1033  break;
1034 
1035  case 1:
1036  if( exponent != 2.0 )
1037  {
1038  // px[0] = py[0] * p * abs(tx[0])^(p-1) + py[1] * p * (p-1) * abs(tx[0])^(p-2) * sign(tx[0]) * tx[1]
1039  px[0] = py[1] * tx[1] * CppAD::signpow(tx[0], exponent - 2.0);
1040  px[0] *= exponent - 1.0;
1041  px[0] += py[0] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1042  px[0] *= exponent;
1043  // px[1] = py[1] * p * abs(tx[0])^(p-1)
1044  px[1] = py[1] * CppAD::pow(CppAD::abs(tx[0]), exponent - 1.0);
1045  px[1] *= exponent;
1046  }
1047  else
1048  {
1049  // px[0] = py[0] * 2.0 * abs(tx[0]) + py[1] * 2.0 * sign(tx[0]) * tx[1]
1050  px[0] = py[1] * tx[1] * CppAD::sign(tx[0]);
1051  px[0] += py[0] * CppAD::abs(tx[0]);
1052  px[0] *= 2.0;
1053  // px[1] = py[1] * 2.0 * abs(tx[0])
1054  px[1] = py[1] * CppAD::abs(tx[0]);
1055  px[1] *= 2.0;
1056  }
1057  break;
1058 
1059  default:
1060  return false;
1061  }
1062 
1063  return true;
1064  }
1065 
1066  using CppAD::atomic_base<SCIPInterval>::for_sparse_jac;
1067 
1068  /** computes sparsity of jacobian during a forward sweep
1069  * For a 1 x q matrix R, we have to return the sparsity pattern of the 1 x q matrix S(x) = f'(x) * R.
1070  * Since f'(x) is dense, the sparsity of S will be the sparsity of R.
1071  */
1072  bool for_sparse_jac(
1073  size_t q, /**< number of columns in R */
1074  const CppAD::vector<bool>& r, /**< sparsity of R, columnwise */
1075  CppAD::vector<bool>& s /**< vector to store sparsity of S, columnwise */
1076  )
1077  {
1078  return univariate_for_sparse_jac(q, r, s);
1079  }
1080 
1081  using CppAD::atomic_base<SCIPInterval>::rev_sparse_jac;
1082 
1083  /** computes sparsity of jacobian during a reverse sweep
1084  * For a q x 1 matrix S, we have to return the sparsity pattern of the q x 1 matrix R(x) = S * f'(x).
1085  * Since f'(x) is dense, the sparsity of R will be the sparsity of S.
1086  */
1087  bool rev_sparse_jac(
1088  size_t q, /**< number of rows in R */
1089  CppAD::vector<bool>& r, /**< sparsity of R, rowwise */
1090  const CppAD::vector<bool>& s /**< vector to store sparsity of S, rowwise */
1091  )
1092  {
1093  return univariate_rev_sparse_jac(q, r, s);
1094  }
1095 
1096  using CppAD::atomic_base<SCIPInterval>::rev_sparse_hes;
1097 
1098  /** computes sparsity of hessian during a reverse sweep
1099  * Assume V(x) = (g(f(x)))'' R with f(x) = sign(x)abs(x)^p for a function g:R->R and a matrix R.
1100  * we have to specify the sparsity pattern of V(x) and T(x) = (g(f(x)))'.
1101  */
1102  bool rev_sparse_hes(
1103  const CppAD::vector<bool>& vx, /**< indicates whether argument is a variable, or empty vector */
1104  const CppAD::vector<bool>& s, /**< sparsity pattern of S = g'(y) */
1105  CppAD::vector<bool>& t, /**< vector to store sparsity pattern of T(x) = (g(f(x)))' */
1106  size_t q, /**< number of columns in S and R */
1107  const CppAD::vector<bool>& r, /**< sparsity pattern of R */
1108  const CppAD::vector<bool>& u, /**< sparsity pattern of U(x) = g''(f(x)) f'(x) R */
1109  CppAD::vector<bool>& v /**< vector to store sparsity pattern of V(x) = (g(f(x)))'' R */
1110  )
1111  {
1112  return univariate_rev_sparse_hes(vx, s, t, q, r, u, v);
1113  }
1114 };
1115 
1116 template<class Type>
1117 static
1118 void evalSignPower(
1119  Type& resultant, /**< resultant */
1120  Type& arg, /**< operand */
1121  SCIP_EXPR* expr /**< expression that holds the exponent */
1122  )
1123 {
1124  vector<Type> in(1, arg);
1125  vector<Type> out(1);
1126 
1127  static atomic_signpower<Type> sp;
1128  sp(in, out, (size_t)(void*)expr);
1129 
1130  resultant = out[0];
1131  return;
1132 }
1133 
1134 #else
1135 
1136 /** template for evaluation for signpower operator
1137  * only implemented for real numbers, thus gives error by default
1138  */
1139 template<class Type>
1140 static
1141 void evalSignPower(
1142  Type& resultant, /**< resultant */
1143  Type& arg, /**< operand */
1144  SCIP_EXPR* expr /**< expression that holds the exponent */
1145  )
1146 {
1147  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1148  "evalSignPower()",
1149  "Error: SignPower not implemented for this value type"
1150  );
1151 }
1152 
1153 /** specialization of signpower evaluation for real numbers
1154  */
1155 template<>
1156 void evalSignPower(
1157  CppAD::AD<double>& resultant, /**< resultant */
1158  CppAD::AD<double>& arg, /**< operand */
1159  SCIP_EXPR* expr /**< expression that holds the exponent */
1160  )
1161 {
1162  SCIP_Real exponent;
1163 
1164  exponent = SCIPexprGetSignPowerExponent(expr);
1165 
1166  if( arg == 0.0 )
1167  resultant = 0.0;
1168  else if( arg > 0.0 )
1169  resultant = pow( arg, exponent);
1170  else
1171  resultant = -pow(-arg, exponent);
1172 }
1173 
1174 #endif
1175 
1176 /** template for evaluation for minimum operator
1177  * only implemented for real numbers, thus gives error by default
1178  * @todo implement own userad function
1179  */
1180 template<class Type>
1181 static
1182 void evalMin(
1183  Type& resultant, /**< resultant */
1184  Type& arg1, /**< first operand */
1185  Type& arg2 /**< second operand */
1186  )
1187 {
1188  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1189  "evalMin()",
1190  "Error: Min not implemented for this value type"
1191  );
1192 }
1193 
1194 /** specialization of minimum evaluation for real numbers
1195  */
1196 template<>
1197 void evalMin(
1198  CppAD::AD<double>& resultant, /**< resultant */
1199  CppAD::AD<double>& arg1, /**< first operand */
1200  CppAD::AD<double>& arg2 /**< second operand */
1201  )
1202 {
1203  resultant = MIN(arg1, arg2);
1204 }
1205 
1206 /** template for evaluation for maximum operator
1207  * only implemented for real numbers, thus gives error by default
1208  * @todo implement own userad function
1209  */
1210 template<class Type>
1211 static
1212 void evalMax(
1213  Type& resultant, /**< resultant */
1214  Type& arg1, /**< first operand */
1215  Type& arg2 /**< second operand */
1216  )
1217 {
1218  CppAD::ErrorHandler::Call(true, __LINE__, __FILE__,
1219  "evalMax()",
1220  "Error: Max not implemented for this value type"
1221  );
1222 }
1223 
1224 /** specialization of maximum evaluation for real numbers
1225  */
1226 template<>
1227 void evalMax(
1228  CppAD::AD<double>& resultant, /**< resultant */
1229  CppAD::AD<double>& arg1, /**< first operand */
1230  CppAD::AD<double>& arg2 /**< second operand */
1231  )
1232 {
1233  resultant = MAX(arg1, arg2);
1234 }
1235 
1236 /** template for evaluation for square-root operator
1237  * default is to use the standard sqrt-function
1238  */
1239 template<class Type>
1240 static
1241 void evalSqrt(
1242  Type& resultant, /**< resultant */
1243  Type& arg /**< operand */
1244  )
1245 {
1246  resultant = sqrt(arg);
1247 }
1248 
1249 /** specialization of square-root operator for numbers
1250  * we perturb the function a little bit so that its derivatives are defined at 0.0
1251  */
1252 template<>
1253 void evalSqrt(
1254  CppAD::AD<double>& resultant, /**< resultant */
1255  CppAD::AD<double>& arg /**< operand */
1256  )
1257 {
1258  resultant = sqrt(arg + 1e-20) - 1e-10;
1259 }
1260 
1261 /** template for evaluation for absolute value operator
1262  */
1263 template<class Type>
1264 static
1265 void evalAbs(
1266  Type& resultant, /**< resultant */
1267  Type& arg /**< operand */
1268  )
1269 {
1270  resultant = abs(arg);
1271 }
1272 
1273 /** specialization of absolute value evaluation for intervals
1274  * use sqrt(x^2) for now @todo implement own userad function
1275  */
1276 template<>
1277 void evalAbs(
1278  CppAD::AD<SCIPInterval>& resultant, /**< resultant */
1279  CppAD::AD<SCIPInterval>& arg /**< operand */
1280  )
1281 {
1282  vector<CppAD::AD<SCIPInterval> > in(1, arg);
1283  vector<CppAD::AD<SCIPInterval> > out(1);
1284 
1285  posintpower(in, out, 2);
1286 
1287  resultant = sqrt(out[0]);
1288 }
1289 
1290 /** integer power operation for arbitrary integer exponents */
1291 template<class Type>
1292 static
1293 void evalIntPower(
1294  Type& resultant, /**< resultant */
1295  Type& arg, /**< operand */
1296  int exponent /**< exponent */
1297  )
1298 {
1299  if( exponent > 1 )
1300  {
1301  vector<Type> in(1, arg);
1302  vector<Type> out(1);
1303 
1304  posintpower(in, out, exponent);
1305 
1306  resultant = out[0];
1307  return;
1308  }
1309 
1310  if( exponent < -1 )
1311  {
1312  vector<Type> in(1, arg);
1313  vector<Type> out(1);
1314 
1315  posintpower(in, out, -exponent);
1316 
1317  resultant = Type(1.0)/out[0];
1318  return;
1319  }
1320 
1321  if( exponent == 1 )
1322  {
1323  resultant = arg;
1324  return;
1325  }
1326 
1327  if( exponent == 0 )
1328  {
1329  resultant = Type(1.0);
1330  return;
1331  }
1332 
1333  assert(exponent == -1);
1334  resultant = Type(1.0)/arg;
1335 }
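/* Example (illustrative): evalIntPower(res, x, -3) computes res = 1/x^3 by calling
 * posintpower(in, out, 3) and taking Type(1.0)/out[0]; the exponents 0, 1, and -1
 * are handled directly above without recording an atomic operation.
 */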
1336 
1337 /** CppAD compatible evaluation of an expression for given arguments and parameters */
1338 template<class Type>
1339 static
1340 SCIP_RETCODE eval(
1341  SCIP_EXPR* expr, /**< expression */
1342  const vector<Type>& x, /**< values of variables */
1343  SCIP_Real* param, /**< values of parameters */
1344  Type& val /**< buffer to store expression value */
1345  )
1346 {
1347  Type* buf;
1348 
1349  assert(expr != NULL);
1350 
1351  /* todo use SCIP_MAXCHILD_ESTIMATE as in expression.c */
1352 
1353  buf = NULL;
1354  if( SCIPexprGetNChildren(expr) )
1355  {
1356  if( BMSallocMemoryArray(&buf, SCIPexprGetNChildren(expr)) == NULL )
1357  return SCIP_NOMEMORY;
1358 
1359  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1360  SCIP_CALL( eval(SCIPexprGetChildren(expr)[i], x, param, buf[i]) );
1361  }
1362 
1363  switch(SCIPexprGetOperator(expr))
1364  {
1365  case SCIP_EXPR_VARIDX:
1366  assert(SCIPexprGetOpIndex(expr) < (int)x.size());
1367  val = x[SCIPexprGetOpIndex(expr)];
1368  break;
1369 
1370  case SCIP_EXPR_CONST:
1371  val = SCIPexprGetOpReal(expr);
1372  break;
1373 
1374  case SCIP_EXPR_PARAM:
1375  assert(param != NULL);
1376  val = param[SCIPexprGetOpIndex(expr)];
1377  break;
1378 
1379  case SCIP_EXPR_PLUS:
1380  val = buf[0] + buf[1];
1381  break;
1382 
1383  case SCIP_EXPR_MINUS:
1384  val = buf[0] - buf[1];
1385  break;
1386 
1387  case SCIP_EXPR_MUL:
1388  val = buf[0] * buf[1];
1389  break;
1390 
1391  case SCIP_EXPR_DIV:
1392  val = buf[0] / buf[1];
1393  break;
1394 
1395  case SCIP_EXPR_SQUARE:
1396  evalIntPower(val, buf[0], 2);
1397  break;
1398 
1399  case SCIP_EXPR_SQRT:
1400  evalSqrt(val, buf[0]);
1401  break;
1402 
1403  case SCIP_EXPR_REALPOWER:
1404  val = CppAD::pow(buf[0], SCIPexprGetRealPowerExponent(expr));
1405  break;
1406 
1407  case SCIP_EXPR_INTPOWER:
1408  evalIntPower(val, buf[0], SCIPexprGetIntPowerExponent(expr));
1409  break;
1410 
1411  case SCIP_EXPR_SIGNPOWER:
1412  evalSignPower(val, buf[0], expr);
1413  break;
1414 
1415  case SCIP_EXPR_EXP:
1416  val = exp(buf[0]);
1417  break;
1418 
1419  case SCIP_EXPR_LOG:
1420  val = log(buf[0]);
1421  break;
1422 
1423  case SCIP_EXPR_SIN:
1424  val = sin(buf[0]);
1425  break;
1426 
1427  case SCIP_EXPR_COS:
1428  val = cos(buf[0]);
1429  break;
1430 
1431  case SCIP_EXPR_TAN:
1432  val = tan(buf[0]);
1433  break;
1434 #if 0 /* these operators are currently disabled */
1435  case SCIP_EXPR_ERF:
1436  val = erf(buf[0]);
1437  break;
1438 
1439  case SCIP_EXPR_ERFI:
1440  return SCIP_ERROR;
1441 #endif
1442  case SCIP_EXPR_MIN:
1443  evalMin(val, buf[0], buf[1]);
1444  break;
1445 
1446  case SCIP_EXPR_MAX:
1447  evalMax(val, buf[0], buf[1]);
1448  break;
1449 
1450  case SCIP_EXPR_ABS:
1451  evalAbs(val, buf[0]);
1452  break;
1453 
1454  case SCIP_EXPR_SIGN:
1455  val = sign(buf[0]);
1456  break;
1457 
1458  case SCIP_EXPR_SUM:
1459  val = 0.0;
1460  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1461  val += buf[i];
1462  break;
1463 
1464  case SCIP_EXPR_PRODUCT:
1465  val = 1.0;
1466  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1467  val *= buf[i];
1468  break;
1469 
1470  case SCIP_EXPR_LINEAR:
1471  {
1472  SCIP_Real* coefs;
1473 
1474  coefs = SCIPexprGetLinearCoefs(expr);
1475  assert(coefs != NULL || SCIPexprGetNChildren(expr) == 0);
1476 
1477  val = SCIPexprGetLinearConstant(expr);
1478  for (int i = 0; i < SCIPexprGetNChildren(expr); ++i)
1479  val += coefs[i] * buf[i];
1480  break;
1481  }
1482 
1483  case SCIP_EXPR_QUADRATIC:
1484  {
1485  SCIP_Real* lincoefs;
1486  SCIP_QUADELEM* quadelems;
1487  int nquadelems;
1488  SCIP_Real sqrcoef;
1489  Type lincoef;
1490  vector<Type> in(1);
1491  vector<Type> out(1);
1492 
1493  lincoefs = SCIPexprGetQuadLinearCoefs(expr);
1494  nquadelems = SCIPexprGetNQuadElements(expr);
1495  quadelems = SCIPexprGetQuadElements(expr);
1496  assert(quadelems != NULL || nquadelems == 0);
1497 
1498  SCIPexprSortQuadElems(expr);
1499 
1500  val = SCIPexprGetQuadConstant(expr);
1501 
1502  /* for each argument, we collect its linear coefficient from lincoefs, its square coefficient, and all factors from bilinear terms;
1503  * then we compute the term sqrcoef*x^2 + lincoef*x and add it to the result */
1504  int i = 0;
1505  for( int argidx = 0; argidx < SCIPexprGetNChildren(expr); ++argidx )
1506  {
1507  if( i == nquadelems || quadelems[i].idx1 > argidx )
1508  {
1509  /* there are no quadratic terms with argidx in its first argument, that should be easy to handle */
1510  if( lincoefs != NULL )
1511  val += lincoefs[argidx] * buf[argidx];
1512  continue;
1513  }
1514 
1515  sqrcoef = 0.0;
1516  lincoef = lincoefs != NULL ? lincoefs[argidx] : 0.0;
1517 
1518  assert(i < nquadelems && quadelems[i].idx1 == argidx);
1519  do
1520  {
1521  if( quadelems[i].idx2 == argidx )
1522  sqrcoef += quadelems[i].coef;
1523  else
1524  lincoef += quadelems[i].coef * buf[quadelems[i].idx2];
1525  ++i;
1526  } while( i < nquadelems && quadelems[i].idx1 == argidx );
1527  assert(i == nquadelems || quadelems[i].idx1 > argidx);
1528 
1529  /* this is not as good as what we can get from SCIPintervalQuad, but easy to implement */
1530  if( sqrcoef != 0.0 )
1531  {
1532  in[0] = buf[argidx];
1533  posintpower(in, out, 2);
1534  val += sqrcoef * out[0];
1535  }
1536 
1537  val += lincoef * buf[argidx];
1538  }
1539  assert(i == nquadelems);
1540 
1541  break;
1542  }
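 /* Worked example (illustrative, data chosen only for this sketch): for the quadratic
  * expression 3 + 2*x0 + x0^2 + 4*x0*x1 + 5*x1^2, i.e. constant 3, lincoefs = {2,0},
  * and sorted quadelems (0,0,1), (0,1,4), (1,1,5), the loop above visits argidx = 0
  * with sqrcoef = 1 and lincoef = 2 + 4*buf[1], then argidx = 1 with sqrcoef = 5 and
  * lincoef = 0, so val accumulates 3 + x0^2 + (2 + 4*x1)*x0 + 5*x1^2. */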
1543 
1544  case SCIP_EXPR_POLYNOMIAL:
1545  {
1546  SCIP_EXPRDATA_MONOMIAL** monomials;
1547  Type childval;
1548  Type monomialval;
1549  SCIP_Real exponent;
1550  int nmonomials;
1551  int nfactors;
1552  int* childidxs;
1553  SCIP_Real* exponents;
1554  int i;
1555  int j;
1556 
1557  val = SCIPexprGetPolynomialConstant(expr);
1558 
1559  nmonomials = SCIPexprGetNMonomials(expr);
1560  monomials = SCIPexprGetMonomials(expr);
1561 
1562  for( i = 0; i < nmonomials; ++i )
1563  {
1564  nfactors = SCIPexprGetMonomialNFactors(monomials[i]);
1565  childidxs = SCIPexprGetMonomialChildIndices(monomials[i]);
1566  exponents = SCIPexprGetMonomialExponents(monomials[i]);
1567  monomialval = SCIPexprGetMonomialCoef(monomials[i]);
1568 
1569  for( j = 0; j < nfactors; ++j )
1570  {
1571  assert(childidxs[j] >= 0);
1572  assert(childidxs[j] < SCIPexprGetNChildren(expr));
1573 
1574  childval = buf[childidxs[j]];
1575  exponent = exponents[j];
1576 
1577  /* cover some special exponents separately to avoid calling expensive pow function */
1578  if( exponent == 0.0 )
1579  continue;
1580  if( exponent == 1.0 )
1581  {
1582  monomialval *= childval;
1583  continue;
1584  }
1585  if( (int)exponent == exponent )
1586  {
1587  Type tmp;
1588  evalIntPower(tmp, childval, (int)exponent);
1589  monomialval *= tmp;
1590  continue;
1591  }
1592  if( exponent == 0.5 )
1593  {
1594  Type tmp;
1595  evalSqrt(tmp, childval);
1596  monomialval *= tmp;
1597  continue;
1598  }
1599  monomialval *= pow(childval, exponent);
1600  }
1601 
1602  val += monomialval;
1603  }
1604 
1605  break;
1606  }
1607 
1608  default:
1609  return SCIP_ERROR;
1610  }
1611 
1612  BMSfreeMemoryArrayNull(&buf);
1613 
1614  return SCIP_OKAY;
1615 }
1616 
1617 /** analyzes an expression tree to determine whether it requires retaping on every evaluation
1618  * this may be the case if the evaluation sequence depends on values of operands (e.g., in case of abs, sign, signpower, ...)
1619  */
1620 static
1621 bool needAlwaysRetape(SCIP_EXPR* expr)
1622 {
1623  assert(expr != NULL);
1624  assert(SCIPexprGetChildren(expr) != NULL || SCIPexprGetNChildren(expr) == 0);
1625 
1626  for( int i = 0; i < SCIPexprGetNChildren(expr); ++i )
1627  {
1628  if( needAlwaysRetape(SCIPexprGetChildren(expr)[i]) )
1629  return true;
1630  }
1631 
1632  switch( SCIPexprGetOperator(expr) )
1633  {
1634  case SCIP_EXPR_MIN:
1635  case SCIP_EXPR_MAX:
1636  case SCIP_EXPR_ABS:
1637 #ifdef NO_CPPAD_USER_ATOMIC
1638  case SCIP_EXPR_SIGNPOWER:
1639 #endif
1640  return true;
1641 
1642  default: ;
1643  }
1644 
1645  return false;
1646 }
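/* Example (illustrative): a tree for min(x0, x1) + exp(x2) makes needAlwaysRetape()
 * return true, since the MIN comparison recorded by evalMin() depends on the current
 * point, whereas a tree for x0*x1 + exp(x2) returns false and its tape can be reused
 * for subsequent evaluations.
 */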
1647 
1648 /** replacement for CppAD's default error handler
1649  * in debug mode, CppAD gives an error when an evaluation contains a nan
1650  * we do not want to stop execution in such a case, since the calling routine should check for nan's and decide what to do
1651  * since we cannot ignore this particular error, we ignore all
1652  * @todo find a way to check whether the error corresponds to a nan and communicate this back
1653  */
1654 static
1655 void cppaderrorcallback(
1656  bool known, /**< is the error from a known source? */
1657  int line, /**< line where error occurred */
1658  const char* file, /**< file where error occurred */
1659  const char* exp, /**< error condition */
1660  const char* msg /**< error message */
1661  )
1662 {
1663  SCIPdebugMessage("ignore CppAD error from %sknown source %s:%d: msg: %s exp: %s\n", known ? "" : "un", file, line, msg, exp);
1664 }
1665 
1666 /* install our error handler */
1667 static CppAD::ErrorHandler errorhandler(cppaderrorcallback);
1668 
1669 /** gets name and version of expression interpreter */
1670 const char* SCIPexprintGetName(void)
1671 {
1672  return CPPAD_PACKAGE_STRING;
1673 }
1674 
1675 /** gets descriptive text of expression interpreter */
1676 const char* SCIPexprintGetDesc(void)
1677 {
1678  return "Algorithmic Differentiation of C++ algorithms developed by B. Bell (www.coin-or.org/CppAD)";
1679 }
1680 
1681 /** gets capabilities of expression interpreter (using bitflags) */
1682 SCIP_EXPRINTCAPABILITY SCIPexprintGetCapability(
1683  void
1684  )
1685 {
1686  return SCIP_EXPRINTCAPABILITY_FUNCVALUE | SCIP_EXPRINTCAPABILITY_INTFUNCVALUE |
1687  SCIP_EXPRINTCAPABILITY_GRADIENT | SCIP_EXPRINTCAPABILITY_INTGRADIENT |
1688  SCIP_EXPRINTCAPABILITY_HESSIAN;
1689 }
1690 
1691 /** creates an expression interpreter object */
1692 SCIP_RETCODE SCIPexprintCreate(
1693  BMS_BLKMEM* blkmem, /**< block memory data structure */
1694  SCIP_EXPRINT** exprint /**< buffer to store pointer to expression interpreter */
1695  )
1696 {
1697  assert(blkmem != NULL);
1698  assert(exprint != NULL);
1699 
1700  if( BMSallocMemory(exprint) == NULL )
1701  return SCIP_NOMEMORY;
1702 
1703  (*exprint)->blkmem = blkmem;
1704 
1705  return SCIP_OKAY;
1706 }
1707 
1708 /** frees an expression interpreter object */
1709 SCIP_RETCODE SCIPexprintFree(
1710  SCIP_EXPRINT** exprint /**< expression interpreter that should be freed */
1711  )
1712 {
1713  assert( exprint != NULL);
1714  assert(*exprint != NULL);
1715 
1716  BMSfreeMemory(exprint);
1717 
1718  return SCIP_OKAY;
1719 }
1720 
1721 /** compiles an expression tree and stores compiled data in expression tree */
1722 SCIP_RETCODE SCIPexprintCompile(
1723  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1724  SCIP_EXPRTREE* tree /**< expression tree */
1725  )
1726 {
1727  assert(tree != NULL);
1728 
1730  if (!data)
1731  {
1732  data = new SCIP_EXPRINTDATA();
1733  assert( data != NULL );
1734  SCIPexprtreeSetInterpreterData(tree, data);
1735  SCIPdebugMessage("set interpreter data in tree %p to %p\n", (void*)tree, (void*)data);
1736  }
1737  else
1738  {
1739  data->need_retape = true;
1740  data->int_need_retape = true;
1741  }
1742 
1743  int n = SCIPexprtreeGetNVars(tree);
1744 
1745  data->X.resize(n);
1746  data->x.resize(n);
1747  data->Y.resize(1);
1748 
1749  data->int_X.resize(n);
1750  data->int_x.resize(n);
1751  data->int_Y.resize(1);
1752 
1753  if( data->root != NULL )
1754  {
1755  SCIPexprFreeDeep(exprint->blkmem, &data->root);
1756  }
1757 
1758  SCIP_EXPR* root = SCIPexprtreeGetRoot(tree);
1759 
1760  SCIP_CALL( SCIPexprCopyDeep(exprint->blkmem, &data->root, root) );
1761 
1762  data->need_retape_always = needAlwaysRetape(SCIPexprtreeGetRoot(tree));
1763 
1764  data->blkmem = exprint->blkmem;
1765 
1766  return SCIP_OKAY;
1767 }
1768 
1769 /** frees interpreter data */
1770 SCIP_RETCODE SCIPexprintFreeData(
1771  SCIP_EXPRINTDATA** interpreterdata /**< interpreter data that should be freed */
1772  )
1773 {
1774  assert( interpreterdata != NULL);
1775  assert(*interpreterdata != NULL);
1776 
1777  if( (*interpreterdata)->root != NULL )
1778  SCIPexprFreeDeep((*interpreterdata)->blkmem, &(*interpreterdata)->root);
1779 
1780  delete *interpreterdata;
1781  *interpreterdata = NULL;
1782 
1783  return SCIP_OKAY;
1784 }
1785 
1786 /** notify expression interpreter that a new parameterization is used
1787  * this probably causes retaping by AD algorithms
1788  */
1789 SCIP_RETCODE SCIPexprintNewParametrization(
1790  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1791  SCIP_EXPRTREE* tree /**< expression tree */
1792  )
1793 {
1794  assert(exprint != NULL);
1795  assert(tree != NULL);
1796 
1797  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
1798  if( data != NULL )
1799  {
1800  data->need_retape = true;
1801  data->int_need_retape = true;
1802  }
1803 
1804  return SCIP_OKAY;
1805 }
1806 
1807 /** evaluates an expression tree */
1808 SCIP_RETCODE SCIPexprintEval(
1809  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1810  SCIP_EXPRTREE* tree, /**< expression tree */
1811  SCIP_Real* varvals, /**< values of variables */
1812  SCIP_Real* val /**< buffer to store value */
1813  )
1814 {
1815  SCIP_EXPRINTDATA* data;
1816 
1817  assert(exprint != NULL);
1818  assert(tree != NULL);
1819  assert(varvals != NULL);
1820  assert(val != NULL);
1821 
1822  data = SCIPexprtreeGetInterpreterData(tree);
1823  assert(data != NULL);
1824  assert(SCIPexprtreeGetNVars(tree) == (int)data->X.size());
1825  assert(SCIPexprtreeGetRoot(tree) != NULL);
1826 
1827  int n = SCIPexprtreeGetNVars(tree);
1828 
1829  if( data->need_retape_always || data->need_retape )
1830  {
1831  for( int i = 0; i < n; ++i )
1832  {
1833  data->X[i] = varvals[i];
1834  data->x[i] = varvals[i];
1835  }
1836 
1837  CppAD::Independent(data->X);
1838 
1839  if( data->root != NULL )
1840  SCIP_CALL( eval(data->root, data->X, SCIPexprtreeGetParamVals(tree), data->Y[0]) );
1841  else
1842  data->Y[0] = 0.0;
1843 
1844  data->f.Dependent(data->X, data->Y);
1845 
1846  data->val = Value(data->Y[0]);
1847  SCIPdebugMessage("Eval retaped and computed value %g\n", data->val);
1848 
1849  // the following is required if the gradient shall be computed by a reverse sweep later
1850  // data->val = data->f.Forward(0, data->x)[0];
1851 
1852  data->need_retape = false;
1853  }
1854  else
1855  {
1856  assert((int)data->x.size() >= n);
1857  for( int i = 0; i < n; ++i )
1858  data->x[i] = varvals[i];
1859 
1860  data->val = data->f.Forward(0, data->x)[0];
1861  SCIPdebugMessage("Eval used forward sweep to compute value %g\n", data->val);
1862  }
1863 
1864  *val = data->val;
1865 
1866  return SCIP_OKAY;
1867 }
1868 
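/* Minimal usage sketch of the point-evaluation interface. This is illustrative only: it assumes the
 * caller already has a block memory handle 'blkmem', an expression tree 'tree', and an array
 * 'varvals' of length SCIPexprtreeGetNVars(tree), and that the code runs inside a function
 * returning SCIP_RETCODE (so SCIP_CALL applies):
 *
 *    SCIP_EXPRINT* exprint;
 *    SCIP_Real objval;
 *
 *    SCIP_CALL( SCIPexprintCreate(blkmem, &exprint) );
 *    SCIP_CALL( SCIPexprintCompile(exprint, tree) );
 *    SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &objval) );
 *    SCIP_CALL( SCIPexprintFree(&exprint) );
 *
 * The first evaluation records the CppAD tape; later evaluations of the same compiled tree reuse it
 * with a zero-order forward sweep (data->f.Forward(0, x)) unless retaping is required.
 */
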
1869 /** evaluates an expression tree on intervals */
1870 extern
1871 SCIP_RETCODE SCIPexprintEvalInt(
1872  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1873  SCIP_EXPRTREE* tree, /**< expression tree */
1874  SCIP_Real infinity, /**< value for infinity */
1875  SCIP_INTERVAL* varvals, /**< interval values of variables */
1876  SCIP_INTERVAL* val /**< buffer to store interval value of expression */
1877  )
1878 {
1879  SCIP_EXPRINTDATA* data;
1880 
1881  assert(exprint != NULL);
1882  assert(tree != NULL);
1883  assert(varvals != NULL);
1884  assert(val != NULL);
1885 
1886  data = SCIPexprtreeGetInterpreterData(tree);
1887  assert(data != NULL);
1888  assert(SCIPexprtreeGetNVars(tree) == (int)data->int_X.size());
1889  assert(SCIPexprtreeGetRoot(tree) != NULL);
1890 
1891  int n = SCIPexprtreeGetNVars(tree);
1892 
1893  SCIPInterval::infinity = infinity;
1894 
1895  if( data->int_need_retape || data->need_retape_always )
1896  {
1897  for( int i = 0; i < n; ++i )
1898  {
1899  data->int_X[i] = varvals[i];
1900  data->int_x[i] = varvals[i];
1901  }
1902 
1903  CppAD::Independent(data->int_X);
1904 
1905  if( data->root != NULL )
1906  SCIP_CALL( eval(data->root, data->int_X, SCIPexprtreeGetParamVals(tree), data->int_Y[0]) );
1907  else
1908  data->int_Y[0] = 0.0;
1909 
1910  data->int_f.Dependent(data->int_X, data->int_Y);
1911 
1912  data->int_val = Value(data->int_Y[0]);
1913 
1914  data->int_need_retape = false;
1915  }
1916  else
1917  {
1918  assert((int)data->int_x.size() >= n);
1919  for( int i = 0; i < n; ++i )
1920  data->int_x[i] = varvals[i];
1921 
1922  data->int_val = data->int_f.Forward(0, data->int_x)[0];
1923  }
1924 
1925  *val = data->int_val;
1926 
1927  return SCIP_OKAY;
1928 }
1929 
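/* The interval routines keep a separate tape (data->int_f) over the SCIPInterval base type, so
 * point and interval evaluations do not invalidate each other's recordings. Note that
 * SCIPInterval::infinity is a static member set from the 'infinity' argument above; callers should
 * therefore pass a consistent infinity value, since concurrent interval evaluations with different
 * values would interfere.
 */
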
1930 /** computes value and gradient of an expression tree */
1931 SCIP_RETCODE SCIPexprintGrad(
1932  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1933  SCIP_EXPRTREE* tree, /**< expression tree */
1934  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
1935  SCIP_Bool new_varvals, /**< have variable values changed since last call to a point evaluation routine? */
1936  SCIP_Real* val, /**< buffer to store expression value */
1937  SCIP_Real* gradient /**< buffer to store expression gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
1938  )
1939 {
1940  assert(exprint != NULL);
1941  assert(tree != NULL);
1942  assert(varvals != NULL || new_varvals == FALSE);
1943  assert(val != NULL);
1944  assert(gradient != NULL);
1945 
1946  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
1947  assert(data != NULL);
1948 
1949  if( new_varvals )
1950  {
1951  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
1952  }
1953  else
1954  *val = data->val;
1955 
1956  int n = SCIPexprtreeGetNVars(tree);
1957 
1958  vector<double> jac(data->f.Jacobian(data->x));
1959 
1960  for( int i = 0; i < n; ++i )
1961  gradient[i] = jac[i];
1962 
1963 /* disable debug output since we have no message handler here
1964 #ifdef SCIP_DEBUG
1965  SCIPdebugMessage("Grad for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
1966  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
1967  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t %g", gradient[i]); printf("\n");
1968 #endif
1969 */
1970 
1971  return SCIP_OKAY;
1972 }
1973 
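/* Since the tape has a single dependent variable, data->f.Jacobian(data->x) returns exactly n
 * entries, i.e. the gradient (the Jacobian driver performs the required zero-order forward sweep
 * internally). A sketch of the alternative hinted at in SCIPexprintEval, a first-order reverse
 * sweep, which can be cheaper for scalar functions; it assumes a zero-order forward sweep has been
 * done at the current point:
 *
 *    data->f.Forward(0, data->x);                    // zero-order sweep at the current point
 *    vector<double> w(1, 1.0);                       // weight for the single dependent variable
 *    vector<double> grad = data->f.Reverse(1, w);    // first-order reverse sweep = gradient
 */
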
1974 /** computes interval value and interval gradient of an expression tree */
1975 SCIP_RETCODE SCIPexprintGradInt(
1976  SCIP_EXPRINT* exprint, /**< interpreter data structure */
1977  SCIP_EXPRTREE* tree, /**< expression tree */
1978  SCIP_Real infinity, /**< value for infinity */
1979  SCIP_INTERVAL* varvals, /**< interval values of variables, can be NULL if new_varvals is FALSE */
1980  SCIP_Bool new_varvals, /**< have variable interval values changed since last call to an interval evaluation routine? */
1981  SCIP_INTERVAL* val, /**< buffer to store expression interval value */
1982  SCIP_INTERVAL* gradient /**< buffer to store expression interval gradient, needs to have length at least SCIPexprtreeGetNVars(tree) */
1983  )
1984 {
1985  assert(exprint != NULL);
1986  assert(tree != NULL);
1987  assert(varvals != NULL || new_varvals == FALSE);
1988  assert(val != NULL);
1989  assert(gradient != NULL);
1990 
1991  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
1992  assert(data != NULL);
1993 
1994  if (new_varvals)
1995  SCIP_CALL( SCIPexprintEvalInt(exprint, tree, infinity, varvals, val) );
1996  else
1997  *val = data->int_val;
1998 
1999  int n = SCIPexprtreeGetNVars(tree);
2000 
2001  vector<SCIPInterval> jac(data->int_f.Jacobian(data->int_x));
2002 
2003  for (int i = 0; i < n; ++i)
2004  gradient[i] = jac[i];
2005 
2006 /* disable debug output since we have no message handler here
2007 #ifdef SCIP_DEBUG
2008  SCIPdebugMessage("GradInt for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2009  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(data->int_x[i]), SCIPintervalGetSup(data->int_x[i])); printf("\n");
2010  SCIPdebugMessage("grad ="); for (int i = 0; i < n; ++i) printf("\t [%g,%g]", SCIPintervalGetInf(gradient[i]), SCIPintervalGetSup(gradient[i])); printf("\n");
2011 #endif
2012 */
2013 
2014  return SCIP_OKAY;
2015 }
2016 
2017 /** gives sparsity pattern of hessian
2018  * NOTE: this function might be replaced later by something nicer
2019  * Since the AD code might need to do a forward sweep, you should pass variable values in here.
2020  */
2021 SCIP_RETCODE SCIPexprintHessianSparsityDense(
2022  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2023  SCIP_EXPRTREE* tree, /**< expression tree */
2024  SCIP_Real* varvals, /**< values of variables */
2025  SCIP_Bool* sparsity /**< buffer to store sparsity pattern of Hessian, sparsity[i+n*j] indicates whether entry (i,j) is nonzero in the hessian */
2026  )
2027 {
2028  assert(exprint != NULL);
2029  assert(tree != NULL);
2030  assert(varvals != NULL);
2031  assert(sparsity != NULL);
2032 
2033  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2034  assert(data != NULL);
2035 
2036  int n = SCIPexprtreeGetNVars(tree);
2037  int nn = n*n;
2038 
2039  if( data->need_retape_always )
2040  {
2041  // @todo can we do something better here, e.g., by looking at the expression tree ourselves?
2042 
2043  for( int i = 0; i < nn; ++i )
2044  sparsity[i] = TRUE;
2045 
2046 /* disable debug output since we have no message handler here
2047 #ifdef SCIP_DEBUG
2048  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2049  SCIPdebugMessage("sparsity = all elements, due to discontinuities\n");
2050 #endif
2051 */
2052 
2053  return SCIP_OKAY;
2054  }
2055 
2056  if( data->need_retape )
2057  {
2058  SCIP_Real val;
2059  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, &val) );
2060  }
2061 
2062  SCIPdebugMessage("calling ForSparseJac\n");
2063 
2064  vector<bool> r(nn, false);
2065  for (int i = 0; i < n; ++i)
2066  r[i*n+i] = true;
2067  data->f.ForSparseJac(n, r); // need to compute sparsity for Jacobian first
2068 
2069  SCIPdebugMessage("calling RevSparseHes\n");
2070 
2071  vector<bool> s(1, true);
2072  vector<bool> sparsehes(data->f.RevSparseHes(n, s));
2073 
2074  for( int i = 0; i < nn; ++i )
2075  sparsity[i] = sparsehes[i];
2076 
2077 /* disable debug output since we have no message handler here
2078 #ifdef SCIP_DEBUG
2079  SCIPdebugMessage("HessianSparsityDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2080  SCIPdebugMessage("sparsity ="); for (int i = 0; i < n; ++i) for (int j = 0; j < n; ++j) if (sparsity[i*n+j]) printf(" (%d,%d)", i, j); printf("\n");
2081 #endif
2082 */
2083 
2084  return SCIP_OKAY;
2085 }
2086 
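/* The sparsity computation above follows the standard CppAD pattern: ForSparseJac is seeded with
 * the n x n identity pattern r (column j tracks dependence on variable j), and RevSparseHes with
 * s = (true) selects the single dependent variable; the result is a dense boolean Hessian pattern
 * in row-major order, which is copied into 'sparsity'.
 */
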
2087 /** computes value and dense hessian of an expression tree
2088  * the full hessian is computed (both the lower and the upper triangle are filled)
2089  */
2090 SCIP_RETCODE SCIPexprintHessianDense(
2091  SCIP_EXPRINT* exprint, /**< interpreter data structure */
2092  SCIP_EXPRTREE* tree, /**< expression tree */
2093  SCIP_Real* varvals, /**< values of variables, can be NULL if new_varvals is FALSE */
2094  SCIP_Bool new_varvals, /**< have variable values changed since last call to an evaluation routine? */
2095  SCIP_Real* val, /**< buffer to store function value */
2096  SCIP_Real* hessian /**< buffer to store hessian values, needs to have size at least n*n */
2097  )
2098 {
2099  assert(exprint != NULL);
2100  assert(tree != NULL);
2101  assert(varvals != NULL || new_varvals == FALSE);
2102  assert(val != NULL);
2103  assert(hessian != NULL);
2104 
2105  SCIP_EXPRINTDATA* data = SCIPexprtreeGetInterpreterData(tree);
2106  assert(data != NULL);
2107 
2108  if( new_varvals )
2109  {
2110  SCIP_CALL( SCIPexprintEval(exprint, tree, varvals, val) );
2111  }
2112  else
2113  *val = data->val;
2114 
2115  int n = SCIPexprtreeGetNVars(tree);
2116 
2117  vector<double> hess(data->f.Hessian(data->x, 0));
2118 
2119  int nn = n*n;
2120  for (int i = 0; i < nn; ++i)
2121  hessian[i] = hess[i];
2122 
2123 /* disable debug output since we have no message handler here
2124 #ifdef SCIP_DEBUG
2125  SCIPdebugMessage("HessianDense for "); SCIPexprtreePrint(tree, NULL, NULL, NULL); printf("\n");
2126  SCIPdebugMessage("x ="); for (int i = 0; i < n; ++i) printf("\t %g", data->x[i]); printf("\n");
2127  SCIPdebugMessage("hess ="); for (int i = 0; i < n*n; ++i) printf("\t %g", hessian[i]); printf("\n");
2128 #endif
2129 */
2130  return SCIP_OKAY;
2131 }
2132
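/* data->f.Hessian(data->x, 0) returns the dense n*n Hessian of dependent variable 0 in row-major
 * order, with both triangles filled as documented above. A sparse variant could combine the
 * pattern from SCIPexprintHessianSparsityDense with CppAD's SparseHessian driver, but the dense
 * driver is what is used here.
 */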