queso-0.57.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
GslOptimizer.C
Go to the documentation of this file.
1 //-----------------------------------------------------------------------bl-
2 //--------------------------------------------------------------------------
3 //
4 // QUESO - a library to support the Quantification of Uncertainty
5 // for Estimation, Simulation and Optimization
6 //
7 // Copyright (C) 2008-2017 The PECOS Development Team
8 //
9 // This library is free software; you can redistribute it and/or
10 // modify it under the terms of the Version 2.1 GNU Lesser General
11 // Public License as published by the Free Software Foundation.
12 //
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // Lesser General Public License for more details.
17 //
18 // You should have received a copy of the GNU Lesser General Public
19 // License along with this library; if not, write to the Free Software
20 // Foundation, Inc. 51 Franklin Street, Fifth Floor,
21 // Boston, MA 02110-1301 USA
22 //
23 //-----------------------------------------------------------------------el-
24 
25 #include <iostream>
26 
27 #include <queso/Defines.h>
28 #include <queso/GslVector.h>
29 #include <queso/VectorSpace.h>
30 #include <queso/ScalarFunction.h>
31 #include <queso/GslOptimizer.h>
32 #include <queso/OptimizerMonitor.h>
33 
34 #include <gsl/gsl_multimin.h>
35 #include <gsl/gsl_blas.h>
36 
37 namespace QUESO {
38 
39 // We need to extern "C" because gsl needs a pointer to a C function to
40 // minimize
41 extern "C" {
42  // This evaluate -log posterior
43  double c_evaluate(const gsl_vector * x, void * context) {
44 
45  GslOptimizer * optimizer = static_cast<GslOptimizer * >(context);
46 
47  GslVector state(
48  optimizer->objectiveFunction().domainSet().vectorSpace().zeroVector());
49 
50  // DM: Doing this copy sucks, but whatever. It'll do for now.
51  for (unsigned int i = 0; i < state.sizeLocal(); i++) {
52  state[i] = gsl_vector_get(x, i);
53  }
54 
55  // Bail early if GSL tries to evaluate outside of the domain
56  if (!optimizer->objectiveFunction().domainSet().contains(state)) {
57  return GSL_NAN;
58  }
59 
60  // Should cache derivative here so we don't recompute stuff
61  double result = -optimizer->objectiveFunction().lnValue(state);
62 
63  return result;
64  }
65 
66  // This evaluates the derivative of -log posterior
67  void c_evaluate_derivative(const gsl_vector * x, void * context,
68  gsl_vector * derivative) {
69  GslOptimizer * optimizer = static_cast<GslOptimizer * >(context);
70 
71  GslVector state(
72  optimizer->objectiveFunction().domainSet().vectorSpace().zeroVector());
73  GslVector deriv(
74  optimizer->objectiveFunction().domainSet().vectorSpace().zeroVector());
75 
76  // DM: Doing this copy sucks, but whatever. It'll do for now.
77  for (unsigned int i = 0; i < state.sizeLocal(); i++) {
78  state[i] = gsl_vector_get(x, i);
79 
80  // We fill with GSL_NAN and use it as a flag to check later that the user
81  // actually fills the derivative vector with stuff
82  deriv[i] = GSL_NAN;
83  }
84 
85  if (!optimizer->objectiveFunction().domainSet().contains(state)) {
86  // Fill derivative with error codes if the point is outside of the
87  // domain
88  for (unsigned int i = 0; i < deriv.sizeLocal(); i++) {
89  gsl_vector_set(derivative, i, GSL_NAN);
90  }
91  }
92  else {
93  // We should cache the return value here so we don't recompute stuff
94  double fx = -optimizer->objectiveFunction().lnValue(state, deriv);
95 
96  // Decide whether or not we need to do a finite difference based on
97  // whether the user actually filled deriv with values that are not
98  // GSL_NAN
99  //
100  // We're currently doing this check every time this function gets called.
101  // We could probably pull this logic out of here and put it somewhere
102  // where it only happens once
103  bool userComputedDerivative = true;
104  for (unsigned int i = 0; i < deriv.sizeLocal(); i++) {
105  // If the user missed out a derivative in any direction, fall back to
106  // a finite difference
107  if (gsl_isnan(deriv[i])) {
108  userComputedDerivative = false;
109  break;
110  }
111  }
112 
113  if (userComputedDerivative) {
114  for (unsigned int i = 0; i < deriv.sizeLocal(); i++) {
115  gsl_vector_set(derivative, i, -deriv[i]); // We need the minus sign
116  }
117  }
118  else {
119  // Finite difference step-size
120  double h = optimizer->getFiniteDifferenceStepSize();
121 
122  // User did not provide a derivative, so do a finite difference
123  for (unsigned int i = 0; i < deriv.sizeLocal(); i++) {
124  double tempState = state[i];
125  state[i] += h;
126 
127  // User didn't provide a derivative, so we don't bother passing in
128  // the derivative vector again
129  double fxph = -optimizer->objectiveFunction().lnValue(state);
130 
131  // Reset the state back to what it was before
132  state[i] = tempState;
133 
134  // Make sure we didn't do anything dumb and tell gsl if we did
135  if (!gsl_isnan(fx) && !gsl_isnan(fxph)) {
136  gsl_vector_set(derivative, i, (fxph - fx) / h);
137  }
138  else {
139  gsl_vector_set(derivative, i, GSL_NAN);
140  }
141  }
142  }
143  }
144  }
145 
146  // This evaluates -log posterior and the derivative of -log posterior
147  void c_evaluate_with_derivative(const gsl_vector * x, void * context,
148  double * f, gsl_vector * derivative) {
149  // We don't need to call both of these
150  *f = c_evaluate(x, context);
151  c_evaluate_derivative(x, context, derivative);
152  }
153 } // End extern "C"
154 
156  const BaseScalarFunction<GslVector, GslMatrix> & objectiveFunction)
157  : BaseOptimizer(),
158  m_objectiveFunction(objectiveFunction),
159  m_initialPoint(new GslVector(objectiveFunction.domainSet().
160  vectorSpace().zeroVector())),
161  m_minimizer(new GslVector(this->m_objectiveFunction.domainSet().
162  vectorSpace().zeroVector())),
163  m_solver_type(BFGS2),
164  m_fstep_size(this->m_objectiveFunction.domainSet().vectorSpace().zeroVector()),
165  m_fdfstep_size(getFdfstepSize()),
166  m_line_tol(getLineTolerance())
167 {
168  // We initialize the minimizer to GSL_NAN just in case the optimization fails
169  m_minimizer->cwSet(GSL_NAN);
170 
171  // Set to documented default value.
173 
174  // Set solver type to the one set in the options object
176 }
177 
179  OptimizerOptions options,
180  const BaseScalarFunction<GslVector, GslMatrix> & objectiveFunction)
181  : BaseOptimizer(options),
182  m_objectiveFunction(objectiveFunction),
183  m_initialPoint(new GslVector(objectiveFunction.domainSet().
184  vectorSpace().zeroVector())),
185  m_minimizer(new GslVector(this->m_objectiveFunction.domainSet().
186  vectorSpace().zeroVector())),
187  m_solver_type(BFGS2),
188  m_fstep_size(this->m_objectiveFunction.domainSet().vectorSpace().zeroVector()),
189  m_fdfstep_size(getFdfstepSize()),
190  m_line_tol(getLineTolerance())
191 {
192  // We initialize the minimizer to GSL_NAN just in case the optimization fails
193  m_minimizer->cwSet(GSL_NAN);
194 
195  // Set to documented default value.
197 
198  // Set solver type to the one set in the options object
200 }
201 
203 {
204  delete this->m_initialPoint;
205 }
206 
208 
209  // First check that initial guess is reasonable
210  if (!this->m_objectiveFunction.domainSet().contains(*(this->m_initialPoint)))
211  {
212  if( m_objectiveFunction.domainSet().env().fullRank() == 0 )
213  {
214  std::cerr << "Minimization was given initial point outside of domain"
215  << std::endl;
216  }
217  queso_error();
218  }
219 
220  unsigned int dim = this->m_objectiveFunction.domainSet().vectorSpace().
221  zeroVector().sizeLocal();
222 
223  // We use m_solver_type here because we need the enum
225  {
226  this->minimize_with_gradient( dim, monitor );
227  }
228  else
229  {
230  this->minimize_no_gradient( dim, monitor );
231  }
232 
233  return;
234 }
235 
238 {
239  return this->m_objectiveFunction;
240 }
241 
242 void
244 {
245  for (unsigned int i = 0; i < initialPoint.sizeLocal(); i++) {
246  (*(this->m_initialPoint))[i] = initialPoint[i];
247  }
248 }
249 
250 const GslVector &
252 {
253  return *(this->m_minimizer);
254 }
255 
257 {
258  queso_deprecated();
259  m_solver_type = solver;
260 }
261 
263 {
264  bool gradient_needed = false;
265 
266  switch(solver)
267  {
268  case(FLETCHER_REEVES_CG):
269  case(POLAK_RIBIERE_CG):
270  case(BFGS):
271  case(BFGS2):
272  case(STEEPEST_DESCENT):
273  {
274  gradient_needed = true;
275  break;
276  }
277  case(NELDER_MEAD):
278  case(NELDER_MEAD2):
279  case(NELDER_MEAD2_RAND):
280  {
281  break;
282  }
283  default:
284  {
285  // Wat?!
286  queso_error();
287  }
288  } // switch(solver)
289 
290  return gradient_needed;
291 }
292 
294 {
295  // Set initial point
296  gsl_vector * x = gsl_vector_alloc(dim);
297  for (unsigned int i = 0; i < dim; i++) {
298  gsl_vector_set(x, i, (*m_initialPoint)[i]);
299  }
300 
301  // Tell GSL which solver we're using
302  const gsl_multimin_fdfminimizer_type* type = NULL;
303 
304  // We use m_solver_type here because we need the enum
305  switch(m_solver_type)
306  {
307  case(FLETCHER_REEVES_CG):
308  type = gsl_multimin_fdfminimizer_conjugate_fr;
309  break;
310  case(POLAK_RIBIERE_CG):
311  type = gsl_multimin_fdfminimizer_conjugate_pr;
312  break;
313  case(BFGS):
314  type = gsl_multimin_fdfminimizer_vector_bfgs;
315  break;
316  case(BFGS2):
317  type = gsl_multimin_fdfminimizer_vector_bfgs2;
318  break;
319  case(STEEPEST_DESCENT):
320  type = gsl_multimin_fdfminimizer_steepest_descent;
321  break;
322  case(NELDER_MEAD):
323  case(NELDER_MEAD2):
324  case(NELDER_MEAD2_RAND):
325  default:
326  // Wat?!
327  queso_error();
328  }
329 
330  // Init solver
331  gsl_multimin_fdfminimizer * solver =
332  gsl_multimin_fdfminimizer_alloc(type, dim);
333 
334  // Point GSL to the right functions
335  gsl_multimin_function_fdf minusLogPosterior;
336  minusLogPosterior.n = dim;
337  minusLogPosterior.f = &c_evaluate;
338  minusLogPosterior.df = &c_evaluate_derivative;
339  minusLogPosterior.fdf = &c_evaluate_with_derivative;
340  minusLogPosterior.params = (void *)(this);
341 
342  gsl_multimin_fdfminimizer_set(solver, &minusLogPosterior, x, getFdfstepSize(), getLineTolerance());
343 
344  int status;
345  size_t iter = 0;
346 
347  do {
348  iter++;
349  status = gsl_multimin_fdfminimizer_iterate(solver);
350 
351  if (status) {
352  if( m_objectiveFunction.domainSet().env().fullRank() == 0 )
353  {
354  std::cerr << "Error while GSL does optimisation. "
355  << "See below for GSL error type." << std::endl;
356  std::cerr << "Gsl error: " << gsl_strerror(status) << std::endl;
357  }
358  break;
359  }
360 
361  status = gsl_multimin_test_gradient(solver->gradient, this->getTolerance());
362 
363  if(monitor)
364  {
365  gsl_vector* x = gsl_multimin_fdfminimizer_x(solver);
366  std::vector<double> x_min(dim);
367  for( unsigned int i = 0; i < dim; i++)
368  x_min[i] = gsl_vector_get(x,i);
369 
370  double f = gsl_multimin_fdfminimizer_minimum(solver);
371 
372  gsl_vector* grad = gsl_multimin_fdfminimizer_gradient(solver);
373  double grad_norm = gsl_blas_dnrm2(grad);
374 
375  monitor->append( x_min, f, grad_norm );
376  }
377 
378  } while ((status == GSL_CONTINUE) && (iter < this->getMaxIterations()));
379 
380  for (unsigned int i = 0; i < dim; i++) {
381  (*m_minimizer)[i] = gsl_vector_get(solver->x, i);
382  }
383 
384  // We're being good human beings and cleaning up the memory we allocated
385  gsl_multimin_fdfminimizer_free(solver);
386  gsl_vector_free(x);
387 
388  return;
389 }
390 
392 {
393  // Set initial point
394  gsl_vector* x = gsl_vector_alloc(dim);
395  for (unsigned int i = 0; i < dim; i++) {
396  gsl_vector_set(x, i, (*m_initialPoint)[i]);
397  }
398 
399  // Tell GSL which solver we're using
400  const gsl_multimin_fminimizer_type* type = NULL;
401 
402  switch(m_solver_type)
403  {
404  case(NELDER_MEAD):
405  type = gsl_multimin_fminimizer_nmsimplex;
406  break;
407  case(NELDER_MEAD2):
408  type = gsl_multimin_fminimizer_nmsimplex2;
409  break;
410  case(NELDER_MEAD2_RAND):
411  type = gsl_multimin_fminimizer_nmsimplex2rand;
412  break;
413  case(FLETCHER_REEVES_CG):
414  case(POLAK_RIBIERE_CG):
415  case(BFGS):
416  case(BFGS2):
417  case(STEEPEST_DESCENT):
418  default:
419  // Wat?!
420  queso_error();
421  }
422 
423  // Init solver
424  gsl_multimin_fminimizer* solver =
425  gsl_multimin_fminimizer_alloc(type, dim);
426 
427  // Point GSL at the right functions
428  gsl_multimin_function minusLogPosterior;
429  minusLogPosterior.n = dim;
430  minusLogPosterior.f = &c_evaluate;
431  minusLogPosterior.params = (void *)(this);
432 
433  // Needed for these gradient free algorithms.
434  gsl_vector* step_size = gsl_vector_alloc(dim);
435 
436  for(unsigned int i = 0; i < dim; i++) {
437  gsl_vector_set(step_size, i, m_fstep_size[i]);
438  }
439 
440  gsl_multimin_fminimizer_set(solver, &minusLogPosterior, x, step_size);
441 
442  int status;
443  size_t iter = 0;
444  double size = 0.0;
445 
446  do
447  {
448  iter++;
449  status = gsl_multimin_fminimizer_iterate(solver);
450 
451  if (status) {
452  if( m_objectiveFunction.domainSet().env().fullRank() == 0 )
453  {
454  std::cerr << "Error while GSL does optimisation. "
455  << "See below for GSL error type." << std::endl;
456  std::cerr << "Gsl error: " << gsl_strerror(status) << std::endl;
457  }
458  break;
459  }
460 
461  size = gsl_multimin_fminimizer_size(solver);
462 
463  status = gsl_multimin_test_size (size, this->getTolerance());
464 
465  if(monitor)
466  {
467  gsl_vector* x = gsl_multimin_fminimizer_x(solver);
468  std::vector<double> x_min(dim);
469  for( unsigned int i = 0; i < dim; i++)
470  x_min[i] = gsl_vector_get(x,i);
471 
472  double f = gsl_multimin_fminimizer_minimum(solver);
473 
474  monitor->append( x_min, f, size );
475  }
476 
477  }
478 
479  while ((status == GSL_CONTINUE) && (iter < this->getMaxIterations()));
480 
481  for (unsigned int i = 0; i < dim; i++) {
482  (*m_minimizer)[i] = gsl_vector_get(solver->x, i);
483  }
484 
485  // We're being good human beings and cleaning up the memory we allocated
486  gsl_vector_free(step_size);
487  gsl_multimin_fminimizer_free(solver);
488  gsl_vector_free(x);
489 
490  return;
491 }
492 
493 void GslOptimizer::set_step_size( const GslVector& step_size )
494 {
495  queso_deprecated();
496  m_fstep_size = step_size;
497 }
498 
499 void GslOptimizer::set_step_size( double step_size )
500 {
501  queso_deprecated();
502  m_fdfstep_size = step_size;
503 }
504 
506 {
507  SolverType solver_type;
508 
509  if( solver == std::string("fletcher_reeves_cg") )
510  solver_type = FLETCHER_REEVES_CG;
511  else if( solver == std::string("polak_ribiere_cg") )
512  solver_type = POLAK_RIBIERE_CG;
513  else if( solver == std::string("bfgs") )
514  solver_type = BFGS;
515  else if( solver == std::string("bfgs2") )
516  solver_type = BFGS2;
517  else if( solver == std::string("steepest_decent") ) {
518  queso_deprecated();
519  solver_type = STEEPEST_DESCENT;
520  }
521  else if( solver == std::string("steepest_descent") )
522  solver_type = STEEPEST_DESCENT;
523  else if( solver == std::string("nelder_mead") )
524  solver_type = NELDER_MEAD;
525  else if( solver == std::string("nelder_mead2") )
526  solver_type = NELDER_MEAD2;
527  else if( solver == std::string("nelder_mead2_rand") )
528  solver_type = NELDER_MEAD2_RAND;
529  else
530  {
531  if( m_objectiveFunction.domainSet().env().fullRank() == 0 )
532  {
533  std::cerr << "Error: Invalid GslOptimizer solver name: " << solver << std::endl
534  << " Valids choices are: fletcher_reeves_cg" << std::endl
535  << " polak_ribiere_cg" << std::endl
536  << " bfgs" << std::endl
537  << " bfgs2" << std::endl
538  << " steepest_descent" << std::endl
539  << " nelder_mead" << std::endl
540  << " nelder_mead2" << std::endl
541  << " nelder_mead2_rand" << std::endl;
542  }
543  queso_error();
544  }
545 
546  return solver_type;
547 }
548 
549 void GslOptimizer::set_solver_type( std::string& solver )
550 {
551  queso_deprecated()
552  this->set_solver_type( this->string_to_enum(solver) );
553 }
554 
555 void
556 GslOptimizer::setSolverType(std::string solverType)
557 {
558  this->m_optionsObj->m_solverType = solverType;
559  this->set_solver_type(solverType);
560 }
561 
562 void
563 GslOptimizer::setFstepSize(double fstepSize)
564 {
565  this->m_optionsObj->m_fstepSize = fstepSize;
566 
567  GslVector fstepSizeVector(
568  objectiveFunction().domainSet().vectorSpace().zeroVector());
569  fstepSizeVector.cwSet(fstepSize);
570 
571  this->set_step_size(fstepSizeVector);
572 }
573 
574 void
575 GslOptimizer::setFdfstepSize(double fdfstepSize)
576 {
577  this->m_optionsObj->m_fdfstepSize = fdfstepSize;
578  this->set_step_size(fdfstepSize);
579 }
580 
581 void
582 GslOptimizer::setLineTolerance(double lineTolerance)
583 {
584  this->m_optionsObj->m_lineTolerance = lineTolerance;
585 }
586 
587 std::string
589 {
590  return this->m_optionsObj->m_solverType;
591 }
592 
593 double
595 {
596  return this->m_optionsObj->m_fstepSize;
597 }
598 
599 double
601 {
602  return this->m_optionsObj->m_fdfstepSize;
603 }
604 
605 double
607 {
608  return this->m_optionsObj->m_lineTolerance;
609 }
610 
611 } // End namespace QUESO
virtual void setSolverType(std::string solverType)
Sets the algorithm to use for minimisation.
Definition: GslOptimizer.C:556
const GslVector & minimizer() const
Return the point that minimizes the objective function.
Definition: GslOptimizer.C:251
void cwSet(double value)
Component-wise sets all values to this with value.
Definition: GslVector.C:327
GslOptimizer(const BaseScalarFunction< GslVector, GslMatrix > &objectiveFunction)
Constructs an object that will maximize a scalar function.
Definition: GslOptimizer.C:155
A templated (base) class for handling scalar functions.
virtual void setLineTolerance(double lineTolerance)
Sets the tolerance to use for line minimisation.
Definition: GslOptimizer.C:582
virtual void minimize(OptimizerMonitor *monitor=NULL)
Minimize the objective function, starting at m_initialPoint.
Definition: GslOptimizer.C:207
GslVector * m_minimizer
Definition: GslOptimizer.h:158
const BaseScalarFunction< GslVector, GslMatrix > & m_objectiveFunction
Definition: GslOptimizer.h:155
ScopedPtr< OptimizerOptions >::Type m_optionsObj
Definition: Optimizer.h:126
SolverType m_solver_type
Definition: GslOptimizer.h:160
double getTolerance() const
Returns the tolerance used to test for an extremum in the optimizer.
Definition: Optimizer.C:57
A base class for handling optimisation of scalar functions.
Definition: Optimizer.h:47
double m_fdfstep_size
For use in gradient-based algorithms.
Definition: GslOptimizer.h:166
bool solver_needs_gradient(SolverType solver)
Helper function.
Definition: GslOptimizer.C:262
void minimize_with_gradient(unsigned int dim, OptimizerMonitor *monitor)
Definition: GslOptimizer.C:293
virtual double getLineTolerance() const
Gets the tolerance to use for line minimisation.
Definition: GslOptimizer.C:606
void set_solver_type(SolverType solver)
Definition: GslOptimizer.C:256
double getFiniteDifferenceStepSize() const
Returns the step size used in the finite difference formula.
Definition: Optimizer.C:63
virtual ~GslOptimizer()
Destructor.
Definition: GslOptimizer.C:202
void setInitialPoint(const GslVector &initialPoint)
Set the point at which the optimization starts.
Definition: GslOptimizer.C:243
void c_evaluate_derivative(const gsl_vector *x, void *context, gsl_vector *derivative)
Definition: GslOptimizer.C:67
double c_evaluate(const gsl_vector *x, void *context)
Definition: GslOptimizer.C:43
void set_step_size(const GslVector &step_size)
Sets step size used in gradient-free solvers.
Definition: GslOptimizer.C:493
void minimize_no_gradient(unsigned int dim, OptimizerMonitor *monitor)
Definition: GslOptimizer.C:391
void c_evaluate_with_derivative(const gsl_vector *x, void *context, double *f, gsl_vector *derivative)
Definition: GslOptimizer.C:147
virtual std::string getSolverType() const
Gets the algorithm to use for minimisation.
Definition: GslOptimizer.C:588
Class for vector operations using GSL library.
Definition: GslVector.h:49
This class provides options for a Optimizer.
void append(std::vector< double > &x_min, double objective, double norm)
Add current value of minimizer, objective, and error norm.
virtual void setFstepSize(double fstepSize)
Sets the step size to use in gradient-free solvers.
Definition: GslOptimizer.C:563
int dim
Definition: ann_test.cpp:472
virtual double getFdfstepSize() const
Gets the step to use in gradient-based solvers.
Definition: GslOptimizer.C:600
GslVector m_fstep_size
For use in gradient-free algorithms.
Definition: GslOptimizer.h:163
unsigned int sizeLocal() const
Returns the length of this vector.
Definition: GslVector.C:241
virtual void setFdfstepSize(double fdfstepSize)
Sets the step to use in gradient-based solvers.
Definition: GslOptimizer.C:575
Object to monitor convergence of optimizers.
const BaseScalarFunction< GslVector, GslMatrix > & objectiveFunction() const
Returns the objective function.
Definition: GslOptimizer.C:237
GslVector * m_initialPoint
Definition: GslOptimizer.h:157
virtual double getFstepSize() const
Gets the step size to use in gradient-free solvers.
Definition: GslOptimizer.C:594
unsigned int getMaxIterations() const
Returns the maximum number of iterations the optimizer will do.
Definition: Optimizer.C:51
SolverType string_to_enum(std::string &solver)
Definition: GslOptimizer.C:505
A base class for handling optimisation of scalar functions.
Definition: GslOptimizer.h:50

Generated on Tue Jun 5 2018 19:48:55 for queso-0.57.1 by  doxygen 1.8.5