// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#ifndef CERES_PUBLIC_SOLVER_H_
#define CERES_PUBLIC_SOLVER_H_

#include <cmath>
#include <string>
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
#include "ceres/types.h"

namespace ceres {

class Problem;

// Interface for non-linear least squares solvers.
class Solver {
 public:
  virtual ~Solver();

  // The options structure contains, not surprisingly, options that control how
  // the solver operates. The defaults should be suitable for a wide range of
  // problems; however, better performance is often obtainable with tweaking.
  //
  // The constants are defined inside types.h
  struct Options {
    // Default constructor that sets up a generic sparse problem.
    Options() {
      minimizer_type = TRUST_REGION;
      line_search_direction_type = LBFGS;
      line_search_type = WOLFE;
      nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
      max_lbfgs_rank = 20;
      use_approximate_eigenvalue_bfgs_scaling = false;
      line_search_interpolation_type = CUBIC;
      min_line_search_step_size = 1e-9;
      line_search_sufficient_function_decrease = 1e-4;
      max_line_search_step_contraction = 1e-3;
      min_line_search_step_contraction = 0.6;
      max_num_line_search_step_size_iterations = 20;
      max_num_line_search_direction_restarts = 5;
      line_search_sufficient_curvature_decrease = 0.9;
      max_line_search_step_expansion = 10.0;
      trust_region_strategy_type = LEVENBERG_MARQUARDT;
      dogleg_type = TRADITIONAL_DOGLEG;
      use_nonmonotonic_steps = false;
      max_consecutive_nonmonotonic_steps = 5;
      max_num_iterations = 50;
      max_solver_time_in_seconds = 1e9;
      num_threads = 1;
      initial_trust_region_radius = 1e4;
      max_trust_region_radius = 1e16;
      min_trust_region_radius = 1e-32;
      min_relative_decrease = 1e-3;
      min_lm_diagonal = 1e-6;
      max_lm_diagonal = 1e32;
      max_num_consecutive_invalid_steps = 5;
      function_tolerance = 1e-6;
      gradient_tolerance = 1e-10;
      parameter_tolerance = 1e-8;

#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
      linear_solver_type = DENSE_QR;
#else
      linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#endif

      preconditioner_type = JACOBI;

      dense_linear_algebra_library_type = EIGEN;
      sparse_linear_algebra_library_type = SUITE_SPARSE;
#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
      sparse_linear_algebra_library_type = CX_SPARSE;
#endif

      num_linear_solver_threads = 1;
      linear_solver_ordering = NULL;
      use_postordering = false;
      min_linear_solver_iterations = 1;
      max_linear_solver_iterations = 500;
      eta = 1e-1;
      jacobi_scaling = true;
      use_inner_iterations = false;
      inner_iteration_tolerance = 1e-3;
      inner_iteration_ordering = NULL;
      logging_type = PER_MINIMIZER_ITERATION;
      minimizer_progress_to_stdout = false;
      trust_region_problem_dump_directory = "/tmp";
      trust_region_problem_dump_format_type = TEXTFILE;
      check_gradients = false;
      gradient_check_relative_precision = 1e-8;
      numeric_derivative_relative_step_size = 1e-6;
      update_state_every_iteration = false;
    }

    ~Options();
    // Minimizer options ----------------------------------------

    // Ceres supports the two major families of optimization strategies -
    // Trust Region and Line Search.
    //
    // 1. The line search approach first finds a descent direction
    // along which the objective function will be reduced and then
    // computes a step size that decides how far to move along that
    // direction. The descent direction can be computed by various
    // methods, such as gradient descent, Newton's method and
    // quasi-Newton methods. The step size can be determined either
    // exactly or inexactly.
    //
    // 2. The trust region approach approximates the objective
    // function using a model function (often a quadratic) over
    // a subset of the search space known as the trust region. If the
    // model function succeeds in minimizing the true objective
    // function, the trust region is expanded; otherwise it is
    // contracted and the model optimization problem is solved again.
    //
    // Trust region methods are in some sense dual to line search methods:
    // trust region methods first choose a step size (the size of the
    // trust region) and then a step direction while line search methods
    // first choose a step direction and then a step size.
    MinimizerType minimizer_type;
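
    // As a hedged illustration, switching from the default trust
    // region minimizer to the line search minimizer is a one line
    // change (MinimizerType and its values are defined in types.h):
    //
    //   Solver::Options options;
    //   options.minimizer_type = ceres::LINE_SEARCH;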

    LineSearchDirectionType line_search_direction_type;
    LineSearchType line_search_type;
    NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;

    // The LBFGS hessian approximation is a low rank approximation to
    // the inverse of the Hessian matrix. The rank of the
    // approximation determines (linearly) the space and time
    // complexity of using the approximation. The higher the rank,
    // the better the quality of the approximation. The increase in
    // quality is, however, bounded for a number of reasons.
    //
    // 1. The method only uses secant information and not actual
    // derivatives.
    //
    // 2. The Hessian approximation is constrained to be positive
    // definite.
    //
    // So increasing this rank to a large number will cost time and
    // space complexity without the corresponding increase in solution
    // quality. There are no hard and fast rules for choosing the
    // maximum rank. The best choice usually requires some problem
    // specific experimentation.
    //
    // For more theoretical and implementation details of the LBFGS
    // method, please see:
    //
    // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
    // Limited Storage". Mathematics of Computation 35 (151): 773-782.
    int max_lbfgs_rank;

    // As part of the (L)BFGS update step (BFGS) / right-multiply step (L-BFGS),
    // the initial inverse Hessian approximation is taken to be the Identity.
    // However, Oren showed that using instead I * \gamma, where \gamma is
    // chosen to approximate an eigenvalue of the true inverse Hessian can
    // result in improved convergence in a wide variety of cases. Setting
    // use_approximate_eigenvalue_bfgs_scaling to true enables this scaling.
    //
    // It is important to note that approximate eigenvalue scaling does not
    // always improve convergence, and that it can in fact significantly degrade
    // performance for certain classes of problem, which is why it is disabled
    // by default.  In particular it can degrade performance when the
    // sensitivity of the problem to different parameters varies significantly,
    // as in this case a single scalar factor fails to capture this variation
    // and detrimentally downscales parts of the jacobian approximation which
    // correspond to low-sensitivity parameters. It can also reduce the
    // robustness of the solution to errors in the jacobians.
    //
    // Oren S.S., Self-scaling variable metric (SSVM) algorithms
    // Part II: Implementation and experiments, Management Science,
    // 20(5), 863-874, 1974.
    bool use_approximate_eigenvalue_bfgs_scaling;
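
    // A hedged configuration sketch for an LBFGS line search solve;
    // the enum values come from types.h and the rank of 10 is an
    // illustrative choice, not a recommendation:
    //
    //   Solver::Options options;
    //   options.minimizer_type = ceres::LINE_SEARCH;
    //   options.line_search_direction_type = ceres::LBFGS;
    //   options.max_lbfgs_rank = 10;
    //   options.use_approximate_eigenvalue_bfgs_scaling = true;  // see caveats above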

    // Degree of the polynomial used to approximate the objective
    // function. Valid values are BISECTION, QUADRATIC and CUBIC.
    //
    // BISECTION corresponds to pure backtracking search with no
    // interpolation.
    LineSearchInterpolationType line_search_interpolation_type;

    // If during the line search, the step_size falls below this
    // value, it is truncated to zero.
    double min_line_search_step_size;

    // Line search parameters.

    // Solving the line search problem exactly is computationally
    // prohibitive. Fortunately, line search based optimization
    // algorithms can still guarantee convergence if instead of an
    // exact solution, the line search algorithm returns a solution
    // which decreases the value of the objective function
    // sufficiently. More precisely, we are looking for a step_size
    // s.t.
    //
    //   f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
    //
    double line_search_sufficient_function_decrease;

    // In each iteration of the line search,
    //
    //  new_step_size >= max_line_search_step_contraction * step_size
    //
    // Note that by definition, for contraction:
    //
    //  0 < max_step_contraction < min_step_contraction < 1
    //
    double max_line_search_step_contraction;

    // In each iteration of the line search,
    //
    //  new_step_size <= min_line_search_step_contraction * step_size
    //
    // Note that by definition, for contraction:
    //
    //  0 < max_step_contraction < min_step_contraction < 1
    //
    double min_line_search_step_contraction;

    // Maximum number of trial step size iterations during each line
    // search. If a step size satisfying the search conditions cannot
    // be found within this number of trials, the line search will
    // terminate.
    int max_num_line_search_step_size_iterations;

    // Maximum number of restarts of the line search direction algorithm before
    // terminating the optimization. Restarts of the line search direction
    // algorithm occur when the current algorithm fails to produce a new descent
    // direction. This typically indicates a numerical failure, or a breakdown
    // in the validity of the approximations used.
    int max_num_line_search_direction_restarts;

    // The strong Wolfe conditions consist of the Armijo sufficient
    // decrease condition, and an additional requirement that the
    // step-size be chosen s.t. the _magnitude_ ('strong' Wolfe
    // conditions) of the gradient along the search direction
    // decreases sufficiently. Precisely, this second condition
    // is that we seek a step_size s.t.
    //
    //   |f'(step_size)| <= sufficient_curvature_decrease * |f'(0)|
    //
    // Where f() is the line search objective and f'() is the derivative
    // of f w.r.t step_size (d f / d step_size).
    double line_search_sufficient_curvature_decrease;

    // During the bracketing phase of the Wolfe search, the step size is
    // increased until either a point satisfying the Wolfe conditions is
    // found, or an upper bound for a bracket containing a point satisfying
    // the conditions is found.  Precisely, at each iteration of the
    // expansion:
    //
    //   new_step_size <= max_step_expansion * step_size.
    //
    // By definition for expansion, max_step_expansion > 1.0.
    double max_line_search_step_expansion;
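
    // Taken together, a hedged sketch of configuring the Wolfe
    // conditions (1e-4 and 0.9 restate the defaults set in the
    // constructor; they are shown here only to make the knobs
    // concrete):
    //
    //   Solver::Options options;
    //   options.line_search_type = ceres::WOLFE;
    //   options.line_search_sufficient_function_decrease = 1e-4;
    //   options.line_search_sufficient_curvature_decrease = 0.9;
    //   options.max_line_search_step_expansion = 10.0;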

    TrustRegionStrategyType trust_region_strategy_type;

    // Type of dogleg strategy to use.
    DoglegType dogleg_type;

    // The classical trust region methods are descent methods, in that
    // they only accept a point if it strictly reduces the value of
    // the objective function.
    //
    // Relaxing this requirement allows the algorithm to be more
    // efficient in the long term at the cost of some local increase
    // in the value of the objective function.
    //
    // This is because allowing for non-decreasing objective function
    // values in a principled manner allows the algorithm to "jump over
    // boulders", as the method is not restricted to moving through
    // narrow valleys, while preserving its convergence properties.
    //
    // Setting use_nonmonotonic_steps to true enables the
    // non-monotonic trust region algorithm as described by Conn,
    // Gould & Toint in "Trust Region Methods", Section 10.1.
    //
    // The parameter max_consecutive_nonmonotonic_steps controls the
    // window size used by the step selection algorithm to accept
    // non-monotonic steps.
    //
    // Even though the value of the objective function may be larger
    // than the minimum value encountered over the course of the
    // optimization, the final parameters returned to the user are the
    // ones corresponding to the minimum cost over all iterations.
    bool use_nonmonotonic_steps;
    int max_consecutive_nonmonotonic_steps;
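
    // A hedged sketch of enabling the non-monotonic variant (the
    // window size of 5 matches the constructor default):
    //
    //   Solver::Options options;
    //   options.use_nonmonotonic_steps = true;
    //   options.max_consecutive_nonmonotonic_steps = 5;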

    // Maximum number of iterations for which the minimizer should run.
    int max_num_iterations;

    // Maximum time for which the minimizer should run.
    double max_solver_time_in_seconds;

    // Number of threads used by Ceres for evaluating the cost and
    // jacobians.
    int num_threads;

    // Trust region minimizer settings.
    double initial_trust_region_radius;
    double max_trust_region_radius;

    // Minimizer terminates when the trust region radius becomes
    // smaller than this value.
    double min_trust_region_radius;

    // Lower bound for the relative decrease before a step is
    // accepted.
    double min_relative_decrease;

    // For the Levenberg-Marquardt algorithm, the scaled diagonal of
    // the normal equations J'J is used to control the size of the
    // trust region. Extremely small and large values along the
    // diagonal can make this regularization scheme
    // fail. min_lm_diagonal and max_lm_diagonal clamp the values of
    // diag(J'J) from below and above. In the normal course of
    // operation, the user should not have to modify these parameters.
    double min_lm_diagonal;
    double max_lm_diagonal;

    // Sometimes due to numerical conditioning problems or linear
    // solver flakiness, the trust region strategy may return a
    // numerically invalid step that can be fixed by reducing the
    // trust region size. So the TrustRegionMinimizer allows for a few
    // successive invalid steps before it declares NUMERICAL_FAILURE.
    int max_num_consecutive_invalid_steps;

    // Minimizer terminates when
    //
    //   (new_cost - old_cost) < function_tolerance * old_cost;
    //
    double function_tolerance;

    // Minimizer terminates when
    //
    //   max_i |gradient_i| < gradient_tolerance * max_i |initial_gradient_i|
    //
    // This value should typically be 1e-4 * function_tolerance.
    double gradient_tolerance;

    // Minimizer terminates when
    //
    //   |step|_2 <= parameter_tolerance * (|x|_2 + parameter_tolerance)
    //
    double parameter_tolerance;
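
    // A hedged sketch of tightening all three convergence tests at
    // once; the 1e-4 ratio between gradient and function tolerance
    // follows the guidance in the comment above, and the specific
    // values are illustrative only:
    //
    //   Solver::Options options;
    //   options.function_tolerance = 1e-8;
    //   options.gradient_tolerance = 1e-12;  // 1e-4 * function_tolerance
    //   options.parameter_tolerance = 1e-10;
    //   options.max_num_iterations = 200;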

    // Linear least squares solver options -------------------------------------

    LinearSolverType linear_solver_type;

    // Type of preconditioner to use with the iterative linear solvers.
    PreconditionerType preconditioner_type;

    // Ceres supports using multiple dense linear algebra libraries
    // for dense matrix factorizations. Currently EIGEN and LAPACK are
    // the valid choices. EIGEN is always available, LAPACK refers to
    // the system BLAS + LAPACK library which may or may not be
    // available.
    //
    // This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and
    // DENSE_SCHUR solvers. For small to moderate sized problems EIGEN
    // is a fine choice but for large problems, an optimized LAPACK +
    // BLAS implementation can make a substantial difference in
    // performance.
    DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;

    // Ceres supports using multiple sparse linear algebra libraries
    // for sparse matrix ordering and factorizations. Currently,
    // SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
    // whether they are linked into Ceres at build time.
    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
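
    // A hedged sketch of pairing a solver with a backend; whether
    // LAPACK and SuiteSparse are usable depends on how Ceres was
    // built, as noted above:
    //
    //   Solver::Options options;
    //   options.linear_solver_type = ceres::DENSE_NORMAL_CHOLESKY;
    //   options.dense_linear_algebra_library_type = ceres::LAPACK;
    //
    //   // Or, for a large sparse problem:
    //   options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
    //   options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;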

    // Number of threads used by Ceres to solve the Newton
    // step. Currently only the SPARSE_SCHUR solver is capable of
    // using this setting.
    int num_linear_solver_threads;

    // The order in which variables are eliminated in a linear solver
    // can have a significant impact on the efficiency and accuracy
    // of the method. e.g., when doing sparse Cholesky factorization,
    // there are matrices for which a good ordering will give a
    // Cholesky factor with O(n) storage, whereas a bad ordering will
    // result in a completely dense factor.
    //
    // Ceres allows the user to provide varying amounts of hints to
    // the solver about the variable elimination ordering to use. This
    // can range from no hints, where the solver is free to decide the
    // best possible ordering based on the user's choices like the
    // linear solver being used, to an exact order in which the
    // variables should be eliminated, and a variety of possibilities
    // in between.
    //
    // Instances of the ParameterBlockOrdering class are used to
    // communicate this information to Ceres.
    //
    // Formally an ordering is an ordered partitioning of the
    // parameter blocks, i.e., each parameter block belongs to exactly
    // one group, and each group has a unique non-negative integer
    // associated with it, that determines its order in the set of
    // groups.
    //
    // Given such an ordering, Ceres ensures that the parameter blocks in
    // the lowest numbered group are eliminated first, and then the
    // parameter blocks in the next lowest numbered group and so on. Within
    // each group, Ceres is free to order the parameter blocks as it
    // chooses.
    //
    // If NULL, then all parameter blocks are assumed to be in the
    // same group and the solver is free to decide the best
    // ordering.
    //
    // e.g. Consider the linear system
    //
    //   x + y = 3
    //   2x + 3y = 7
    //
    // There are two ways in which it can be solved. First eliminating x
    // from the two equations, solving for y and then back substituting
    // for x, or first eliminating y, solving for x and back substituting
    // for y. The user can construct three orderings here.
    //
    //   {0: x}, {1: y} - eliminate x first.
    //   {0: y}, {1: x} - eliminate y first.
    //   {0: x, y}      - Solver gets to decide the elimination order.
    //
    // Thus, to have Ceres determine the ordering automatically using
    // heuristics, put all the variables in group 0; to control the
    // ordering for every variable, create groups 0..N-1, one per
    // variable, in the desired order.
    //
    // Bundle Adjustment
    // -----------------
    //
    // A particular case of interest is bundle adjustment, where the user
    // has two options. The default is to not specify an ordering at all;
    // the solver will see that the user wants to use a Schur type solver
    // and figure out the right elimination ordering.
    //
    // But if the user already knows what parameter blocks are points and
    // what are cameras, they can save preprocessing time by partitioning
    // the parameter blocks into two groups, one for the points and one
    // for the cameras, where the group containing the points has an id
    // smaller than the group containing cameras.
    //
    // Once assigned, Solver::Options owns this pointer and will
    // deallocate the memory when destroyed.
    ParameterBlockOrdering* linear_solver_ordering;
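
    // A hedged bundle adjustment sketch: `points`, `cameras` and
    // their counts are hypothetical stand-ins for the user's own
    // parameter block pointers; AddElementToGroup is declared in
    // ordered_groups.h.
    //
    //   ceres::ParameterBlockOrdering* ordering =
    //       new ceres::ParameterBlockOrdering;
    //   for (int i = 0; i < num_points; ++i) {
    //     ordering->AddElementToGroup(points[i], 0);   // eliminated first
    //   }
    //   for (int i = 0; i < num_cameras; ++i) {
    //     ordering->AddElementToGroup(cameras[i], 1);  // eliminated second
    //   }
    //   options.linear_solver_ordering = ordering;  // options takes ownership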

    // Sparse Cholesky factorization algorithms use a fill-reducing
    // ordering to permute the columns of the Jacobian matrix. There
    // are two ways of doing this.

    // 1. Compute the Jacobian matrix in some order and then have the
    //    factorization algorithm permute the columns of the Jacobian.

    // 2. Compute the Jacobian with its columns already permuted.

    // The first option incurs a significant memory penalty, since the
    // factorization algorithm has to make a copy of the permuted
    // Jacobian matrix. Thus Ceres pre-permutes the columns of the
    // Jacobian matrix and, generally speaking, there is no performance
    // penalty for doing so.

    // In some rare cases, it is worth using a more complicated
    // reordering algorithm which has slightly better runtime
    // performance at the expense of an extra copy of the Jacobian
    // matrix. Setting use_postordering to true enables this tradeoff.
    bool use_postordering;

    // Some non-linear least squares problems have additional
    // structure in the way the parameter blocks interact, which makes
    // it beneficial to modify the way the trust region step is computed.
    //
    // e.g., consider the following regression problem
    //
    //   y = a_1 exp(b_1 x) + a_2 exp(b_2 x^2 + c_1)
    //
    // Given a set of pairs {(x_i, y_i)}, the user wishes to estimate
    // a_1, a_2, b_1, b_2, and c_1.
    //
    // Notice that the expression on the right-hand side is linear in
    // a_1 and a_2, and given any value for b_1, b_2 and c_1, it is
    // possible to use linear regression to estimate the optimal
    // values of a_1 and a_2. Indeed, it's possible to analytically
    // eliminate the variables a_1 and a_2 from the problem
    // altogether. Problems like these are known as separable least
    // squares problems and the most famous algorithm for solving them
    // is the Variable Projection algorithm invented by Golub &
    // Pereyra.
    //
    // Similar structure can be found in the matrix factorization with
    // missing data problem. There the corresponding algorithm is
    // known as Wiberg's algorithm.
    //
    // Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
    // Problems, SIAM Reviews, 22(3), 1980) present an analysis of
    // various algorithms for solving separable non-linear least
    // squares problems and refer to "Variable Projection" as
    // Algorithm I in their paper.
    //
    // Implementing Variable Projection is tedious and expensive, and
    // they present a simpler algorithm, which they refer to as
    // Algorithm II, where once the Newton/Trust Region step has been
    // computed for the whole problem (a_1, a_2, b_1, b_2, c_1), an
    // additional optimization step is performed to estimate a_1 and
    // a_2 exactly.
    //
    // This idea can be generalized to cases where the residual is not
    // linear in a_1 and a_2, i.e., solve for the trust region step
    // for the full problem, and then use it as the starting point to
    // further optimize just a_1 and a_2. For the linear case, this
    // amounts to doing a single linear least squares solve. For
    // non-linear problems, any method for solving the a_1 and a_2
    // optimization problems will do. The only constraint on a_1 and
    // a_2 is that they do not co-occur in any residual block.
    //
    // This idea can be further generalized, by not just optimizing
    // (a_1, a_2), but decomposing the graph corresponding to the
    // Hessian matrix's sparsity structure into a collection of
    // non-overlapping independent sets and optimizing each of them.
    //
    // Setting "use_inner_iterations" to true enables the use of this
    // non-linear generalization of Ruhe & Wedin's Algorithm II.  This
    // version of Ceres has a higher iteration complexity, but also
    // displays better convergence behaviour per iteration. Setting
    // Solver::Options::num_threads to the maximum number possible is
    // highly recommended.
    bool use_inner_iterations;

    // If use_inner_iterations is true, then the user has two choices.
    //
    // 1. Let the solver heuristically decide which parameter blocks
    //    to optimize in each inner iteration. To do this leave
    //    Solver::Options::inner_iteration_ordering untouched.
    //
    // 2. Specify a collection of ordered independent sets, where
    //    the lower numbered groups are optimized before the higher
    //    numbered groups. Each group must be an independent set. Not
    //    all parameter blocks need to be present in the ordering; a
    //    sketch follows this declaration.
    ParameterBlockOrdering* inner_iteration_ordering;
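
    // A hedged sketch of enabling inner iterations with an explicit
    // ordering; `a` and `b` are hypothetical parameter block
    // pointers, and each group must be an independent set as
    // described above:
    //
    //   options.use_inner_iterations = true;
    //   ceres::ParameterBlockOrdering* inner_ordering =
    //       new ceres::ParameterBlockOrdering;
    //   inner_ordering->AddElementToGroup(a, 0);  // optimized first
    //   inner_ordering->AddElementToGroup(b, 1);  // optimized second
    //   options.inner_iteration_ordering = inner_ordering;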

    // Generally speaking, inner iterations make significant progress
    // in the early stages of the solve and then their contribution
    // drops down sharply, at which point the time spent doing inner
    // iterations is not worth it.
    //
    // Once the relative decrease in the objective function due to
    // inner iterations drops below inner_iteration_tolerance, the use
    // of inner iterations in subsequent trust region minimizer
    // iterations is disabled.
    double inner_iteration_tolerance;

    // Minimum number of iterations for which the linear solver should
    // run, even if the convergence criterion is satisfied.
    int min_linear_solver_iterations;

    // Maximum number of iterations for which the linear solver should
    // run. If the solver does not converge in less than
    // max_linear_solver_iterations, then it returns MAX_ITERATIONS,
    // as its termination type.
    int max_linear_solver_iterations;

    // Forcing sequence parameter. The truncated Newton solver uses
    // this number to control the relative accuracy with which the
    // Newton step is computed.
    //
    // This constant is passed to ConjugateGradientsSolver which uses
    // it to terminate the iterations when
    //
    //  (Q_i - Q_{i-1})/Q_i < eta/i
    double eta;

    // Normalize the jacobian using Jacobi scaling before calling
    // the linear least squares solver.
    bool jacobi_scaling;

    // Logging options ---------------------------------------------------------

    LoggingType logging_type;

    // By default the Minimizer progress is logged to VLOG(1), which
    // is sent to STDERR depending on the vlog level. If this flag is
    // set to true, and logging_type is not SILENT, the logging output
    // is sent to STDOUT.
    bool minimizer_progress_to_stdout;

    // List of iterations at which the minimizer should dump the trust
    // region problem. Useful for testing and benchmarking. If empty
    // (default), no problems are dumped.
    vector<int> trust_region_minimizer_iterations_to_dump;

    // Directory to which the problems should be written. Should be
    // non-empty if trust_region_minimizer_iterations_to_dump is
    // non-empty and trust_region_problem_dump_format_type is not
    // CONSOLE.
    string trust_region_problem_dump_directory;
    DumpFormatType trust_region_problem_dump_format_type;
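
    // A hedged sketch of dumping the trust region problem at a couple
    // of iterations for offline inspection; the directory path is
    // illustrative only:
    //
    //   options.trust_region_minimizer_iterations_to_dump.push_back(1);
    //   options.trust_region_minimizer_iterations_to_dump.push_back(5);
    //   options.trust_region_problem_dump_directory = "/tmp/ceres_dumps";
    //   options.trust_region_problem_dump_format_type = ceres::TEXTFILE;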

    // Finite differences options ----------------------------------------------

    // Check all jacobians computed by each residual block with finite
    // differences. This is expensive since it involves computing the
    // derivative by normal means (e.g. user specified, autodiff,
    // etc.), then also computing it using finite differences. The
    // results are compared, and if they differ substantially, details
    // are printed to the log.
    bool check_gradients;

    // Relative precision to check for in the gradient checker. If the
    // relative difference between an element in a jacobian and its
    // finite difference estimate exceeds this number, then the
    // jacobian for that cost term is dumped.
    double gradient_check_relative_precision;

    // Relative shift used for taking numeric derivatives. For finite
    // differencing, each dimension is evaluated at slightly shifted
    // values; for the case of central difference, this is what gets
    // evaluated:
    //
    //   delta = numeric_derivative_relative_step_size;
    //   f_initial  = f(x)
    //   f_forward  = f((1 + delta) * x)
    //   f_backward = f((1 - delta) * x)
    //
    // The finite differencing is done along each dimension. The
    // reason to use a relative (rather than absolute) step size is
    // that this way, numeric differentiation works for functions where
    // the arguments are typically large (e.g. 1e9) and when the
    // values are small (e.g. 1e-5). It is possible to construct
    // "torture cases" which break this finite difference heuristic,
    // but they do not come up often in practice.
    //
    // TODO(keir): Pick a smarter number than the default above! In
    // theory a good choice is sqrt(eps) * x, which for doubles means
    // about 1e-8 * x. However, I have found this number too
    // optimistic. This number should be exposed for users to change.
    double numeric_derivative_relative_step_size;

    // If true, the user's parameter blocks are updated at the end of
    // every Minimizer iteration, otherwise they are updated when the
    // Minimizer terminates. This is useful if, for example, the user
    // wishes to visualize the state of the optimization every
    // iteration.
    bool update_state_every_iteration;

    // Callbacks that are executed at the end of each iteration of the
    // Minimizer. An iteration may terminate midway, either due to
    // numerical failures or because one of the convergence tests has
    // been satisfied. In this case none of the callbacks are
    // executed.

    // Callbacks are executed in the order that they are specified in
    // this vector. By default, parameter blocks are updated only at
    // the end of the optimization, i.e. when the Minimizer
    // terminates. This behaviour is controlled by
    // update_state_every_iteration. If the user wishes to have access
    // to the updated parameter blocks when their callbacks are
    // executed, then set update_state_every_iteration to true.
    //
    // The solver does NOT take ownership of these pointers.
    vector<IterationCallback*> callbacks;
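
    // A hedged sketch of a per-iteration callback; IterationCallback,
    // IterationSummary and the SOLVER_* return values come from
    // iteration_callback.h, while LoggingCallback is a hypothetical
    // user-defined class:
    //
    //   class LoggingCallback : public ceres::IterationCallback {
    //    public:
    //     virtual ceres::CallbackReturnType operator()(
    //         const ceres::IterationSummary& summary) {
    //       // Inspect summary.iteration, summary.cost, etc. here.
    //       return ceres::SOLVER_CONTINUE;
    //     }
    //   };
    //
    //   LoggingCallback logging_callback;
    //   options.callbacks.push_back(&logging_callback);  // not owned by Ceres
    //   options.update_state_every_iteration = true;     // see comment above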

    // If non-empty, a summary of the execution of the solver is
    // recorded to this file.
    string solver_log;
  };

  struct Summary {
    Summary();

    // A brief one line description of the state of the solver after
    // termination.
    string BriefReport() const;

    // A full multiline description of the state of the solver after
    // termination.
    string FullReport() const;

    // Minimizer summary -------------------------------------------------
    MinimizerType minimizer_type;

    SolverTerminationType termination_type;

    // If the solver did not run, or there was a failure, a
    // description of the error.
    string error;

    // Cost of the problem before and after the optimization. See
    // problem.h for definition of the cost of a problem.
    double initial_cost;
    double final_cost;

    // The part of the total cost that comes from residual blocks that
    // were held fixed by the preprocessor because all the parameter
    // blocks that they depend on were fixed.
    double fixed_cost;

    vector<IterationSummary> iterations;

    int num_successful_steps;
    int num_unsuccessful_steps;
    int num_inner_iteration_steps;

    // All times reported below are wall times.

    // When the user calls Solve, before the actual optimization
    // occurs, Ceres performs a number of preprocessing steps. These
    // include error checks, memory allocations, and reorderings. This
    // time is accounted for as preprocessing time.
    double preprocessor_time_in_seconds;

    // Time spent in the TrustRegionMinimizer.
    double minimizer_time_in_seconds;

    // After the Minimizer is finished, some time is spent in
    // re-evaluating residuals etc. This time is accounted for in the
    // postprocessor time.
    double postprocessor_time_in_seconds;

    // Sum total of all time spent inside Ceres when Solve is called.
    double total_time_in_seconds;

    double linear_solver_time_in_seconds;
    double residual_evaluation_time_in_seconds;
    double jacobian_evaluation_time_in_seconds;
    double inner_iteration_time_in_seconds;

    // Preprocessor summary.
    int num_parameter_blocks;
    int num_parameters;
    int num_effective_parameters;
    int num_residual_blocks;
    int num_residuals;

    int num_parameter_blocks_reduced;
    int num_parameters_reduced;
    int num_effective_parameters_reduced;
    int num_residual_blocks_reduced;
    int num_residuals_reduced;

    int num_eliminate_blocks_given;
    int num_eliminate_blocks_used;

    int num_threads_given;
    int num_threads_used;

    int num_linear_solver_threads_given;
    int num_linear_solver_threads_used;

    LinearSolverType linear_solver_type_given;
    LinearSolverType linear_solver_type_used;

    vector<int> linear_solver_ordering_given;
    vector<int> linear_solver_ordering_used;

    bool inner_iterations_given;
    bool inner_iterations_used;

    vector<int> inner_iteration_ordering_given;
    vector<int> inner_iteration_ordering_used;

    PreconditionerType preconditioner_type;

    TrustRegionStrategyType trust_region_strategy_type;
    DoglegType dogleg_type;

    DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;

    LineSearchDirectionType line_search_direction_type;
    LineSearchType line_search_type;
    LineSearchInterpolationType line_search_interpolation_type;
    NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;

    int max_lbfgs_rank;
  };

  // Once a least squares problem has been built, this function takes
  // the problem and optimizes it based on the values of the options
  // parameters. Upon return, a detailed summary of the work performed
  // by the preprocessor, the non-linear minimizer and the linear
  // solver is reported in the summary object.
  virtual void Solve(const Options& options,
                     Problem* problem,
                     Solver::Summary* summary);
};

// Helper function which avoids going through the interface.
void Solve(const Solver::Options& options,
           Problem* problem,
           Solver::Summary* summary);
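
// A hedged end-to-end sketch of calling this function; CostFunctor
// and the starting value 1e-5 are illustrative, and Problem and
// AutoDiffCostFunction come from the other Ceres public headers
// (std::cout assumes <iostream>):
//
//   struct CostFunctor {
//     template <typename T>
//     bool operator()(const T* const x, T* residual) const {
//       residual[0] = T(10.0) - x[0];  // single residual: 10 - x
//       return true;
//     }
//   };
//
//   double x = 1e-5;
//   ceres::Problem problem;
//   problem.AddResidualBlock(
//       new ceres::AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor),
//       NULL,  // no loss function
//       &x);
//
//   ceres::Solver::Options options;
//   ceres::Solver::Summary summary;
//   ceres::Solve(options, &problem, &summary);
//   std::cout << summary.BriefReport() << "\n";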

}  // namespace ceres

#endif  // CERES_PUBLIC_SOLVER_H_