// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Enums and other top level class definitions.
//
// Note: internal/types.cc defines stringification routines for some
// of these enums. Please update those routines if you extend or
// remove enums from here.

#ifndef CERES_PUBLIC_TYPES_H_
#define CERES_PUBLIC_TYPES_H_

#include <string>

#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"

namespace ceres {

// Basic integer types. These typedefs are in the Ceres namespace to avoid
// conflicts with other packages having similar typedefs.
typedef int   int32;

// Argument type used in interfaces that can optionally take ownership
// of a passed in argument. If TAKE_OWNERSHIP is passed, the called
// object takes ownership of the pointer argument, and will call
// delete on it upon completion.
enum Ownership {
  DO_NOT_TAKE_OWNERSHIP,
  TAKE_OWNERSHIP
};
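//
// Example usage (a minimal sketch; assumes the Problem::Options interface
// declared in problem.h, whose cost_function_ownership field controls whether
// the Problem deletes the cost functions passed to AddResidualBlock):
//
//   ceres::Problem::Options problem_options;
//   // Keep ownership of the cost functions on the caller's side; the
//   // Problem will not delete them on destruction.
//   problem_options.cost_function_ownership = ceres::DO_NOT_TAKE_OWNERSHIP;
//   ceres::Problem problem(problem_options);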

// TODO(keir): Considerably expand the explanations of each solver type.
enum LinearSolverType {
  // These solvers are for general rectangular systems formed from the
  // normal equations A'A x = A'b. They are direct solvers and do not
  // assume any special problem structure.

  // Solve the normal equations using a dense Cholesky solver; based
  // on Eigen.
  DENSE_NORMAL_CHOLESKY,

  // Solve the normal equations using a dense QR solver; based on
  // Eigen.
  DENSE_QR,

  // Solve the normal equations using a sparse Cholesky solver; requires
  // SuiteSparse or CXSparse.
  SPARSE_NORMAL_CHOLESKY,

  // Specialized solvers, specific to problems with a generalized
  // bipartite structure.

  // Solves the reduced linear system using a dense Cholesky solver;
  // based on Eigen.
  DENSE_SCHUR,

  // Solves the reduced linear system using a sparse Cholesky solver;
  // based on CHOLMOD.
  SPARSE_SCHUR,

  // Solves the reduced linear system using Conjugate Gradients, based
  // on a new Ceres implementation. Suitable for large scale problems.
  ITERATIVE_SCHUR,

  // Conjugate gradients on the normal equations.
  CGNR
};
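//
// Example usage (a minimal sketch; assumes the Solver::Options interface
// described in solver.h):
//
//   ceres::Solver::Options options;
//   // A sparse direct solver for the normal equations; requires that Ceres
//   // was built with SuiteSparse, CXSparse or Eigen's sparse support.
//   options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
//
//   ceres::Solver::Summary summary;
//   ceres::Solve(options, &problem, &summary);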

enum PreconditionerType {
  // Trivial preconditioner - the identity matrix.
  IDENTITY,

  // Block diagonal of the Gauss-Newton Hessian.
  JACOBI,

  // Note: The following three preconditioners can only be used with
  // the ITERATIVE_SCHUR solver. They are well suited for Structure
  // from Motion problems.

  // Block diagonal of the Schur complement. This preconditioner may
  // only be used with the ITERATIVE_SCHUR solver.
  SCHUR_JACOBI,

  // Visibility clustering based preconditioners.
  //
  // The following two preconditioners use the visibility structure of
  // the scene to determine the sparsity structure of the
  // preconditioner. This is done using a clustering algorithm. The
  // available visibility clustering algorithms are described below.
  //
  // Note: Requires SuiteSparse.
  CLUSTER_JACOBI,
  CLUSTER_TRIDIAGONAL
};
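//
// Example usage (a minimal sketch; the preconditioner is only consulted by
// the iterative solvers, so it is set together with ITERATIVE_SCHUR here):
//
//   ceres::Solver::Options options;
//   options.linear_solver_type = ceres::ITERATIVE_SCHUR;
//   // SCHUR_JACOBI is a good default for bundle adjustment style problems
//   // and does not require SuiteSparse.
//   options.preconditioner_type = ceres::SCHUR_JACOBI;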

enum VisibilityClusteringType {
  // Canonical views algorithm as described in
  //
  // "Scene Summarization for Online Image Collections", Ian Simon, Noah
  // Snavely, Steven M. Seitz, ICCV 2007.
  //
  // This clustering algorithm can be quite slow, but gives high
  // quality clusters. The original visibility based clustering paper
  // used this algorithm.
  CANONICAL_VIEWS,

  // The classic single linkage algorithm. It is extremely fast
  // compared to CANONICAL_VIEWS, but can give slightly poorer
  // results. For problems with a large number of cameras though, this
  // is generally a pretty good option.
  //
  // If you are using the SCHUR_JACOBI preconditioner and have SuiteSparse
  // available, CLUSTER_JACOBI and CLUSTER_TRIDIAGONAL in combination
  // with the SINGLE_LINKAGE algorithm will generally give better
  // results.
  SINGLE_LINKAGE
};
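//
// Example usage (a minimal sketch; assumes a build with SuiteSparse, which
// the CLUSTER_* preconditioners require):
//
//   ceres::Solver::Options options;
//   options.linear_solver_type = ceres::ITERATIVE_SCHUR;
//   options.preconditioner_type = ceres::CLUSTER_JACOBI;
//   // SINGLE_LINKAGE trades a little quality for much faster clustering.
//   options.visibility_clustering_type = ceres::SINGLE_LINKAGE;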

enum SparseLinearAlgebraLibraryType {
  // High performance sparse Cholesky factorization and approximate
  // minimum degree ordering.
  SUITE_SPARSE,

  // A lightweight replacement for SuiteSparse, which does not require
  // a LAPACK/BLAS implementation. Consequently, its performance is
  // also a bit lower than SuiteSparse.
  CX_SPARSE,

  // Eigen's sparse linear algebra routines. In particular Ceres uses
  // the Simplicial LDLT routines.
  EIGEN_SPARSE
};

enum DenseLinearAlgebraLibraryType {
  EIGEN,
  LAPACK
};
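//
// Example usage (a minimal sketch; which libraries are actually usable
// depends on how Ceres was built, see
// IsSparseLinearAlgebraLibraryTypeAvailable and
// IsDenseLinearAlgebraLibraryTypeAvailable below):
//
//   ceres::Solver::Options options;
//   options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
//   options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;
//   options.dense_linear_algebra_library_type = ceres::LAPACK;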

// Logging options
// The options get progressively noisier.
enum LoggingType {
  SILENT,
  PER_MINIMIZER_ITERATION
};

enum MinimizerType {
  LINE_SEARCH,
  TRUST_REGION
};
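//
// Example usage (a minimal sketch):
//
//   ceres::Solver::Options options;
//   // Run the line search minimizer instead of the default trust region
//   // minimizer, and log progress once per minimizer iteration.
//   options.minimizer_type = ceres::LINE_SEARCH;
//   options.logging_type = ceres::PER_MINIMIZER_ITERATION;
//   options.minimizer_progress_to_stdout = true;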

enum LineSearchDirectionType {
  // Negative of the gradient.
  STEEPEST_DESCENT,

  // A generalization of the Conjugate Gradient method to non-linear
  // functions. The generalization can be performed in a number of
  // different ways, resulting in a variety of search directions. The
  // precise choice of the non-linear conjugate gradient algorithm
  // used is determined by NonlinearConjugateGradientType.
  NONLINEAR_CONJUGATE_GRADIENT,

  // BFGS, and its limited memory approximation L-BFGS, are quasi-Newton
  // algorithms that approximate the Hessian matrix by iteratively refining
  // an initial estimate with rank-one updates using the gradient at each
  // iteration. They are a generalization of the Secant method and satisfy
  // the Secant equation. The Secant equation has an infinite number of
  // solutions in multiple dimensions, as there are N*(N+1)/2 degrees of
  // freedom in a symmetric matrix but only N conditions are specified by
  // the Secant equation. The requirement that the Hessian approximation be
  // positive definite imposes another N additional constraints, but that
  // still leaves remaining degrees of freedom. (L)BFGS methods uniquely
  // determine the approximate Hessian by imposing the additional constraint
  // that the approximation at the next iteration must be the 'closest' to
  // the current approximation (the nature of how this proximity is measured
  // is actually the defining difference between a family of quasi-Newton
  // methods including (L)BFGS & DFP). (L)BFGS is currently regarded as being
  // the best known general quasi-Newton method.
  //
  // The principal difference between BFGS and L-BFGS is that whilst BFGS
  // maintains a full, dense approximation to the (inverse) Hessian, L-BFGS
  // maintains only a window of the last M observations of the parameters and
  // gradients. Using this observation history, the calculation of the next
  // search direction can be computed without requiring the construction of the
  // full dense inverse Hessian approximation. This is particularly important
  // for problems with a large number of parameters, where storage of an N-by-N
  // matrix in memory would be prohibitive.
  //
  // For more details on BFGS see:
  //
  // Broyden, C.G., "The Convergence of a Class of Double-rank Minimization
  // Algorithms," J. Inst. Maths. Applics., Vol. 6, pp 76-90, 1970.
  //
  // Fletcher, R., "A New Approach to Variable Metric Algorithms,"
  // Computer Journal, Vol. 13, pp 317-322, 1970.
  //
  // Goldfarb, D., "A Family of Variable Metric Updates Derived by Variational
  // Means," Mathematics of Computation, Vol. 24, pp 23-26, 1970.
  //
  // Shanno, D.F., "Conditioning of Quasi-Newton Methods for Function
  // Minimization," Mathematics of Computation, Vol. 24, pp 647-656, 1970.
  //
  // For more details on L-BFGS see:
  //
  // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
  // Storage". Mathematics of Computation 35 (151): 773-782.
  //
  // Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
  // "Representations of Quasi-Newton Matrices and their use in
  // Limited Memory Methods". Mathematical Programming 63 (4):
  // 129-156.
  //
  // A general reference for both methods:
  //
  // Nocedal J., Wright S., Numerical Optimization, 2nd Ed., Springer, 2006.
  LBFGS,
  BFGS,
};
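//
// Example usage (a minimal sketch for a line search based solve):
//
//   ceres::Solver::Options options;
//   options.minimizer_type = ceres::LINE_SEARCH;
//   // LBFGS keeps only a small window of curvature information and is the
//   // usual choice for problems with many parameters.
//   options.line_search_direction_type = ceres::LBFGS;
//   options.max_lbfgs_rank = 20;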

// Nonlinear conjugate gradient methods are a generalization of the
// method of Conjugate Gradients for linear systems. The
// generalization can be carried out in a number of different ways
// leading to a number of different rules for computing the search
// direction. Ceres provides a number of different variants. For more
// details see Numerical Optimization by Nocedal & Wright.
enum NonlinearConjugateGradientType {
  FLETCHER_REEVES,
  POLAK_RIBIERE,
  HESTENES_STIEFEL,
};

enum LineSearchType {
  // Backtracking line search with polynomial interpolation or
  // bisection.
  ARMIJO,
  WOLFE,
};
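//
// Example usage (a minimal sketch combining the line search options above):
//
//   ceres::Solver::Options options;
//   options.minimizer_type = ceres::LINE_SEARCH;
//   options.line_search_direction_type = ceres::NONLINEAR_CONJUGATE_GRADIENT;
//   options.nonlinear_conjugate_gradient_type = ceres::POLAK_RIBIERE;
//   // The Wolfe conditions additionally enforce a curvature condition on
//   // the accepted step; ARMIJO only checks for sufficient decrease.
//   options.line_search_type = ceres::WOLFE;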

// Ceres supports different strategies for computing the trust region
// step.
enum TrustRegionStrategyType {
  // The default trust region strategy is to use the step computation
  // used in the Levenberg-Marquardt algorithm. For more details see
  // levenberg_marquardt_strategy.h.
  LEVENBERG_MARQUARDT,

  // Powell's dogleg algorithm interpolates between the Cauchy point
  // and the Gauss-Newton step. It is particularly useful if the
  // LEVENBERG_MARQUARDT algorithm is making a large number of
  // unsuccessful steps. For more details see dogleg_strategy.h.
  //
  // NOTES:
  //
  // 1. This strategy has not been experimented with or tested as
  // extensively as LEVENBERG_MARQUARDT, and therefore it should be
  // considered EXPERIMENTAL for now.
  //
  // 2. For now this strategy should only be used with exact
  // factorization based linear solvers, i.e., SPARSE_SCHUR,
  // DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
  DOGLEG
};

// Ceres supports two different dogleg strategies.
// The "traditional" dogleg method by Powell and the
// "subspace" method described in
// R. H. Byrd, R. B. Schnabel, and G. A. Shultz,
// "Approximate solution of the trust region problem by minimization
//  over two-dimensional subspaces", Mathematical Programming,
// 40 (1988), pp. 247-263.
enum DoglegType {
  // The traditional approach constructs a dogleg path
  // consisting of two line segments and finds the furthest
  // point on that path that is still inside the trust region.
  TRADITIONAL_DOGLEG,

  // The subspace approach finds the exact minimum of the model
  // constrained to the subspace spanned by the dogleg path.
  SUBSPACE_DOGLEG
};
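//
// Example usage (a minimal sketch; DOGLEG requires one of the exact
// factorization based linear solvers listed above):
//
//   ceres::Solver::Options options;
//   options.linear_solver_type = ceres::DENSE_QR;
//   options.trust_region_strategy_type = ceres::DOGLEG;
//   options.dogleg_type = ceres::SUBSPACE_DOGLEG;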

enum TerminationType {
  // Minimizer terminated because one of the convergence criteria set
  // by the user was satisfied.
  //
  // 1.  (new_cost - old_cost) < function_tolerance * old_cost;
  // 2.  max_i |gradient_i| < gradient_tolerance
  // 3.  |step|_2 <= parameter_tolerance * ( |x|_2 +  parameter_tolerance)
  //
  // The user's parameter blocks will be updated with the solution.
  CONVERGENCE,

  // The solver ran for the maximum number of iterations or maximum
  // amount of time specified by the user, but none of the convergence
  // criteria specified by the user were met. The user's parameter
  // blocks will be updated with the solution found so far.
  NO_CONVERGENCE,

  // The minimizer terminated because of an error. The user's
  // parameter blocks will not be updated.
  FAILURE,

  // Using an IterationCallback object, user code can control the
  // minimizer. The following enums indicate that the user code was
  // responsible for termination.
  //
  // Minimizer terminated successfully because a user
  // IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY.
  //
  // The user's parameter blocks will be updated with the solution.
  USER_SUCCESS,

  // Minimizer terminated because a user IterationCallback
  // returned SOLVER_ABORT.
  //
  // The user's parameter blocks will not be updated.
  USER_FAILURE
};
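//
// Example usage (a minimal sketch; inspects the summary after a solve):
//
//   ceres::Solver::Summary summary;
//   ceres::Solve(options, &problem, &summary);
//   if (summary.termination_type == ceres::CONVERGENCE ||
//       summary.termination_type == ceres::USER_SUCCESS) {
//     // The parameter blocks now hold the solution.
//   } else if (summary.termination_type == ceres::NO_CONVERGENCE) {
//     // Ran out of iterations/time; the parameter blocks hold the best
//     // iterate found so far.
//   }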

// Enums used by IterationCallback instances to indicate to the
// solver whether it should continue solving, whether the user detected
// an error, or whether the solution is good enough and the solver
// should terminate.
enum CallbackReturnType {
  // Continue solving to next iteration.
  SOLVER_CONTINUE,

  // Terminate solver, and do not update the parameter blocks upon
  // return. Unless the user has set
  // Solver::Options::update_state_every_iteration, in which case the
  // state would have been updated every iteration
  // anyways. Solver::Summary::termination_type is set to USER_FAILURE.
  SOLVER_ABORT,

  // Terminate solver, update state and
  // return. Solver::Summary::termination_type is set to USER_SUCCESS.
  SOLVER_TERMINATE_SUCCESSFULLY
};
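//
// Example usage (a minimal sketch; assumes the IterationCallback and
// IterationSummary interfaces declared in iteration_callback.h):
//
//   class StopAfterCostThreshold : public ceres::IterationCallback {
//    public:
//     explicit StopAfterCostThreshold(double threshold)
//         : threshold_(threshold) {}
//     virtual ceres::CallbackReturnType operator()(
//         const ceres::IterationSummary& summary) {
//       // Stop early once the cost is good enough; otherwise keep going.
//       return summary.cost < threshold_
//                  ? ceres::SOLVER_TERMINATE_SUCCESSFULLY
//                  : ceres::SOLVER_CONTINUE;
//     }
//    private:
//     double threshold_;
//   };
//
//   StopAfterCostThreshold callback(1e-6);
//   options.callbacks.push_back(&callback);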

// The format in which linear least squares problems should be logged
// when Solver::Options::lsqp_iterations_to_dump is non-empty.
enum DumpFormatType {
  // Print the linear least squares problem in a human readable format
  // to stderr. The Jacobian is printed as a dense matrix. The vectors
  // D, x and f are printed as dense vectors. This should only be used
  // for small problems.
  CONSOLE,

  // Write out the linear least squares problem to the directory
  // pointed to by Solver::Options::lsqp_dump_directory as text files
  // which can be read into MATLAB/Octave. The Jacobian is dumped as a
  // text file containing (i,j,s) triplets, the vectors D, x and f are
  // dumped as text files containing a list of their values.
  //
  // A MATLAB/Octave script called lm_iteration_???.m is also output,
  // which can be used to parse and load the problem into memory.
  TEXTFILE
};
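//
// Example usage (a minimal sketch; uses the lsqp_* options referenced in the
// comment above. The exact option names, including the assumed
// lsqp_dump_format_type field, may differ between Ceres versions, so check
// solver.h for the build you are using):
//
//   ceres::Solver::Options options;
//   options.lsqp_iterations_to_dump.push_back(1);
//   options.lsqp_iterations_to_dump.push_back(5);
//   options.lsqp_dump_directory = "/tmp";
//   options.lsqp_dump_format_type = ceres::TEXTFILE;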

// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be
// specified for the number of residuals. If specified, then the
// number of residuals for that cost function can vary at runtime.
enum DimensionType {
  DYNAMIC = -1
};
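//
// Example usage (a minimal sketch; MyResidual is a hypothetical functor with
// a single 3-dimensional parameter block, and the actual number of residuals
// is only known at runtime):
//
//   const int num_residuals = ...;  // determined at runtime
//   ceres::CostFunction* cost_function =
//       new ceres::AutoDiffCostFunction<MyResidual, ceres::DYNAMIC, 3>(
//           new MyResidual, num_residuals);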

enum NumericDiffMethod {
  CENTRAL,
  FORWARD
};
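//
// Example usage (a minimal sketch; MyFunctor is a hypothetical functor with
// one residual and one 2-dimensional parameter block, differentiated
// numerically with central differences):
//
//   ceres::CostFunction* cost_function =
//       new ceres::NumericDiffCostFunction<MyFunctor, ceres::CENTRAL, 1, 2>(
//           new MyFunctor);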

enum LineSearchInterpolationType {
  BISECTION,
  QUADRATIC,
  CUBIC
};

enum CovarianceAlgorithmType {
  DENSE_SVD,
  SUITE_SPARSE_QR,
  EIGEN_SPARSE_QR
};
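//
// Example usage (a minimal sketch; assumes the Covariance interface declared
// in covariance.h, with x pointing to a 3-dimensional parameter block owned
// by "problem"):
//
//   ceres::Covariance::Options cov_options;
//   // DENSE_SVD is slow but can handle rank deficient Jacobians.
//   cov_options.algorithm_type = ceres::DENSE_SVD;
//
//   ceres::Covariance covariance(cov_options);
//   std::vector<std::pair<const double*, const double*> > blocks;
//   blocks.push_back(std::make_pair(x, x));
//   double cov_xx[3 * 3];
//   if (covariance.Compute(blocks, &problem)) {
//     covariance.GetCovarianceBlock(x, x, cov_xx);
//   }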

CERES_EXPORT const char* LinearSolverTypeToString(
    LinearSolverType type);
CERES_EXPORT bool StringToLinearSolverType(string value,
                                           LinearSolverType* type);

CERES_EXPORT const char* PreconditionerTypeToString(PreconditionerType type);
CERES_EXPORT bool StringToPreconditionerType(string value,
                                             PreconditionerType* type);

CERES_EXPORT const char* VisibilityClusteringTypeToString(
    VisibilityClusteringType type);
CERES_EXPORT bool StringToVisibilityClusteringType(string value,
                                      VisibilityClusteringType* type);

CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
    SparseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
    string value,
    SparseLinearAlgebraLibraryType* type);

CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
    DenseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
    string value,
    DenseLinearAlgebraLibraryType* type);

CERES_EXPORT const char* TrustRegionStrategyTypeToString(
    TrustRegionStrategyType type);
CERES_EXPORT bool StringToTrustRegionStrategyType(string value,
                                     TrustRegionStrategyType* type);

CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
CERES_EXPORT bool StringToDoglegType(string value, DoglegType* type);

CERES_EXPORT const char* MinimizerTypeToString(MinimizerType type);
CERES_EXPORT bool StringToMinimizerType(string value, MinimizerType* type);

CERES_EXPORT const char* LineSearchDirectionTypeToString(
    LineSearchDirectionType type);
CERES_EXPORT bool StringToLineSearchDirectionType(string value,
                                     LineSearchDirectionType* type);

CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
CERES_EXPORT bool StringToLineSearchType(string value, LineSearchType* type);

CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
    NonlinearConjugateGradientType type);
CERES_EXPORT bool StringToNonlinearConjugateGradientType(
    string value,
    NonlinearConjugateGradientType* type);

CERES_EXPORT const char* LineSearchInterpolationTypeToString(
    LineSearchInterpolationType type);
CERES_EXPORT bool StringToLineSearchInterpolationType(
    string value,
    LineSearchInterpolationType* type);

CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
    CovarianceAlgorithmType type);
CERES_EXPORT bool StringToCovarianceAlgorithmType(
    string value,
    CovarianceAlgorithmType* type);

CERES_EXPORT const char* TerminationTypeToString(TerminationType type);

CERES_EXPORT bool IsSchurType(LinearSolverType type);
CERES_EXPORT bool IsSparseLinearAlgebraLibraryTypeAvailable(
    SparseLinearAlgebraLibraryType type);
CERES_EXPORT bool IsDenseLinearAlgebraLibraryTypeAvailable(
    DenseLinearAlgebraLibraryType type);
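//
// Example usage (a minimal sketch of the string conversion and availability
// helpers declared above):
//
//   ceres::LinearSolverType solver_type;
//   if (ceres::StringToLinearSolverType("SPARSE_NORMAL_CHOLESKY",
//                                       &solver_type)) {
//     // Round-trip back to the canonical string representation.
//     const char* name = ceres::LinearSolverTypeToString(solver_type);
//   }
//
//   // Check whether the build supports a particular backend before using it.
//   if (ceres::IsSparseLinearAlgebraLibraryTypeAvailable(ceres::SUITE_SPARSE)) {
//     options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;
//   }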

}  // namespace ceres

#include "ceres/internal/reenable_warnings.h"

#endif  // CERES_PUBLIC_TYPES_H_