// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Abstract interface for objects solving linear systems of various
// kinds.

#ifndef CERES_INTERNAL_LINEAR_SOLVER_H_
#define CERES_INTERNAL_LINEAR_SOLVER_H_

#include <cstddef>
#include <vector>

#include <glog/logging.h>
#include "ceres/block_sparse_matrix.h"
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"

namespace ceres {
namespace internal {

class LinearOperator;

// Abstract base class for objects that implement algorithms for
// solving linear systems
//
//   Ax = b
//
// It is expected that a single instance of a LinearSolver object may
// be used multiple times for solving multiple linear systems with the
// same sparsity structure. This allows them to cache and reuse
// information across solves. Consequently, calling Solve on the same
// LinearSolver instance with linear systems that have different
// sparsity structures results in undefined behaviour.
//
// Subclasses of LinearSolver use two structs to configure themselves.
// The Options struct configures the LinearSolver object for its
// lifetime. The PerSolveOptions struct is used to specify options for
// a particular Solve call.
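//
// A minimal usage sketch (hypothetical values; the operator A, the
// right hand side b and the solution vector x are assumed to have
// been set up elsewhere):
//
//   LinearSolver::Options options;
//   options.type = CGNR;
//   options.max_num_iterations = 100;
//   scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
//
//   LinearSolver::PerSolveOptions per_solve_options;
//   per_solve_options.r_tolerance = 1e-9;
//   LinearSolver::Summary summary =
//       solver->Solve(A, b, per_solve_options, x);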
class LinearSolver {
 public:
  struct Options {
    Options()
        : type(SPARSE_NORMAL_CHOLESKY),
          preconditioner_type(JACOBI),
          sparse_linear_algebra_library(SUITE_SPARSE),
          use_block_amd(true),
          min_num_iterations(1),
          max_num_iterations(1),
          num_threads(1),
          residual_reset_period(10),
          row_block_size(Dynamic),
          e_block_size(Dynamic),
          f_block_size(Dynamic) {
    }

    LinearSolverType type;

    PreconditionerType preconditioner_type;

    SparseLinearAlgebraLibraryType sparse_linear_algebra_library;

    // See solver.h for explanation of this option.
    bool use_block_amd;

    // Number of internal iterations that the solver uses. This
    // parameter only makes sense for iterative solvers like CG.
    int min_num_iterations;
    int max_num_iterations;

    // Number of threads that the solver can use, if it supports
    // multithreading.
    int num_threads;

    // Hints about the order in which the parameter blocks should be
    // eliminated by the linear solver.
    //
    // For example if elimination_groups is a vector of size k, then
    // the linear solver is informed that it should eliminate the
    // parameter blocks 0 ... elimination_groups[0] - 1 first, then
    // the next elimination_groups[1] parameter blocks, and so on.
    // Within each elimination group, the linear solver is free to
    // choose how the parameter blocks are ordered. Different linear
    // solvers have differing requirements on elimination_groups.
    //
    // The most common use is for Schur type solvers, where there
    // should be at least two elimination groups and the first
    // elimination group must form an independent set in the normal
    // equations. The first elimination group corresponds to the
    // num_eliminate_blocks in the Schur type solvers.
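    //
    // For example, in a bundle adjustment problem with 1000 point
    // blocks and 10 camera blocks, a Schur type solver would be told
    // to eliminate the points first (a hypothetical sketch; in
    // practice these values are filled in programmatically from the
    // user visible ordering):
    //
    //   options.elimination_groups.push_back(1000);  // point blocks
    //   options.elimination_groups.push_back(10);    // camera blocks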
    vector<int> elimination_groups;

    // Iterative solvers, e.g., preconditioned conjugate gradients,
    // maintain a cheap estimate of the residual which may become
    // inaccurate over time. For non-zero values of this parameter,
    // the solver recalculates the residual from scratch using a
    // |b - Ax| evaluation every residual_reset_period iterations.
    int residual_reset_period;

    // If the block sizes in a BlockSparseMatrix are fixed, then in
    // some cases the Schur complement based solvers can detect and
    // specialize on them.
    //
    // It is expected that these parameters are set programmatically
    // rather than manually.
    //
    // Please see schur_complement_solver.h and schur_eliminator.h for
    // more details.
    int row_block_size;
    int e_block_size;
    int f_block_size;
  };

  // Options for the Solve method.
  struct PerSolveOptions {
    PerSolveOptions()
        : D(NULL),
          preconditioner(NULL),
          r_tolerance(0.0),
          q_tolerance(0.0) {
    }

    // This option only makes sense for unsymmetric linear solvers
    // that can solve rectangular linear systems.
    //
    // Given a matrix A, an optional diagonal matrix D as a vector,
    // and a vector b, the linear solver will solve for
    //
    //   | A | x = | b |
    //   | D |     | 0 |
    //
    // If D is null, then it is treated as zero, and the solver returns
    // the solution to
    //
    //   A x = b
    //
    // In either case, x is the vector that solves the following
    // optimization problem.
    //
    //   arg min_x ||Ax - b||^2 + ||Dx||^2
    //
    // Here A is a matrix of size m x n, with full column rank. If A
    // does not have full column rank, the results returned by the
    // solver cannot be relied on. D, if it is not null, is an array of
    // size n. b is an array of size m and x is an array of size n.
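    //
    // Equivalently, x is the solution of the regularized normal
    // equations
    //
    //   (A'A + D'D) x = A'b
    //
    // a form which makes the role of D as a regularizer explicit.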
    double* D;

    // This option only makes sense for iterative solvers.
    //
    // In general the performance of an iterative linear solver
    // depends on the condition number of the matrix A. For example
    // the convergence rate of the conjugate gradients algorithm
    // is proportional to the square root of the condition number.
    //
    // One particularly useful technique for improving the
    // conditioning of a linear system is to precondition it. In its
    // simplest form a preconditioner is a matrix M such that instead
    // of solving Ax = b, we solve the linear system AM^{-1} y = b,
    // where M is chosen such that the condition number k(AM^{-1}) is
    // smaller than the condition number k(A). Given the solution to
    // this system, x = M^{-1} y. The iterative solver takes care of
    // the mechanics of solving the preconditioned system and
    // returning the corrected solution x. The user only needs to
    // supply a linear operator.
    //
    // A null preconditioner is equivalent to an identity matrix being
    // used as a preconditioner.
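    //
    // For example (a hypothetical sketch), a Jacobi preconditioner
    // would be supplied as a LinearOperator whose RightMultiply
    // applies M^{-1} = diag(A'A)^{-1} to a vector; the iterative
    // solver applies it to its residuals and never forms AM^{-1}
    // explicitly.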
    LinearOperator* preconditioner;

    // The following tolerance-related options only make sense for
    // iterative solvers. Direct solvers ignore them.

    // Solver terminates when
    //
    //   |Ax - b| <= r_tolerance * |b|.
    //
    // This is the most commonly used termination criterion for
    // iterative solvers.
    double r_tolerance;

    // For PSD matrices A, let
    //
    //   Q(x) = x'Ax - 2b'x
    //
    // be the cost of the quadratic function defined by A and b. Then,
    // the solver terminates at iteration i if
    //
    //   i * (Q(x_i) - Q(x_{i-1})) / Q(x_i) < q_tolerance.
    //
    // This termination criterion is more useful when using CG to
    // solve the Newton step. This particular convergence test comes
    // from Stephen Nash's work on truncated Newton
    // methods. References:
    //
    //   1. Stephen G. Nash & Ariela Sofer, Assessing A Search
    //      Direction Within A Truncated Newton Method, Operations
    //      Research Letters 9 (1990), 219-221.
    //
    //   2. Stephen G. Nash, A Survey of Truncated Newton Methods,
    //      Journal of Computational and Applied Mathematics,
    //      124(1-2), 45-59, 2000.
    //
    double q_tolerance;
  };

  // Summary of a call to the Solve method. We should move away from
  // the true/false method for determining solver success. We should
  // let the summary object do the talking.
  struct Summary {
    Summary()
        : residual_norm(0.0),
          num_iterations(-1),
          termination_type(FAILURE) {
    }

    double residual_norm;
    int num_iterations;
    LinearSolverTerminationType termination_type;
  };

  virtual ~LinearSolver();

  // Solve Ax = b.
  virtual Summary Solve(LinearOperator* A,
                        const double* b,
                        const PerSolveOptions& per_solve_options,
                        double* x) = 0;

  // Factory
  static LinearSolver* Create(const Options& options);
};

// This templated subclass of LinearSolver serves as a base class for
// other linear solvers that depend on the particular matrix layout of
// the underlying linear operator. For example some linear solvers
// need low level access to the TripletSparseMatrix implementing the
// LinearOperator interface. This class hides those implementation
// details behind a private virtual method, and has the Solve method
// perform the necessary downcasting.
template <typename MatrixType>
class TypedLinearSolver : public LinearSolver {
 public:
  virtual ~TypedLinearSolver() {}
  virtual LinearSolver::Summary Solve(
      LinearOperator* A,
      const double* b,
      const LinearSolver::PerSolveOptions& per_solve_options,
      double* x) {
    CHECK_NOTNULL(A);
    CHECK_NOTNULL(b);
    CHECK_NOTNULL(x);
    return SolveImpl(down_cast<MatrixType*>(A), b, per_solve_options, x);
  }

 private:
  virtual LinearSolver::Summary SolveImpl(
      MatrixType* A,
      const double* b,
      const LinearSolver::PerSolveOptions& per_solve_options,
      double* x) = 0;
};

// Linear solvers that depend on access to the low level structure of
// a SparseMatrix.
typedef TypedLinearSolver<BlockSparseMatrix>         BlockSparseMatrixSolver;          // NOLINT
typedef TypedLinearSolver<BlockSparseMatrixBase>     BlockSparseMatrixBaseSolver;      // NOLINT
typedef TypedLinearSolver<CompressedRowSparseMatrix> CompressedRowSparseMatrixSolver;  // NOLINT
typedef TypedLinearSolver<DenseSparseMatrix>         DenseSparseMatrixSolver;          // NOLINT
typedef TypedLinearSolver<TripletSparseMatrix>       TripletSparseMatrixSolver;        // NOLINT
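
// A concrete solver subclasses the appropriate typedef above and
// implements SolveImpl against the concrete matrix type. A minimal
// hypothetical sketch:
//
//   class MyTripletSolver : public TripletSparseMatrixSolver {
//    private:
//     virtual LinearSolver::Summary SolveImpl(
//         TripletSparseMatrix* A,
//         const double* b,
//         const LinearSolver::PerSolveOptions& per_solve_options,
//         double* x) {
//       LinearSolver::Summary summary;
//       // ... iterate/factor using A's triplet representation ...
//       return summary;
//     }
//   };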

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_LINEAR_SOLVER_H_