// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal (at) google.com (Sameer Agarwal)
//
// A simple C++ interface to the SuiteSparse and CHOLMOD libraries.

#ifndef CERES_INTERNAL_SUITESPARSE_H_
#define CERES_INTERNAL_SUITESPARSE_H_

#ifndef CERES_NO_SUITESPARSE

#include <cstring>
#include <string>
#include <vector>

#include "ceres/internal/port.h"
#include "cholmod.h"
#include "glog/logging.h"
#include "SuiteSparseQR.hpp"

// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
// if SuiteSparse was compiled with Metis support. This makes
// calling and linking into cholmod_camd problematic even though it
// has nothing to do with Metis. This has been fixed reliably in
// 4.2.0.
//
// The fix was actually committed in 4.1.0, but there is some
// confusion about a silent update to the tarball, so we are being
// conservative and choosing the next minor version where things
// are stable.
#if (SUITESPARSE_VERSION < 4002)
#define CERES_NO_CAMD
#endif

// UF_long is deprecated but SuiteSparse_long is only available in
// newer versions of SuiteSparse. So for older versions of
// SuiteSparse, we define SuiteSparse_long to be the same as UF_long,
// which is what recent versions of SuiteSparse do anyway.
#ifndef SuiteSparse_long
#define SuiteSparse_long UF_long
#endif

namespace ceres {
namespace internal {

class CompressedRowSparseMatrix;
class TripletSparseMatrix;

// The raw CHOLMOD and SuiteSparseQR libraries have a slightly
// cumbersome C-like calling format. This object abstracts it away and
// provides the user with a simpler interface. The methods here cannot
// be static as a cholmod_common object serves as a global variable
// for all cholmod function calls.
class SuiteSparse {
 public:
  SuiteSparse();
  ~SuiteSparse();

  // Functions for building cholmod_sparse objects from sparse
  // matrices stored in triplet form. The matrix A is not
  // modified. Caller owns the result.
  cholmod_sparse* CreateSparseMatrix(TripletSparseMatrix* A);

  // This function works like CreateSparseMatrix, except that the
  // return value corresponds to A' rather than A.
  cholmod_sparse* CreateSparseMatrixTranspose(TripletSparseMatrix* A);

  // Create a cholmod_sparse wrapper around the contents of A. This is
  // a shallow object, which refers to the contents of A and does not
  // use the SuiteSparse machinery to allocate memory.
  cholmod_sparse CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);

  // Given a vector x, build a cholmod_dense vector of size out_size
  // with the first in_size entries copied from x. If x is NULL, then
  // an all zeros vector is returned. Caller owns the result.
  cholmod_dense* CreateDenseVector(const double* x, int in_size, int out_size);
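  //
  // A minimal usage sketch (ss is an assumed SuiteSparse instance and x
  // an assumed caller-owned array of at least three doubles):
  //
  //   cholmod_dense* v = ss.CreateDenseVector(x, 3, 5);  // Last 2 entries 0.
  //   ...
  //   ss.Free(v);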

  // The matrix A is scaled using the matrix whose diagonal is the
  // vector scale. mode describes how scaling is applied. Possible
  // values are CHOLMOD_ROW for row scaling - diag(scale) * A,
  // CHOLMOD_COL for column scaling - A * diag(scale) and CHOLMOD_SYM
  // for symmetric scaling which scales both the rows and the columns
  // - diag(scale) * A * diag(scale).
  void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
    cholmod_scale(scale, mode, A, &cc_);
  }
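  //
  // For example, a sketch of row scaling (A and scale are assumed to be
  // cholmod objects of compatible sizes created elsewhere):
  //
  //   ss.Scale(scale, CHOLMOD_ROW, A);  // A <- diag(scale) * A, in place.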

  // Create and return a matrix m = A * A'. Caller owns the
  // result. The matrix A is not modified.
  cholmod_sparse* AATranspose(cholmod_sparse* A) {
    cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
    m->stype = 1;  // Pay attention to the upper triangular part.
    return m;
  }

  // y = alpha * A * x + beta * y. Only y is modified.
  void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
                           cholmod_dense* x, cholmod_dense* y) {
    double alpha_[2] = {alpha, 0};
    double beta_[2] = {beta, 0};
    cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
  }
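  //
  // For example (a sketch; A, x and y are assumed to be cholmod objects
  // of compatible sizes created elsewhere):
  //
  //   ss.SparseDenseMultiply(A, 1.0, 0.0, x, y);  // y = A * x.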

  // Find an ordering of A or AA' (if A is unsymmetric) that minimizes
  // the fill-in in the Cholesky factorization of the corresponding
  // matrix. This is done by using the AMD algorithm.
  //
  // Using this ordering, the symbolic Cholesky factorization of A (or
  // AA') is computed and returned.
  //
  // A is not modified, only the pattern of non-zeros of A is used,
  // the actual numerical values in A are of no consequence.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholesky(cholmod_sparse* A);

  cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
                                       const vector<int>& row_blocks,
                                       const vector<int>& col_blocks);

  // If A is symmetric, then compute the symbolic Cholesky
  // factorization of A(ordering, ordering). If A is unsymmetric, then
  // compute the symbolic factorization of
  // A(ordering,:) A(ordering,:)'.
  //
  // A is not modified, only the pattern of non-zeros of A is used,
  // the actual numerical values in A are of no consequence.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
                                                  const vector<int>& ordering);

  // Perform a symbolic factorization of A without re-ordering A. No
  // postordering of the elimination tree is performed. This ensures
  // that the symbolic factor does not introduce an extra permutation
  // on the matrix. See the documentation for CHOLMOD for more details.
  cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A);

  // Use the symbolic factorization in L to find the numerical
  // factorization of the matrix A or AA^T. Returns true if
  // successful, false otherwise. On return, L contains the numeric
  // factorization.
  bool Cholesky(cholmod_sparse* A, cholmod_factor* L);

  // Given a Cholesky factorization of a matrix A = LL^T, solve the
  // linear system Ax = b, and return the result. If the solve fails,
  // NULL is returned. Caller owns the result.
  cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b);

  // Combine the calls to Cholesky and Solve into a single call. If
  // the Cholesky factorization or the solve fails, return
  // NULL. Caller owns the result.
  cholmod_dense* SolveCholesky(cholmod_sparse* A,
                               cholmod_factor* L,
                               cholmod_dense* b);
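  //
  // A typical end-to-end sketch (the triplet matrix A_triplet and the
  // right hand side array rhs of length num_rows are assumed to exist
  // in the caller; error checking is omitted):
  //
  //   SuiteSparse ss;
  //   cholmod_sparse* A = ss.CreateSparseMatrix(&A_triplet);
  //   cholmod_dense* b = ss.CreateDenseVector(rhs, num_rows, num_rows);
  //   cholmod_factor* factor = ss.AnalyzeCholesky(A);      // Symbolic phase.
  //   cholmod_dense* x = ss.SolveCholesky(A, factor, b);   // Numeric + solve.
  //   ...
  //   ss.Free(x);
  //   ss.Free(factor);
  //   ss.Free(b);
  //   ss.Free(A);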

  // By virtue of the modeling layer in Ceres being block oriented,
  // all the matrices used by Ceres are also block oriented. When
  // doing sparse direct factorization of these matrices, the
  // fill-reducing ordering algorithms (in particular AMD) can either
  // be run on the block or the scalar form of these matrices. The two
  // SuiteSparse::AnalyzeCholesky methods allow the client to compute
  // the symbolic factorization of a matrix by either using AMD on the
  // matrix or a user provided ordering of the rows.
  //
  // But since the underlying matrices are block oriented, it is worth
  // running AMD on just the block structure of these matrices and then
  // lifting these block orderings to a full scalar ordering. This
  // preserves the block structure of the permuted matrix, and exposes
  // more of the super-nodal structure of the matrix to the numerical
  // factorization routines.
  //
  // Find the block oriented AMD ordering of a matrix A, whose row and
  // column blocks are given by row_blocks and col_blocks
  // respectively. The matrix may or may not be symmetric. The entries
  // of col_blocks do not need to sum to the number of columns in A;
  // if they sum to less, only the first sum(col_blocks) columns are
  // used to compute the ordering.
  bool BlockAMDOrdering(const cholmod_sparse* A,
                        const vector<int>& row_blocks,
                        const vector<int>& col_blocks,
                        vector<int>* ordering);
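  //
  // For example (a sketch; block_jacobian is an assumed cholmod_sparse*
  // whose rows and columns are partitioned into the assumed block sizes
  // row_blocks and col_blocks):
  //
  //   vector<int> ordering;
  //   if (ss.BlockAMDOrdering(block_jacobian, row_blocks, col_blocks,
  //                           &ordering)) {
  //     // ordering now holds the scalar permutation lifted from the
  //     // block-level AMD ordering.
  //   }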

  // Find a fill reducing approximate minimum degree
  // ordering. ordering is expected to be large enough to hold the
  // ordering.
  void ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);

  // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
  // if SuiteSparse was compiled with Metis support. This makes
  // calling and linking into cholmod_camd problematic even though it
  // has nothing to do with Metis. This has been fixed reliably in
  // 4.2.0.
  //
  // The fix was actually committed in 4.1.0, but there is some
  // confusion about a silent update to the tarball, so we are being
  // conservative and choosing the next minor version where things
  // are stable.
  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
    return (SUITESPARSE_VERSION > 4001);
  }

  // Find a fill reducing approximate minimum degree
  // ordering. constraints is an array which associates with each
  // column of the matrix an elimination group, i.e., all columns in
  // group 0 are eliminated first, all columns in group 1 are
  // eliminated next, etc. This function finds a fill reducing
  // ordering that obeys these constraints.
  //
  // Calling ApproximateMinimumDegreeOrdering is equivalent to calling
  // ConstrainedApproximateMinimumDegreeOrdering with a constraint
  // array that puts all columns in the same elimination group.
  //
  // If CERES_NO_CAMD is defined then calling this function will
  // result in a crash.
  void ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
                                                   int* constraints,
                                                   int* ordering);
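  //
  // For example (a sketch; matrix and its column count num_cols are
  // assumed to exist in the caller, and ordering is assumed to be
  // large enough for the result):
  //
  //   // Two elimination groups: columns in group 0 are eliminated first.
  //   vector<int> constraints(num_cols);
  //   for (int i = 0; i < num_cols; ++i) {
  //     constraints[i] = (i < num_cols / 2) ? 0 : 1;
  //   }
  //   vector<int> ordering(num_cols);
  //   ss.ConstrainedApproximateMinimumDegreeOrdering(matrix,
  //                                                  &constraints[0],
  //                                                  &ordering[0]);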

  void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
  void Free(cholmod_dense* m)  { cholmod_free_dense(&m, &cc_);  }
  void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }

  void Print(cholmod_sparse* m, const string& name) {
    cholmod_print_sparse(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_dense* m, const string& name) {
    cholmod_print_dense(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_triplet* m, const string& name) {
    cholmod_print_triplet(m, const_cast<char*>(name.c_str()), &cc_);
  }

  cholmod_common* mutable_cc() { return &cc_; }

 private:
  cholmod_common cc_;
};

}  // namespace internal
}  // namespace ceres

#else  // CERES_NO_SUITESPARSE

class SuiteSparse {};
typedef void cholmod_factor;

#endif  // CERES_NO_SUITESPARSE

#endif  // CERES_INTERNAL_SUITESPARSE_H_