// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// The ProgramEvaluator runs the cost functions contained in each residual block
// and stores the results in a jacobian. The particular type of jacobian is
// abstracted out using two template parameters:
//
//   - An "EvaluatePreparer" that is responsible for creating the array of
//     pointers to the jacobian blocks into which the cost function writes its
//     results.
//   - A "JacobianWriter" that is responsible for storing the resulting
//     jacobian blocks in the passed sparse matrix.
//
// This abstraction affords an efficient evaluator implementation while still
// supporting writing to multiple sparse matrix formats. For example, when the
// ProgramEvaluator is parameterized for writing to block sparse matrices, the
// residual jacobians are written directly into their final position in the
// block sparse matrix by the user's CostFunction; there is no copying.
//
// The evaluation is threaded with OpenMP.
//
// The EvaluatePreparer and JacobianWriter interfaces are as follows:
//
//   class EvaluatePreparer {
//     // Prepare the jacobians array for use as the destination of a call to
//     // a cost function's evaluate method.
//     void Prepare(const ResidualBlock* residual_block,
//                  int residual_block_index,
//                  SparseMatrix* jacobian,
//                  double** jacobians);
//   }
//
//   class JacobianWriter {
//     // Create a jacobian that this writer can write. Same as
//     // Evaluator::CreateJacobian.
//     SparseMatrix* CreateJacobian() const;
//
//     // Create num_threads evaluate preparers. Caller owns result which must
//     // be freed with delete[]. Resulting preparers are valid while *this is.
//     EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
//
//     // Write the block jacobians from a residual block evaluation to the
//     // larger sparse jacobian.
//     void Write(int residual_id,
//                int residual_offset,
//                double** jacobians,
//                SparseMatrix* jacobian);
//   }
//
// Note: The ProgramEvaluator is not thread safe, since internally it maintains
// some per-thread scratch space.
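//
// As an illustration only (the concrete preparer/writer types live elsewhere
// in Ceres, and the actual selection logic is in evaluator.cc, which may
// differ from this sketch), typical instantiations look like:
//
//   // Dense jacobians, e.g. for DENSE_QR:
//   ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>
//
//   // Block sparse jacobians, e.g. for SPARSE_SCHUR:
//   ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>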

#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"

namespace ceres {
namespace internal {

template<typename EvaluatePreparer, typename JacobianWriter>
class ProgramEvaluator : public Evaluator {
 public:
  ProgramEvaluator(const Evaluator::Options &options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    CHECK_EQ(1, options_.num_threads)
        << "OpenMP support is not compiled into this binary; "
        << "only options.num_threads=1 is supported.";
#endif

    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  bool Evaluate(const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
    }

    // This bool is used to disable the loop if an error is encountered
    // without breaking out of it. The remaining loop iterations are still run,
    // but with an empty body, and so will finish quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
// Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }

#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.
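      // When only the gradient is requested, the residuals are still needed
      // to form J^T r below, so they are written into this thread's scratch
      // buffer instead of the caller's output array.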
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        block_residuals = scratch->residual_block_residuals.get();
      }

      // Prepare block jacobians if requested.
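      // Note that the jacobian pointers are also prepared when only the
      // gradient is requested, since computing the gradient requires the
      // block jacobians.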
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }

      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
              &block_cost,
              block_residuals,
              block_jacobians,
              scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
// the flush inside the failure case so that there is usually only one
// synchronization point per loop iteration instead of two.
#pragma omp flush(abort)
        continue;
      }

      scratch->cost += block_cost;

      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }

      // Compute and store the gradient, if it was requested.
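      // For each parameter block j of this residual block, accumulate the
      // contribution J_j^T * r into the per-thread gradient, expressed in the
      // local (tangent) space of the block; constant parameter blocks are
      // skipped since they contribute nothing to the gradient.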
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }
          MatrixRef block_jacobian(block_jacobians[j],
                                   num_residuals,
                                   parameter_block->LocalSize());
          VectorRef block_gradient(scratch->gradient.get() +
                                   parameter_block->delta_offset(),
                                   parameter_block->LocalSize());
          VectorRef block_residual(block_residuals, num_residuals);
          block_gradient += block_residual.transpose() * block_jacobian;
        }
      }
    }

    if (!abort) {
      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      int num_parameters = program_->NumEffectiveParameters();
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }
    }
    return !abort;
  }

  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }

  int NumResiduals() const {
    return program_->NumResiduals();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual block.
  struct EvaluateScratch {
    void Init(int max_parameters_per_residual_block,
              int max_scratch_doubles_needed_for_evaluate,
              int max_residuals_per_residual_block,
              int num_parameters) {
      residual_block_evaluate_scratch.reset(
          new double[max_scratch_doubles_needed_for_evaluate]);
      gradient.reset(new double[num_parameters]);
      VectorRef(gradient.get(), num_parameters).setZero();
      residual_block_residuals.reset(
          new double[max_residuals_per_residual_block]);
      jacobian_block_ptrs.reset(
          new double*[max_parameters_per_residual_block]);
    }

    double cost;
    scoped_array<double> residual_block_evaluate_scratch;
    // The gradient in the local parameterization.
    scoped_array<double> gradient;
    // Enough space to store the residual for the largest residual block.
    scoped_array<double> residual_block_residuals;
    scoped_array<double*> jacobian_block_ptrs;
  };

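  // Build the mapping from residual block index to the offset of that block's
  // first residual in the combined residual vector. For example, residual
  // blocks with 2, 3, and 1 residuals produce the layout [0, 2, 5].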
  static void BuildResidualLayout(const Program& program,
                                  vector<int>* residual_layout) {
    const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
    residual_layout->resize(program.NumResidualBlocks());
    int residual_pos = 0;
    for (int i = 0; i < residual_blocks.size(); ++i) {
      const int num_residuals = residual_blocks[i]->NumResiduals();
      (*residual_layout)[i] = residual_pos;
      residual_pos += num_residuals;
    }
  }

  // Create scratch space for each thread evaluating the program.
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; i++) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;
  JacobianWriter jacobian_writer_;
  scoped_array<EvaluatePreparer> evaluate_preparers_;
  scoped_array<EvaluateScratch> evaluate_scratch_;
  vector<int> residual_layout_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_