// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir (at) google.com (Keir Mierle)
//
// The ProgramEvaluator runs the cost functions contained in each residual block
// and stores the result into a jacobian. The particular type of jacobian is
// abstracted out using two template parameters:
//
//   - An "EvaluatePreparer" that is responsible for creating the array of
//     pointers to the jacobian blocks into which the cost function writes its
//     jacobians.
//   - A "JacobianWriter" that is responsible for storing the resulting
//     jacobian blocks in the passed sparse matrix.
//
// This abstraction affords an efficient evaluator implementation while still
// supporting writing to multiple sparse matrix formats. For example, when the
// ProgramEvaluator is parameterized for writing to block sparse matrices, the
// residual jacobians are written directly into their final position in the
// block sparse matrix by the user's CostFunction; there is no copying.
//
// The evaluation is threaded with OpenMP.
//
// The EvaluatePreparer and JacobianWriter interfaces are as follows:
//
//   class EvaluatePreparer {
//     // Prepare the jacobians array for use as the destination of a call to
//     // a cost function's Evaluate method.
//     void Prepare(const ResidualBlock* residual_block,
//                  int residual_block_index,
//                  SparseMatrix* jacobian,
//                  double** jacobians);
//   }
//
//   class JacobianWriter {
//     // Create a jacobian that this writer can write to. Same as
//     // Evaluator::CreateJacobian.
//     SparseMatrix* CreateJacobian() const;
//
//     // Create num_threads evaluate preparers. The caller owns the result,
//     // which must be freed with delete[]. The resulting preparers are valid
//     // while *this is.
//     EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
//
//     // Write the block jacobians from a residual block evaluation to the
//     // larger sparse jacobian.
//     void Write(int residual_id,
//                int residual_offset,
//                double** jacobians,
//                SparseMatrix* jacobian);
//   }
//
// Note: The ProgramEvaluator is not thread-safe, since internally it maintains
// some per-thread scratch space.
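//
// As an illustrative sketch only (ScratchEvaluatePreparer and
// DenseJacobianWriter are assumed to be a preparer/writer pair defined
// elsewhere in Ceres, and options, program, state, and residuals are
// hypothetical variables), a dense-jacobian evaluator would be used roughly
// like this:
//
//   ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>
//       evaluator(options, program);
//   scoped_ptr<SparseMatrix> jacobian(evaluator.CreateJacobian());
//   double cost;
//   evaluator.Evaluate(Evaluator::EvaluateOptions(),
//                      state,
//                      &cost,
//                      residuals,
//                      NULL,            // Gradient not requested.
//                      jacobian.get());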

#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include <map>
#include <string>
#include <vector>
#include "ceres/evaluator.h"
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/small_blas.h"

namespace ceres {
namespace internal {

template<typename EvaluatePreparer, typename JacobianWriter>
class ProgramEvaluator : public Evaluator {
 public:
  ProgramEvaluator(const Evaluator::Options &options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    CHECK_EQ(1, options_.num_threads)
        << "OpenMP support is not compiled into this binary; "
        << "only options.num_threads=1 is supported.";
#endif

    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
                const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
    ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
                                         ? "Evaluator::Residual"
                                         : "Evaluator::Jacobian",
                                         &execution_summary_);

    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
      if (gradient != NULL) {
        VectorRef(evaluate_scratch_[i].gradient.get(),
                  program_->NumEffectiveParameters()).setZero();
      }
    }

    // This bool is used to disable the loop body if an error is encountered,
    // without breaking out of the loop (which OpenMP does not allow). The
    // remaining iterations still run, but with an empty body, and so finish
    // quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
// Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }

#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
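      // Even when the caller does not ask for residuals, they are still needed
      // below to form the gradient (J' * r); in that case the residuals for
      // this block are written into per-thread scratch space.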
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        block_residuals = scratch->residual_block_residuals.get();
      }

      // Prepare block jacobians if requested.
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }

      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
              evaluate_options.apply_loss_function,
              &block_cost,
              block_residuals,
              block_jacobians,
              scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
// the flush inside the failure case so that there is usually only one
// synchronization point per loop iteration instead of two.
#pragma omp flush(abort)
        continue;
      }

      scratch->cost += block_cost;

      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }

      // Compute and store the gradient, if it was requested.
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }

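          // Accumulate this block's contribution to the gradient:
          // g += J_j' * r, added at the parameter block's delta offset and
          // sized by its local (tangent space) dimension.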
          MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
              block_jacobians[j],
              num_residuals,
              parameter_block->LocalSize(),
              block_residuals,
              scratch->gradient.get() + parameter_block->delta_offset());
        }
      }
    }

    if (!abort) {
      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      int num_parameters = program_->NumEffectiveParameters();
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }
    }
    return !abort;
  }

  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }

  int NumResiduals() const {
    return program_->NumResiduals();
  }

  virtual map<string, int> CallStatistics() const {
    return execution_summary_.calls();
  }

  virtual map<string, double> TimeStatistics() const {
    return execution_summary_.times();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual block.
  struct EvaluateScratch {
    void Init(int max_parameters_per_residual_block,
              int max_scratch_doubles_needed_for_evaluate,
              int max_residuals_per_residual_block,
              int num_parameters) {
      residual_block_evaluate_scratch.reset(
          new double[max_scratch_doubles_needed_for_evaluate]);
      gradient.reset(new double[num_parameters]);
      VectorRef(gradient.get(), num_parameters).setZero();
      residual_block_residuals.reset(
          new double[max_residuals_per_residual_block]);
      jacobian_block_ptrs.reset(
          new double*[max_parameters_per_residual_block]);
    }

    double cost;
    scoped_array<double> residual_block_evaluate_scratch;
    // The gradient in the local parameterization.
    scoped_array<double> gradient;
    // Enough space to store the residual for the largest residual block.
    scoped_array<double> residual_block_residuals;
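    // Pointers to the jacobian blocks of a single residual block, one per
    // parameter block; filled in by the EvaluatePreparer before each call to
    // ResidualBlock::Evaluate.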
    scoped_array<double*> jacobian_block_ptrs;
  };

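  // Fill residual_layout such that (*residual_layout)[i] is the offset of
  // residual block i's residuals within the program's residual vector.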
  static void BuildResidualLayout(const Program& program,
                                  vector<int>* residual_layout) {
    const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
    residual_layout->resize(program.NumResidualBlocks());
    int residual_pos = 0;
    for (int i = 0; i < residual_blocks.size(); ++i) {
      const int num_residuals = residual_blocks[i]->NumResiduals();
      (*residual_layout)[i] = residual_pos;
      residual_pos += num_residuals;
    }
  }

  // Create scratch space for each thread evaluating the program.
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; i++) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;
  JacobianWriter jacobian_writer_;
  scoped_array<EvaluatePreparer> evaluate_preparers_;
  scoped_array<EvaluateScratch> evaluate_scratch_;
  vector<int> residual_layout_;
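  // Call counts and cumulative times for Evaluate(), recorded by the
  // ScopedExecutionTimers above and exposed via CallStatistics() and
  // TimeStatistics().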
  ::ceres::internal::ExecutionSummary execution_summary_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_