// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//         mierle@gmail.com (Keir Mierle)
//
// Finite differencing routine used by NumericDiffCostFunction.

#ifndef CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_

#include <cstring>

#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {

// Helper templates that allow evaluation of a variadic functor or a
// CostFunction object.
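//
// The last EvaluateImpl argument exists only to select an overload: callers
// pass the functor pointer itself, so if CostFunctor derives from
// ceres::CostFunction the const CostFunction* overload below is the better
// match and CostFunction::Evaluate is called directly; otherwise only the
// const void* overload is viable and evaluation goes through
// VariadicEvaluate.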
template <typename CostFunctor,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9 >
bool EvaluateImpl(const CostFunctor* functor,
                  double const* const* parameters,
                  double* residuals,
                  const void* /* NOT USED */) {
  return VariadicEvaluate<CostFunctor,
                          double,
                          N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
                              *functor,
                              parameters,
                              residuals);
}

template <typename CostFunctor,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9 >
bool EvaluateImpl(const CostFunctor* functor,
                  double const* const* parameters,
                  double* residuals,
                  const CostFunction* /* NOT USED */) {
  return functor->Evaluate(parameters, residuals, NULL);
}

// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
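//
// For context, and purely as an illustration of the intended call shape (the
// real call site lives with NumericDiffCostFunction, not in this header): a
// caller evaluates the residuals once at the unperturbed point and then, for
// each parameter block i of size Ni whose jacobian was requested, calls
// something of the form
//
//   NumericDiff<CostFunctor, kMethod, kNumResiduals,
//               N0, N1, N2, N3, N4, N5, N6, N7, N8, N9,
//               i, Ni>::EvaluateJacobianForParameterBlock(
//       functor, residuals_at_eval_point, relative_step_size,
//       num_residuals, parameters, jacobians[i]);
//
// where i and Ni are compile-time constants and jacobians[i] is the output
// block for that parameter.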
template <typename CostFunctor,
          NumericDiffMethod kMethod,
          int kNumResiduals,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9,
          int kParameterBlock,
          int kParameterBlockSize>
struct NumericDiff {
  // Mutates parameters but must restore them before return.
  static bool EvaluateJacobianForParameterBlock(
      const CostFunctor* functor,
      double const* residuals_at_eval_point,
      const double relative_step_size,
      int num_residuals,
      double **parameters,
      double *jacobian) {
    using Eigen::Map;
    using Eigen::Matrix;
    using Eigen::RowMajor;
    using Eigen::ColMajor;

    const int NUM_RESIDUALS =
        (kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);

    typedef Matrix<double, kNumResiduals, 1> ResidualVector;
    typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
    typedef Matrix<double,
                   kNumResiduals,
                   kParameterBlockSize,
                   (kParameterBlockSize == 1 &&
                    kNumResiduals > 1) ? ColMajor : RowMajor>
        JacobianMatrix;
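    // ColMajor is used above when the parameter block has size 1 (and there
    // is more than one residual) because Eigen does not allow a row-major
    // matrix type with a single column; for a single column the two storage
    // orders are identical in memory anyway.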

    Map<JacobianMatrix> parameter_jacobian(jacobian,
                                           NUM_RESIDUALS,
                                           kParameterBlockSize);

    // Mutate one element at a time and then restore.
    Map<ParameterVector> x_plus_delta(parameters[kParameterBlock],
                                      kParameterBlockSize);
    ParameterVector x(x_plus_delta);
    ParameterVector step_size = x.array().abs() * relative_step_size;

    // To handle cases where a parameter is exactly zero, fall back to the
    // mean step size across all dimensions of this parameter block. If all
    // the parameters are zero, there's no good answer; take
    // relative_step_size as a guess and hope for the best.
    const double fallback_step_size =
        (step_size.sum() == 0)
        ? relative_step_size
        : step_size.sum() / step_size.rows();
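    // Worked example with hypothetical values: relative_step_size = 1e-6 and
    // x = (0, 1, 3) gives step_size = (0, 1e-6, 3e-6), so fallback_step_size
    // = 4e-6 / 3 (approximately 1.33e-6), which is then used as the delta
    // for the first coordinate below.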

    // For each parameter in the parameter block, use finite differences to
    // compute the derivative for that parameter.

    ResidualVector residuals(NUM_RESIDUALS);
    for (int j = 0; j < kParameterBlockSize; ++j) {
      const double delta =
          (step_size(j) == 0.0) ? fallback_step_size : step_size(j);

      x_plus_delta(j) = x(j) + delta;

      if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
              functor, parameters, residuals.data(), functor)) {
        return false;
      }

      // Compute this column of the jacobian in 3 steps:
      // 1. Store residuals for the forward part.
      // 2. Subtract residuals for the backward (or 0) part.
      // 3. Divide out the run.
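      //
      // In formulas, with e_j the j-th canonical basis vector and f the
      // residual function:
      //   CENTRAL: col(j) = (f(x + delta * e_j) - f(x - delta * e_j)) / (2 * delta)
      //   FORWARD: col(j) = (f(x + delta * e_j) - f(x)) / delta
      // where f(x) is residuals_at_eval_point, computed by the caller at the
      // unperturbed parameters.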
      parameter_jacobian.col(j) = residuals;

      double one_over_delta = 1.0 / delta;
      if (kMethod == CENTRAL) {
        // Compute the function on the other side of x(j).
        x_plus_delta(j) = x(j) - delta;

        if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
                functor, parameters, residuals.data(), functor)) {
          return false;
        }

        parameter_jacobian.col(j) -= residuals;
        one_over_delta /= 2;
      } else {
        // Forward difference only; reuse existing residuals evaluation.
        parameter_jacobian.col(j) -=
            Map<const ResidualVector>(residuals_at_eval_point, NUM_RESIDUALS);
      }
      x_plus_delta(j) = x(j);  // Restore x_plus_delta.

      // Divide out the run to get slope.
      parameter_jacobian.col(j) *= one_over_delta;
    }
    return true;
  }
};

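// Specialization for an unused parameter block (size 0). Control is never
// supposed to reach it at runtime (hence the LOG(FATAL)); it exists so that
// NumericDiff can be instantiated uniformly for every parameter block slot,
// whether that slot is used or not.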
template <typename CostFunctor,
          NumericDiffMethod kMethod,
          int kNumResiduals,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9,
          int kParameterBlock>
struct NumericDiff<CostFunctor, kMethod, kNumResiduals,
                   N0, N1, N2, N3, N4, N5, N6, N7, N8, N9,
                   kParameterBlock, 0> {
  // Mutates parameters but must restore them before return.
  static bool EvaluateJacobianForParameterBlock(
      const CostFunctor* functor,
      double const* residuals_at_eval_point,
      const double relative_step_size,
      const int num_residuals,
      double **parameters,
      double *jacobian) {
    LOG(FATAL) << "Control should never reach here.";
    return true;
  }
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_