// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: mierle@gmail.com (Keir Mierle)
//         sameeragarwal@google.com (Sameer Agarwal)
//         thadh@gmail.com (Thad Hughes)
//
// This numeric diff implementation differs from the one found in
// numeric_diff_cost_function.h by supporting numeric diff on cost
// functions with a variable number of parameter blocks, each of
// variable size. With the other implementation, all the sizes (both the
// number of parameter blocks and the size of each block) must be
// fixed at compile time.
//
// The functor API differs slightly from the API for fixed size
// numeric diff; the expected interface for the cost functors is:
//
//   struct MyCostFunctor {
//     bool operator()(double const* const* parameters, double* residuals) const {
//       // Use parameters[i] to access the i'th parameter block.
//     }
//   };
//
// Since the sizing of the parameters is done at runtime, you must
// also specify the sizes after creating the
// DynamicNumericDiffCostFunction.
// For example:
//
//   DynamicNumericDiffCostFunction<MyCostFunctor, CENTRAL> cost_function(
//       new MyCostFunctor());
//   cost_function.AddParameterBlock(5);
//   cost_function.AddParameterBlock(10);
//   cost_function.SetNumResiduals(21);

#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_

#include <cmath>
#include <cstring>
#include <numeric>
#include <vector>

#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/numeric_diff.h"
#include "glog/logging.h"

namespace ceres {

template <typename CostFunctor, NumericDiffMethod method = CENTRAL>
class DynamicNumericDiffCostFunction : public CostFunction {
 public:
  explicit DynamicNumericDiffCostFunction(const CostFunctor* functor,
                                          Ownership ownership = TAKE_OWNERSHIP,
                                          double relative_step_size = 1e-6)
      : functor_(functor),
        ownership_(ownership),
        relative_step_size_(relative_step_size) {
  }

  virtual ~DynamicNumericDiffCostFunction() {
    // Only delete the functor if we own it; otherwise release the pointer so
    // the caller keeps ownership.
    if (ownership_ != TAKE_OWNERSHIP) {
      functor_.release();
    }
  }

  void AddParameterBlock(int size) {
    mutable_parameter_block_sizes()->push_back(size);
  }

  void SetNumResiduals(int num_residuals) {
    set_num_residuals(num_residuals);
  }

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    CHECK_GT(num_residuals(), 0)
        << "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
        << "before DynamicNumericDiffCostFunction::Evaluate().";

    const vector<int32>& block_sizes = parameter_block_sizes();
    CHECK(!block_sizes.empty())
        << "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
        << "before DynamicNumericDiffCostFunction::Evaluate().";

    const bool status = EvaluateCostFunctor(parameters, residuals);
    if (jacobians == NULL || !status) {
      return status;
    }

    // Create local space for a copy of the parameters which will get mutated.
    int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
    vector<double> parameters_copy(parameters_size);
    vector<double*> parameters_references_copy(block_sizes.size());
    parameters_references_copy[0] = &parameters_copy[0];
    for (int block = 1; block < block_sizes.size(); ++block) {
      parameters_references_copy[block] = parameters_references_copy[block - 1]
          + block_sizes[block - 1];
    }

    // Copy the parameters into the local temp space.
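    // Every pointer in parameters_references_copy aliases into the single
    // parameters_copy buffer, so the per-element perturbations done later only
    // touch this scratch copy and never the caller's (const) parameter blocks.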
    for (int block = 0; block < block_sizes.size(); ++block) {
      memcpy(parameters_references_copy[block],
             parameters[block],
             block_sizes[block] * sizeof(*parameters[block]));
    }

    for (int block = 0; block < block_sizes.size(); ++block) {
      if (jacobians[block] != NULL &&
          !EvaluateJacobianForParameterBlock(block_sizes[block],
                                             block,
                                             relative_step_size_,
                                             residuals,
                                             &parameters_references_copy[0],
                                             jacobians)) {
        return false;
      }
    }
    return true;
  }

 private:
  bool EvaluateJacobianForParameterBlock(const int parameter_block_size,
                                         const int parameter_block,
                                         const double relative_step_size,
                                         double const* residuals_at_eval_point,
                                         double** parameters,
                                         double** jacobians) const {
    using Eigen::Map;
    using Eigen::Matrix;
    using Eigen::Dynamic;
    using Eigen::RowMajor;

    typedef Matrix<double, Dynamic, 1> ResidualVector;
    typedef Matrix<double, Dynamic, 1> ParameterVector;
    typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;

    int num_residuals = this->num_residuals();

    Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
                                           num_residuals,
                                           parameter_block_size);

    // Mutate one element at a time and then restore.
    Map<ParameterVector> x_plus_delta(parameters[parameter_block],
                                      parameter_block_size);
    ParameterVector x(x_plus_delta);
    ParameterVector step_size = x.array().abs() * relative_step_size;

    // To handle cases where a parameter is exactly zero, instead use
    // the mean step_size for the other dimensions.
    double fallback_step_size = step_size.sum() / step_size.rows();
    if (fallback_step_size == 0.0) {
      // If all the parameters are zero, there's no good answer. Use the given
      // relative step_size as absolute step_size and hope for the best.
      fallback_step_size = relative_step_size;
    }

    // For each parameter in the parameter block, use finite
    // differences to compute the derivative for that parameter.
    for (int j = 0; j < parameter_block_size; ++j) {
      if (step_size(j) == 0.0) {
        // The parameter is exactly zero, so compromise and use the
        // mean step_size from the other parameters. This can break in
        // many cases, but it's hard to pick a good number without
        // problem specific knowledge.
        step_size(j) = fallback_step_size;
      }
      x_plus_delta(j) = x(j) + step_size(j);

      ResidualVector residuals(num_residuals);
      if (!EvaluateCostFunctor(parameters, &residuals[0])) {
        // Something went wrong; bail.
        return false;
      }

      // Compute this column of the jacobian in 3 steps:
      // 1. Store residuals for the forward part.
      // 2. Subtract residuals for the backward (or 0) part.
      // 3. Divide out the run.
      parameter_jacobian.col(j).matrix() = residuals;

      double one_over_h = 1 / step_size(j);
      if (method == CENTRAL) {
        // Compute the function on the other side of x(j).
        x_plus_delta(j) = x(j) - step_size(j);

        if (!EvaluateCostFunctor(parameters, &residuals[0])) {
          // Something went wrong; bail.
          return false;
        }

        parameter_jacobian.col(j) -= residuals;
        one_over_h /= 2;
      } else {
        // Forward difference only; reuse existing residuals evaluation.
        parameter_jacobian.col(j) -=
            Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
      }
      x_plus_delta(j) = x(j);  // Restore x_plus_delta.

      // Divide out the run to get slope.
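      // For CENTRAL differences the run is 2 * step_size(j), which is why
      // one_over_h was halved above; for FORWARD it is just step_size(j).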
      parameter_jacobian.col(j) *= one_over_h;
    }
    return true;
  }

  bool EvaluateCostFunctor(double const* const* parameters,
                           double* residuals) const {
    return EvaluateCostFunctorImpl(functor_.get(),
                                   parameters,
                                   residuals,
                                   functor_.get());
  }

  // Helper overloads that allow evaluating either a plain functor with an
  // operator() or a full CostFunction; overload resolution on the last
  // (unused) argument picks the right one.
  bool EvaluateCostFunctorImpl(const CostFunctor* functor,
                               double const* const* parameters,
                               double* residuals,
                               const void* /* NOT USED */) const {
    return (*functor)(parameters, residuals);
  }

  bool EvaluateCostFunctorImpl(const CostFunctor* functor,
                               double const* const* parameters,
                               double* residuals,
                               const CostFunction* /* NOT USED */) const {
    return functor->Evaluate(parameters, residuals, NULL);
  }

  internal::scoped_ptr<const CostFunctor> functor_;
  Ownership ownership_;
  const double relative_step_size_;
};

}  // namespace ceres

#endif  // CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
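
// Example usage (a minimal sketch; MyCostFunctor, the block sizes, the
// residual count, and the arrays x and y below are illustrative and not
// defined by this header):
//
//   struct MyCostFunctor {
//     bool operator()(double const* const* parameters,
//                     double* residuals) const {
//       // Fill residuals[0..20] from parameters[0][0..4] and
//       // parameters[1][0..9].
//       return true;
//     }
//   };
//
//   double x[5];   // First parameter block, filled in elsewhere.
//   double y[10];  // Second parameter block, filled in elsewhere.
//
//   ceres::DynamicNumericDiffCostFunction<MyCostFunctor, ceres::CENTRAL>*
//       cost_function =
//           new ceres::DynamicNumericDiffCostFunction<MyCostFunctor,
//                                                     ceres::CENTRAL>(
//               new MyCostFunctor());
//   cost_function->AddParameterBlock(5);
//   cost_function->AddParameterBlock(10);
//   cost_function->SetNumResiduals(21);
//
//   ceres::Problem problem;
//   problem.AddResidualBlock(cost_function, NULL, x, y);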