// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/trust_region_minimizer.h"

#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <limits>
#include <string>
#include <vector>

#include "Eigen/Core"
#include "ceres/array_utils.h"
#include "ceres/evaluator.h"
#include "ceres/file.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/sparse_matrix.h"
#include "ceres/stringprintf.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {
// Small constant for various floating point issues.
const double kEpsilon = 1e-12;
}  // namespace

// Compute a scaling vector that is used to improve the conditioning
// of the Jacobian.
void TrustRegionMinimizer::EstimateScale(const SparseMatrix& jacobian,
                                         double* scale) const {
  jacobian.SquaredColumnNorm(scale);
  for (int i = 0; i < jacobian.num_cols(); ++i) {
    scale[i] = 1.0 / (1.0 + sqrt(scale[i]));
  }
}

void TrustRegionMinimizer::Init(const Minimizer::Options& options) {
  options_ = options;
  sort(options_.trust_region_minimizer_iterations_to_dump.begin(),
       options_.trust_region_minimizer_iterations_to_dump.end());
}

void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
                                    double* parameters,
                                    Solver::Summary* summary) {
  double start_time = WallTimeInSeconds();
  double iteration_start_time = start_time;
  Init(options);

  summary->termination_type = NO_CONVERGENCE;
  summary->num_successful_steps = 0;
  summary->num_unsuccessful_steps = 0;

  Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
  SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
  TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);

  const int num_parameters = evaluator->NumParameters();
  const int num_effective_parameters = evaluator->NumEffectiveParameters();
  const int num_residuals = evaluator->NumResiduals();

  VectorRef x_min(parameters, num_parameters);
  Vector x = x_min;
  double x_norm = x.norm();

  Vector residuals(num_residuals);
  Vector trust_region_step(num_effective_parameters);
  Vector delta(num_effective_parameters);
  Vector x_plus_delta(num_parameters);
  Vector gradient(num_effective_parameters);
  Vector model_residuals(num_residuals);
  Vector scale(num_effective_parameters);

  IterationSummary iteration_summary;
  iteration_summary.iteration = 0;
  iteration_summary.step_is_valid = false;
  iteration_summary.step_is_successful = false;
  iteration_summary.cost_change = 0.0;
  iteration_summary.gradient_max_norm = 0.0;
  iteration_summary.step_norm = 0.0;
  iteration_summary.relative_decrease = 0.0;
  iteration_summary.trust_region_radius = strategy->Radius();
  // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or
  // something similar across the board.
  iteration_summary.eta = options_.eta;
  iteration_summary.linear_solver_iterations = 0;
  iteration_summary.step_solver_time_in_seconds = 0;

  // Do initial cost and Jacobian evaluation.
  double cost = 0.0;
  if (!evaluator->Evaluate(x.data(),
                           &cost,
                           residuals.data(),
                           gradient.data(),
                           jacobian)) {
    LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
    summary->termination_type = NUMERICAL_FAILURE;
    return;
  }

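  // State for the (optional) non-monotonic step acceptance test described
  // further below: minimum_cost tracks the best cost seen so far, while
  // reference_cost, candidate_cost and the accumulated model cost changes
  // implement the reference iterate bookkeeping of Conn, Gould & Toint and
  // Toint (1997).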
  int num_consecutive_nonmonotonic_steps = 0;
  double minimum_cost = cost;
  double reference_cost = cost;
  double accumulated_reference_model_cost_change = 0.0;
  double candidate_cost = cost;
  double accumulated_candidate_model_cost_change = 0.0;

  summary->initial_cost = cost + summary->fixed_cost;
  iteration_summary.cost = cost + summary->fixed_cost;
  iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();

  // The initial gradient max_norm is bounded from below so that we do
  // not divide by zero.
  const double initial_gradient_max_norm =
      max(iteration_summary.gradient_max_norm, kEpsilon);
  const double absolute_gradient_tolerance =
      options_.gradient_tolerance * initial_gradient_max_norm;

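  // Note that the gradient tolerance test below (and after every successful
  // step) is therefore relative: convergence is declared once the gradient
  // max norm has dropped to options_.gradient_tolerance times its (lower
  // bounded) initial value or below.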
  if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
    summary->termination_type = GRADIENT_TOLERANCE;
    VLOG(1) << "Terminating: Gradient tolerance reached. "
            << "Relative gradient max norm: "
            << iteration_summary.gradient_max_norm / initial_gradient_max_norm
            << " <= " << options_.gradient_tolerance;
    return;
  }

  iteration_summary.iteration_time_in_seconds =
      WallTimeInSeconds() - iteration_start_time;
  iteration_summary.cumulative_time_in_seconds =
      WallTimeInSeconds() - start_time
      + summary->preprocessor_time_in_seconds;
  summary->iterations.push_back(iteration_summary);

  if (options_.jacobi_scaling) {
    EstimateScale(*jacobian, scale.data());
    jacobian->ScaleColumns(scale.data());
  } else {
    scale.setOnes();
  }
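  // From here on the trust region step is computed in the scaled
  // coordinates; it is mapped back to the original parameterization by
  // multiplying it elementwise with scale (see the computation of delta
  // below). When jacobi_scaling is disabled, scale is all ones and the two
  // coordinate systems coincide.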

  int num_consecutive_invalid_steps = 0;
  bool inner_iterations_are_enabled = options.inner_iteration_minimizer != NULL;
  while (true) {
    bool inner_iterations_were_useful = false;
    if (!RunCallbacks(options.callbacks, iteration_summary, summary)) {
      return;
    }

    iteration_start_time = WallTimeInSeconds();
    if (iteration_summary.iteration >= options_.max_num_iterations) {
      summary->termination_type = NO_CONVERGENCE;
      VLOG(1) << "Terminating: Maximum number of iterations reached.";
      break;
    }

    const double total_solver_time = iteration_start_time - start_time +
        summary->preprocessor_time_in_seconds;
    if (total_solver_time >= options_.max_solver_time_in_seconds) {
      summary->termination_type = NO_CONVERGENCE;
      VLOG(1) << "Terminating: Maximum solver time reached.";
      break;
    }

    const double strategy_start_time = WallTimeInSeconds();
    TrustRegionStrategy::PerSolveOptions per_solve_options;
    per_solve_options.eta = options_.eta;
    if (find(options_.trust_region_minimizer_iterations_to_dump.begin(),
             options_.trust_region_minimizer_iterations_to_dump.end(),
             iteration_summary.iteration) !=
        options_.trust_region_minimizer_iterations_to_dump.end()) {
      per_solve_options.dump_format_type =
          options_.trust_region_problem_dump_format_type;
      per_solve_options.dump_filename_base =
          JoinPath(options_.trust_region_problem_dump_directory,
                   StringPrintf("ceres_solver_iteration_%03d",
                                iteration_summary.iteration));
    } else {
      per_solve_options.dump_format_type = TEXTFILE;
      per_solve_options.dump_filename_base.clear();
    }

    TrustRegionStrategy::Summary strategy_summary =
        strategy->ComputeStep(per_solve_options,
                              jacobian,
                              residuals.data(),
                              trust_region_step.data());

    iteration_summary = IterationSummary();
    iteration_summary.iteration = summary->iterations.back().iteration + 1;
    iteration_summary.step_solver_time_in_seconds =
        WallTimeInSeconds() - strategy_start_time;
    iteration_summary.linear_solver_iterations =
        strategy_summary.num_iterations;
    iteration_summary.step_is_valid = false;
    iteration_summary.step_is_successful = false;

    double model_cost_change = 0.0;
    if (strategy_summary.termination_type != FAILURE) {
      // new_model_cost
      //  = 1/2 [f + J * step]^2
      //  = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
      // model_cost_change
      //  = cost - new_model_cost
      //  = f'f/2 - 1/2 [ f'f + 2f'J * step + step' * J' * J * step]
      //  = -f'J * step - step' * J' * J * step / 2
      model_residuals.setZero();
      jacobian->RightMultiply(trust_region_step.data(), model_residuals.data());
      model_cost_change =
          - model_residuals.dot(residuals + model_residuals / 2.0);

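      // A negative model_cost_change means that even the quadratic model
      // predicts an increase in the objective for this step, so the step is
      // not to be trusted and is flagged as invalid below.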
      if (model_cost_change < 0.0) {
        VLOG(1) << "Invalid step: current_cost: " << cost
                << " absolute difference " << model_cost_change
                << " relative difference " << (model_cost_change / cost);
      } else {
        iteration_summary.step_is_valid = true;
      }
    }

    if (!iteration_summary.step_is_valid) {
      // Invalid steps can happen for a number of reasons. We allow a
      // limited number of consecutive failures, and return with
      // NUMERICAL_FAILURE if this limit is exceeded.
      if (++num_consecutive_invalid_steps >=
          options_.max_num_consecutive_invalid_steps) {
        summary->termination_type = NUMERICAL_FAILURE;
        summary->error = StringPrintf(
            "Terminating. Number of successive invalid steps more "
            "than Solver::Options::max_num_consecutive_invalid_steps: %d",
            options_.max_num_consecutive_invalid_steps);

        LOG(WARNING) << summary->error;
        return;
      }

      // We are going to reduce the trust region radius and solve again.
      // To do this, we treat this iteration as an unsuccessful iteration.
      // Since the various callbacks are still executed, we fill the
      // iteration summary with data that assumes a step of length zero
      // and no progress.
      iteration_summary.cost = cost + summary->fixed_cost;
      iteration_summary.cost_change = 0.0;
      iteration_summary.gradient_max_norm =
          summary->iterations.back().gradient_max_norm;
      iteration_summary.step_norm = 0.0;
      iteration_summary.relative_decrease = 0.0;
      iteration_summary.eta = options_.eta;
    } else {
      // The step is numerically valid, so now we can judge its quality.
      num_consecutive_invalid_steps = 0;

      // Undo the Jacobian column scaling.
      delta = (trust_region_step.array() * scale.array()).matrix();
      if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
        summary->termination_type = NUMERICAL_FAILURE;
        summary->error =
            "Terminating. Failed to compute Plus(x, delta, x_plus_delta).";

        LOG(WARNING) << summary->error;
        return;
      }

      // Try this step.
      double new_cost = numeric_limits<double>::max();
      if (!evaluator->Evaluate(x_plus_delta.data(),
                               &new_cost,
                               NULL, NULL, NULL)) {
        // If the evaluation of the new cost fails, treat it as a step
        // with high cost.
        LOG(WARNING) << "Step failed to evaluate. "
                     << "Treating it as a step with infinite cost";
        new_cost = numeric_limits<double>::max();
      } else {
        // Check if performing an inner iteration will make it better.
        if (inner_iterations_are_enabled) {
          ++summary->num_inner_iteration_steps;
          double inner_iteration_start_time = WallTimeInSeconds();
          const double x_plus_delta_cost = new_cost;
          Vector inner_iteration_x = x_plus_delta;
          Solver::Summary inner_iteration_summary;
          options.inner_iteration_minimizer->Minimize(options,
                                                      inner_iteration_x.data(),
                                                      &inner_iteration_summary);
          if (!evaluator->Evaluate(inner_iteration_x.data(),
                                   &new_cost,
                                   NULL, NULL, NULL)) {
            VLOG(2) << "Inner iteration failed.";
            new_cost = x_plus_delta_cost;
          } else {
            x_plus_delta = inner_iteration_x;
            // Boost the model_cost_change, since the inner iteration
            // improvements are not accounted for by the trust region.
            model_cost_change += x_plus_delta_cost - new_cost;
            VLOG(2) << "Inner iteration succeeded; current cost: " << cost
                    << " x_plus_delta_cost: " << x_plus_delta_cost
                    << " new_cost: " << new_cost;
            const double inner_iteration_relative_progress =
                1.0 - new_cost / x_plus_delta_cost;
            inner_iterations_are_enabled =
                (inner_iteration_relative_progress >
                 options.inner_iteration_tolerance);

            inner_iterations_were_useful = new_cost < cost;

            // Disable inner iterations once the relative improvement
            // drops below tolerance.
            if (!inner_iterations_are_enabled) {
              VLOG(2) << "Disabling inner iterations. Progress : "
                      << inner_iteration_relative_progress;
            }
          }
          summary->inner_iteration_time_in_seconds +=
              WallTimeInSeconds() - inner_iteration_start_time;
        }
      }

      iteration_summary.step_norm = (x - x_plus_delta).norm();

      // Convergence based on parameter_tolerance: terminate when the step
      // size ||x_plus_delta - x|| is small relative to the size of the
      // current parameter vector, i.e.
      //   step_norm <= parameter_tolerance * (||x|| + parameter_tolerance).
      const double step_size_tolerance = options_.parameter_tolerance *
          (x_norm + options_.parameter_tolerance);
      if (iteration_summary.step_norm <= step_size_tolerance) {
        VLOG(1) << "Terminating. Parameter tolerance reached. "
                << "relative step_norm: "
                << (iteration_summary.step_norm /
                    (x_norm + options_.parameter_tolerance))
                << " <= " << options_.parameter_tolerance;
        summary->termination_type = PARAMETER_TOLERANCE;
        return;
      }

      iteration_summary.cost_change = cost - new_cost;
      const double absolute_function_tolerance =
          options_.function_tolerance * cost;
      if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
        VLOG(1) << "Terminating. Function tolerance reached. "
                << "|cost_change|/cost: "
                << fabs(iteration_summary.cost_change) / cost
                << " <= " << options_.function_tolerance;
        summary->termination_type = FUNCTION_TOLERANCE;
        return;
      }

      const double relative_decrease =
          iteration_summary.cost_change / model_cost_change;

      const double historical_relative_decrease =
          (reference_cost - new_cost) /
          (accumulated_reference_model_cost_change + model_cost_change);

      // If monotonic steps are being used, then the relative_decrease
      // is the usual ratio of the change in objective function value
      // divided by the change in model cost.
      //
      // If non-monotonic steps are allowed, then we take the maximum
      // of the relative_decrease and the
      // historical_relative_decrease, which measures the decrease
      // since a reference iteration. The model cost change is
      // estimated by accumulating the model cost changes since the
      // reference iteration. The historical relative_decrease offers
      // a boost to a step which is not too bad compared to the
      // reference iteration, allowing for non-monotonic steps.
      iteration_summary.relative_decrease =
          options.use_nonmonotonic_steps
          ? max(relative_decrease, historical_relative_decrease)
          : relative_decrease;

      // Normally, the quality of a trust region step is measured by
      // the ratio
      //
      //              cost_change
      //    r =    -----------------
      //           model_cost_change
      //
      // All the change in the nonlinear objective is due to the trust
      // region step so this ratio is a good measure of the quality of
      // the trust region radius. However, when inner iterations are
      // being used, cost_change includes the contribution of the
      // inner iterations and it is not fair to credit it all to the
      // trust region algorithm. So we change the ratio to be
      //
      //                              cost_change
      //    r =    ------------------------------------------------
      //           (model_cost_change + inner_iteration_cost_change)
      //
      // In most cases this is fine, but it can be the case that the
      // change in solution quality due to inner iterations is so large
      // and the trust region step is so bad that this ratio can become
      // quite small.
      //
      // This can cause the trust region loop to reject this step. To
      // get around this, we explicitly check if the inner iterations
      // led to a net decrease in the objective function value. If
      // they did, we accept the step even if the trust region ratio
      // is small.
      //
      // Notice that we do not just check that cost_change is positive,
      // which is a weaker condition and would render the
      // min_relative_decrease threshold useless. Instead, we keep
      // track of inner_iterations_were_useful, which is true only
      // when inner iterations lead to a net decrease in the cost.
      iteration_summary.step_is_successful =
          (inner_iterations_were_useful ||
           iteration_summary.relative_decrease >
           options_.min_relative_decrease);

      if (iteration_summary.step_is_successful) {
        accumulated_candidate_model_cost_change += model_cost_change;
        accumulated_reference_model_cost_change += model_cost_change;

        if (!inner_iterations_were_useful &&
            relative_decrease <= options_.min_relative_decrease) {
          iteration_summary.step_is_nonmonotonic = true;
          VLOG(2) << "Non-monotonic step! "
                  << " relative_decrease: " << relative_decrease
                  << " historical_relative_decrease: "
                  << historical_relative_decrease;
        }
      }
    }

    if (iteration_summary.step_is_successful) {
      ++summary->num_successful_steps;
      strategy->StepAccepted(iteration_summary.relative_decrease);
      x = x_plus_delta;
      x_norm = x.norm();

      // Step looks good, evaluate the residuals and Jacobian at this
      // point.
      if (!evaluator->Evaluate(x.data(),
                               &cost,
                               residuals.data(),
                               gradient.data(),
                               jacobian)) {
        summary->termination_type = NUMERICAL_FAILURE;
        summary->error =
            "Terminating: Residual and Jacobian evaluation failed.";
        LOG(WARNING) << summary->error;
        return;
      }

      iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();

      if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
        summary->termination_type = GRADIENT_TOLERANCE;
        VLOG(1) << "Terminating: Gradient tolerance reached. "
                << "Relative gradient max norm: "
                << (iteration_summary.gradient_max_norm /
                    initial_gradient_max_norm)
                << " <= " << options_.gradient_tolerance;
        return;
      }

      if (options_.jacobi_scaling) {
        jacobian->ScaleColumns(scale.data());
      }

      // Update the best, reference and candidate iterates.
      //
      // Based on algorithm 10.1.2 (page 357) of "Trust Region
      // Methods" by Conn, Gould & Toint, or equations 33-40 of
      // "Non-monotone trust-region algorithms for nonlinear
      // optimization subject to convex constraints" by Phil Toint,
      // Mathematical Programming, 77, 1997.
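      //
      // Roughly: the best iterate seen so far is always kept in
      // (x_min, minimum_cost); the candidate iterate tracks the worst cost
      // accepted since the last improvement; and once
      // max_consecutive_nonmonotonic_steps such steps have been taken, the
      // reference iterate is reset to the candidate to force descent.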
      if (cost < minimum_cost) {
        // A step that improves solution quality was found.
        x_min = x;
        minimum_cost = cost;
        // Set the candidate iterate to the current point.
        candidate_cost = cost;
        num_consecutive_nonmonotonic_steps = 0;
        accumulated_candidate_model_cost_change = 0.0;
      } else {
        ++num_consecutive_nonmonotonic_steps;
        if (cost > candidate_cost) {
          // The current iterate has a higher cost than the
          // candidate iterate. Set the candidate to this point.
          VLOG(2) << "Updating the candidate iterate to the current point.";
          candidate_cost = cost;
          accumulated_candidate_model_cost_change = 0.0;
        }

        // If we have made too many non-monotonic steps, reset the value
        // of the reference iterate so as to force the algorithm to
        // descend.
        //
        // This is the case because the candidate iterate has a cost
        // greater than minimum_cost but smaller than that of the
        // reference iterate.
        if (num_consecutive_nonmonotonic_steps ==
            options.max_consecutive_nonmonotonic_steps) {
          VLOG(2) << "Resetting the reference point to the candidate point";
          reference_cost = candidate_cost;
          accumulated_reference_model_cost_change =
              accumulated_candidate_model_cost_change;
        }
      }

    } else {
      ++summary->num_unsuccessful_steps;
      if (iteration_summary.step_is_valid) {
        strategy->StepRejected(iteration_summary.relative_decrease);
      } else {
        strategy->StepIsInvalid();
      }
    }

    iteration_summary.cost = cost + summary->fixed_cost;
    iteration_summary.trust_region_radius = strategy->Radius();
    if (iteration_summary.trust_region_radius <
        options_.min_trust_region_radius) {
      summary->termination_type = PARAMETER_TOLERANCE;
      VLOG(1) << "Terminating: Minimum trust region radius reached.";
      return;
    }

    iteration_summary.iteration_time_in_seconds =
        WallTimeInSeconds() - iteration_start_time;
    iteration_summary.cumulative_time_in_seconds =
        WallTimeInSeconds() - start_time
        + summary->preprocessor_time_in_seconds;
    summary->iterations.push_back(iteration_summary);
  }
}


}  // namespace internal
}  // namespace ceres