// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/dogleg_strategy.h"

#include <cmath>
#include <limits>  // for std::numeric_limits

#include "Eigen/Dense"
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_solver.h"
#include "ceres/polynomial_solver.h"
#include "ceres/sparse_matrix.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {
const double kMaxMu = 1.0;
const double kMinMu = 1e-8;
}  // namespace

DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
    : linear_solver_(options.linear_solver),
      radius_(options.initial_radius),
      max_radius_(options.max_radius),
      min_diagonal_(options.lm_min_diagonal),
      max_diagonal_(options.lm_max_diagonal),
      mu_(kMinMu),
      min_mu_(kMinMu),
      max_mu_(kMaxMu),
      mu_increase_factor_(10.0),
      increase_threshold_(0.75),
      decrease_threshold_(0.25),
      dogleg_step_norm_(0.0),
      reuse_(false),
      dogleg_type_(options.dogleg_type) {
  CHECK_NOTNULL(linear_solver_);
  CHECK_GT(min_diagonal_, 0.0);
  CHECK_LE(min_diagonal_, max_diagonal_);
  CHECK_GT(max_radius_, 0.0);
}

// If the reuse_ flag is not set, then the Cauchy point (scaled
// gradient) and the new Gauss-Newton step are computed from
// scratch. The Dogleg step is then computed as an interpolation of
// these two vectors.
TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
    const TrustRegionStrategy::PerSolveOptions& per_solve_options,
    SparseMatrix* jacobian,
    const double* residuals,
    double* step) {
  CHECK_NOTNULL(jacobian);
  CHECK_NOTNULL(residuals);
  CHECK_NOTNULL(step);

  const int n = jacobian->num_cols();
  if (reuse_) {
    // The Gauss-Newton and gradient vectors are always available; only a
    // new interpolant needs to be computed. For the subspace case,
    // the subspace and the two-dimensional model are also still valid.
    switch (dogleg_type_) {
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;

      case SUBSPACE_DOGLEG:
        ComputeSubspaceDoglegStep(step);
        break;
    }
    TrustRegionStrategy::Summary summary;
    summary.num_iterations = 0;
    summary.termination_type = TOLERANCE;
    return summary;
  }

  reuse_ = true;
  // Check that we have the storage needed to hold the various
  // temporary vectors.
  if (diagonal_.rows() != n) {
    diagonal_.resize(n, 1);
    gradient_.resize(n, 1);
    gauss_newton_step_.resize(n, 1);
  }

  // Vector used to form the diagonal matrix that is used to
  // regularize the Gauss-Newton solve and that defines the
  // elliptical trust region
  //
  //   || D * step || <= radius_ .
  //
  jacobian->SquaredColumnNorm(diagonal_.data());
  for (int i = 0; i < n; ++i) {
    diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
                            max_diagonal_);
  }
  diagonal_ = diagonal_.array().sqrt();
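
  // With the substitution step' = D * step, the model becomes
  //
  //   || J * step + f ||^2 = || (J * D^-1) * step' + f ||^2 ,
  //
  // where f denotes the residual vector, so the scaled Jacobian is
  // J * D^-1 and the scaled gradient is D^-1 * J^T * f. ComputeGradient
  // and ComputeCauchyPoint below work entirely in these scaled
  // quantities; the division by diagonal_ in the Compute*DoglegStep
  // methods maps the step back to the original coordinates.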

  ComputeGradient(jacobian, residuals);
  ComputeCauchyPoint(jacobian);

  LinearSolver::Summary linear_solver_summary =
      ComputeGaussNewtonStep(jacobian, residuals);

  TrustRegionStrategy::Summary summary;
  summary.residual_norm = linear_solver_summary.residual_norm;
  summary.num_iterations = linear_solver_summary.num_iterations;
  summary.termination_type = linear_solver_summary.termination_type;

  if (linear_solver_summary.termination_type != FAILURE) {
    switch (dogleg_type_) {
      // Interpolate the Cauchy point and the Gauss-Newton step.
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;

      // Find the minimum in the subspace defined by the
      // Cauchy point and the (Gauss-)Newton step.
      case SUBSPACE_DOGLEG:
        if (!ComputeSubspaceModel(jacobian)) {
          summary.termination_type = FAILURE;
          break;
        }
        ComputeSubspaceDoglegStep(step);
        break;
    }
  }

  return summary;
}

// The trust region is assumed to be elliptical with the
// diagonal scaling matrix D defined by sqrt(diagonal_).
// It is implemented by substituting step' = D * step.
// The trust region for step' is spherical.
// The gradient, the Gauss-Newton step, the Cauchy point,
// and all calculations involving the Jacobian have to
// be adjusted accordingly.
void DoglegStrategy::ComputeGradient(
    SparseMatrix* jacobian,
    const double* residuals) {
  gradient_.setZero();
  jacobian->LeftMultiply(residuals, gradient_.data());
  gradient_.array() /= diagonal_.array();
}

// The Cauchy point is the global minimizer of the quadratic model
// along the one-dimensional subspace spanned by the gradient.
void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
  // alpha * -gradient is the Cauchy point.
  Vector Jg(jacobian->num_rows());
  Jg.setZero();
  // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
  // instead of (J * D^-1) * (D^-1 * g).
  Vector scaled_gradient =
      (gradient_.array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
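
  // Derivation of alpha_: along the ray x(t) = -t * gradient_, the value
  // of the scaled quadratic model changes by
  //
  //   phi(t) = 0.5 * t^2 * ||Jg||^2 - t * ||gradient_||^2 ,
  //
  // where Jg = J_scaled * gradient_. Setting phi'(t) = 0 gives the
  // minimizing step length t* = ||gradient_||^2 / ||Jg||^2, which is
  // exactly the value of alpha_ computed below.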
  alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
}

// The dogleg step is defined as the intersection of the trust region
// boundary with the piecewise linear path from the origin to the Cauchy
// point and then from there to the Gauss-Newton point (global minimizer
// of the model function). The Gauss-Newton point is taken if it lies
// within the trust region.
void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // Case 1. The Gauss-Newton step lies inside the trust region, and
  // is therefore the optimal solution to the trust-region problem.
  const double gradient_norm = gradient_.norm();
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 2. The Cauchy point and the Gauss-Newton step both lie outside
  // the trust region. Rescale the Cauchy point to the trust region
  // boundary and return.
  if (gradient_norm * alpha_ >= radius_) {
    dogleg_step = -(radius_ / gradient_norm) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 3. The Cauchy point is inside the trust region and the
  // Gauss-Newton step is outside. Compute the line joining the two
  // points and the point on it which intersects the trust region
  // boundary.

  // a = alpha * -gradient
  // b = gauss_newton_step
  const double b_dot_a = -alpha_ * gradient_.dot(gauss_newton_step_);
  const double a_squared_norm = std::pow(alpha_ * gradient_norm, 2.0);
  const double b_minus_a_squared_norm =
      a_squared_norm - 2 * b_dot_a + std::pow(gauss_newton_norm, 2.0);

  // c = a' (b - a)
  //   = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
  const double c = b_dot_a - a_squared_norm;
  const double d = std::sqrt(c * c + b_minus_a_squared_norm *
                             (std::pow(radius_, 2.0) - a_squared_norm));

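  // beta is the positive root of the boundary condition
  //
  //   || a + beta * (b - a) ||^2 = radius_^2 ,
  //
  // i.e. of the quadratic
  //
  //   b_minus_a_squared_norm * beta^2 + 2 * c * beta
  //       + (a_squared_norm - radius_^2) = 0 ,
  //
  // namely beta = (d - c) / b_minus_a_squared_norm. The two algebraically
  // equivalent expressions below are selected by the sign of c so that c
  // is never subtracted from d when the two could be close in magnitude,
  // which avoids cancellation.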
  double beta =
      (c <= 0)
      ? (d - c) / b_minus_a_squared_norm
      : (radius_ * radius_ - a_squared_norm) / (d + c);
  dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
      + beta * gauss_newton_step_;
  dogleg_step_norm_ = dogleg_step.norm();
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}

// The subspace method finds the minimum of the two-dimensional problem
//
//   min. 1/2 x' B' H B x + g' B x
//   s.t. || B x ||^2 <= r^2
//
// where r is the trust region radius and B is the matrix with unit columns
// spanning the subspace defined by the steepest descent and Gauss-Newton
// directions. This subspace by definition includes the Gauss-Newton point,
// which is therefore taken if it lies within the trust region.
void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // The Gauss-Newton point is inside the trust region if |GN| <= radius_.
  // This test is valid even though radius_ is a length in the two-dimensional
  // subspace while gauss_newton_step_ is expressed in the (scaled)
  // higher dimensional original space. This is because
  //
  //   1. gauss_newton_step_ by definition lies in the subspace, and
  //   2. the subspace basis is orthonormal.
  //
  // As a consequence, the norm of the gauss_newton_step_ in the subspace is
  // the same as its norm in the original space.
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // The optimum lies on the boundary of the trust region. The above problem
  // therefore becomes
  //
  //   min. 1/2 x^T B^T H B x + g^T B x
  //   s.t. || B x ||^2 = r^2
  //
  // Notice the equality in the constraint.
  //
  // This can be solved by forming the Lagrangian, solving for x(y), where
  // y is the Lagrange multiplier, using the gradient of the objective, and
  // putting x(y) back into the constraint. This results in a fourth order
  // polynomial in y, which can be solved using e.g. the companion matrix.
  // See the description of MakePolynomialForBoundaryConstrainedProblem for
  // details. The result is up to four real roots y*, not all of which
  // correspond to feasible points. The feasible points x(y*) have to be
  // tested for optimality.

  if (subspace_is_one_dimensional_) {
    // The subspace is one-dimensional, i.e. the gradient and the
    // Gauss-Newton step are linearly dependent. In this case, we move
    // along the negative gradient until we reach the trust region
    // boundary.
    dogleg_step = -(radius_ / gradient_.norm()) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Dogleg subspace step size (1D): " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  Vector2d minimum(0.0, 0.0);
  if (!FindMinimumOnTrustRegionBoundary(&minimum)) {
    // For the positive semi-definite case, a traditional dogleg step
    // is taken instead.
    LOG(WARNING) << "Failed to compute polynomial roots. "
                 << "Taking traditional dogleg step instead.";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Test first order optimality at the minimum.
  // The first order KKT conditions state that the minimum x*
  // has to satisfy either || x* ||^2 < r^2 (i.e. has to lie within
  // the trust region), or
  //
  //   (B x* + g) + y x* = 0
  //
  // for some positive scalar y.
  // Here, as it is already known that the minimum lies on the boundary, the
  // latter condition is tested. To allow for small imprecisions, we test if
  // the angle between (B x* + g) and -x* is smaller than acos(0.99).
  // The exact value of the cosine is arbitrary but should be close to 1.
  //
  // This condition should not be violated. If it is, the minimum was not
  // correctly determined.
  const double kCosineThreshold = 0.99;
  const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
  const double cosine_angle = -minimum.dot(grad_minimum) /
      (minimum.norm() * grad_minimum.norm());
  if (cosine_angle < kCosineThreshold) {
    LOG(WARNING) << "First order optimality seems to be violated "
                 << "in the subspace method!\n"
                 << "Cosine of angle between x and B x + g is "
                 << cosine_angle << ".\n"
                 << "Taking a regular dogleg step instead.\n"
                 << "Please consider filing a bug report if this "
                 << "happens frequently or consistently.\n";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Create the full step from the optimal 2d solution.
  dogleg_step = subspace_basis_ * minimum;
  dogleg_step_norm_ = radius_;
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg subspace step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}

// Build the polynomial that defines the optimal Lagrange multipliers.
// Let the Lagrangian be
//
//   L(x, y) = 0.5 x^T B x + x^T g + y (0.5 x^T x - 0.5 r^2).       (1)
//
// Stationary points of the Lagrangian are given by
//
//   0 = d L(x, y) / dx = Bx + g + y x                              (2)
//   0 = d L(x, y) / dy = 0.5 x^T x - 0.5 r^2                       (3)
//
// For any given y, we can solve (2) for x as
//
//   x(y) = -(B + y I)^-1 g .                                       (4)
//
// As B + y I is 2x2, we form the inverse explicitly:
//
//   (B + y I)^-1 = (1 / det(B + y I)) adj(B + y I)                 (5)
//
// where adj() denotes adjugation. This should be safe, as B is positive
// semi-definite and y is necessarily positive, so (B + y I) is indeed
// invertible.
// Plugging (5) into (4) and the result into (3), then dividing by 0.5 we
// obtain
//
//   0 = (1 / det(B + y I))^2 g^T adj(B + y I)^T adj(B + y I) g - r^2
//                                                                  (6)
//
// or
//
//   det(B + y I)^2 r^2 = g^T adj(B + y I)^T adj(B + y I) g         (7a)
//                      = g^T adj(B)^T adj(B) g
//                           + 2 y g^T adj(B)^T g + y^2 g^T g       (7b)
//
// as
//
//   adj(B + y I) = adj(B) + y I = adj(B)^T + y I .                 (8)
//
// The left hand side can be expressed explicitly using
//
//   det(B + y I) = det(B) + y tr(B) + y^2 .                        (9)
//
// So (7) is a polynomial in y of degree four.
// Bringing everything back to the left hand side, the coefficients can
// be read off as
//
//     y^4  r^2
//   + y^3  2 r^2 tr(B)
//   + y^2 (r^2 tr(B)^2 + 2 r^2 det(B) - g^T g)
//   + y^1 (2 r^2 det(B) tr(B) - 2 g^T adj(B)^T g)
//   + y^0 (r^2 det(B)^2 - g^T adj(B)^T adj(B) g)
//
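// As a quick sanity check of these coefficients: for B = I, g = (1, 0)^T
// and r = 1 they give
//
//   y^4 + 4 y^3 + 5 y^2 + 2 y = y (y + 1)^2 (y + 2) ,
//
// and the root y = 0 yields x(0) = -(B + 0 I)^-1 g = -g, which indeed
// lies on the boundary || x || = r = 1.
//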
Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
  const double detB = subspace_B_.determinant();
  const double trB = subspace_B_.trace();
  const double r2 = radius_ * radius_;
  Matrix2d B_adj;
  B_adj <<  subspace_B_(1, 1), -subspace_B_(0, 1),
           -subspace_B_(1, 0),  subspace_B_(0, 0);

  Vector polynomial(5);
  polynomial(0) = r2;
  polynomial(1) = 2.0 * r2 * trB;
  polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
  polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
      - r2 * detB * trB);
  polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();

  return polynomial;
}

// Given a Lagrange multiplier y that corresponds to a stationary point
// of the Lagrangian L(x, y), compute the corresponding x from the
// equation
//
//   0 = d L(x, y) / dx
//     = B * x + g + y * x
//     = (B + y * I) * x + g
//
DoglegStrategy::Vector2d DoglegStrategy::ComputeSubspaceStepFromRoot(
    double y) const {
  const Matrix2d B_i = subspace_B_ + y * Matrix2d::Identity();
  return -B_i.partialPivLu().solve(subspace_g_);
}

// This function evaluates the quadratic model at a point x in the
// subspace spanned by subspace_basis_.
double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
  return 0.5 * x.dot(subspace_B_ * x) + subspace_g_.dot(x);
}

// This function attempts to solve the boundary-constrained subspace problem
//
//   min. 1/2 x^T B^T H B x + g^T B x
//   s.t. || B x ||^2 = r^2
//
// where B is an orthonormal subspace basis and r is the trust-region radius.
//
// This is done by finding the roots of a fourth degree polynomial. If the
// root finding fails, the function returns false and minimum will be set
// to (0, 0). If it succeeds, true is returned.
//
// In the failure case, another step should be taken, such as the traditional
// dogleg step.
bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
  CHECK_NOTNULL(minimum);

  // Return (0, 0) in all error cases.
  minimum->setZero();

  // Create the fourth-degree polynomial that is a necessary condition for
  // optimality.
  const Vector polynomial = MakePolynomialForBoundaryConstrainedProblem();

  // Find the real parts y_i of its roots (not only the real roots).
  Vector roots_real;
  if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
    // Failed to find the roots of the polynomial, i.e. the candidate
    // solutions of the constrained problem. Report this back to the caller.
    return false;
  }

  // For each root y, compute B x(y) and check for feasibility.
  // Notice that there should always be four roots, as the leading term of
  // the polynomial is r^2 and therefore non-zero. However, as some roots
  // may be complex, the real parts are not necessarily unique.
  double minimum_value = std::numeric_limits<double>::max();
  bool valid_root_found = false;
  for (int i = 0; i < roots_real.size(); ++i) {
    const Vector2d x_i = ComputeSubspaceStepFromRoot(roots_real(i));

    // Not all roots correspond to points on the trust region boundary.
    // There are at most four candidate solutions. As we are interested
    // in the minimum, it is safe to consider all of them after projecting
    // them onto the trust region boundary.
    if (x_i.norm() > 0) {
      const Vector2d x_boundary = (radius_ / x_i.norm()) * x_i;
      const double f_i = EvaluateSubspaceModel(x_boundary);
      valid_root_found = true;
      if (f_i < minimum_value) {
        minimum_value = f_i;
        // Store the projected candidate so that the returned minimum is
        // the same point at which the model was evaluated.
        *minimum = x_boundary;
      }
    }
  }

  return valid_root_found;
}

LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
    SparseMatrix* jacobian,
    const double* residuals) {
  const int n = jacobian->num_cols();
  LinearSolver::Summary linear_solver_summary;
  linear_solver_summary.termination_type = FAILURE;

  // The Jacobian matrix is often quite poorly conditioned. Thus it is
  // necessary to add a diagonal matrix at the bottom to prevent the
  // linear solver from failing.
  //
  // We do this by computing the same diagonal matrix as the one used
  // by Levenberg-Marquardt (other choices are possible), and scaling
  // it by a small constant (independent of the trust region radius).
  //
  // If the solve fails, the multiplier to the diagonal is increased
  // up to max_mu_ by a factor of mu_increase_factor_ every time. If
  // the linear solver is still not successful, the strategy returns
  // with FAILURE.
  //
  // Next time when a new Gauss-Newton step is requested, the
  // multiplier starts out from the last successful solve.
  //
  // When a step is declared successful, the multiplier is divided by
  // half of mu_increase_factor_ (see StepAccepted).

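  // With lm_diagonal_ = sqrt(mu_) * diagonal_ appended to the Jacobian
  // (the "diagonal matrix at the bottom" mentioned above), each solve
  // below is the regularized least squares problem
  //
  //   min. || J * y - residuals ||^2 + mu_ * || diag(diagonal_) * y ||^2 ,
  //
  // whose normal equations are
  //
  //   (J^T J + mu_ * diag(diagonal_)^2) y = J^T residuals .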
  while (mu_ < max_mu_) {
    // Dogleg, as far as I (sameeragarwal) understand it, requires a
    // reasonably good estimate of the Gauss-Newton step. This means
    // that we need to solve the normal equations more or less
    // exactly. This is reflected in the values of the tolerances set
    // below.
    //
    // For now, this strategy should only be used with exact
    // factorization based solvers, for which these tolerances are
    // automatically satisfied.
    //
    // The right way to combine inexact solves with trust region
    // methods is to use Steihaug's method.
    LinearSolver::PerSolveOptions solve_options;
    solve_options.q_tolerance = 0.0;
    solve_options.r_tolerance = 0.0;

    lm_diagonal_ = diagonal_ * std::sqrt(mu_);
    solve_options.D = lm_diagonal_.data();

    // As in the LevenbergMarquardtStrategy, solve Jy = r instead
    // of Jx = -r and later set x = -y to avoid having to modify
    // either jacobian or residuals.
    InvalidateArray(n, gauss_newton_step_.data());
    linear_solver_summary = linear_solver_->Solve(jacobian,
                                                  residuals,
                                                  solve_options,
                                                  gauss_newton_step_.data());

    if (linear_solver_summary.termination_type == FAILURE ||
        !IsArrayValid(n, gauss_newton_step_.data())) {
      mu_ *= mu_increase_factor_;
      VLOG(2) << "Increasing mu " << mu_;
      linear_solver_summary.termination_type = FAILURE;
      continue;
    }
    break;
  }

  if (linear_solver_summary.termination_type != FAILURE) {
    // The scaled Gauss-Newton step is D * GN:
    //
    //     - (D^-1 J^T J D^-1)^-1 (D^-1 g)
    //   = - D (J^T J)^-1 D D^-1 g
    //   = - D (J^T J)^-1 g
    //
    gauss_newton_step_.array() *= -diagonal_.array();
  }

  return linear_solver_summary;
}

void DoglegStrategy::StepAccepted(double step_quality) {
  CHECK_GT(step_quality, 0.0);

  if (step_quality < decrease_threshold_) {
    radius_ *= 0.5;
  }

  if (step_quality > increase_threshold_) {
    radius_ = std::max(radius_, 3.0 * dogleg_step_norm_);
  }

  // Reduce the regularization multiplier, in the hope that whatever
  // was causing the rank deficiency has gone away and we can return
  // to doing a pure Gauss-Newton solve.
  mu_ = std::max(min_mu_, 2.0 * mu_ / mu_increase_factor_);
  reuse_ = false;
}

void DoglegStrategy::StepRejected(double step_quality) {
  radius_ *= 0.5;
  reuse_ = true;
}

void DoglegStrategy::StepIsInvalid() {
  mu_ *= mu_increase_factor_;
  reuse_ = false;
}

double DoglegStrategy::Radius() const {
  return radius_;
}

bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
  // Compute an orthogonal basis for the subspace using QR decomposition.
  Matrix basis_vectors(jacobian->num_cols(), 2);
  basis_vectors.col(0) = gradient_;
  basis_vectors.col(1) = gauss_newton_step_;
  Eigen::ColPivHouseholderQR<Matrix> basis_qr(basis_vectors);

  switch (basis_qr.rank()) {
    case 0:
      // This should never happen, as it implies that both the gradient
      // and the Gauss-Newton step are zero. In this case, the minimizer
      // should have stopped due to the gradient being too small.
      LOG(ERROR) << "Rank of subspace basis is 0. "
                 << "This means that the gradient at the current iterate is "
                 << "zero but the optimization has not been terminated. "
                 << "You may have found a bug in Ceres.";
      return false;

    case 1:
      // The gradient and the Gauss-Newton step are linearly dependent, so
      // we lie on one of the major axes of the quadratic problem. In this
      // case, we simply move along the gradient until we reach the trust
      // region boundary.
      subspace_is_one_dimensional_ = true;
      return true;

    case 2:
      subspace_is_one_dimensional_ = false;
      break;

    default:
      LOG(ERROR) << "Rank of the subspace basis matrix is reported to be "
                 << "greater than 2. As the matrix contains only two "
                 << "columns this cannot be true and is indicative of "
                 << "a bug.";
      return false;
  }

  // The subspace is two-dimensional, so compute the subspace model.
  // Given the basis U, this is
  //
  //   subspace_g_ = U^T g_scaled
  //
  // and
  //
  //   subspace_B_ = U^T (J_scaled^T J_scaled) U
  //
  // As J_scaled = J * D^-1, the latter becomes
  //
  //   subspace_B_ = ((U^T D^-1) J^T) (J (D^-1 U))
  //               = (J (D^-1 U))^T (J (D^-1 U))

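  // Concretely, U = subspace_basis_ is n x 2 with orthonormal columns,
  // subspace_g_ is 2 x 1 and subspace_B_ is 2 x 2, so that for any
  // x in R^2 the reduced model
  //
  //   m(x) = 0.5 x^T subspace_B_ x + subspace_g_^T x
  //
  // agrees (up to a constant) with the full scaled model evaluated at
  // the step U x.
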
  subspace_basis_ =
      basis_qr.householderQ() * Matrix::Identity(jacobian->num_cols(), 2);

  subspace_g_ = subspace_basis_.transpose() * gradient_;

  Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
      Jb(2, jacobian->num_rows());
  Jb.setZero();

  Vector tmp;
  tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
  tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(1).data());

  subspace_B_ = Jb * Jb.transpose();

  return true;
}

}  // namespace internal
}  // namespace ceres