// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/dogleg_strategy.h"

#include <algorithm>
#include <cmath>
#include <limits>

#include "Eigen/Dense"
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/linear_solver.h"
#include "ceres/polynomial.h"
#include "ceres/sparse_matrix.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {
const double kMaxMu = 1.0;
const double kMinMu = 1e-8;
}  // namespace

DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
    : linear_solver_(options.linear_solver),
      radius_(options.initial_radius),
      max_radius_(options.max_radius),
      min_diagonal_(options.min_lm_diagonal),
      max_diagonal_(options.max_lm_diagonal),
      mu_(kMinMu),
      min_mu_(kMinMu),
      max_mu_(kMaxMu),
      mu_increase_factor_(10.0),
      increase_threshold_(0.75),
      decrease_threshold_(0.25),
      dogleg_step_norm_(0.0),
      reuse_(false),
      dogleg_type_(options.dogleg_type) {
  CHECK_NOTNULL(linear_solver_);
  CHECK_GT(min_diagonal_, 0.0);
  CHECK_LE(min_diagonal_, max_diagonal_);
  CHECK_GT(max_radius_, 0.0);
}

// If the reuse_ flag is not set, the Cauchy point (the minimizer along
// the scaled gradient) and a new Gauss-Newton step are computed from
// scratch. The dogleg step is then computed as an interpolation of
// these two vectors.
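//
// As a sketch (the precise case analysis lives in
// ComputeTraditionalDoglegStep below), the interpolated step is
//
//   step(beta) = a + beta * (b - a),   beta in [0, 1],
//
// where a = -alpha * gradient is the Cauchy point, b is the
// Gauss-Newton step, and beta is chosen so that || step(beta) ||
// equals the trust region radius whenever b itself lies outside it.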
TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
    const TrustRegionStrategy::PerSolveOptions& per_solve_options,
    SparseMatrix* jacobian,
    const double* residuals,
    double* step) {
  CHECK_NOTNULL(jacobian);
  CHECK_NOTNULL(residuals);
  CHECK_NOTNULL(step);

  const int n = jacobian->num_cols();
  if (reuse_) {
    // The Gauss-Newton and gradient vectors are always available; only
    // a new interpolant needs to be computed. For the subspace case,
    // the subspace and the two-dimensional model are also still valid.
    switch (dogleg_type_) {
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;

      case SUBSPACE_DOGLEG:
        ComputeSubspaceDoglegStep(step);
        break;
    }
    TrustRegionStrategy::Summary summary;
    summary.num_iterations = 0;
    summary.termination_type = LINEAR_SOLVER_SUCCESS;
    return summary;
  }

  reuse_ = true;
  // Check that we have the storage needed to hold the various
  // temporary vectors.
  if (diagonal_.rows() != n) {
    diagonal_.resize(n, 1);
    gradient_.resize(n, 1);
    gauss_newton_step_.resize(n, 1);
  }

  // Vector used to form the diagonal matrix that is used to
  // regularize the Gauss-Newton solve and that defines the
  // elliptical trust region
  //
  //   || D * step || <= radius_ .
  //
  jacobian->SquaredColumnNorm(diagonal_.data());
  for (int i = 0; i < n; ++i) {
    diagonal_[i] =
        std::min(std::max(diagonal_[i], min_diagonal_), max_diagonal_);
  }
  diagonal_ = diagonal_.array().sqrt();

  ComputeGradient(jacobian, residuals);
  ComputeCauchyPoint(jacobian);

  LinearSolver::Summary linear_solver_summary =
      ComputeGaussNewtonStep(per_solve_options, jacobian, residuals);

  TrustRegionStrategy::Summary summary;
  summary.residual_norm = linear_solver_summary.residual_norm;
  summary.num_iterations = linear_solver_summary.num_iterations;
  summary.termination_type = linear_solver_summary.termination_type;

  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
    return summary;
  }

  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
    switch (dogleg_type_) {
      // Interpolate the Cauchy point and the Gauss-Newton step.
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;

      // Find the minimum in the subspace defined by the
      // Cauchy point and the (Gauss-)Newton step.
      case SUBSPACE_DOGLEG:
        if (!ComputeSubspaceModel(jacobian)) {
          summary.termination_type = LINEAR_SOLVER_FAILURE;
          break;
        }
        ComputeSubspaceDoglegStep(step);
        break;
    }
  }

  return summary;
}

// The trust region is assumed to be elliptical, with the diagonal
// scaling matrix D defined by sqrt(diagonal_). It is implemented by
// substituting step' = D * step; the trust region for step' is
// spherical. The gradient, the Gauss-Newton step, the Cauchy point,
// and all calculations involving the Jacobian have to be adjusted
// accordingly.
void DoglegStrategy::ComputeGradient(
    SparseMatrix* jacobian,
    const double* residuals) {
  gradient_.setZero();
  jacobian->LeftMultiply(residuals, gradient_.data());
  gradient_.array() /= diagonal_.array();
}

// The Cauchy point is the global minimizer of the quadratic model
// along the one-dimensional subspace spanned by the gradient.
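//
// A short derivation (all quantities in the scaled space): writing the
// model as m(x) = 1/2 || J x + r ||^2 and restricting x to the ray
// x = -alpha * g with g = J^T r, setting d m / d alpha = 0 yields
//
//   alpha = || g ||^2 / || J g ||^2 ,
//
// which is the expression computed below.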
void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
  // alpha_ * -gradient is the Cauchy point.
  Vector Jg(jacobian->num_rows());
  Jg.setZero();
  // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
  // instead of (J * D^-1) * (D^-1 * g).
  Vector scaled_gradient =
      (gradient_.array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
  alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
}

// The dogleg step is defined as the intersection of the trust region
// boundary with the piecewise linear path from the origin to the Cauchy
// point and then from there to the Gauss-Newton point (the global
// minimizer of the model function). The Gauss-Newton point is taken if
// it lies within the trust region.
void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // Case 1. The Gauss-Newton step lies inside the trust region, and
  // is therefore the optimal solution to the trust-region problem.
  const double gradient_norm = gradient_.norm();
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 2. The Cauchy point and the Gauss-Newton step both lie
  // outside the trust region. Rescale the Cauchy point to the trust
  // region boundary and return.
  if (gradient_norm * alpha_ >= radius_) {
    dogleg_step = -(radius_ / gradient_norm) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 3. The Cauchy point is inside the trust region and the
  // Gauss-Newton step is outside. Compute the line joining the two
  // points and the point on it which intersects the trust region
  // boundary.

  // a = alpha * -gradient
  // b = gauss_newton_step
  const double b_dot_a = -alpha_ * gradient_.dot(gauss_newton_step_);
  const double a_squared_norm = std::pow(alpha_ * gradient_norm, 2.0);
  const double b_minus_a_squared_norm =
      a_squared_norm - 2 * b_dot_a + std::pow(gauss_newton_norm, 2.0);

  // c = a' (b - a)
  //   = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
  const double c = b_dot_a - a_squared_norm;
  const double d = std::sqrt(c * c + b_minus_a_squared_norm *
                             (std::pow(radius_, 2.0) - a_squared_norm));

  // Of the two roots of || a + beta * (b - a) ||^2 == radius_^2, pick
  // the one in [0, 1] using the formula that avoids cancellation,
  // depending on the sign of c.
  const double beta =
      (c <= 0)
          ? (d - c) / b_minus_a_squared_norm
          : (radius_ * radius_ - a_squared_norm) / (d + c);
  dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
      + beta * gauss_newton_step_;
  dogleg_step_norm_ = dogleg_step.norm();
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}

// The subspace method finds the minimum of the two-dimensional problem
//
//   min. 1/2 x' B' H B x + g' B x
//   s.t. || B x ||^2 <= r^2
//
// where r is the trust region radius and B is the matrix with unit columns
// spanning the subspace defined by the steepest descent and Newton direction.
// This subspace by definition includes the Gauss-Newton point, which is
// therefore taken if it lies within the trust region.
void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // The Gauss-Newton point is inside the trust region if |GN| <= radius_.
  // This test is valid even though radius_ is a length in the two-dimensional
  // subspace while gauss_newton_step_ is expressed in the (scaled)
  // higher-dimensional original space. This is because
  //
  //   1. gauss_newton_step_ by definition lies in the subspace, and
  //   2. the subspace basis is orthonormal.
  //
  // As a consequence, the norm of gauss_newton_step_ in the subspace is
  // the same as its norm in the original space.
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // The optimum lies on the boundary of the trust region. The above problem
  // therefore becomes
  //
  //   min. 1/2 x^T B^T H B x + g^T B x
  //   s.t. || B x ||^2 = r^2
  //
  // Notice the equality in the constraint.
  //
  // This can be solved by forming the Lagrangian, solving for x(y), where
  // y is the Lagrange multiplier, using the gradient of the objective, and
  // substituting x(y) back into the constraint. This results in a fourth
  // order polynomial in y, which can be solved using e.g. the companion
  // matrix. See the description of MakePolynomialForBoundaryConstrainedProblem
  // for details. The result is up to four real roots y*, not all of which
  // correspond to feasible points. The feasible points x(y*) have to be
  // tested for optimality.

  if (subspace_is_one_dimensional_) {
    // The subspace is one-dimensional, i.e. the gradient and the
    // Gauss-Newton step point in the same direction. In this case,
    // we move along the gradient until we reach the trust region
    // boundary.
    dogleg_step = -(radius_ / gradient_.norm()) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Dogleg subspace step size (1D): " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  Vector2d minimum(0.0, 0.0);
  if (!FindMinimumOnTrustRegionBoundary(&minimum)) {
    // Failed to find the minimum of the two-dimensional model on the
    // trust region boundary; fall back to a traditional dogleg step.
    LOG(WARNING) << "Failed to compute polynomial roots. "
                 << "Taking traditional dogleg step instead.";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Test first-order optimality at the minimum.
  // The first-order KKT conditions state that the minimum x*
  // has to satisfy either || x* ||^2 < r^2 (i.e. has to lie within
  // the trust region), or
  //
  //   (B x* + g) + y x* = 0
  //
  // for some positive scalar y.
  // Here, as it is already known that the minimum lies on the boundary, the
  // latter condition is tested. To allow for small imprecisions, we test if
  // the angle between (B x* + g) and -x* is smaller than acos(0.99).
  // The exact value of the cosine is arbitrary but should be close to 1.
  //
  // This condition should not be violated.
  // If it is, the minimum was not correctly determined.
  const double kCosineThreshold = 0.99;
  const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
  const double cosine_angle = -minimum.dot(grad_minimum) /
      (minimum.norm() * grad_minimum.norm());
  if (cosine_angle < kCosineThreshold) {
    LOG(WARNING) << "First order optimality seems to be violated "
                 << "in the subspace method!\n"
                 << "Cosine of angle between x and B x + g is "
                 << cosine_angle << ".\n"
                 << "Taking a regular dogleg step instead.\n"
                 << "Please consider filing a bug report if this "
                 << "happens frequently or consistently.\n";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Create the full step from the optimal 2d solution.
  dogleg_step = subspace_basis_ * minimum;
  dogleg_step_norm_ = radius_;
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg subspace step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}

// Build the polynomial that defines the optimal Lagrange multipliers.
// Let the Lagrangian be
//
//   L(x, y) = 0.5 x^T B x + x^T g + y (0.5 x^T x - 0.5 r^2).        (1)
//
// Stationary points of the Lagrangian are given by
//
//   0 = d L(x, y) / dx = B x + g + y x                              (2)
//   0 = d L(x, y) / dy = 0.5 x^T x - 0.5 r^2                        (3)
//
// For any given y, we can solve (2) for x as
//
//   x(y) = -(B + y I)^-1 g .                                        (4)
//
// As B + y I is 2x2, we form the inverse explicitly:
//
//   (B + y I)^-1 = (1 / det(B + y I)) adj(B + y I)                  (5)
//
// where adj() denotes adjugation. This should be safe, as B is positive
// semi-definite and y is necessarily positive, so (B + y I) is indeed
// invertible.
// Plugging (5) into (4) and the result into (3), then dividing by 0.5, we
// obtain
//
//   0 = (1 / det(B + y I))^2 g^T adj(B + y I)^T adj(B + y I) g - r^2  (6)
//
// or
//
//   det(B + y I)^2 r^2 = g^T adj(B + y I)^T adj(B + y I) g            (7a)
//                      = g^T adj(B)^T adj(B) g
//                        + 2 y g^T adj(B)^T g + y^2 g^T g             (7b)
//
// as
//
//   adj(B + y I) = adj(B) + y I = adj(B)^T + y I .                    (8)
//
// The left hand side can be expressed explicitly using
//
//   det(B + y I) = det(B) + y tr(B) + y^2 .                           (9)
//
// So (7) is a polynomial in y of degree four.
// Bringing everything back to the left hand side, the coefficients can
// be read off as
//
//   y^4 r^2
//   + y^3 2 r^2 tr(B)
//   + y^2 (r^2 tr(B)^2 + 2 r^2 det(B) - g^T g)
//   + y^1 (2 r^2 det(B) tr(B) - 2 g^T adj(B)^T g)
//   + y^0 (r^2 det(B)^2 - g^T adj(B)^T adj(B) g)
//
Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
  const double detB = subspace_B_.determinant();
  const double trB = subspace_B_.trace();
  const double r2 = radius_ * radius_;
  Matrix2d B_adj;
  B_adj <<  subspace_B_(1, 1), -subspace_B_(0, 1),
           -subspace_B_(1, 0),  subspace_B_(0, 0);

  Vector polynomial(5);
  polynomial(0) = r2;
  polynomial(1) = 2.0 * r2 * trB;
  polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
  polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
                          - r2 * detB * trB);
  polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();

  return polynomial;
}

// Given a Lagrange multiplier y that corresponds to a stationary point
// of the Lagrangian L(x, y), compute the corresponding x from the
// equation
//
//   0 = d L(x, y) / dx
//     = B * x + g + y * x
//     = (B + y * I) * x + g
//
DoglegStrategy::Vector2d DoglegStrategy::ComputeSubspaceStepFromRoot(
    double y) const {
  const Matrix2d B_i = subspace_B_ + y * Matrix2d::Identity();
  return -B_i.partialPivLu().solve(subspace_g_);
}

// This function evaluates the quadratic model at a point x in the
// subspace spanned by subspace_basis_.
double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
  return 0.5 * x.dot(subspace_B_ * x) + subspace_g_.dot(x);
}

// This function attempts to solve the boundary-constrained subspace problem
//
//   min. 1/2 x^T B^T H B x + g^T B x
//   s.t. || B x ||^2 = r^2
//
// where B is an orthonormal subspace basis and r is the trust-region radius.
//
// This is done by finding the roots of a fourth-degree polynomial. If the
// root finding fails, the function returns false and minimum is set to
// (0, 0). If it succeeds, true is returned.
//
// In the failure case, another step should be taken, such as the traditional
// dogleg step.
bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
  CHECK_NOTNULL(minimum);

  // Return (0, 0) in all error cases.
  minimum->setZero();

  // Create the fourth-degree polynomial that is a necessary condition for
  // optimality.
  const Vector polynomial = MakePolynomialForBoundaryConstrainedProblem();

  // Find the real parts y_i of its roots (not only the real roots).
  Vector roots_real;
  if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
    // Failed to find the roots of the polynomial, i.e. the candidate
    // solutions of the constrained problem. Report this back to the caller.
    return false;
  }

  // For each root y, compute B x(y) and check for feasibility.
  // Notice that there should always be four roots, as the leading term of
  // the polynomial is r^2 and therefore non-zero. However, as some roots
  // may be complex, the real parts are not necessarily unique.
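  //
  // Candidate steps x(y) computed from the real parts of complex roots
  // do not lie exactly on the boundary. Every candidate is therefore
  // rescaled to norm radius_ before the model is evaluated, so that all
  // candidates are compared on the constraint set itself.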
  double minimum_value = std::numeric_limits<double>::max();
  bool valid_root_found = false;
  for (int i = 0; i < roots_real.size(); ++i) {
    const Vector2d x_i = ComputeSubspaceStepFromRoot(roots_real(i));

    // Not all roots correspond to points on the trust region boundary.
    // There are at most four candidate solutions. As we are interested
    // in the minimum, it is safe to consider all of them after projecting
    // them onto the trust region boundary.
    if (x_i.norm() > 0) {
      const double f_i = EvaluateSubspaceModel((radius_ / x_i.norm()) * x_i);
      valid_root_found = true;
      if (f_i < minimum_value) {
        minimum_value = f_i;
        *minimum = x_i;
      }
    }
  }

  return valid_root_found;
}

LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
    const PerSolveOptions& per_solve_options,
    SparseMatrix* jacobian,
    const double* residuals) {
  const int n = jacobian->num_cols();
  LinearSolver::Summary linear_solver_summary;
  linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;

  // The Jacobian matrix is often quite poorly conditioned. Thus it is
  // necessary to add a diagonal matrix at the bottom to prevent the
  // linear solver from failing.
  //
  // We do this by computing the same diagonal matrix as the one used
  // by Levenberg-Marquardt (other choices are possible), and scaling
  // it by a small constant (independent of the trust region radius).
  //
  // If the solve fails, the multiplier to the diagonal is increased
  // by a factor of mu_increase_factor_ every time, up to max_mu_. If
  // the linear solver is still not successful, the strategy returns
  // with LINEAR_SOLVER_FAILURE.
  //
  // The next time a new Gauss-Newton step is requested, the
  // multiplier starts out from the last successful solve.
  //
  // When a step is declared successful, the multiplier is decreased
  // by half of mu_increase_factor_.

  while (mu_ < max_mu_) {
    // Dogleg, as far as I (sameeragarwal) understand it, requires a
    // reasonably good estimate of the Gauss-Newton step. This means
    // that we need to solve the normal equations more or less
    // exactly. This is reflected in the values of the tolerances set
    // below.
    //
    // For now, this strategy should only be used with exact
    // factorization based solvers, for which these tolerances are
    // automatically satisfied.
    //
    // The right way to combine inexact solves with trust region
    // methods is to use Steihaug's method.
    LinearSolver::PerSolveOptions solve_options;
    solve_options.q_tolerance = 0.0;
    solve_options.r_tolerance = 0.0;

    lm_diagonal_ = diagonal_ * std::sqrt(mu_);
    solve_options.D = lm_diagonal_.data();

    // As in the LevenbergMarquardtStrategy, solve Jy = r instead
    // of Jx = -r and later set x = -y to avoid having to modify
    // either jacobian or residuals.
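    //
    // The step vector is poisoned with invalid values before the solve
    // and checked with IsArrayValid() afterwards, so that a solver that
    // fails silently and leaves garbage (or nothing) in the output is
    // detected and treated as a failure below.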
    InvalidateArray(n, gauss_newton_step_.data());
    linear_solver_summary = linear_solver_->Solve(jacobian,
                                                  residuals,
                                                  solve_options,
                                                  gauss_newton_step_.data());

    if (per_solve_options.dump_format_type == CONSOLE ||
        (per_solve_options.dump_format_type != CONSOLE &&
         !per_solve_options.dump_filename_base.empty())) {
      if (!DumpLinearLeastSquaresProblem(per_solve_options.dump_filename_base,
                                         per_solve_options.dump_format_type,
                                         jacobian,
                                         solve_options.D,
                                         residuals,
                                         gauss_newton_step_.data(),
                                         0)) {
        LOG(ERROR) << "Unable to dump trust region problem."
                   << " Filename base: "
                   << per_solve_options.dump_filename_base;
      }
    }

    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
      return linear_solver_summary;
    }

    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
        !IsArrayValid(n, gauss_newton_step_.data())) {
      mu_ *= mu_increase_factor_;
      VLOG(2) << "Increasing mu " << mu_;
      linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
      continue;
    }
    break;
  }

  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
    // The scaled Gauss-Newton step is D * GN:
    //
    //   - (D^-1 J^T J D^-1)^-1 (D^-1 g)
    //   = - D (J^T J)^-1 D D^-1 g
    //   = D -(J^T J)^-1 g
    //
    gauss_newton_step_.array() *= -diagonal_.array();
  }

  return linear_solver_summary;
}

void DoglegStrategy::StepAccepted(double step_quality) {
  CHECK_GT(step_quality, 0.0);

  if (step_quality < decrease_threshold_) {
    radius_ *= 0.5;
  }

  if (step_quality > increase_threshold_) {
    radius_ = std::max(radius_, 3.0 * dogleg_step_norm_);
  }

  // Reduce the regularization multiplier, in the hope that whatever
  // was causing the rank deficiency has gone away and we can return
  // to doing a pure Gauss-Newton solve.
  mu_ = std::max(min_mu_, 2.0 * mu_ / mu_increase_factor_);
  reuse_ = false;
}

void DoglegStrategy::StepRejected(double step_quality) {
  radius_ *= 0.5;
  reuse_ = true;
}

void DoglegStrategy::StepIsInvalid() {
  mu_ *= mu_increase_factor_;
  reuse_ = false;
}

double DoglegStrategy::Radius() const {
  return radius_;
}

bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
  // Compute an orthogonal basis for the subspace using QR decomposition.
  Matrix basis_vectors(jacobian->num_cols(), 2);
  basis_vectors.col(0) = gradient_;
  basis_vectors.col(1) = gauss_newton_step_;
  Eigen::ColPivHouseholderQR<Matrix> basis_qr(basis_vectors);

  switch (basis_qr.rank()) {
    case 0:
      // This should never happen, as it implies that both the gradient
      // and the Gauss-Newton step are zero. In this case, the minimizer
      // should have stopped due to the gradient being too small.
      LOG(ERROR) << "Rank of subspace basis is 0. "
                 << "This means that the gradient at the current iterate is "
                 << "zero but the optimization has not been terminated. "
                 << "You may have found a bug in Ceres.";
      return false;

    case 1:
      // The gradient and the Gauss-Newton step coincide, so we lie on one
      // of the major axes of the quadratic problem. In this case, we simply
      // move along the gradient until we reach the trust region boundary.
      subspace_is_one_dimensional_ = true;
      return true;

    case 2:
      subspace_is_one_dimensional_ = false;
      break;

    default:
      LOG(ERROR) << "Rank of the subspace basis matrix is reported to be "
                 << "greater than 2. As the matrix contains only two "
                 << "columns, this cannot be true and is indicative of "
                 << "a bug.";
      return false;
  }

  // The subspace is two-dimensional, so compute the subspace model.
  // Given the basis U, this is
  //
  //   subspace_g_ = U^T g_scaled
  //
  // and
  //
  //   subspace_B_ = U^T (J_scaled^T J_scaled) U .
  //
  // As J_scaled = J * D^-1, the latter becomes
  //
  //   subspace_B_ = ((U^T D^-1) J^T) (J (D^-1 U))
  //               = (J (D^-1 U))^T (J (D^-1 U))

  subspace_basis_ =
      basis_qr.householderQ() * Matrix::Identity(jacobian->num_cols(), 2);

  subspace_g_ = subspace_basis_.transpose() * gradient_;

  Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
      Jb(2, jacobian->num_rows());
  Jb.setZero();

  Vector tmp;
  tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
  tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(1).data());

  subspace_B_ = Jb * Jb.transpose();

  return true;
}

}  // namespace internal
}  // namespace ceres