// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/corrector.h"

#include <algorithm>
#include <cmath>
#include <cstring>
#include <cstdlib>
#include "gtest/gtest.h"
#include "ceres/random.h"
#include "ceres/internal/eigen.h"

namespace ceres {
namespace internal {

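// For reference, the correction exercised by these tests (mirroring the
// ground truth expressions used in the multidimensional tests below) is:
//
//   corrected residual: r' = sqrt(rho') / (1 - alpha) * r
//   corrected jacobian: J' = sqrt(rho') * (I - alpha / ||r||^2 * r r^T) * J
//
// where alpha is the smaller root of
//
//   1/2 alpha^2 - alpha - (rho'' / rho') * ||r||^2 = 0,
//
// i.e. alpha = 1 - sqrt(1 + 2 * rho'' / rho' * ||r||^2), and alpha is
// clamped to 0 whenever rho'' < 0.
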
// If rho[1] is zero, the Corrector constructor should crash.
TEST(Corrector, ZeroGradientDeathTest) {
  const double kRho[] = {0.0, 0.0, 1.0};
  EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
                            ".*");
}

// If rho[1] is negative, the Corrector constructor should crash.
TEST(Corrector, NegativeGradientDeathTest) {
  const double kRho[] = {0.0, -0.1, 1.0};
  EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
                            ".*");
}

TEST(Corrector, ScalarCorrection) {
  double residuals = sqrt(3.0);
  double jacobian = 10.0;
  double sq_norm = residuals * residuals;

  const double kRho[] = {sq_norm, 0.1, -0.01};

  // In light of the rho'' < 0 clamping now implemented in
  // corrector.cc, alpha = 0 whenever rho'' < 0.
  const double kAlpha = 0.0;

  // Thus the expected value of the residual is
  // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
  const double kExpectedResidual =
      residuals * sqrt(kRho[1]) / (1 - kAlpha);

  // The jacobian in this case will be
  // sqrt(kRho[1]) * (1 - kAlpha) * jacobian.
  const double kExpectedJacobian = sqrt(kRho[1]) * (1 - kAlpha) * jacobian;

  Corrector c(sq_norm, kRho);
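  // Note the call order used throughout this file: CorrectJacobian is
  // called before CorrectResiduals, since the jacobian correction is
  // computed from the (still uncorrected) residual values passed to it.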
  c.CorrectJacobian(1, 1, &residuals, &jacobian);
  c.CorrectResiduals(1, &residuals);

  ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
  ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
}

TEST(Corrector, ScalarCorrectionZeroResidual) {
  double residuals = 0.0;
  double jacobian = 10.0;
  double sq_norm = residuals * residuals;

  const double kRho[] = {0.0, 0.1, -0.01};
  Corrector c(sq_norm, kRho);

  // The alpha equation is
  // 1/2 alpha^2 - alpha + 0.0 = 0.
  // i.e. alpha = 1.0 - sqrt(1.0).
  //      alpha = 0.0.
  // Thus the expected value of the residual is
  // residual[i] * sqrt(kRho[1]).
  const double kExpectedResidual = residuals * sqrt(kRho[1]);

  // The jacobian in this case will be
  // sqrt(kRho[1]) * jacobian.
  const double kExpectedJacobian = sqrt(kRho[1]) * jacobian;

  c.CorrectJacobian(1, 1, &residuals, &jacobian);
  c.CorrectResiduals(1, &residuals);

  ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
  ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
}

// Scaling behaviour for one dimensional functions.
TEST(Corrector, ScalarCorrectionAlphaClamped) {
  double residuals = sqrt(3.0);
  double jacobian = 10.0;
  double sq_norm = residuals * residuals;

  const double kRho[] = {3, 0.1, -0.1};

  // rho[2] < 0 -> alpha = 0.0.
  const double kAlpha = 0.0;

  // Thus the expected value of the residual is
  // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
  const double kExpectedResidual =
      residuals * sqrt(kRho[1]) / (1.0 - kAlpha);

  // The jacobian in this case will be scaled by
  // sqrt(rho[1]) * (1 - alpha) * J.
  const double kExpectedJacobian = sqrt(kRho[1]) *
      (1.0 - kAlpha) * jacobian;

  Corrector c(sq_norm, kRho);
  c.CorrectJacobian(1, 1, &residuals, &jacobian);
  c.CorrectResiduals(1, &residuals);

  ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
  ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
}

// Test that the corrected multidimensional residuals and jacobians
// match the expected values and the resulting modified normal
// equations match the robustified Gauss-Newton approximation.
TEST(Corrector, MultidimensionalGaussNewtonApproximation) {
  double residuals[3];
  double jacobian[2 * 3];
  double rho[3];

  // Eigen matrix references for linear algebra.
  MatrixRef jac(jacobian, 3, 2);
  VectorRef res(residuals, 3);

  // Ground truth values of the modified jacobian and residuals.
  Matrix g_jac(3, 2);
  Vector g_res(3);

  // Ground truth values of the robustified Gauss-Newton
  // approximation.
  Matrix g_hess(2, 2);
  Vector g_grad(2);

  // Corrected hessian and gradient implied by the modified jacobian
  // and residuals.
  Matrix c_hess(2, 2);
  Vector c_grad(2);

  srand(5);
  for (int iter = 0; iter < 10000; ++iter) {
    // Initialize the jacobian and residual.
    for (int i = 0; i < 2 * 3; ++i)
      jacobian[i] = RandDouble();
    for (int i = 0; i < 3; ++i)
      residuals[i] = RandDouble();

    const double sq_norm = res.dot(res);

    rho[0] = sq_norm;
    rho[1] = RandDouble();
    rho[2] = 2.0 * RandDouble() - 1.0;

    // If rho[2] > 0, the curvature correction is applied in full and the
    // corrected normal equations match the Gauss-Newton approximation of
    // the robustified cost. Otherwise, alpha is clamped to 0.

    const double kD = 1 + 2 * rho[2] / rho[1] * sq_norm;
    const double kAlpha = (rho[2] > 0.0) ? 1 - sqrt(kD) : 0.0;
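    // kAlpha = 1 - sqrt(kD) is the smaller root of the alpha equation
    //   1/2 alpha^2 - alpha - (rho[2] / rho[1]) * sq_norm = 0.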

    // Ground truth values.
    g_res = sqrt(rho[1]) / (1.0 - kAlpha) * res;
    g_jac = sqrt(rho[1]) * (jac - kAlpha / sq_norm *
                            res * res.transpose() * jac);

    g_grad = rho[1] * jac.transpose() * res;
    g_hess = rho[1] * jac.transpose() * jac +
        2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;

    Corrector c(sq_norm, rho);
    c.CorrectJacobian(3, 2, residuals, jacobian);
    c.CorrectResiduals(3, residuals);

    // Corrected gradient and hessian.
    c_grad = jac.transpose() * res;
    c_hess = jac.transpose() * jac;

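    // Unlike the zero-residual test below, the Hessian comparison is
    // omitted here: whenever rho[2] <= 0, alpha is clamped to zero, so the
    // corrected Hessian reduces to rho[1] * jac' * jac and drops the
    // 2 * rho[2] * jac' * res * res' * jac term present in g_hess.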
    ASSERT_NEAR((g_res - res).norm(), 0.0, 1e-10);
    ASSERT_NEAR((g_jac - jac).norm(), 0.0, 1e-10);

    ASSERT_NEAR((g_grad - c_grad).norm(), 0.0, 1e-10);
  }
}

TEST(Corrector, MultidimensionalGaussNewtonApproximationZeroResidual) {
  double residuals[3];
  double jacobian[2 * 3];
  double rho[3];

  // Eigen matrix references for linear algebra.
  MatrixRef jac(jacobian, 3, 2);
  VectorRef res(residuals, 3);

  // Ground truth values of the modified jacobian and residuals.
  Matrix g_jac(3, 2);
  Vector g_res(3);

  // Ground truth values of the robustified Gauss-Newton
  // approximation.
  Matrix g_hess(2, 2);
  Vector g_grad(2);

  // Corrected hessian and gradient implied by the modified jacobian
  // and residuals.
  Matrix c_hess(2, 2);
  Vector c_grad(2);

  srand(5);
  for (int iter = 0; iter < 10000; ++iter) {
    // Initialize the jacobian.
    for (int i = 0; i < 2 * 3; ++i)
      jacobian[i] = RandDouble();

    // Zero residuals.
    res.setZero();

    const double sq_norm = res.dot(res);

    rho[0] = sq_norm;
    rho[1] = RandDouble();
    rho[2] = 2 * RandDouble() - 1.0;

    // Ground truth values.
    g_res = sqrt(rho[1]) * res;
    g_jac = sqrt(rho[1]) * jac;

    g_grad = rho[1] * jac.transpose() * res;
    g_hess = rho[1] * jac.transpose() * jac +
        2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;

    Corrector c(sq_norm, rho);
    c.CorrectJacobian(3, 2, residuals, jacobian);
    c.CorrectResiduals(3, residuals);

    // Corrected gradient and hessian.
    c_grad = jac.transpose() * res;
    c_hess = jac.transpose() * jac;

    ASSERT_NEAR((g_res - res).norm(), 0.0, 1e-10);
    ASSERT_NEAR((g_jac - jac).norm(), 0.0, 1e-10);

    ASSERT_NEAR((g_grad - c_grad).norm(), 0.0, 1e-10);
    ASSERT_NEAR((g_hess - c_hess).norm(), 0.0, 1e-10);
  }
}

}  // namespace internal
}  // namespace ceres