// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/covariance.h"

#include <algorithm>
#include <cmath>
#include <map>
#include <utility>
#include <vector>
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/cost_function.h"
#include "ceres/covariance_impl.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
#include "ceres/map_util.h"
#include "ceres/problem_impl.h"
#include "gtest/gtest.h"

namespace ceres {
namespace internal {

TEST(CovarianceImpl, ComputeCovarianceSparsity) {
  double parameters[10];

  double* block1 = parameters;
  double* block2 = block1 + 1;
  double* block3 = block2 + 2;
  double* block4 = block3 + 3;

  ProblemImpl problem;

  // Add in random order.
  problem.AddParameterBlock(block1, 1);
  problem.AddParameterBlock(block4, 4);
  problem.AddParameterBlock(block3, 3);
  problem.AddParameterBlock(block2, 2);

  // Sparsity pattern
  //
  //  x 0 0 0 0 0 x x x x
  //  0 x x x x x 0 0 0 0
  //  0 x x x x x 0 0 0 0
  //  0 0 0 x x x 0 0 0 0
  //  0 0 0 x x x 0 0 0 0
  //  0 0 0 x x x 0 0 0 0
  //  0 0 0 0 0 0 x x x x
  //  0 0 0 0 0 0 x x x x
  //  0 0 0 0 0 0 x x x x
  //  0 0 0 0 0 0 x x x x

  int expected_rows[] = {0, 5, 10, 15, 18, 21, 24, 28, 32, 36, 40};
  int expected_cols[] = {0, 6, 7, 8, 9,
                         1, 2, 3, 4, 5,
                         1, 2, 3, 4, 5,
                         3, 4, 5,
                         3, 4, 5,
                         3, 4, 5,
                         6, 7, 8, 9,
                         6, 7, 8, 9,
                         6, 7, 8, 9,
                         6, 7, 8, 9};


  vector<pair<const double*, const double*> > covariance_blocks;
  covariance_blocks.push_back(make_pair(block1, block1));
  covariance_blocks.push_back(make_pair(block4, block4));
  covariance_blocks.push_back(make_pair(block2, block2));
  covariance_blocks.push_back(make_pair(block3, block3));
  covariance_blocks.push_back(make_pair(block2, block3));
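  // Block pairs may be given in either order; per expected_rows/expected_cols
  // above, the (block4, block1) pair below is expected to be stored as the
  // upper triangular (block1, block4) block.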
  covariance_blocks.push_back(make_pair(block4, block1));  // reversed

  Covariance::Options options;
  CovarianceImpl covariance_impl(options);
  EXPECT_TRUE(covariance_impl
              .ComputeCovarianceSparsity(covariance_blocks, &problem));

  const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();

  EXPECT_EQ(crsm->num_rows(), 10);
  EXPECT_EQ(crsm->num_cols(), 10);
  EXPECT_EQ(crsm->num_nonzeros(), 40);

  const int* rows = crsm->rows();
  for (int r = 0; r < crsm->num_rows() + 1; ++r) {
    EXPECT_EQ(rows[r], expected_rows[r])
        << r << " "
        << rows[r] << " "
        << expected_rows[r];
  }

  const int* cols = crsm->cols();
  for (int c = 0; c < crsm->num_nonzeros(); ++c) {
    EXPECT_EQ(cols[c], expected_cols[c])
        << c << " "
        << cols[c] << " "
        << expected_cols[c];
  }
}


// A cost function on a single parameter block that always returns residuals
// equal to 1 and a caller-supplied constant Jacobian.
class UnaryCostFunction: public CostFunction {
 public:
  UnaryCostFunction(const int num_residuals,
                    const int32 parameter_block_size,
                    const double* jacobian)
      : jacobian_(jacobian, jacobian + num_residuals * parameter_block_size) {
    set_num_residuals(num_residuals);
    mutable_parameter_block_sizes()->push_back(parameter_block_size);
  }

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    for (int i = 0; i < num_residuals(); ++i) {
      residuals[i] = 1;
    }

    if (jacobians == NULL) {
      return true;
    }

    if (jacobians[0] != NULL) {
      copy(jacobian_.begin(), jacobian_.end(), jacobians[0]);
    }

    return true;
  }

 private:
  vector<double> jacobian_;
};


// A cost function on two parameter blocks that always returns residuals
// equal to 2 and caller-supplied constant Jacobians.
class BinaryCostFunction: public CostFunction {
 public:
  BinaryCostFunction(const int num_residuals,
                     const int32 parameter_block1_size,
                     const int32 parameter_block2_size,
                     const double* jacobian1,
                     const double* jacobian2)
      : jacobian1_(jacobian1,
                   jacobian1 + num_residuals * parameter_block1_size),
        jacobian2_(jacobian2,
                   jacobian2 + num_residuals * parameter_block2_size) {
    set_num_residuals(num_residuals);
    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
  }

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    for (int i = 0; i < num_residuals(); ++i) {
      residuals[i] = 2;
    }

    if (jacobians == NULL) {
      return true;
    }

    if (jacobians[0] != NULL) {
      copy(jacobian1_.begin(), jacobian1_.end(), jacobians[0]);
    }

    if (jacobians[1] != NULL) {
      copy(jacobian2_.begin(), jacobian2_.end(), jacobians[1]);
    }

    return true;
  }

 private:
  vector<double> jacobian1_;
  vector<double> jacobian2_;
};

// x_plus_delta = delta * x;
class PolynomialParameterization : public LocalParameterization {
 public:
  virtual ~PolynomialParameterization() {}

  virtual bool Plus(const double* x,
                    const double* delta,
                    double* x_plus_delta) const {
    x_plus_delta[0] = delta[0] * x[0];
    x_plus_delta[1] = delta[0] * x[1];
    return true;
  }

  virtual bool ComputeJacobian(const double* x, double* jacobian) const {
    jacobian[0] = x[0];
    jacobian[1] = x[1];
    return true;
  }

  virtual int GlobalSize() const { return 2; }
  virtual int LocalSize() const { return 1; }
};

class CovarianceTest : public ::testing::Test {
 protected:
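  // Builds a fixed problem with three parameter blocks x (size 2), y (size 3)
  // and z (size 1), constant 8x6 Jacobian, and records the column range each
  // block occupies in the full 6x6 covariance matrix.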
  virtual void SetUp() {
    double* x = parameters_;
    double* y = x + 2;
    double* z = y + 3;

    x[0] = 1;
    x[1] = 1;
    y[0] = 2;
    y[1] = 2;
    y[2] = 2;
    z[0] = 3;

    {
      double jacobian[] = { 1.0, 0.0, 0.0, 1.0 };
      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
    }

    {
      double jacobian[] = { 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 2.0 };
      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
    }

    {
      double jacobian = 5.0;
      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian), NULL, z);
    }

    {
      double jacobian1[] = { 1.0, 2.0, 3.0 };
      double jacobian2[] = { -5.0, -6.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
          NULL,
          y,
          x);
    }

    {
      double jacobian1[] = { 2.0 };
      double jacobian2[] = { 3.0, -2.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
          NULL,
          z,
          x);
    }

    all_covariance_blocks_.push_back(make_pair(x, x));
    all_covariance_blocks_.push_back(make_pair(y, y));
    all_covariance_blocks_.push_back(make_pair(z, z));
    all_covariance_blocks_.push_back(make_pair(x, y));
    all_covariance_blocks_.push_back(make_pair(x, z));
    all_covariance_blocks_.push_back(make_pair(y, z));

    column_bounds_[x] = make_pair(0, 2);
    column_bounds_[y] = make_pair(2, 5);
    column_bounds_[z] = make_pair(5, 6);
  }

  void ComputeAndCompareCovarianceBlocks(const Covariance::Options& options,
                                         const double* expected_covariance) {
    // Generate all possible combinations of block pairs and check if the
    // covariance computation is correct.
    for (int i = 1; i <= 64; ++i) {
      vector<pair<const double*, const double*> > covariance_blocks;
      if (i & 1) {
        covariance_blocks.push_back(all_covariance_blocks_[0]);
      }

      if (i & 2) {
        covariance_blocks.push_back(all_covariance_blocks_[1]);
      }

      if (i & 4) {
        covariance_blocks.push_back(all_covariance_blocks_[2]);
      }

      if (i & 8) {
        covariance_blocks.push_back(all_covariance_blocks_[3]);
      }

      if (i & 16) {
        covariance_blocks.push_back(all_covariance_blocks_[4]);
      }

      if (i & 32) {
        covariance_blocks.push_back(all_covariance_blocks_[5]);
      }

      Covariance covariance(options);
      EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem_));

      for (int j = 0; j < covariance_blocks.size(); ++j) {
        const double* block1 = covariance_blocks[j].first;
        const double* block2 = covariance_blocks[j].second;
        // block1, block2
        GetCovarianceBlockAndCompare(block1, block2, covariance,
                                     expected_covariance);
        // block2, block1
        GetCovarianceBlockAndCompare(block2, block1, covariance,
                                     expected_covariance);
      }
    }
  }

  void GetCovarianceBlockAndCompare(const double* block1,
                                    const double* block2,
                                    const Covariance& covariance,
                                    const double* expected_covariance) {
    const int row_begin = FindOrDie(column_bounds_, block1).first;
    const int row_end = FindOrDie(column_bounds_, block1).second;
    const int col_begin = FindOrDie(column_bounds_, block2).first;
    const int col_end = FindOrDie(column_bounds_, block2).second;

    Matrix actual(row_end - row_begin, col_end - col_begin);
    EXPECT_TRUE(covariance.GetCovarianceBlock(block1,
                                              block2,
                                              actual.data()));

    ConstMatrixRef expected(expected_covariance, 6, 6);
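    // Compare against the corresponding block of the full 6x6 expected
    // covariance; the Frobenius norm of the difference is averaged over the
    // number of entries in the block.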
    double diff_norm = (expected.block(row_begin,
                                       col_begin,
                                       row_end - row_begin,
                                       col_end - col_begin) - actual).norm();
    diff_norm /= (row_end - row_begin) * (col_end - col_begin);

    const double kTolerance = 1e-5;
    EXPECT_NEAR(diff_norm, 0.0, kTolerance)
        << "rows: " << row_begin << " " << row_end << " "
        << "cols: " << col_begin << " " << col_end << " "
        << "\n\n expected: \n " << expected.block(row_begin,
                                                  col_begin,
                                                  row_end - row_begin,
                                                  col_end - col_begin)
        << "\n\n actual: \n " << actual
        << "\n\n full expected: \n" << expected;
  }

  double parameters_[10];
  Problem problem_;
  vector<pair<const double*, const double*> > all_covariance_blocks_;
  map<const double*, pair<int, int> > column_bounds_;
};


TEST_F(CovarianceTest, NormalBehavior) {
  // J
  //
  //   1  0  0   0   0   0
  //   0  1  0   0   0   0
  //   0  0  2   0   0   0
  //   0  0  0   2   0   0
  //   0  0  0   0   2   0
  //   0  0  0   0   0   5
  //  -5 -6  1   2   3   0
  //   3 -2  0   0   0   2

  // J'J
  //
  //   35  24 -5 -10 -15   6
  //   24  41 -6 -12 -18  -4
  //   -5  -6  5   2   3   0
  //  -10 -12  2   8   6   0
  //  -15 -18  3   6  13   0
  //    6  -4  0   0   0  29

  // inv(J'J) computed using octave.
  double expected_covariance[] = {
     7.0747e-02,  -8.4923e-03,  1.6821e-02,  3.3643e-02,  5.0464e-02, -1.5809e-02,  // NOLINT
    -8.4923e-03,   8.1352e-02,  2.4758e-02,  4.9517e-02,  7.4275e-02,  1.2978e-02,  // NOLINT
     1.6821e-02,   2.4758e-02,  2.4904e-01, -1.9271e-03, -2.8906e-03, -6.5325e-05,  // NOLINT
     3.3643e-02,   4.9517e-02, -1.9271e-03,  2.4615e-01, -5.7813e-03, -1.3065e-04,  // NOLINT
     5.0464e-02,   7.4275e-02, -2.8906e-03, -5.7813e-03,  2.4133e-01, -1.9598e-04,  // NOLINT
    -1.5809e-02,   1.2978e-02, -6.5325e-05, -1.3065e-04, -1.9598e-04,  3.9544e-02,  // NOLINT
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SUITE_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = EIGEN_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

#ifdef CERES_USE_OPENMP

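// Same problem and expected covariance as the NormalBehavior test above;
// this variant exercises the multi-threaded code path by setting
// options.num_threads = 4.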
TEST_F(CovarianceTest, ThreadedNormalBehavior) {
  // J
  //
  //   1  0  0   0   0   0
  //   0  1  0   0   0   0
  //   0  0  2   0   0   0
  //   0  0  0   2   0   0
  //   0  0  0   0   2   0
  //   0  0  0   0   0   5
  //  -5 -6  1   2   3   0
  //   3 -2  0   0   0   2

  // J'J
  //
  //   35  24 -5 -10 -15   6
  //   24  41 -6 -12 -18  -4
  //   -5  -6  5   2   3   0
  //  -10 -12  2   8   6   0
  //  -15 -18  3   6  13   0
  //    6  -4  0   0   0  29

  // inv(J'J) computed using octave.
  double expected_covariance[] = {
     7.0747e-02,  -8.4923e-03,  1.6821e-02,  3.3643e-02,  5.0464e-02, -1.5809e-02,  // NOLINT
    -8.4923e-03,   8.1352e-02,  2.4758e-02,  4.9517e-02,  7.4275e-02,  1.2978e-02,  // NOLINT
     1.6821e-02,   2.4758e-02,  2.4904e-01, -1.9271e-03, -2.8906e-03, -6.5325e-05,  // NOLINT
     3.3643e-02,   4.9517e-02, -1.9271e-03,  2.4615e-01, -5.7813e-03, -1.3065e-04,  // NOLINT
     5.0464e-02,   7.4275e-02, -2.8906e-03, -5.7813e-03,  2.4133e-01, -1.9598e-04,  // NOLINT
    -1.5809e-02,   1.2978e-02, -6.5325e-05, -1.3065e-04, -1.9598e-04,  3.9544e-02,  // NOLINT
  };

  Covariance::Options options;
  options.num_threads = 4;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SUITE_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = EIGEN_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

#endif  // CERES_USE_OPENMP

TEST_F(CovarianceTest, ConstantParameterBlock) {
  problem_.SetParameterBlockConstant(parameters_);

  // J
  //
  //  0 0 0 0 0 0
  //  0 0 0 0 0 0
  //  0 0 2 0 0 0
  //  0 0 0 2 0 0
  //  0 0 0 0 2 0
  //  0 0 0 0 0 5
  //  0 0 1 2 3 0
  //  0 0 0 0 0 2

  // J'J
  //
  //  0 0 0 0  0  0
  //  0 0 0 0  0  0
  //  0 0 5 2  3  0
  //  0 0 2 8  6  0
  //  0 0 3 6 13  0
  //  0 0 0 0  0 29

  // pinv(J'J) computed using octave.
  double expected_covariance[] = {
    0, 0,        0,        0,        0,        0,  // NOLINT
    0, 0,        0,        0,        0,        0,  // NOLINT
    0, 0,  0.23611, -0.02778, -0.04167, -0.00000,  // NOLINT
    0, 0, -0.02778,  0.19444, -0.08333, -0.00000,  // NOLINT
    0, 0, -0.04167, -0.08333,  0.12500, -0.00000,  // NOLINT
    0, 0, -0.00000, -0.00000, -0.00000,  0.03448   // NOLINT
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SUITE_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = EIGEN_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

TEST_F(CovarianceTest, LocalParameterization) {
  double* x = parameters_;
  double* y = x + 2;

  problem_.SetParameterization(x, new PolynomialParameterization);

  vector<int> subset;
  subset.push_back(2);
  problem_.SetParameterization(y, new SubsetParameterization(3, subset));

  // Raw Jacobian: J
  //
  //   1  0  0  0  0  0
  //   0  1  0  0  0  0
  //   0  0  2  0  0  0
  //   0  0  0  2  0  0
  //   0  0  0  0  0  0
  //   0  0  0  0  0  5
  //  -5 -6  1  2  0  0
  //   3 -2  0  0  0  2

  // Global to local Jacobian: A
  //
  //  1 0 0 0 0
  //  1 0 0 0 0
  //  0 1 0 0 0
  //  0 0 1 0 0
  //  0 0 0 1 0
  //  0 0 0 0 1

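  // J*A is the Jacobian with respect to the local (tangent space)
  // coordinates; pinv((J*A)'*(J*A)) is the covariance in that space, and
  // pre/post-multiplying by A lifts it back to the full parameter space: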
  // A * pinv((J*A)'*(J*A)) * A'
  // Computed using octave.
  double expected_covariance[] = {
     0.01766,  0.01766,  0.02158,  0.04316,  0.00000, -0.00122,
     0.01766,  0.01766,  0.02158,  0.04316,  0.00000, -0.00122,
     0.02158,  0.02158,  0.24860, -0.00281,  0.00000, -0.00149,
     0.04316,  0.04316, -0.00281,  0.24439,  0.00000, -0.00298,
     0.00000,  0.00000,  0.00000,  0.00000,  0.00000,  0.00000,
    -0.00122, -0.00122, -0.00149, -0.00298,  0.00000,  0.03457
  };

  Covariance::Options options;

#ifndef CERES_NO_SUITESPARSE
  options.algorithm_type = SUITE_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif

  options.algorithm_type = DENSE_SVD;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);

  options.algorithm_type = EIGEN_SPARSE_QR;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}


TEST_F(CovarianceTest, TruncatedRank) {
  // J
  //
  //   1  0  0   0   0   0
  //   0  1  0   0   0   0
  //   0  0  2   0   0   0
  //   0  0  0   2   0   0
  //   0  0  0   0   2   0
  //   0  0  0   0   0   5
  //  -5 -6  1   2   3   0
  //   3 -2  0   0   0   2

  // J'J
  //
  //   35  24 -5 -10 -15   6
  //   24  41 -6 -12 -18  -4
  //   -5  -6  5   2   3   0
  //  -10 -12  2   8   6   0
  //  -15 -18  3   6  13   0
  //    6  -4  0   0   0  29

  // 3.4142 is the smallest eigenvalue of J'J. The following matrix was
  // obtained by dropping the eigenvector corresponding to this eigenvalue.
  double expected_covariance[] = {
     5.4135e-02, -3.5121e-02,  1.7257e-04,  3.4514e-04,  5.1771e-04, -1.6076e-02,
    -3.5121e-02,  3.8667e-02, -1.9288e-03, -3.8576e-03, -5.7864e-03,  1.2549e-02,
     1.7257e-04, -1.9288e-03,  2.3235e-01, -3.5297e-02, -5.2946e-02, -3.3329e-04,
     3.4514e-04, -3.8576e-03, -3.5297e-02,  1.7941e-01, -1.0589e-01, -6.6659e-04,
     5.1771e-04, -5.7864e-03, -5.2946e-02, -1.0589e-01,  9.1162e-02, -9.9988e-04,
    -1.6076e-02,  1.2549e-02, -3.3329e-04, -6.6659e-04, -9.9988e-04,  3.9539e-02
  };


  {
    Covariance::Options options;
    options.algorithm_type = DENSE_SVD;
    // Force dropping of the eigenvector corresponding to the smallest
    // eigenvalue by explicitly setting the null space rank.
    options.null_space_rank = 1;
    ComputeAndCompareCovarianceBlocks(options, expected_covariance);
  }

  {
    Covariance::Options options;
    options.algorithm_type = DENSE_SVD;
    // Force dropping of the same eigenvector via the reciprocal condition
    // number threshold combined with automatic truncation.
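    // 0.044494 is presumably chosen just above the ratio of the smallest
    // eigenvalue of J'J (3.4142) to the largest, so automatic truncation
    // drops exactly one eigenvalue and reproduces the null_space_rank = 1
    // result above.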
    options.min_reciprocal_condition_number = 0.044494;
    options.null_space_rank = -1;
    ComputeAndCompareCovarianceBlocks(options, expected_covariance);
  }
}

// Same structure as CovarianceTest, but with the Jacobian blocks involving
// the y block zeroed out, making J'J rank deficient.
class RankDeficientCovarianceTest : public CovarianceTest {
 protected:
  virtual void SetUp() {
    double* x = parameters_;
    double* y = x + 2;
    double* z = y + 3;

    {
      double jacobian[] = { 1.0, 0.0, 0.0, 1.0 };
      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
    }

    {
      double jacobian[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
    }

    {
      double jacobian = 5.0;
      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian), NULL, z);
    }

    {
      double jacobian1[] = { 0.0, 0.0, 0.0 };
      double jacobian2[] = { -5.0, -6.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
          NULL,
          y,
          x);
    }

    {
      double jacobian1[] = { 2.0 };
      double jacobian2[] = { 3.0, -2.0 };
      problem_.AddResidualBlock(
          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
          NULL,
          z,
          x);
    }

    all_covariance_blocks_.push_back(make_pair(x, x));
    all_covariance_blocks_.push_back(make_pair(y, y));
    all_covariance_blocks_.push_back(make_pair(z, z));
    all_covariance_blocks_.push_back(make_pair(x, y));
    all_covariance_blocks_.push_back(make_pair(x, z));
    all_covariance_blocks_.push_back(make_pair(y, z));

    column_bounds_[x] = make_pair(0, 2);
    column_bounds_[y] = make_pair(2, 5);
    column_bounds_[z] = make_pair(5, 6);
  }
};

TEST_F(RankDeficientCovarianceTest, AutomaticTruncation) {
  // J
  //
  //   1  0 0 0 0 0
  //   0  1 0 0 0 0
  //   0  0 0 0 0 0
  //   0  0 0 0 0 0
  //   0  0 0 0 0 0
  //   0  0 0 0 0 5
  //  -5 -6 0 0 0 0
  //   3 -2 0 0 0 2

  // J'J
  //
  //  35 24 0 0 0  6
  //  24 41 0 0 0 -4
  //   0  0 0 0 0  0
  //   0  0 0 0 0  0
  //   0  0 0 0 0  0
  //   6 -4 0 0 0 29

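  // The y block has a zero Jacobian, so rows and columns 2-4 of the
  // pseudo-inverse are zero; with null_space_rank = -1 the corresponding
  // null eigenvalues are truncated automatically.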
  // pinv(J'J) computed using octave.
  double expected_covariance[] = {
     0.053998, -0.033145,  0.000000,  0.000000,  0.000000, -0.015744,
    -0.033145,  0.045067,  0.000000,  0.000000,  0.000000,  0.013074,
     0.000000,  0.000000,  0.000000,  0.000000,  0.000000,  0.000000,
     0.000000,  0.000000,  0.000000,  0.000000,  0.000000,  0.000000,
     0.000000,  0.000000,  0.000000,  0.000000,  0.000000,  0.000000,
    -0.015744,  0.013074,  0.000000,  0.000000,  0.000000,  0.039543
  };

  Covariance::Options options;
  options.algorithm_type = DENSE_SVD;
  options.null_space_rank = -1;
  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}

// Block diagonal problem with 2000 independent parameter blocks of size 5.
// Block i has Jacobian (i + 1) * Identity, so its covariance block is
// Identity / (i + 1)^2 and all cross covariance blocks are zero.
class LargeScaleCovarianceTest : public ::testing::Test {
 protected:
  virtual void SetUp() {
    num_parameter_blocks_ = 2000;
    parameter_block_size_ = 5;
    parameters_.reset(
        new double[parameter_block_size_ * num_parameter_blocks_]);

    Matrix jacobian(parameter_block_size_, parameter_block_size_);
    for (int i = 0; i < num_parameter_blocks_; ++i) {
      jacobian.setIdentity();
      jacobian *= (i + 1);

      double* block_i = parameters_.get() + i * parameter_block_size_;
      problem_.AddResidualBlock(new UnaryCostFunction(parameter_block_size_,
                                                      parameter_block_size_,
                                                      jacobian.data()),
                                NULL,
                                block_i);
      for (int j = i; j < num_parameter_blocks_; ++j) {
        double* block_j = parameters_.get() + j * parameter_block_size_;
        all_covariance_blocks_.push_back(make_pair(block_i, block_j));
      }
    }
  }

  void ComputeAndCompare(CovarianceAlgorithmType algorithm_type,
                         int num_threads) {
    Covariance::Options options;
    options.algorithm_type = algorithm_type;
    options.num_threads = num_threads;
    Covariance covariance(options);
    EXPECT_TRUE(covariance.Compute(all_covariance_blocks_, &problem_));

    Matrix expected(parameter_block_size_, parameter_block_size_);
    Matrix actual(parameter_block_size_, parameter_block_size_);
    const double kTolerance = 1e-16;

    for (int i = 0; i < num_parameter_blocks_; ++i) {
      expected.setIdentity();
      expected /= (i + 1.0) * (i + 1.0);

      double* block_i = parameters_.get() + i * parameter_block_size_;
      covariance.GetCovarianceBlock(block_i, block_i, actual.data());
      EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
          << "block: " << i << ", " << i << "\n"
          << "expected: \n" << expected << "\n"
          << "actual: \n" << actual;

      expected.setZero();
      for (int j = i + 1; j < num_parameter_blocks_; ++j) {
        double* block_j = parameters_.get() + j * parameter_block_size_;
        covariance.GetCovarianceBlock(block_i, block_j, actual.data());
        EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
            << "block: " << i << ", " << j << "\n"
            << "expected: \n" << expected << "\n"
            << "actual: \n" << actual;
      }
    }
  }

  scoped_array<double> parameters_;
  int parameter_block_size_;
  int num_parameter_blocks_;

  Problem problem_;
  vector<pair<const double*, const double*> > all_covariance_blocks_;
};

#if !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)

TEST_F(LargeScaleCovarianceTest, Parallel) {
  ComputeAndCompare(SUITE_SPARSE_QR, 4);
}

#endif  // !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)

}  // namespace internal
}  // namespace ceres