// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
// Chunk::start ?

#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_

// Eigen has an internal threshold for switching between different
// matrix multiplication algorithms. In particular, for matrices
// larger than EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache
// friendly matrix-matrix product algorithm that has a higher setup
// cost. For matrix sizes close to this threshold, especially when the
// matrices are thin and long, the default choice may not be optimal.
// This is the case for us, as the default choice caused a 30%
// performance regression when we moved from Eigen2 to Eigen3.
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10

// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include <algorithm>
#include <map>

#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/map_util.h"
#include "ceres/schur_eliminator.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
#include "Eigen/Dense"
#include "glog/logging.h"

namespace ceres {
namespace internal {

template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
  STLDeleteElements(&rhs_locks_);
}

template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
  CHECK_GT(num_eliminate_blocks, 0)
      << "SchurComplementSolver cannot be initialized with "
      << "num_eliminate_blocks = 0.";

  num_eliminate_blocks_ = num_eliminate_blocks;

  const int num_col_blocks = bs->cols.size();
  const int num_row_blocks = bs->rows.size();

  buffer_size_ = 1;
  chunks_.clear();
  lhs_row_layout_.clear();

  int lhs_num_rows = 0;
  // Add a map object for each block in the reduced linear system
  // and build the row/column block structure of the reduced linear
  // system.
  lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
    lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
    lhs_num_rows += bs->cols[i].size;
  }

  int r = 0;
  // Iterate over the row blocks of A and detect the chunks. The
  // matrix should already have been ordered so that all rows
  // containing the same y block are vertically contiguous. Along
  // the way also compute the amount of space each chunk will need
  // to perform the elimination.
  while (r < num_row_blocks) {
    const int chunk_block_id = bs->rows[r].cells.front().block_id;
    if (chunk_block_id >= num_eliminate_blocks_) {
      break;
    }

    chunks_.push_back(Chunk());
    Chunk& chunk = chunks_.back();
    chunk.size = 0;
    chunk.start = r;
    int buffer_size = 0;
    const int e_block_size = bs->cols[chunk_block_id].size;

    // Add to the chunk until the first block in the row is
    // different from the one in the first row for the chunk.
    while (r + chunk.size < num_row_blocks) {
      const CompressedRow& row = bs->rows[r + chunk.size];
      if (row.cells.front().block_id != chunk_block_id) {
        break;
      }

      // Iterate over the blocks in the row, ignoring the first
      // block since it is the one to be eliminated.
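      //
      // As a purely illustrative example (these numbers are not from
      // the code): if the rows of a chunk with e_block_size = 2
      // reference f_blocks 3 (of size 4) and 7 (of size 2), in that
      // order of first appearance, then the loop below produces
      //
      //   chunk.buffer_layout = {3 -> 0, 7 -> 8}
      //
      // and buffer_size = 2 * 4 + 2 * 2 = 12 doubles, enough to hold
      // the chunk's E'F blocks side by side.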
      for (int c = 1; c < row.cells.size(); ++c) {
        const Cell& cell = row.cells[c];
        if (InsertIfNotPresent(
                &(chunk.buffer_layout), cell.block_id, buffer_size)) {
          buffer_size += e_block_size * bs->cols[cell.block_id].size;
        }
      }

      buffer_size_ = std::max(buffer_size, buffer_size_);
      ++chunk.size;
    }

    CHECK_GT(chunk.size, 0);
    r += chunk.size;
  }
  const Chunk& chunk = chunks_.back();

  uneliminated_row_begins_ = chunk.start + chunk.size;
  if (num_threads_ > 1) {
    // Randomize the order of the chunks so that the dynamically
    // scheduled threads see a more even workload.
    std::random_shuffle(chunks_.begin(), chunks_.end());
  }

  buffer_.reset(new double[buffer_size_ * num_threads_]);

  // chunk_outer_product_buffer_ only needs to store e_block_size *
  // f_block_size, which is always less than buffer_size_, so we just
  // allocate buffer_size_ per thread.
  chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);

  STLDeleteElements(&rhs_locks_);
  rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
    rhs_locks_[i] = new Mutex;
  }
}

template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Eliminate(const BlockSparseMatrix* A,
          const double* b,
          const double* D,
          BlockRandomAccessMatrix* lhs,
          double* rhs) {
  if (lhs->num_rows() > 0) {
    lhs->SetZero();
    VectorRef(rhs, lhs->num_rows()).setZero();
  }

  const CompressedRowBlockStructure* bs = A->block_structure();
  const int num_col_blocks = bs->cols.size();

  // Add the diagonal to the Schur complement.
  if (D != NULL) {
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
    for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
      const int block_id = i - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block_id, block_id,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block_size = bs->cols[i].size;
        typename EigenTypes<kFBlockSize>::ConstVectorRef
            diag(D + bs->cols[i].position, block_size);

        CeresMutexLock l(&cell_info->m);
        MatrixRef m(cell_info->values, row_stride, col_stride);
        m.block(r, c, block_size, block_size).diagonal()
            += diag.array().square().matrix();
      }
    }
  }

  // Eliminate y blocks one chunk at a time. For each chunk,
  // compute the entries of the normal equations and the gradient
  // vector block corresponding to the y block and then apply
  // Gaussian elimination to them. The matrix ete stores the normal
  // matrix corresponding to the block being eliminated and the array
  // buffer_ contains the non-zero blocks in the row corresponding
  // to this y block in the normal equations. This computation is
  // done in ChunkDiagonalBlockAndGradient. UpdateRhs then applies
  // Gaussian elimination to the rhs of the normal equations,
  // updating the rhs of the reduced linear system by modifying rhs
  // blocks for all the z blocks that share a row block/residual
  // term with the y block. ChunkOuterProduct does the
  // corresponding operation for the lhs of the reduced linear
  // system.
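  //
  // In block form, with A = [E F], the normal equations are
  //
  //   [ E'E  E'F ] [y]   [ E'b ]
  //   [ F'E  F'F ] [z] = [ F'b ]
  //
  // and eliminating y yields the reduced (Schur complement) system
  //
  //   (F'F - F'E(E'E)^{-1}E'F) z = F'b - F'E(E'E)^{-1}E'b,
  //
  // which is exactly what the loop below assembles, one chunk at a
  // time.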
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
  for (int i = 0; i < chunks_.size(); ++i) {
#ifdef CERES_USE_OPENMP
    int thread_id = omp_get_thread_num();
#else
    int thread_id = 0;
#endif
    double* buffer = buffer_.get() + thread_id * buffer_size_;
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;

    VectorRef(buffer, buffer_size_).setZero();

    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);

    if (D != NULL) {
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }

    FixedArray<double, 8> g(e_block_size);
    typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
    gref.setZero();

    // We are going to be computing
    //
    //   S += F'F - F'E(E'E)^{-1}E'F
    //
    // for each Chunk. The computation is broken down into a number of
    // function calls as below.

    // Compute the outer product of the e_blocks with themselves (ete
    // = E'E). Compute the product of the e_blocks with the
    // corresponding f_blocks (buffer = E'F), the gradient of the terms
    // in this chunk (g), and add the outer product of the f_blocks to
    // the Schur complement (S += F'F).
    ChunkDiagonalBlockAndGradient(
        chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);

    // Normally one wouldn't compute the inverse explicitly, but
    // e_block_size will typically be a small number like 3, in
    // which case it is much faster to compute the inverse once and
    // use it to multiply other matrices/vectors instead of doing a
    // Solve call over and over again.
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
        ete
        .template selfadjointView<Eigen::Upper>()
        .llt()
        .solve(Matrix::Identity(e_block_size, e_block_size));

    // For the current chunk compute and update the rhs of the reduced
    // linear system.
    //
    //   rhs = F'b - F'E(E'E)^{-1}E'b
    FixedArray<double, 8> inverse_ete_g(e_block_size);
    MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
        inverse_ete.data(),
        e_block_size,
        e_block_size,
        g.get(),
        inverse_ete_g.get());

    UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);

    // S -= F'E(E'E)^{-1}E'F
    ChunkOuterProduct(bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
  }

  // For rows with no e_blocks, the Schur complement update reduces to
  // S += F'F.
  NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}

// Given z, back substitute to recover the eliminated variables y. For
// each chunk this amounts to solving
//
//   (E'E) y = E'(b - F z)
//
// where ete also includes the contribution of D if it is present.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
BackSubstitute(const BlockSparseMatrix* A,
               const double* b,
               const double* D,
               const double* z,
               double* y) {
  const CompressedRowBlockStructure* bs = A->block_structure();
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
  for (int i = 0; i < chunks_.size(); ++i) {
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;

    double* y_ptr = y + bs->cols[e_block_id].position;
    typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);

    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);
    if (D != NULL) {
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }

    const double* values = A->values();
    for (int j = 0; j < chunk.size; ++j) {
      const CompressedRow& row = bs->rows[chunk.start + j];
      const Cell& e_cell = row.cells.front();
      DCHECK_EQ(e_block_id, e_cell.block_id);

      // sj = b_i - F_i z for this row.
      FixedArray<double, 8> sj(row.block.size);
      typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
          typename EigenTypes<kRowBlockSize>::ConstVectorRef
          (b + bs->rows[chunk.start + j].block.position, row.block.size);

      for (int c = 1; c < row.cells.size(); ++c) {
        const int f_block_id = row.cells[c].block_id;
        const int f_block_size = bs->cols[f_block_id].size;
        const int r_block = f_block_id - num_eliminate_blocks_;

        MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
            values + row.cells[c].position, row.block.size, f_block_size,
            z + lhs_row_layout_[r_block],
            sj.get());
      }

      // y += E_i' sj
      MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
          values + e_cell.position, row.block.size, e_block_size,
          sj.get(),
          y_ptr);

      // ete += E_i' E_i
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + e_cell.position, row.block.size, e_block_size,
              ete.data(), 0, 0, e_block_size, e_block_size);
    }

    ete.llt().solveInPlace(y_block);
  }
}
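
// A sketch of how the pieces above fit together (hypothetical driver
// code; in Ceres this role is played by SchurComplementSolver):
//
//   eliminator->Init(num_eliminate_blocks, A->block_structure());
//   // Form the reduced system in (lhs, rhs).
//   eliminator->Eliminate(A, b, D, lhs, rhs);
//   ... solve lhs z = rhs for z ...
//   // Recover the eliminated variables.
//   eliminator->BackSubstitute(A, b, D, z, y);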

// Update the rhs of the reduced linear system. Compute
//
//   F'b - F'E(E'E)^{-1}E'b
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
          const BlockSparseMatrix* A,
          const double* b,
          int row_block_counter,
          const double* inverse_ete_g,
          double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
  const int e_block_size = bs->cols[e_block_id].size;

  int b_pos = bs->rows[row_block_counter].block.position;
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    const Cell& e_cell = row.cells.front();

    // sj = b_i - E_i (E'E)^{-1} E'b
    typename EigenTypes<kRowBlockSize>::Vector sj =
        typename EigenTypes<kRowBlockSize>::ConstVectorRef
        (b + b_pos, row.block.size);

    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
        values + e_cell.position, row.block.size, e_block_size,
        inverse_ete_g, sj.data());

    for (int c = 1; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      CeresMutexLock l(rhs_locks_[block]);
      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
          values + row.cells[c].position,
          row.block.size, block_size,
          sj.data(), rhs + lhs_row_layout_[block]);
    }
    b_pos += row.block.size;
  }
}

// Given a Chunk - a set of rows with the same e_block - e.g. the
// following Chunk with two rows
//
//                E                   F
//      [ y11   0   0   0 |  z11     0    0   0    z51 ]
//      [ y12   0   0   0 |  z12   z22    0   0      0 ]
//
// this function computes two matrices: the diagonal block matrix
//
//   ete = y11' * y11 + y12' * y12
//
// and the off diagonal blocks in the Gauss-Newton Hessian
//
//   buffer = [y11' * z11 + y12' * z12, y12' * z22, y11' * z51],
//
// which are zero compressed versions of the block sparse matrices
// E'E and E'F. It also computes the gradient of the e_block, E'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
    const Chunk& chunk,
    const BlockSparseMatrix* A,
    const double* b,
    int row_block_counter,
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
    double* g,
    double* buffer,
    BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();

  int b_pos = bs->rows[row_block_counter].block.position;
  const int e_block_size = ete->rows();

  // Iterate over the rows in this chunk; for each row, compute the
  // contribution of its F blocks to the Schur complement, the
  // contribution of its E block to the matrix E'E (ete), and the
  // corresponding block in the gradient vector.
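  //
  // Concretely, for each row i of the chunk the loop below performs
  //
  //   ete    += E_i' E_i
  //   g      += E_i' b_i
  //   buffer += E_i' F_i   (one block per f_block, via buffer_layout)
  //   S      += F_i' F_i   (via EBlockRowOuterProduct)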
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];

    if (row.cells.size() > 1) {
      EBlockRowOuterProduct(A, row_block_counter + j, lhs);
    }

    // Extract the e_block: ete += E_i' E_i
    const Cell& e_cell = row.cells.front();
    MatrixTransposeMatrixMultiply
        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
            values + e_cell.position, row.block.size, e_block_size,
            values + e_cell.position, row.block.size, e_block_size,
            ete->data(), 0, 0, e_block_size, e_block_size);

    // g += E_i' b_i
    MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
        values + e_cell.position, row.block.size, e_block_size,
        b + b_pos,
        g);

    // buffer = E'F. This computation is done by iterating over the
    // f_blocks for each row in the chunk.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int f_block_id = row.cells[c].block_id;
      const int f_block_size = bs->cols[f_block_id].size;
      double* buffer_ptr =
          buffer + FindOrDie(chunk.buffer_layout, f_block_id);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + row.cells[c].position, row.block.size, f_block_size,
              buffer_ptr, 0, 0, e_block_size, f_block_size);
    }
    b_pos += row.block.size;
  }
}

// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e.
//
//   S -= F'E(E'E)^{-1}E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(const CompressedRowBlockStructure* bs,
                  const Matrix& inverse_ete,
                  const double* buffer,
                  const BufferLayoutType& buffer_layout,
                  BlockRandomAccessMatrix* lhs) {
  // This is the most computationally expensive part of this
  // code. Profiling experiments reveal that the bottleneck is not the
  // computation of the right-hand matrix product, but memory
  // references to the left hand side.
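  //
  // The subtraction is organized one pair of f_blocks (b_1, b_2) at a
  // time, visiting only pairs with block2 >= block1 since the Schur
  // complement is symmetric:
  //
  //   S(block1, block2) -= b_1' * (E'E)^{-1} * b_2
  //
  // where b_i = E'F_i is a block stored in buffer. The product
  // b_1' * (E'E)^{-1} is computed once per outer iteration and reused
  // against every b_2.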
  const int e_block_size = inverse_ete.rows();
  BufferLayoutType::const_iterator it1 = buffer_layout.begin();

#ifdef CERES_USE_OPENMP
  int thread_id = omp_get_thread_num();
#else
  int thread_id = 0;
#endif
  double* b1_transpose_inverse_ete =
      chunk_outer_product_buffer_.get() + thread_id * buffer_size_;

  // S(block1, block2) -= b_1' * ete^{-1} * b_2
  for (; it1 != buffer_layout.end(); ++it1) {
    const int block1 = it1->first - num_eliminate_blocks_;
    const int block1_size = bs->cols[it1->first].size;
    MatrixTransposeMatrixMultiply
        <kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
            buffer + it1->second, e_block_size, block1_size,
            inverse_ete.data(), e_block_size, e_block_size,
            b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);

    BufferLayoutType::const_iterator it2 = it1;
    for (; it2 != buffer_layout.end(); ++it2) {
      const int block2 = it2->first - num_eliminate_blocks_;

      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[it2->first].size;
        CeresMutexLock l(&cell_info->m);
        MatrixMatrixMultiply
            <kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
                b1_transpose_inverse_ete, block1_size, e_block_size,
                buffer + it2->second, e_block_size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

// For rows with no e_blocks, the Schur complement update reduces to
// S += F'F. This function iterates over the rows of A with no
// e_block, and calls NoEBlockRowOuterProduct on each row.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
                   const double* b,
                   int row_block_counter,
                   BlockRandomAccessMatrix* lhs,
                   double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const double* values = A->values();
  for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
    const CompressedRow& row = bs->rows[row_block_counter];
    for (int c = 0; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
          values + row.cells[c].position, row.block.size, block_size,
          b + row.block.position,
          rhs + lhs_row_layout_[block]);
    }
    NoEBlockRowOuterProduct(A, row_block_counter, lhs);
  }
}

// A row r of A which has no e_blocks gets added to the Schur
// complement as S += r'r. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct, except for one
// difference: it does not use any of the template parameters. This is
// because the algorithm used for detecting the static structure of
// the matrix A only pays attention to rows with e_blocks. This is
// because rows without e_blocks are rare and typically arise from
// regularization terms in the original optimization problem, and have
// a very different structure from the rows with e_blocks. Including
// them in the static structure detection would lead to most template
// parameters being set to dynamic. Since the number of rows without
// e_blocks is small, the lack of templating is not an issue.
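//
// For example (illustrative only), a row coming from a pure
// regularization term on two f_blocks has the form
//
//            E                  F
//      [ 0   0   0   0 |  0   r1    0   r2    0]
//
// and contributes the blocks r1'r1, r1'r2 and r2'r2 to the Schur
// complement directly, with no elimination step.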
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
                        int row_block_index,
                        BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 0; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);

    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // This multiply currently ignores the fact that this is a
      // symmetric outer product.
      MatrixTransposeMatrixMultiply
          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }

    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[row.cells[j].block_id].size;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that it uses the template parameters.
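//
// Note that the loop below starts at cell 1 rather than cell 0, since
// cell 0 of such a row is the e_block, which contributes to E'E and
// E'F but not to F'F.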
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
                      int row_block_index,
                      BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 1; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);

    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // block += b1.transpose() * b1;
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }

    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      const int block2_size = bs->cols[row.cells[j].block_id].size;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        // block += b1.transpose() * b2;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_