/external/ceres-solver/internal/ceres/
compressed_col_sparse_matrix_utils.cc
     43  const vector<int>& row_blocks,
     49  const int num_row_blocks = row_blocks.size();
     55  cursor += row_blocks[i];
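The hits above show row block sizes being accumulated into a running scalar offset. A minimal, self-contained sketch of that pattern; the function name BlockStartOffsets is illustrative, not part of Ceres:

    #include <vector>

    // Sketch (not the Ceres implementation): compute the scalar row index
    // at which each row block starts by accumulating block sizes, the same
    // `cursor += row_blocks[i]` pattern seen in the hits above.
    std::vector<int> BlockStartOffsets(const std::vector<int>& row_blocks) {
      std::vector<int> offsets(row_blocks.size());
      int cursor = 0;
      for (int i = 0; i < static_cast<int>(row_blocks.size()); ++i) {
        offsets[i] = cursor;
        cursor += row_blocks[i];
      }
      return offsets;
    }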
compressed_col_sparse_matrix_utils_test.cc
     87  int FillBlock(const vector<int>& row_blocks,
     95  row_pos += row_blocks[i];
    104  for (int r = 0; r < row_blocks[row_block_id]; ++r) {
    128  vector<int> row_blocks;  [local]
    129  row_blocks.push_back(1);
    130  row_blocks.push_back(2);
    131  row_blocks.push_back(2);
    140  offset += FillBlock(row_blocks, col_blocks, \
    177  row_blocks,
compressed_row_sparse_matrix_test.cc
     83  vector<int>* row_blocks = crsm->mutable_row_blocks();  [local]
     84  row_blocks->resize(num_rows);
     85  std::fill(row_blocks->begin(), row_blocks->end(), 1);
    180  const vector<int> pre_row_blocks = crsm->row_blocks();
    186  LOG(INFO) << appendage->row_blocks().size();
    190  const vector<int> post_row_blocks = crsm->row_blocks();
    200  EXPECT_EQ(expected_row_blocks, crsm->row_blocks());
    204  EXPECT_EQ(crsm->row_blocks(), pre_row_blocks);
    255  EXPECT_EQ(blocks, matrix->row_blocks());
    423  vector<int> row_blocks;  [local]
    [all...]
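The first three hits treat a purely scalar matrix as one whose rows are all blocks of size 1. A small sketch of that setup, with ScalarRowBlocks as an illustrative name rather than a Ceres function:

    #include <algorithm>
    #include <vector>

    // Sketch: give a scalar matrix a trivial block structure in which every
    // row is its own block of size 1, mirroring the resize/fill pattern above.
    std::vector<int> ScalarRowBlocks(int num_rows) {
      std::vector<int> row_blocks(num_rows);
      std::fill(row_blocks.begin(), row_blocks.end(), 1);
      return row_blocks;
    }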
cxsparse.h
    110  const vector<int>& row_blocks,
compressed_row_sparse_matrix.cc
    235  CHECK(row_blocks_.size() == 0 || m.row_blocks().size() != 0)
    238  << "The matrix being appended has: " << m.row_blocks().size()
    260  row_blocks_.insert(row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
    488  const vector<int>& row_blocks = m.row_blocks();  [local]
    491  for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
    492  const int row_block_end = row_block_begin + row_blocks[row_block];
    514  const vector<int>& row_blocks = m.row_blocks();  [local]
    [all...]
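The first three hits show how the row-append path handles block structure: the appended matrix's row blocks are concatenated onto the existing ones, and a CHECK insists that block information is either present on both matrices or absent on both. A simplified sketch of that logic; AppendRowBlocks is an illustrative name, and assert stands in for the CHECK:

    #include <cassert>
    #include <vector>

    // Sketch: concatenate the appended matrix's row block sizes onto the
    // existing block structure. If the target already carries block
    // information, the appended matrix must carry it as well.
    void AppendRowBlocks(std::vector<int>* row_blocks,
                         const std::vector<int>& appended_row_blocks) {
      assert(row_blocks->empty() || !appended_row_blocks.empty());
      row_blocks->insert(row_blocks->end(),
                         appended_row_blocks.begin(),
                         appended_row_blocks.end());
    }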
suitesparse.cc
    153  const vector<int>& row_blocks,
    157  if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
    207  const vector<int>& row_blocks,
    210  const int num_row_blocks = row_blocks.size();
    220  row_blocks,
    244  BlockOrderingToScalarOrdering(row_blocks, block_ordering, ordering);
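The last hit shows the two-stage ordering used here: a fill-reducing ordering is computed on the block structure (BlockAMDOrdering) and then expanded to a scalar ordering. A self-contained sketch of the expansion step, assuming block_ordering is a permutation of block indices; the name and signature are illustrative, not the Ceres API:

    #include <vector>

    // Sketch: expand a block-level permutation into a scalar permutation by
    // emitting, for each block in its permuted position, the scalar indices
    // that block occupies in the original ordering.
    void BlockOrderingToScalarOrderingSketch(
        const std::vector<int>& blocks,
        const std::vector<int>& block_ordering,
        std::vector<int>* scalar_ordering) {
      // Scalar index at which each block starts in the original ordering.
      std::vector<int> block_starts(blocks.size());
      int cursor = 0;
      for (size_t i = 0; i < blocks.size(); ++i) {
        block_starts[i] = cursor;
        cursor += blocks[i];
      }

      scalar_ordering->clear();
      scalar_ordering->reserve(cursor);
      for (size_t i = 0; i < block_ordering.size(); ++i) {
        const int block = block_ordering[i];
        for (int j = 0; j < blocks[block]; ++j) {
          scalar_ordering->push_back(block_starts[block] + j);
        }
      }
    }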
suitesparse.h
    150  const vector<int>& row_blocks,
    212  // column blocks are given by row_blocks, and col_blocks
    218  const vector<int>& row_blocks,
compressed_row_sparse_matrix.h
    112  const vector<int>& row_blocks() const { return row_blocks_; }  [function in class: ceres::internal::CompressedRowSparseMatrix]
cxsparse.cc
    107  const vector<int>& row_blocks,
    109  const int num_row_blocks = row_blocks.size();
    116  row_blocks,
    135  BlockOrderingToScalarOrdering(row_blocks, block_ordering, &scalar_ordering);
compressed_row_jacobian_writer.cc
     55  vector<int>& row_blocks = *(jacobian->mutable_row_blocks());  [local]
     56  row_blocks.resize(residual_blocks.size());
     58  row_blocks[i] = residual_blocks[i]->NumResiduals();
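These hits show the Jacobian writer giving the Jacobian one row block per residual block, sized by that block's number of residuals. A trivial sketch of the same idea, with plain integers standing in for Ceres residual block objects; the function name is hypothetical:

    #include <numeric>
    #include <vector>

    // Sketch: one row block per residual block, each sized by that block's
    // residual count; the scalar row count is the sum of the block sizes.
    // `residual_sizes` stands in for residual_blocks[i]->NumResiduals().
    void JacobianRowStructure(const std::vector<int>& residual_sizes,
                              std::vector<int>* row_blocks,
                              int* num_rows) {
      row_blocks->assign(residual_sizes.begin(), residual_sizes.end());
      *num_rows = std::accumulate(row_blocks->begin(), row_blocks->end(), 0);
    }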
compressed_col_sparse_matrix_utils.h
     52  const vector<int>& row_blocks,
sparse_normal_cholesky_solver.cc
    338  A->row_blocks(),