// Eigen unsupported module: SparseExtra
      1 // This file is part of Eigen, a lightweight C++ template library
      2 // for linear algebra.
      3 //
      4 // Copyright (C) 2013 Desire Nuentsa <desire.nuentsa_wakam (at) inria.fr>
      5 // Copyright (C) 2013 Gael Guennebaud <gael.guennebaud (at) inria.fr>
      6 //
      7 // This Source Code Form is subject to the terms of the Mozilla
      8 // Public License v. 2.0. If a copy of the MPL was not distributed
      9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
     10 
     11 #ifndef EIGEN_SPARSEBLOCKMATRIX_H
     12 #define EIGEN_SPARSEBLOCKMATRIX_H
     13 
     14 namespace Eigen {
     15 /** \ingroup SparseCore_Module
     16   *
     17   * \class BlockSparseMatrix
     18   *
     19   * \brief A versatile sparse matrix representation where each element is a block
     20   *
     21   * This class provides routines to manipulate block sparse matrices stored in a
     22   * BSR-like representation. There are two main types :
     23   *
     24   * 1. All blocks have the same number of rows and columns, called block size
     25   * in the following. In this case, if this block size is known at compile time,
     26   * it can be given as a template parameter like
     27   * \code
     28   * BlockSparseMatrix<Scalar, 3, ColMajor> bmat(b_rows, b_cols);
     29   * \endcode
     30   * Here, bmat is a b_rows x b_cols block sparse matrix
     31   * where each coefficient is a 3x3 dense matrix.
     32   * If the block size is fixed but will be given at runtime,
     33   * \code
     34   * BlockSparseMatrix<Scalar, Dynamic, ColMajor> bmat(b_rows, b_cols);
     35   * bmat.setBlockSize(block_size);
     36   * \endcode
     37   *
     38   * 2. The second case is for variable-block sparse matrices.
     39   * Here each block has its own dimensions. The only restriction is that all the blocks
     40   * in a row (resp. a column) should have the same number of rows (resp. of columns).
     41   * It is thus required in this case to describe the layout of the matrix by calling
     42   * setBlockLayout(rowBlocks, colBlocks).
     43   *
  * In either of the previous cases, the matrix can be filled by calling setFromTriplets().
  * A regular sparse matrix can be converted to a block sparse matrix and vice versa.
  * It is obviously required to describe the block layout beforehand by calling either
  * setBlockSize() for fixed-size blocks or setBlockLayout() for variable-size blocks.
     48   *
     49   * \tparam _Scalar The Scalar type
  * \tparam _BlockAtCompileTime The block layout option. It takes the following values:
  * Dynamic : block size known only at runtime
  * a positive integer : fixed block size known at compile time
     53   */
     54 template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;
     55 
     56 template<typename BlockSparseMatrixT> class BlockSparseMatrixView;
     57 
     58 namespace internal {
// Expression traits for BlockSparseMatrix: scalar/index types plus the
// compile-time size and flag information used by Eigen's expression machinery.
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
struct traits<BlockSparseMatrix<_Scalar,_BlockAtCompileTime,_Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind; // FIXME Where is it used ??
  typedef MatrixXpr XprKind;
  enum {
    // The number of blocks (and hence scalars) is only known at runtime.
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    // Dimension of each (square) block, or Dynamic for variable-size blocks.
    BlockSize = _BlockAtCompileTime,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
// Traits of the proxy view: each "scalar" of the view is itself a dense block
// of the underlying block sparse matrix, exposed through a Ref.
template<typename BlockSparseMatrixT>
struct traits<BlockSparseMatrixView<BlockSparseMatrixT> >
{
  typedef Ref<Matrix<typename BlockSparseMatrixT::Scalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > Scalar;
  typedef Ref<Matrix<typename BlockSparseMatrixT::RealScalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > RealScalar;

};
     84 
     85 // Function object to sort a triplet list
     86 template<typename Iterator, bool IsColMajor>
     87 struct TripletComp
     88 {
     89   typedef typename Iterator::value_type Triplet;
     90   bool operator()(const Triplet& a, const Triplet& b)
     91   { if(IsColMajor)
     92       return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col()));
     93     else
     94       return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row()));
     95   }
     96 };
     97 } // end namespace internal
     98 
     99 
    100 /* Proxy to view the block sparse matrix as a regular sparse matrix */
    101 template<typename BlockSparseMatrixT>
    102 class BlockSparseMatrixView : public SparseMatrixBase<BlockSparseMatrixT>
    103 {
    104   public:
    105     typedef Ref<typename BlockSparseMatrixT::BlockScalar> Scalar;
    106     typedef Ref<typename BlockSparseMatrixT::BlockRealScalar> RealScalar;
    107     typedef typename BlockSparseMatrixT::Index Index;
    108     typedef  BlockSparseMatrixT Nested;
    109     enum {
    110       Flags = BlockSparseMatrixT::Options,
    111       Options = BlockSparseMatrixT::Options,
    112       RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime,
    113       ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime,
    114       MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime,
    115       MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime
    116     };
    117   public:
    118     BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat)
    119      : m_spblockmat(spblockmat)
    120     {}
    121 
    122     Index outerSize() const
    123     {
    124       return (Flags&RowMajorBit) == 1 ? this->rows() : this->cols();
    125     }
    126     Index cols() const
    127     {
    128       return m_spblockmat.blockCols();
    129     }
    130     Index rows() const
    131     {
    132       return m_spblockmat.blockRows();
    133     }
    134     Scalar coeff(Index row, Index col)
    135     {
    136       return m_spblockmat.coeff(row, col);
    137     }
    138     Scalar coeffRef(Index row, Index col)
    139     {
    140       return m_spblockmat.coeffRef(row, col);
    141     }
    142     // Wrapper to iterate over all blocks
    143     class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator
    144     {
    145       public:
    146       InnerIterator(const BlockSparseMatrixView& mat, Index outer)
    147           : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer)
    148       {}
    149 
    150     };
    151 
    152   protected:
    153     const BlockSparseMatrixT& m_spblockmat;
    154 };
    155 
    156 // Proxy to view a regular vector as a block vector
    157 template<typename BlockSparseMatrixT, typename VectorType>
    158 class BlockVectorView
    159 {
    160   public:
    161     enum {
    162       BlockSize = BlockSparseMatrixT::BlockSize,
    163       ColsAtCompileTime = VectorType::ColsAtCompileTime,
    164       RowsAtCompileTime = VectorType::RowsAtCompileTime,
    165       Flags = VectorType::Flags
    166     };
    167     typedef Ref<const Matrix<typename BlockSparseMatrixT::Scalar, (RowsAtCompileTime==1)? 1 : BlockSize, (ColsAtCompileTime==1)? 1 : BlockSize> >Scalar;
    168     typedef typename BlockSparseMatrixT::Index Index;
    169   public:
    170     BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec)
    171     : m_spblockmat(spblockmat),m_vec(vec)
    172     { }
    173     inline Index cols() const
    174     {
    175       return m_vec.cols();
    176     }
    177     inline Index size() const
    178     {
    179       return m_spblockmat.blockRows();
    180     }
    181     inline Scalar coeff(Index bi) const
    182     {
    183       Index startRow = m_spblockmat.blockRowsIndex(bi);
    184       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
    185       return m_vec.middleRows(startRow, rowSize);
    186     }
    187     inline Scalar coeff(Index bi, Index j) const
    188     {
    189       Index startRow = m_spblockmat.blockRowsIndex(bi);
    190       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
    191       return m_vec.block(startRow, j, rowSize, 1);
    192     }
    193   protected:
    194     const BlockSparseMatrixT& m_spblockmat;
    195     const VectorType& m_vec;
    196 };
    197 
    198 template<typename VectorType, typename Index> class BlockVectorReturn;
    199 
    200 
    201 // Proxy to view a regular vector as a block vector
    202 template<typename BlockSparseMatrixT, typename VectorType>
    203 class BlockVectorReturn
    204 {
    205   public:
    206     enum {
    207       ColsAtCompileTime = VectorType::ColsAtCompileTime,
    208       RowsAtCompileTime = VectorType::RowsAtCompileTime,
    209       Flags = VectorType::Flags
    210     };
    211     typedef Ref<Matrix<typename VectorType::Scalar, RowsAtCompileTime, ColsAtCompileTime> > Scalar;
    212     typedef typename BlockSparseMatrixT::Index Index;
    213   public:
    214     BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec)
    215     : m_spblockmat(spblockmat),m_vec(vec)
    216     { }
    217     inline Index size() const
    218     {
    219       return m_spblockmat.blockRows();
    220     }
    221     inline Scalar coeffRef(Index bi)
    222     {
    223       Index startRow = m_spblockmat.blockRowsIndex(bi);
    224       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
    225       return m_vec.middleRows(startRow, rowSize);
    226     }
    227     inline Scalar coeffRef(Index bi, Index j)
    228     {
    229       Index startRow = m_spblockmat.blockRowsIndex(bi);
    230       Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow;
    231       return m_vec.block(startRow, j, rowSize, 1);
    232     }
    233 
    234   protected:
    235     const BlockSparseMatrixT& m_spblockmat;
    236     VectorType& m_vec;
    237 };
    238 
    239 // Block version of the sparse dense product
    240 template<typename Lhs, typename Rhs>
    241 class BlockSparseTimeDenseProduct;
    242 
    243 namespace internal {
    244 
// Traits for the block-sparse * dense product expression: the result is a
// dense expression whose sizes are only known at runtime.
template<typename BlockSparseMatrixT, typename VecType>
struct traits<BlockSparseTimeDenseProduct<BlockSparseMatrixT, VecType> >
{
  typedef Dense StorageKind;
  typedef MatrixXpr XprKind;
  typedef typename BlockSparseMatrixT::Scalar Scalar;
  typedef typename BlockSparseMatrixT::Index Index;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = 0,
    CoeffReadCost = internal::traits<BlockSparseMatrixT>::CoeffReadCost
  };
};
    261 } // end namespace internal
    262 
// Expression type for (block sparse matrix) * (dense vector/matrix).
template<typename Lhs, typename Rhs>
class BlockSparseTimeDenseProduct
  : public ProductBase<BlockSparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
{
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct)

    BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {}

    // dest += alpha * (*this): evaluates the product by viewing the lhs as a
    // sparse matrix of dense blocks and rhs/dest as block vectors, then
    // delegating to the generic sparse-time-dense kernel.
    // (m_lhs/m_rhs are provided by the ProductBase base class.)
    template<typename Dest> void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const
    {
      BlockVectorReturn<Lhs,Dest> tmpDest(m_lhs, dest);
      internal::sparse_time_dense_product( BlockSparseMatrixView<Lhs>(m_lhs),  BlockVectorView<Lhs, Rhs>(m_lhs, m_rhs), tmpDest, alpha);
    }

  private:
    // Assignment is intentionally declared but not defined (expression type).
    BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
};
    282 
    283 template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
    284 class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_StorageIndex> >
    285 {
    286   public:
    287     typedef _Scalar Scalar;
    288     typedef typename NumTraits<Scalar>::Real RealScalar;
    289     typedef _StorageIndex StorageIndex;
    290     typedef typename internal::ref_selector<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex> >::type Nested;
    291 
    292     enum {
    293       Options = _Options,
    294       Flags = Options,
    295       BlockSize=_BlockAtCompileTime,
    296       RowsAtCompileTime = Dynamic,
    297       ColsAtCompileTime = Dynamic,
    298       MaxRowsAtCompileTime = Dynamic,
    299       MaxColsAtCompileTime = Dynamic,
    300       IsVectorAtCompileTime = 0,
    301       IsColMajor = Flags&RowMajorBit ? 0 : 1
    302     };
    303     typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockScalar;
    304     typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
    305     typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType;
    306     typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
    307   public:
    308     // Default constructor
    309     BlockSparseMatrix()
    310     : m_innerBSize(0),m_outerBSize(0),m_innerOffset(0),m_outerOffset(0),
    311       m_nonzerosblocks(0),m_values(0),m_blockPtr(0),m_indices(0),
    312       m_outerIndex(0),m_blockSize(BlockSize)
    313     { }
    314 
    315 
    316     /**
    317      * \brief Construct and resize
    318      *
    319      */
    320     BlockSparseMatrix(Index brow, Index bcol)
    321       : m_innerBSize(IsColMajor ? brow : bcol),
    322         m_outerBSize(IsColMajor ? bcol : brow),
    323         m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0),
    324         m_values(0),m_blockPtr(0),m_indices(0),
    325         m_outerIndex(0),m_blockSize(BlockSize)
    326     { }
    327 
    328     /**
    329      * \brief Copy-constructor
    330      */
    331     BlockSparseMatrix(const BlockSparseMatrix& other)
    332       : m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize),
    333         m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros),
    334         m_blockPtr(0),m_blockSize(other.m_blockSize)
    335     {
    336       // should we allow copying between variable-size blocks and fixed-size blocks ??
    337       eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");
    338 
    339       std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset);
    340       std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset);
    341       std::copy(other.m_values, other.m_values+m_nonzeros, m_values);
    342 
    343       if(m_blockSize != Dynamic)
    344         std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks, m_blockPtr);
    345 
    346       std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices);
    347       std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize, m_outerIndex);
    348     }
    349 
    350     friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second)
    351     {
    352       std::swap(first.m_innerBSize, second.m_innerBSize);
    353       std::swap(first.m_outerBSize, second.m_outerBSize);
    354       std::swap(first.m_innerOffset, second.m_innerOffset);
    355       std::swap(first.m_outerOffset, second.m_outerOffset);
    356       std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks);
    357       std::swap(first.m_nonzeros, second.m_nonzeros);
    358       std::swap(first.m_values, second.m_values);
    359       std::swap(first.m_blockPtr, second.m_blockPtr);
    360       std::swap(first.m_indices, second.m_indices);
    361       std::swap(first.m_outerIndex, second.m_outerIndex);
    362       std::swap(first.m_BlockSize, second.m_blockSize);
    363     }
    364 
    // Copy-and-swap assignment: 'other' is a by-value copy made at the call
    // site; swapping with it transfers ownership of its storage to *this,
    // and our previous storage is released when 'other' goes out of scope.
    BlockSparseMatrix& operator=(BlockSparseMatrix other)
    {
      //Copy-and-swap paradigm ... avoid leaked data if thrown
      swap(*this, other);
      return *this;
    }
    371 
    372     // Destructor
    373     ~BlockSparseMatrix()
    374     {
    375       delete[] m_outerIndex;
    376       delete[] m_innerOffset;
    377       delete[] m_outerOffset;
    378       delete[] m_indices;
    379       delete[] m_blockPtr;
    380       delete[] m_values;
    381     }
    382 
    383 
    384     /**
    385       * \brief Constructor from a sparse matrix
    386       *
    387       */
    388     template<typename MatrixType>
    389     inline BlockSparseMatrix(const MatrixType& spmat) : m_blockSize(BlockSize)
    390     {
    391       EIGEN_STATIC_ASSERT((m_blockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE);
    392 
    393       *this = spmat;
    394     }
    395 
    /**
      * \brief Assignment from a sparse matrix with the same storage order
      *
      * Convert from a sparse matrix to block sparse matrix.
      * \warning Before calling this function, it is necessary to call
      * either setBlockLayout() (matrices with variable-size blocks)
      * or setBlockSize() (for fixed-size blocks).
      */
    template<typename MatrixType>
    inline BlockSparseMatrix& operator=(const MatrixType& spmat)
    {
      eigen_assert((m_innerBSize != 0 && m_outerBSize != 0)
                   && "Trying to assign to a zero-size matrix, call resize() first");
      eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order");
      // Boolean sparse matrix recording which blocks contain at least one nonzero.
      typedef SparseMatrix<bool,MatrixType::Options,typename MatrixType::Index> MatrixPatternType;
      MatrixPatternType  blockPattern(blockRows(), blockCols());
      m_nonzeros = 0;

      // First pass: compute the number of nonzero blocks and their locations
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        // Browse each outer block and compute the structure
        std::vector<bool> nzblocksFlag(m_innerBSize,false);  // Record the existing blocks
        blockPattern.startVec(bj);
        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
        {
          typename MatrixType::InnerIterator it_spmat(spmat, j);
          for(; it_spmat; ++it_spmat)
          {
            StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
            if(!nzblocksFlag[bi])
            {
              // Save the index of this nonzero block
              nzblocksFlag[bi] = true;
              blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
              // Compute the total number of nonzeros (including explicit zeros in blocks)
              m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
            }
          }
        } // end current outer block
      }
      blockPattern.finalize();

      // Allocate the internal arrays according to the detected block pattern
      setBlockStructure(blockPattern);

      // Second pass: zero-fill, then scatter each scalar into its block slot
      for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
      for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      {
        // Now copy the values
        for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j)
        {
          // Browse the outer block column by column (for column-major matrices)
          typename MatrixType::InnerIterator it_spmat(spmat, j);
          for(; it_spmat; ++it_spmat)
          {
            StorageIndex idx = 0; // Position of this block in the column block
            StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block
            // Go to the inner block where this element belongs to
            while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks
            StorageIndex idxVal;// Get the right position in the array of values for this element
            if(m_blockSize == Dynamic)
            {
              // Offset from all blocks before ...
              idxVal =  m_blockPtr[m_outerIndex[bj]+idx];
              // ... and offset inside the block
              idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi];
            }
            else
            {
              // All blocks before (each fixed-size block stores blockSize^2 scalars)
              idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
              // inside the block
              idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize);
            }
            // Insert the value
            m_values[idxVal] = it_spmat.value();
          } // end of this column
        } // end of this block
      } // end of this outer block

      return *this;
    }
    479 
    480     /**
    481       * \brief Set the nonzero block pattern of the matrix
    482       *
    483       * Given a sparse matrix describing the nonzero block pattern,
    484       * this function prepares the internal pointers for values.
    485       * After calling this function, any *nonzero* block (bi, bj) can be set
    486       * with a simple call to coeffRef(bi,bj).
    487       *
    488       *
    489       * \warning Before calling this function, tt is necessary to call
    490       * either setBlockLayout() (matrices with variable-size blocks)
    491       * or setBlockSize() (for fixed-size blocks).
    492       *
    493       * \param blockPattern Sparse matrix of boolean elements describing the block structure
    494       *
    495       * \sa setBlockLayout() \sa setBlockSize()
    496       */
    497     template<typename MatrixType>
    498     void setBlockStructure(const MatrixType& blockPattern)
    499     {
    500       resize(blockPattern.rows(), blockPattern.cols());
    501       reserve(blockPattern.nonZeros());
    502 
    503       // Browse the block pattern and set up the various pointers
    504       m_outerIndex[0] = 0;
    505       if(m_blockSize == Dynamic) m_blockPtr[0] = 0;
    506       for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
    507       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
    508       {
    509         //Browse each outer block
    510 
    511         //First, copy and save the indices of nonzero blocks
    512         //FIXME : find a way to avoid this ...
    513         std::vector<int> nzBlockIdx;
    514         typename MatrixType::InnerIterator it(blockPattern, bj);
    515         for(; it; ++it)
    516         {
    517           nzBlockIdx.push_back(it.index());
    518         }
    519         std::sort(nzBlockIdx.begin(), nzBlockIdx.end());
    520 
    521         // Now, fill block indices and (eventually) pointers to blocks
    522         for(StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx)
    523         {
    524           StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices
    525           m_indices[offset] = nzBlockIdx[idx];
    526           if(m_blockSize == Dynamic)
    527             m_blockPtr[offset] = m_blockPtr[offset-1] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
    528           // There is no blockPtr for fixed-size blocks... not needed !???
    529         }
    530         // Save the pointer to the next outer block
    531         m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size();
    532       }
    533     }
    534 
    535     /**
    536       * \brief Set the number of rows and columns blocks
    537       */
    538     inline void resize(Index brow, Index bcol)
    539     {
    540       m_innerBSize = IsColMajor ? brow : bcol;
    541       m_outerBSize = IsColMajor ? bcol : brow;
    542     }
    543 
    /**
      * \brief set the block size at runtime for fixed-size block layout
      *
      * Call this only for fixed-size blocks
      */
    inline void setBlockSize(Index blockSize)
    {
      // NOTE(review): presumably only meaningful when BlockSize == Dynamic;
      // for compile-time blocks m_blockSize is already BlockSize — confirm.
      m_blockSize = blockSize;
    }
    553 
    554     /**
    555       * \brief Set the row and column block layouts,
    556       *
    557       * This function set the size of each row and column block.
    558       * So this function should be used only for blocks with variable size.
    559       * \param rowBlocks : Number of rows per row block
    560       * \param colBlocks : Number of columns per column block
    561       * \sa resize(), setBlockSize()
    562       */
    563     inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks)
    564     {
    565       const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks;
    566       const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
    567       eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
    568       eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
    569       m_outerBSize = outerBlocks.size();
    570       //  starting index of blocks... cumulative sums
    571       m_innerOffset = new StorageIndex[m_innerBSize+1];
    572       m_outerOffset = new StorageIndex[m_outerBSize+1];
    573       m_innerOffset[0] = 0;
    574       m_outerOffset[0] = 0;
    575       std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]);
    576       std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]);
    577 
    578       // Compute the total number of nonzeros
    579       m_nonzeros = 0;
    580       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
    581         for(StorageIndex bi = 0; bi < m_innerBSize; ++bi)
    582           m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
    583 
    584     }
    585 
    586     /**
    587       * \brief Allocate the internal array of pointers to blocks and their inner indices
    588       *
    589       * \note For fixed-size blocks, call setBlockSize() to set the block.
    590       * And For variable-size blocks, call setBlockLayout() before using this function
    591       *
    592       * \param nonzerosblocks Number of nonzero blocks. The total number of nonzeros is
    593       * is computed in setBlockLayout() for variable-size blocks
    594       * \sa setBlockSize()
    595       */
    596     inline void reserve(const Index nonzerosblocks)
    597     {
    598       eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
    599           "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");
    600 
    601       //FIXME Should free if already allocated
    602       m_outerIndex = new StorageIndex[m_outerBSize+1];
    603 
    604       m_nonzerosblocks = nonzerosblocks;
    605       if(m_blockSize != Dynamic)
    606       {
    607         m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
    608         m_blockPtr = 0;
    609       }
    610       else
    611       {
    612         // m_nonzeros  is already computed in setBlockLayout()
    613         m_blockPtr = new StorageIndex[m_nonzerosblocks+1];
    614       }
    615       m_indices = new StorageIndex[m_nonzerosblocks+1];
    616       m_values = new Scalar[m_nonzeros];
    617     }
    618 
    619 
    620     /**
    621       * \brief Fill values in a matrix  from a triplet list.
    622       *
    623       * Each triplet item has a block stored in an Eigen dense matrix.
    624       * The InputIterator class should provide the functions row(), col() and value()
    625       *
    626       * \note For fixed-size blocks, call setBlockSize() before this function.
    627       *
    628       * FIXME Do not accept duplicates
    629       */
    630     template<typename InputIterator>
    631     void setFromTriplets(const InputIterator& begin, const InputIterator& end)
    632     {
    633       eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before");
    634 
    635       /* First, sort the triplet list
    636         * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted
    637         * The best approach is like in SparseMatrix::setFromTriplets()
    638         */
    639       internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
    640       std::sort(begin, end, tripletcomp);
    641 
    642       /* Count the number of rows and column blocks,
    643        * and the number of nonzero blocks per outer dimension
    644        */
    645       VectorXi rowBlocks(m_innerBSize); // Size of each block row
    646       VectorXi colBlocks(m_outerBSize); // Size of each block column
    647       rowBlocks.setZero(); colBlocks.setZero();
    648       VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector
    649       VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector...for variable-size blocks
    650       nzblock_outer.setZero();
    651       nz_outer.setZero();
    652       for(InputIterator it(begin); it !=end; ++it)
    653       {
    654         eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
    655         eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize))
    656                      || (m_blockSize == Dynamic));
    657 
    658         if(m_blockSize == Dynamic)
    659         {
    660           eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) &&
    661               "NON CORRESPONDING SIZES FOR ROW BLOCKS");
    662           eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) &&
    663               "NON CORRESPONDING SIZES FOR COLUMN BLOCKS");
    664           rowBlocks[it->row()] =it->value().rows();
    665           colBlocks[it->col()] = it->value().cols();
    666         }
    667         nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols();
    668         nzblock_outer(IsColMajor ? it->col() : it->row())++;
    669       }
    670       // Allocate member arrays
    671       if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
    672       StorageIndex nzblocks = nzblock_outer.sum();
    673       reserve(nzblocks);
    674 
    675        // Temporary markers
    676       VectorXi block_id(m_outerBSize); // To be used as a block marker during insertion
    677 
    678       // Setup outer index pointers and markers
    679       m_outerIndex[0] = 0;
    680       if (m_blockSize == Dynamic)  m_blockPtr[0] =  0;
    681       for(StorageIndex bj = 0; bj < m_outerBSize; ++bj)
    682       {
    683         m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj);
    684         block_id(bj) = m_outerIndex[bj];
    685         if(m_blockSize==Dynamic)
    686         {
    687           m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj);
    688         }
    689       }
    690 
    691       // Fill the matrix
    692       for(InputIterator it(begin); it!=end; ++it)
    693       {
    694         StorageIndex outer = IsColMajor ? it->col() : it->row();
    695         StorageIndex inner = IsColMajor ? it->row() : it->col();
    696         m_indices[block_id(outer)] = inner;
    697         StorageIndex block_size = it->value().rows()*it->value().cols();
    698         StorageIndex nz_marker = blockPtr(block_id[outer]);
    699         memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
    700         if(m_blockSize == Dynamic)
    701         {
    702           m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size;
    703         }
    704         block_id(outer)++;
    705       }
    706 
    707       // An alternative when the outer indices are sorted...no need to use an array of markers
    708 //      for(Index bcol = 0; bcol < m_outerBSize; ++bcol)
    709 //      {
    710 //      Index id = 0, id_nz = 0, id_nzblock = 0;
    711 //      for(InputIterator it(begin); it!=end; ++it)
    712 //      {
    713 //        while (id<bcol) // one pass should do the job unless there are empty columns
    714 //        {
    715 //          id++;
    716 //          m_outerIndex[id+1]=m_outerIndex[id];
    717 //        }
    718 //        m_outerIndex[id+1] += 1;
    719 //        m_indices[id_nzblock]=brow;
    720 //        Index block_size = it->value().rows()*it->value().cols();
    721 //        m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size;
    722 //        id_nzblock++;
    723 //        memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar));
    724 //        id_nz += block_size;
    725 //      }
    726 //      while(id < m_outerBSize-1) // Empty columns at the end
    727 //      {
    728 //        id++;
    729 //        m_outerIndex[id+1]=m_outerIndex[id];
    730 //      }
    731 //      }
    732     }
    733 
    734 
    735     /**
    736       * \returns the number of rows
    737       */
    738     inline Index rows() const
    739     {
    740 //      return blockRows();
    741       return (IsColMajor ? innerSize() : outerSize());
    742     }
    743 
    744     /**
    745       * \returns the number of cols
    746       */
    747     inline Index cols() const
    748     {
    749 //      return blockCols();
    750       return (IsColMajor ? outerSize() : innerSize());
    751     }
    752 
    753     inline Index innerSize() const
    754     {
    755       if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize];
    756       else return  (m_innerBSize * m_blockSize) ;
    757     }
    758 
    759     inline Index outerSize() const
    760     {
    761       if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize];
    762       else return  (m_outerBSize * m_blockSize) ;
    763     }
    764     /** \returns the number of rows grouped by blocks */
    765     inline Index blockRows() const
    766     {
    767       return (IsColMajor ? m_innerBSize : m_outerBSize);
    768     }
    769     /** \returns the number of columns grouped by blocks */
    770     inline Index blockCols() const
    771     {
    772       return (IsColMajor ? m_outerBSize : m_innerBSize);
    773     }
    774 
    775     inline Index outerBlocks() const { return m_outerBSize; }
    776     inline Index innerBlocks() const { return m_innerBSize; }
    777 
    778     /** \returns the block index where outer belongs to */
    779     inline Index outerToBlock(Index outer) const
    780     {
    781       eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");
    782 
    783       if(m_blockSize != Dynamic)
    784         return (outer / m_blockSize); // Integer division
    785 
    786       StorageIndex b_outer = 0;
    787       while(m_outerOffset[b_outer] <= outer) ++b_outer;
    788       return b_outer - 1;
    789     }
    790     /** \returns  the block index where inner belongs to */
    791     inline Index innerToBlock(Index inner) const
    792     {
    793       eigen_assert(inner < innerSize() && "OUTER INDEX OUT OF BOUNDS");
    794 
    795       if(m_blockSize != Dynamic)
    796         return (inner / m_blockSize); // Integer division
    797 
    798       StorageIndex b_inner = 0;
    799       while(m_innerOffset[b_inner] <= inner) ++b_inner;
    800       return b_inner - 1;
    801     }
    802 
    803     /**
    804       *\returns a reference to the (i,j) block as an Eigen Dense Matrix
    805       */
    806     Ref<BlockScalar> coeffRef(Index brow, Index bcol)
    807     {
    808       eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
    809       eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS");
    810 
    811       StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
    812       StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
    813       StorageIndex inner = IsColMajor ? brow : bcol;
    814       StorageIndex outer = IsColMajor ? bcol : brow;
    815       StorageIndex offset = m_outerIndex[outer];
    816       while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner)
    817         offset++;
    818       if(m_indices[offset] == inner)
    819       {
    820         return Map<BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
    821       }
    822       else
    823       {
    824         //FIXME the block does not exist, Insert it !!!!!!!!!
    825         eigen_assert("DYNAMIC INSERTION IS NOT YET SUPPORTED");
    826       }
    827     }
    828 
    829     /**
    830       * \returns the value of the (i,j) block as an Eigen Dense Matrix
    831       */
    832     Map<const BlockScalar> coeff(Index brow, Index bcol) const
    833     {
    834       eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
    835       eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS");
    836 
    837       StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol);
    838       StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
    839       StorageIndex inner = IsColMajor ? brow : bcol;
    840       StorageIndex outer = IsColMajor ? bcol : brow;
    841       StorageIndex offset = m_outerIndex[outer];
    842       while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++;
    843       if(m_indices[offset] == inner)
    844       {
    845         return Map<const BlockScalar> (&(m_values[blockPtr(offset)]), rsize, csize);
    846       }
    847       else
    848 //        return BlockScalar::Zero(rsize, csize);
    849         eigen_assert("NOT YET SUPPORTED");
    850     }
    851 
    // Block Matrix times dense vector/matrix product (lazy expression object).
    // NOTE(review): the parameter is named 'lhs' although it is the right-hand
    // side of the product (*this * lhs); kept as-is to preserve the interface.
    template<typename VecType>
    BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType> operator*(const VecType& lhs) const
    {
      return BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType>(*this, lhs);
    }
    858 
    /** \returns the number of nonzero blocks */
    inline Index nonZerosBlocks() const { return m_nonzerosblocks; }
    /** \returns the total number of nonzero elements, including eventual explicit zeros in blocks */
    inline Index nonZeros() const { return m_nonzeros; }

    /** \returns a pointer to the stored values, viewed as blocks */
    inline BlockScalarReturnType *valuePtr() {return static_cast<BlockScalarReturnType *>(m_values);}
//    inline Scalar *valuePtr(){ return m_values; }
    /** \returns a pointer to the inner block indices of the stored blocks */
    inline StorageIndex *innerIndexPtr() {return m_indices; }
    inline const StorageIndex *innerIndexPtr() const {return m_indices; }
    /** \returns a pointer to the start positions of the outer block-vectors */
    inline StorageIndex *outerIndexPtr() {return m_outerIndex; }
    inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; }

    /** \brief for compatibility purposes with the SparseMatrix class; this storage is always compressed */
    inline bool isCompressed() const {return true;}
    873     /**
    874       * \returns the starting index of the bi row block
    875       */
    876     inline Index blockRowsIndex(Index bi) const
    877     {
    878       return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi);
    879     }
    880 
    881     /**
    882       * \returns the starting index of the bj col block
    883       */
    884     inline Index blockColsIndex(Index bj) const
    885     {
    886       return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj);
    887     }
    888 
    889     inline Index blockOuterIndex(Index bj) const
    890     {
    891       return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
    892     }
    893     inline Index blockInnerIndex(Index bi) const
    894     {
    895       return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
    896     }
    897 
    898     // Not needed ???
    899     inline Index blockInnerSize(Index bi) const
    900     {
    901       return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize;
    902     }
    903     inline Index blockOuterSize(Index bj) const
    904     {
    905       return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1]- m_outerOffset[bj]) : m_blockSize;
    906     }
    907 
    908     /**
    909       * \brief Browse the matrix by outer index
    910       */
    911     class InnerIterator; // Browse column by column
    912 
    913     /**
    914       * \brief Browse the matrix by block outer index
    915       */
    916     class BlockInnerIterator; // Browse block by block
    917 
    918     friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m)
    919     {
    920       for (StorageIndex j = 0; j < m.outerBlocks(); ++j)
    921       {
    922         BlockInnerIterator itb(m, j);
    923         for(; itb; ++itb)
    924         {
    925           s << "("<<itb.row() << ", " << itb.col() << ")\n";
    926           s << itb.value() <<"\n";
    927         }
    928       }
    929       s << std::endl;
    930       return s;
    931     }
    932 
    933     /**
    934       * \returns the starting position of the block <id> in the array of values
    935       */
    936     Index blockPtr(Index id) const
    937     {
    938       if(m_blockSize == Dynamic) return m_blockPtr[id];
    939       else return id * m_blockSize * m_blockSize;
    940       //return blockDynIdx(id, typename internal::conditional<(BlockSize==Dynamic), internal::true_type, internal::false_type>::type());
    941     }
    942 
    943 
  protected:
//    inline Index blockDynIdx(Index id, internal::true_type) const
//    {
//      return m_blockPtr[id];
//    }
//    inline Index blockDynIdx(Index id, internal::false_type) const
//    {
//      return id * BlockSize * BlockSize;
//    }

    // To be implemented
    // Insert a block at a particular location... needs to make room for it first
    Map<BlockScalar> insert(Index brow, Index bcol);

    Index m_innerBSize; // Number of inner blocks (block rows if ColMajor, block columns otherwise)
    Index m_outerBSize; // Number of outer blocks
    StorageIndex *m_innerOffset; // Cumulative starting scalar index of each inner block (size m_innerBSize+1); used only when m_blockSize==Dynamic
    StorageIndex *m_outerOffset; // Cumulative starting scalar index of each outer block (size m_outerBSize+1); used only when m_blockSize==Dynamic
    Index m_nonzerosblocks; // Number of stored blocks (at most m_innerBSize x m_outerBSize)
    Index m_nonzeros; // Total number of stored scalar coefficients (including explicit zeros inside blocks)
    Scalar *m_values; // Values stored one outer block-vector after another (size m_nonzeros)
    StorageIndex *m_blockPtr; // Start of each block in m_values ... null for fixed-size blocks
    StorageIndex *m_indices; // Inner block index of each stored block (size m_nonzerosblocks)
    StorageIndex *m_outerIndex; // Start of each outer block-vector in m_indices (the fill code writes entries up to index m_outerBSize)
    Index m_blockSize; // Block size for fixed-size blocks, Dynamic (-1) otherwise
    969 };
    970 
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
{
  public:

    enum{
      Flags = _Options
    };

    // Iterate over the stored (nonzero) blocks of the outer block-vector 'outer',
    // i.e. over the half-open range [m_outerIndex[outer], m_outerIndex[outer+1])
    // of positions in m_indices.
    BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
    : m_mat(mat),m_outer(outer),
      m_id(mat.m_outerIndex[outer]),
      m_end(mat.m_outerIndex[outer+1])
    {
    }

    // Advance to the next stored block of the same outer vector.
    inline BlockInnerIterator& operator++() {m_id++; return *this; }

    // Read-only dense view of the current block; the data lives in m_values.
    inline const Map<const BlockScalar> value() const
    {
      return Map<const BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
          rows(),cols());
    }
    // Writable dense view of the current block.
    inline Map<BlockScalar> valueRef()
    {
      return Map<BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]),
          rows(),cols());
    }
    // Block inner index
    inline Index index() const {return m_mat.m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    // block row index
    // NOTE(review): returns the inner index, which is the block row only for
    // ColMajor storage — confirm intended behavior for RowMajor.
    inline Index row() const  {return index(); }
    // block column index
    // NOTE(review): returns the outer index, which is the block column only
    // for ColMajor storage — confirm intended behavior for RowMajor.
    inline Index col() const {return outer(); }
    // FIXME Number of rows in the current block
    inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; }
    // Number of columns in the current block ...
    inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;}
    // True while the iterator points to a stored block.
    inline operator bool() const { return (m_id < m_end); }

  protected:
    const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat;
    const Index m_outer; // outer block-vector being traversed
    Index m_id;          // current position in m_indices
    Index m_end;         // one-past-the-last position in m_indices
};
   1018 
   1019 template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
   1020 class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator
   1021 {
   1022   public:
   1023     InnerIterator(const BlockSparseMatrix& mat, Index outer)
   1024     : m_mat(mat),m_outerB(mat.outerToBlock(outer)),m_outer(outer),
   1025       itb(mat, mat.outerToBlock(outer)),
   1026       m_offset(outer - mat.blockOuterIndex(m_outerB))
   1027      {
   1028         if (itb)
   1029         {
   1030           m_id = m_mat.blockInnerIndex(itb.index());
   1031           m_start = m_id;
   1032           m_end = m_mat.blockInnerIndex(itb.index()+1);
   1033         }
   1034      }
   1035     inline InnerIterator& operator++()
   1036     {
   1037       m_id++;
   1038       if (m_id >= m_end)
   1039       {
   1040         ++itb;
   1041         if (itb)
   1042         {
   1043           m_id = m_mat.blockInnerIndex(itb.index());
   1044           m_start = m_id;
   1045           m_end = m_mat.blockInnerIndex(itb.index()+1);
   1046         }
   1047       }
   1048       return *this;
   1049     }
   1050     inline const Scalar& value() const
   1051     {
   1052       return itb.value().coeff(m_id - m_start, m_offset);
   1053     }
   1054     inline Scalar& valueRef()
   1055     {
   1056       return itb.valueRef().coeff(m_id - m_start, m_offset);
   1057     }
   1058     inline Index index() const { return m_id; }
   1059     inline Index outer() const {return m_outer; }
   1060     inline Index col() const {return outer(); }
   1061     inline Index row() const { return index();}
   1062     inline operator bool() const
   1063     {
   1064       return itb;
   1065     }
   1066   protected:
   1067     const BlockSparseMatrix& m_mat;
   1068     const Index m_outer;
   1069     const Index m_outerB;
   1070     BlockInnerIterator itb; // Iterator through the blocks
   1071     const Index m_offset; // Position of this column in the block
   1072     Index m_start; // starting inner index of this block
   1073     Index m_id; // current inner index in the block
   1074     Index m_end; // starting inner index of the next block
   1075 
   1076 };
   1077 } // end namespace Eigen
   1078 
   1079 #endif // EIGEN_SPARSEBLOCKMATRIX_H
   1080