// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud (at) inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

/** \ingroup SparseCore_Module
  *
  * \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation
  *
  * This class implements a more versatile variant of the common \em compressed row/column storage format.
  * Each column's (resp. row's) non-zeros are stored as pairs of values with their associated row (resp. column) indices.
  * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
  * space in between the non-zeros of two successive columns (resp. rows) so that a new non-zero
  * can be inserted with limited memory reallocation and copying.
  *
  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
  * compatible with many libraries.
  *
  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
  *                 is RowMajor. The default is 0 which means column-major.
  * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
  */
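
// Illustration (editorial addition, not part of the original documentation):
// in the plain compressed column format, the 4x4 column-major matrix
//
//    0  3  0  0
//   22  0  0  0
//    7  5  0  1
//    0  0  0 17
//
// is stored as three arrays, exposed below by valuePtr(), innerIndexPtr()
// and outerIndexPtr() respectively:
//
//   values:         22  7  3  5  1 17
//   inner indices:   1  2  0  2  2  3
//   outer starts:    0  2  4  4  6
//
// In the uncompressed variant implemented by this class, each column may keep
// some unused slack at its end, tracked by the optional m_innerNonZeros array.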

namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename nested<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index Index;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = 0,
    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
  };
};

} // end namespace internal

template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
{
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,Index> Storage;
    enum {
      Options = _Options
    };

  protected:

    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    Index* m_outerIndex;
    Index* m_innerNonZeros;     // optional, if null then the data is compressed
    Storage m_data;

    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }

  public:

    /** \returns whether \c *this is in compressed form. */
    inline bool isCompressed() const { return m_innerNonZeros==0; }

    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerSize; }

    /** \returns a const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline const Scalar* valuePtr() const { return &m_data.value(0); }
    /** \returns a non-const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline Scalar* valuePtr() { return &m_data.value(0); }

    /** \returns a const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
    /** \returns a non-const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline Index* innerIndexPtr() { return &m_data.index(0); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline const Index* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline Index* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the array of the number of non-zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
    /** \returns a non-const pointer to the array of the number of non-zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }

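    // Illustrative sketch (editorial addition): passing the raw compressed
    // arrays to an external C API. The matrix must be compressed first, and
    // some_c_solver() is a hypothetical external routine expecting the
    // standard CCS/CRS triplet of arrays.
    //
    //   Eigen::SparseMatrix<double> A(rows,cols);
    //   // ... fill A ...
    //   A.makeCompressed();
    //   some_c_solver(A.rows(), A.cols(), A.nonZeros(),
    //                 A.outerIndexPtr(), A.innerIndexPtr(), A.valuePtr());
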
    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }

    /** \returns the value of the matrix at position \a row, \a col
      * This function returns Scalar(0) if the element is an explicit \em zero */
    inline Scalar coeff(Index row, Index col) const
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, inner);
    }

    /** \returns a non-const reference to the value of the matrix at position \a row, \a col
      *
      * If the element does not exist then it is inserted via the insert(Index,Index) function
      * which itself turns the matrix into a non-compressed form if that was not already the case.
      *
      * This is a O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
      * function if the element does not already exist.
      */
    inline Scalar& coeffRef(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }
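
    // Illustrative usage (editorial addition): coeffRef() makes
    // read-modify-write of individual coefficients convenient, at the price
    // of a binary search per access and a possible insertion:
    //
    //   Eigen::SparseMatrix<double> A(n,n);
    //   A.coeffRef(i,j) += 1.0;   // creates the (i,j) entry if it is absent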

    /** \returns a reference to a novel non-zero coefficient with coordinates \a row x \a col.
      * The non-zero coefficient must \b not already exist.
      *
      * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
      * mode while reserving room for 2 non-zeros per inner vector. It is strongly recommended to first
      * call reserve(const SizesType &) to reserve a more appropriate number of elements per
      * inner vector that better matches your scenario.
      *
      * This function performs a sorted insertion in O(1) if the elements of each inner vector are
      * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
      */
    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
    {
      if(isCompressed())
      {
        reserve(VectorXi::Constant(outerSize(), 2));
      }
      return insertUncompressed(row,col);
    }
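
    // Typical filling pattern (illustrative sketch, assuming roughly 6
    // non-zeros per column): reserve first, insert, then compress once at
    // the end.
    //
    //   Eigen::SparseMatrix<double> A(n,n);
    //   A.reserve(Eigen::VectorXi::Constant(n, 6));
    //   for( each non-zero (i,j,v) )        // pseudo-code loop
    //     A.insert(i,j) = v;                // (i,j) must not appear twice
    //   A.makeCompressed();                 // optional: back to compressed form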

  public:

    class InnerIterator;
    class ReverseInnerIterator;

    /** Removes all non-zeros but keeps the allocated memory */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
    }

    /** \returns the number of non-zero coefficients */
    inline Index nonZeros() const
    {
      if(m_innerNonZeros)
        return innerNonZeros().sum();
      return static_cast<Index>(m_data.size());
    }

    /** Preallocates \a reserveSize non-zeros.
      *
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

    #ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSize[\c j] non-zeros for each column (resp. row) \c j.
      *
      * This function turns the matrix into uncompressed mode. */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
    #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::Scalar())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = new Index[m_outerSize];

        // temporarily use m_innerNonZeros to hold the new starting points.
        Index* newOuterIndex = m_innerNonZeros;

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
        {
          std::ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        Index* newOuterIndex = new Index[m_outerSize+1];
        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
        {
          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        delete[] newOuterIndex;
      }
    }
  public:

    //--- low level purely coherent filling ---

    /** \internal
      * \returns a reference to the non-zero coefficient at position \a row, \a col assuming that:
      * - the non-zero does not already exist
      * - the new coefficient is the last one according to the storage order
      *
      * Before filling a given inner vector you must call the startVec(Index) function.
      *
      * After an insertion session, you should call the finalize() function.
      *
      * \sa insert, insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \internal
      * \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \internal
      * \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \internal
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** \internal
      * Must be called after inserting a set of non zero entries using the low level compressed API.
      */
    inline void finalize()
    {
      if(isCompressed())
      {
        Index size = static_cast<Index>(m_data.size());
        Index i = m_outerSize;
        // find the last filled column
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
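
    // Low-level filling sketch (editorial addition, internal API): entries
    // must be appended in strictly increasing order of outer then inner index.
    //
    //   Eigen::SparseMatrix<double> A(rows,cols);   // column-major: outer==col
    //   for(int j=0; j<cols; ++j)
    //   {
    //     A.startVec(j);
    //     // for each row index i of column j, in increasing order:
    //     A.insertBack(i,j) = v_ij;
    //   }
    //   A.finalize();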

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    void sumupDuplicates();

    //---

    /** \internal
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
    EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    /** Turns the matrix into the \em compressed format.
      */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        std::ptrdiff_t offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      delete[] m_innerNonZeros;
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
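
    // Illustrative (editorial addition): makeCompressed() removes the slack
    // left by uncompressed insertions and frees the m_innerNonZeros array.
    //
    //   A.insert(0,0) = 1.0;        // random insertion: A becomes uncompressed
    //   assert(!A.isCompressed());
    //   A.makeCompressed();         // back to the standard compressed format
    //   assert(A.isCompressed());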

    /** Suppresses all non-zeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    /** Turns the matrix into compressed format, and suppresses all non-zeros which do not satisfy the predicate \a keep.
      * The functor type \a KeepFunc must implement the following function:
      * \code
      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
      * \endcode
      * \sa prune(Scalar,RealScalar)
      */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      // TODO also implement a unit test
      makeCompressed();

      Index k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
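
    // Illustrative functor (editorial addition, assuming the default
    // Index==int): keep only the strictly upper triangular non-zeros of A.
    //
    //   struct KeepUpper {
    //     bool operator() (const int& row, const int& col, const double&) const
    //     { return row < col; }
    //   };
    //   A.prune(KeepUpper());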

    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
      * \sa resizeNonZeros(Index), reserve(), setZero()
      */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        delete[] m_outerIndex;
        m_outerIndex = new Index [outerSize+1];
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        delete[] m_innerNonZeros;
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    /** \internal
      * Resize the nonzero vector to \a size */
    void resizeNonZeros(Index size)
    {
      // TODO remove this function
      m_data.resize(size);
    }

    /** \returns a const expression of the diagonal coefficients */
    const Diagonal<const SparseMatrix> diagonal() const { return *this; }

    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** Swaps the content of two sparse matrices of the same type.
      * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        initAssignment(other);
        if(other.isCompressed())
        {
          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    { return Base::operator=(product); }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
    #endif

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      initAssignment(other.derived());
      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
      if (needToTranspose)
      {
        // two-pass algorithm:
        //  1 - compute the number of coeffs per dest inner vector
        //  2 - do the actual copy/eval
        // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
        typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
        typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
        OtherCopy otherCopy(other.derived());

        Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
        // pass 1
        // FIXME the above copy could be merged with that pass
        for (Index j=0; j<otherCopy.outerSize(); ++j)
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
            ++m_outerIndex[it.index()];

        // prefix sum
        Index count = 0;
        VectorXi positions(outerSize());
        for (Index j=0; j<outerSize(); ++j)
        {
          Index tmp = m_outerIndex[j];
          m_outerIndex[j] = count;
          positions[j] = count;
          count += tmp;
        }
        m_outerIndex[outerSize()] = count;
        // alloc
        m_data.resize(count);
        // pass 2
        for (Index j=0; j<otherCopy.outerSize(); ++j)
        {
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
          {
            Index pos = positions[it.index()]++;
            m_data.index(pos) = j;
            m_data.value(pos) = it.value();
          }
        }
        return *this;
      }
      else
      {
        // there is no special optimization
        return Base::operator=(other.derived());
      }
    }

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        else
          for (Index i=0; i<m.outerSize(); ++i)
          {
            int p = m.m_outerIndex[i];
            int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k)
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k)
              s << "(_,_) ";
          }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i)
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    /** Destructor */
    inline ~SparseMatrix()
    {
      delete[] m_outerIndex;
      delete[] m_innerNonZeros;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** Overloaded for performance */
    Scalar sum() const;
#endif

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

protected:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        delete[] m_innerNonZeros;
        m_innerNonZeros = 0;
      }
    }

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
    {
      eigen_assert(isCompressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index previousOuter = outer;
      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
        {
          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
          --previousOuter;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }

      // here we have to handle the tricky case where the outerIndex array
      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
      // the 2nd inner vector...
      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                    && (size_t(m_outerIndex[outer+1]) == m_data.size());

      size_t startId = m_outerIndex[outer];
      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
      size_t p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
      if (m_data.allocatedSize()<=m_data.size())
      {
        // if there is no preallocated memory, let's reserve a minimum of 32 elements
        if (m_data.size()==0)
        {
          m_data.reserve(32);
        }
        else
        {
          // we need to reallocate the data, to reduce multiple reallocations
          // we use a smart resize algorithm based on the current filling ratio
          // in addition, we use float to avoid integer overflows
          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
          // furthermore we bound the realloc ratio to:
          //   1) reduce multiple minor reallocs when the matrix is almost filled
          //   2) avoid allocating too much memory when the matrix is almost empty
          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
        }
      }
      m_data.resize(m_data.size()+1,reallocRatio);

      if (!isLastVec)
      {
        if (previousOuter==-1)
        {
          // oops wrong guess.
          // let's correct the outer offsets
          for (Index k=0; k<=(outer+1); ++k)
            m_outerIndex[k] = 0;
          Index k=outer+1;
          while(m_outerIndex[k]==0)
            m_outerIndex[k++] = 1;
          while (k<=m_outerSize && m_outerIndex[k]!=0)
            m_outerIndex[k++]++;
          p = 0;
          --k;
          k = m_outerIndex[k]-1;
          while (k>0)
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
        else
        {
          // we are not inserting into the last inner vec
          // update outer indices:
          Index j = outer+2;
          while (j<=m_outerSize && m_outerIndex[j]!=0)
            m_outerIndex[j++]++;
          --j;
          // shift data of last vecs:
          Index k = m_outerIndex[j]-1;
          while (k>=Index(p))
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
      }

      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

    /** \internal
      * A vector object that is equal to 0 everywhere except at position \a i where it equals \a v */
    class SingletonVector
    {
        Index m_index;
        Index m_value;
      public:
        typedef Index value_type;
        SingletonVector(Index i, Index v)
          : m_index(i), m_value(v)
        {}

        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    };
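
    // For instance (editorial note), reserve(SingletonVector(j,n)) requests n
    // extra slots for inner vector j only; insertUncompressed() below relies
    // on this to reallocate lazily when a single inner vector runs out of room.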

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
    {
      eigen_assert(!isCompressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
      if(innerNNZ>=room)
      {
        // this inner vector is full, we need to reallocate the whole buffer :(
        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
      }

      Index startId = m_outerIndex[outer];
      Index p = startId + m_innerNonZeros[outer];
      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_innerNonZeros[outer]++;

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

public:
    /** \internal
      * \sa insert(Index,Index) */
    inline Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
      m_innerNonZeros[outer]++;
      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

private:
  static void check_template_parameters()
  {
    EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
  }

  struct default_prunning_func {
    default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
    inline bool operator() (const Index&, const Index&, const Scalar& value) const
    {
      return !internal::isMuchSmallerThan(value, reference, epsilon);
    }
    Scalar reference;
    RealScalar epsilon;
  };
};

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
{
  public:
    InnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_end = mat.m_outerIndex[outer+1];
      else
        m_end = m_id + mat.m_innerNonZeros[outer];
    }

    inline InnerIterator& operator++() { m_id++; return *this; }

    inline const Scalar& value() const { return m_values[m_id]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

    inline Index index() const { return m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    Index m_end;
};

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
{
  public:
    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_id = mat.m_outerIndex[outer+1];
      else
        m_id = m_start + mat.m_innerNonZeros[outer];
    }

    inline ReverseInnerIterator& operator--() { --m_id; return *this; }

    inline const Scalar& value() const { return m_values[m_id-1]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }

    inline Index index() const { return m_indices[m_id-1]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id > m_start); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    const Index m_start;
};
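
// Typical iteration over all the non-zeros of a matrix A (illustrative,
// editorial addition):
//
//   for (int k=0; k<A.outerSize(); ++k)
//     for (Eigen::SparseMatrix<double>::InnerIterator it(A,k); it; ++it)
//       sum += it.value();   // it.row(), it.col() and it.index() also available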

namespace internal {

template<typename InputIterator, typename SparseMatrixType>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
{
  EIGEN_UNUSED_VARIABLE(Options);
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::Index Index;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());

  // pass 1: count the nnz per inner-vector
  VectorXi wi(trMat.outerSize());
  wi.setZero();
  for(InputIterator it(begin); it!=end; ++it)
    wi(IsRowMajor ? it->col() : it->row())++;

  // pass 2: insert all the elements into trMat
  trMat.reserve(wi);
  for(InputIterator it(begin); it!=end; ++it)
    trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

  // pass 3:
  trMat.sumupDuplicates();

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

} // end namespace internal

/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
  *
  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
  * The input list of triplets does not have to be sorted, and can contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is a \em O(n) operation, with \em n the number of triplet elements.
  * The initial contents of \c *this are destroyed.
  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
  *
  * The \a InputIterators value_type must provide the following interface:
  * \code
  * Scalar value() const; // the value
  * Index row() const;    // the row index i
  * Index col() const;    // the column index j
  * \endcode
  * See for instance the Eigen::Triplet template class.
  *
  * Here is a typical usage example:
  * \code
    typedef Triplet<double> T;
    std::vector<T> tripletList;
    tripletList.reserve(estimation_of_entries);
    for(...)
    {
      // ...
      tripletList.push_back(T(i,j,v_ij));
    }
    SparseMatrixType m(rows,cols);
    m.setFromTriplets(tripletList.begin(), tripletList.end());
    // m is ready to go!
  * \endcode
  *
  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
  * be explicitly stored into a std::vector for instance.
  */
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets(begin, end, *this);
}

/** \internal */
template<typename Scalar, int _Options, typename _Index>
void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  VectorXi wi(innerSize());
  wi.fill(-1);
  Index count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
  for(int j=0; j<outerSize(); ++j)
  {
    Index start   = count;
    Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we have already met this entry => accumulate it
        m_data.value(wi(i)) += m_data.value(k);
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  delete[] m_innerNonZeros;
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H