      1 // This file is part of Eigen, a lightweight C++ template library
      2 // for linear algebra.
      3 //
      4 // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud (at) inria.fr>
      5 //
      6 // This Source Code Form is subject to the terms of the Mozilla
      7 // Public License v. 2.0. If a copy of the MPL was not distributed
      8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
      9 
     10 #ifndef EIGEN_SPARSEMATRIX_H
     11 #define EIGEN_SPARSEMATRIX_H
     12 
     13 namespace Eigen {
     14 
     15 /** \ingroup SparseCore_Module
     16   *
     17   * \class SparseMatrix
     18   *
      19   * \brief A versatile sparse matrix representation
     20   *
      21   * This class implements a more versatile variant of the common \em compressed row/column storage format.
      22   * Each column's (resp. row's) non zeros are stored as pairs of a value and its associated row (resp. column) index.
     23   * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
      24   * space in between the non zeros of two successive columns (resp. rows) so that insertion of a new non-zero
      25   * can be done with limited memory reallocations and copies.
     26   *
     27   * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
      28   * compatible with many libraries.
     29   *
      30   * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
     31   *
     32   * \tparam _Scalar the scalar type, i.e. the type of the coefficients
      33   * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only supported options
      34   *                 are ColMajor or RowMajor. The default is 0, which means column-major.
     35   * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
     36   *
     37   * This class can be extended with the help of the plugin mechanism described on the page
     38   * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
     39   */
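        // A minimal usage sketch (illustrative only): assuming the default column-major
        // order and int indices, and that `rows`, `cols` and `tripletList` (a
        // std::vector<Triplet<double> >) are provided by the caller:
        //
        //   SparseMatrix<double> A(rows, cols);
        //   A.setFromTriplets(tripletList.begin(), tripletList.end()); // sorted & compressed result
        //
        // Alternatively, coefficients can be set one by one through the
        // reserve()/insert()/makeCompressed() API documented below.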
     40 
     41 namespace internal {
     42 template<typename _Scalar, int _Options, typename _Index>
     43 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
     44 {
     45   typedef _Scalar Scalar;
     46   typedef _Index Index;
     47   typedef Sparse StorageKind;
     48   typedef MatrixXpr XprKind;
     49   enum {
     50     RowsAtCompileTime = Dynamic,
     51     ColsAtCompileTime = Dynamic,
     52     MaxRowsAtCompileTime = Dynamic,
     53     MaxColsAtCompileTime = Dynamic,
     54     Flags = _Options | NestByRefBit | LvalueBit,
     55     CoeffReadCost = NumTraits<Scalar>::ReadCost,
     56     SupportedAccessPatterns = InnerRandomAccessPattern
     57   };
     58 };
     59 
     60 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
     61 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
     62 {
     63   typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
     64   typedef typename nested<MatrixType>::type MatrixTypeNested;
     65   typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
     66 
     67   typedef _Scalar Scalar;
     68   typedef Dense StorageKind;
     69   typedef _Index Index;
     70   typedef MatrixXpr XprKind;
     71 
     72   enum {
     73     RowsAtCompileTime = Dynamic,
     74     ColsAtCompileTime = 1,
     75     MaxRowsAtCompileTime = Dynamic,
     76     MaxColsAtCompileTime = 1,
     77     Flags = 0,
     78     CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
     79   };
     80 };
     81 
     82 } // end namespace internal
     83 
     84 template<typename _Scalar, int _Options, typename _Index>
     85 class SparseMatrix
     86   : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
     87 {
     88   public:
     89     EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
     90     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
     91     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
     92 
     93     typedef MappedSparseMatrix<Scalar,Flags> Map;
     94     using Base::IsRowMajor;
     95     typedef internal::CompressedStorage<Scalar,Index> Storage;
     96     enum {
     97       Options = _Options
     98     };
     99 
    100   protected:
    101 
    102     typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
    103 
    104     Index m_outerSize;
    105     Index m_innerSize;
    106     Index* m_outerIndex;
    107     Index* m_innerNonZeros;     // optional, if null then the data is compressed
    108     Storage m_data;
    109 
    110     Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    111     const  Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    112 
    113   public:
    114 
    115     /** \returns whether \c *this is in compressed form. */
    116     inline bool isCompressed() const { return m_innerNonZeros==0; }
    117 
    118     /** \returns the number of rows of the matrix */
    119     inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    120     /** \returns the number of columns of the matrix */
    121     inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
    122 
     123     /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    124     inline Index innerSize() const { return m_innerSize; }
     125     /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    126     inline Index outerSize() const { return m_outerSize; }
    127 
    128     /** \returns a const pointer to the array of values.
    129       * This function is aimed at interoperability with other libraries.
    130       * \sa innerIndexPtr(), outerIndexPtr() */
    131     inline const Scalar* valuePtr() const { return &m_data.value(0); }
    132     /** \returns a non-const pointer to the array of values.
    133       * This function is aimed at interoperability with other libraries.
    134       * \sa innerIndexPtr(), outerIndexPtr() */
    135     inline Scalar* valuePtr() { return &m_data.value(0); }
    136 
    137     /** \returns a const pointer to the array of inner indices.
    138       * This function is aimed at interoperability with other libraries.
    139       * \sa valuePtr(), outerIndexPtr() */
    140     inline const Index* innerIndexPtr() const { return &m_data.index(0); }
    141     /** \returns a non-const pointer to the array of inner indices.
    142       * This function is aimed at interoperability with other libraries.
    143       * \sa valuePtr(), outerIndexPtr() */
    144     inline Index* innerIndexPtr() { return &m_data.index(0); }
    145 
    146     /** \returns a const pointer to the array of the starting positions of the inner vectors.
    147       * This function is aimed at interoperability with other libraries.
    148       * \sa valuePtr(), innerIndexPtr() */
    149     inline const Index* outerIndexPtr() const { return m_outerIndex; }
    150     /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
    151       * This function is aimed at interoperability with other libraries.
    152       * \sa valuePtr(), innerIndexPtr() */
    153     inline Index* outerIndexPtr() { return m_outerIndex; }
    154 
    155     /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
    156       * This function is aimed at interoperability with other libraries.
    157       * \warning it returns the null pointer 0 in compressed mode */
    158     inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
    159     /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
    160       * This function is aimed at interoperability with other libraries.
    161       * \warning it returns the null pointer 0 in compressed mode */
    162     inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
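            // Interoperability sketch: once makeCompressed() has been called, the three
            // arrays exposed by valuePtr(), innerIndexPtr() and outerIndexPtr() form a
            // standard CSC (column-major) or CSR (row-major) layout. Assuming a
            // SparseMatrix<double> `A` with the default int indices, and a hypothetical
            // external routine `external_csc_solver`:
            //
            //   A.makeCompressed();                        // innerNonZeroPtr() becomes 0
            //   const double* values = A.valuePtr();       // nonZeros() entries
            //   const int*    inner  = A.innerIndexPtr();  // nonZeros() inner indices
            //   const int*    outer  = A.outerIndexPtr();  // outerSize()+1 start positions
            //   external_csc_solver(A.rows(), A.cols(), outer, inner, values);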
    163 
    164     /** \internal */
    165     inline Storage& data() { return m_data; }
    166     /** \internal */
    167     inline const Storage& data() const { return m_data; }
    168 
     169     /** \returns the value of the matrix at position \a row, \a col.
    170       * This function returns Scalar(0) if the element is an explicit \em zero */
    171     inline Scalar coeff(Index row, Index col) const
    172     {
    173       eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
    174 
    175       const Index outer = IsRowMajor ? row : col;
    176       const Index inner = IsRowMajor ? col : row;
    177       Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
    178       return m_data.atInRange(m_outerIndex[outer], end, inner);
    179     }
    180 
     181     /** \returns a non-const reference to the value of the matrix at position \a row, \a col
    182       *
    183       * If the element does not exist then it is inserted via the insert(Index,Index) function
     184       * which itself turns the matrix into a non-compressed form if that was not already the case.
    185       *
     186       * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
    187       * function if the element does not already exist.
    188       */
    189     inline Scalar& coeffRef(Index row, Index col)
    190     {
    191       eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
    192 
    193       const Index outer = IsRowMajor ? row : col;
    194       const Index inner = IsRowMajor ? col : row;
    195 
    196       Index start = m_outerIndex[outer];
    197       Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
    198       eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
    199       if(end<=start)
    200         return insert(row,col);
    201       const Index p = m_data.searchLowerIndex(start,end-1,inner);
    202       if((p<end) && (m_data.index(p)==inner))
    203         return m_data.value(p);
    204       else
    205         return insert(row,col);
    206     }
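            // Sketch: coeffRef() is convenient for read-modify-write accumulation because a
            // missing element is first inserted as an explicit zero (`A`, `i`, `j` and `v_ij`
            // are placeholders):
            //
            //   A.coeffRef(i, j) += v_ij;   // works whether or not (i,j) is already stored
            //
            // For coefficients known to be new, insert() below avoids the binary search.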
    207 
     208     /** \returns a reference to a newly inserted non zero coefficient with coordinates \a row x \a col.
    209       * The non zero coefficient must \b not already exist.
    210       *
    211       * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
    212       * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
    213       * call reserve(const SizesType &) to reserve a more appropriate number of elements per
     214       * inner vector that better matches your scenario.
    215       *
    216       * This function performs a sorted insertion in O(1) if the elements of each inner vector are
    217       * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
    218       *
    219       */
    220     Scalar& insert(Index row, Index col)
    221     {
    222       eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
    223 
    224       if(isCompressed())
    225       {
    226         reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
    227       }
    228       return insertUncompressed(row,col);
    229     }
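            // Recommended filling pattern, as a sketch (default int indices assumed; `rows`,
            // `cols`, `i`, `j` and `v_ij` are placeholders):
            //
            //   SparseMatrix<double> A(rows, cols);
            //   A.reserve(VectorXi::Constant(cols, 6)); // e.g. ~6 expected non zeros per column
            //   A.insert(i, j) = v_ij;                   // repeat for each (new) non zero
            //   A.makeCompressed();                      // optional: back to compressed format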
    230 
    231   public:
    232 
    233     class InnerIterator;
    234     class ReverseInnerIterator;
    235 
     236     /** Removes all non zeros but keeps the allocated memory */
    237     inline void setZero()
    238     {
    239       m_data.clear();
    240       memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    241       if(m_innerNonZeros)
    242         memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
    243     }
    244 
    245     /** \returns the number of non zero coefficients */
    246     inline Index nonZeros() const
    247     {
    248       if(m_innerNonZeros)
    249         return innerNonZeros().sum();
    250       return static_cast<Index>(m_data.size());
    251     }
    252 
    253     /** Preallocates \a reserveSize non zeros.
    254       *
    255       * Precondition: the matrix must be in compressed mode. */
    256     inline void reserve(Index reserveSize)
    257     {
    258       eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
    259       m_data.reserve(reserveSize);
    260     }
    261 
    262     #ifdef EIGEN_PARSED_BY_DOXYGEN
    263     /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
    264       *
     265       * This function turns the matrix into non-compressed mode */
    266     template<class SizesType>
    267     inline void reserve(const SizesType& reserveSizes);
    268     #else
    269     template<class SizesType>
    270     inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
    271     {
    272       EIGEN_UNUSED_VARIABLE(enableif);
    273       reserveInnerVectors(reserveSizes);
    274     }
    275     template<class SizesType>
    276     inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
    277     #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
    278         typename
    279     #endif
    280         SizesType::Scalar())
    281     {
    282       EIGEN_UNUSED_VARIABLE(enableif);
    283       reserveInnerVectors(reserveSizes);
    284     }
    285     #endif // EIGEN_PARSED_BY_DOXYGEN
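            // Per-column reservation sketch: any type providing operator[] and a value_type
            // or Scalar typedef (e.g. std::vector<int> or VectorXi) should be usable as
            // SizesType here; `A` is a placeholder matrix:
            //
            //   std::vector<int> nnzPerCol(A.cols());
            //   // ... fill nnzPerCol with the expected number of non zeros of each column ...
            //   A.reserve(nnzPerCol);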
    286   protected:
    287     template<class SizesType>
    288     inline void reserveInnerVectors(const SizesType& reserveSizes)
    289     {
    290       if(isCompressed())
    291       {
    292         std::size_t totalReserveSize = 0;
    293         // turn the matrix into non-compressed mode
    294         m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
    295         if (!m_innerNonZeros) internal::throw_std_bad_alloc();
    296 
     297         // temporarily use m_innerNonZeros to hold the new starting points.
    298         Index* newOuterIndex = m_innerNonZeros;
    299 
    300         Index count = 0;
    301         for(Index j=0; j<m_outerSize; ++j)
    302         {
    303           newOuterIndex[j] = count;
    304           count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
    305           totalReserveSize += reserveSizes[j];
    306         }
    307         m_data.reserve(totalReserveSize);
    308         Index previousOuterIndex = m_outerIndex[m_outerSize];
    309         for(Index j=m_outerSize-1; j>=0; --j)
    310         {
    311           Index innerNNZ = previousOuterIndex - m_outerIndex[j];
    312           for(Index i=innerNNZ-1; i>=0; --i)
    313           {
    314             m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
    315             m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
    316           }
    317           previousOuterIndex = m_outerIndex[j];
    318           m_outerIndex[j] = newOuterIndex[j];
    319           m_innerNonZeros[j] = innerNNZ;
    320         }
    321         m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
    322 
    323         m_data.resize(m_outerIndex[m_outerSize]);
    324       }
    325       else
    326       {
    327         Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
    328         if (!newOuterIndex) internal::throw_std_bad_alloc();
    329 
    330         Index count = 0;
    331         for(Index j=0; j<m_outerSize; ++j)
    332         {
    333           newOuterIndex[j] = count;
    334           Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
    335           Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
    336           count += toReserve + m_innerNonZeros[j];
    337         }
    338         newOuterIndex[m_outerSize] = count;
    339 
    340         m_data.resize(count);
    341         for(Index j=m_outerSize-1; j>=0; --j)
    342         {
    343           Index offset = newOuterIndex[j] - m_outerIndex[j];
    344           if(offset>0)
    345           {
    346             Index innerNNZ = m_innerNonZeros[j];
    347             for(Index i=innerNNZ-1; i>=0; --i)
    348             {
    349               m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
    350               m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
    351             }
    352           }
    353         }
    354 
    355         std::swap(m_outerIndex, newOuterIndex);
    356         std::free(newOuterIndex);
    357       }
    358 
    359     }
    360   public:
    361 
    362     //--- low level purely coherent filling ---
    363 
    364     /** \internal
    365       * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
    366       * - the nonzero does not already exist
    367       * - the new coefficient is the last one according to the storage order
    368       *
     369       * Before filling a given inner vector you must call the startVec(Index) function.
    370       *
    371       * After an insertion session, you should call the finalize() function.
    372       *
    373       * \sa insert, insertBackByOuterInner, startVec */
    374     inline Scalar& insertBack(Index row, Index col)
    375     {
    376       return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    377     }
    378 
    379     /** \internal
    380       * \sa insertBack, startVec */
    381     inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    382     {
    383       eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
    384       eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
    385       Index p = m_outerIndex[outer+1];
    386       ++m_outerIndex[outer+1];
    387       m_data.append(0, inner);
    388       return m_data.value(p);
    389     }
    390 
    391     /** \internal
    392       * \warning use it only if you know what you are doing */
    393     inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    394     {
    395       Index p = m_outerIndex[outer+1];
    396       ++m_outerIndex[outer+1];
    397       m_data.append(0, inner);
    398       return m_data.value(p);
    399     }
    400 
    401     /** \internal
    402       * \sa insertBack, insertBackByOuterInner */
    403     inline void startVec(Index outer)
    404     {
    405       eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
    406       eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
    407       m_outerIndex[outer+1] = m_outerIndex[outer];
    408     }
    409 
    410     /** \internal
    411       * Must be called after inserting a set of non zero entries using the low level compressed API.
    412       */
    413     inline void finalize()
    414     {
    415       if(isCompressed())
    416       {
    417         Index size = static_cast<Index>(m_data.size());
    418         Index i = m_outerSize;
    419         // find the last filled column
    420         while (i>=0 && m_outerIndex[i]==0)
    421           --i;
    422         ++i;
    423         while (i<=m_outerSize)
    424         {
    425           m_outerIndex[i] = size;
    426           ++i;
    427         }
    428       }
    429     }
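            // Sketch of the low-level, purely coherent filling protocol above, assuming a
            // freshly resized column-major matrix whose non zeros are produced column by
            // column in increasing row order (`rows`, `cols`, `nnz_estimate`, `i` and `v`
            // are placeholders):
            //
            //   SparseMatrix<double> A(rows, cols);
            //   A.reserve(nnz_estimate);                 // optional but avoids reallocations
            //   for(int j = 0; j < A.outerSize(); ++j)
            //   {
            //     A.startVec(j);
            //     // for each non zero (i, v) of column j, in increasing row order:
            //     A.insertBackByOuterInner(j, i) = v;
            //   }
            //   A.finalize();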
    430 
    431     //---
    432 
    433     template<typename InputIterators>
    434     void setFromTriplets(const InputIterators& begin, const InputIterators& end);
    435 
    436     void sumupDuplicates();
    437 
    438     //---
    439 
    440     /** \internal
    441       * same as insert(Index,Index) except that the indices are given relative to the storage order */
    442     Scalar& insertByOuterInner(Index j, Index i)
    443     {
    444       return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    445     }
    446 
    447     /** Turns the matrix into the \em compressed format.
    448       */
    449     void makeCompressed()
    450     {
    451       if(isCompressed())
    452         return;
    453 
    454       Index oldStart = m_outerIndex[1];
    455       m_outerIndex[1] = m_innerNonZeros[0];
    456       for(Index j=1; j<m_outerSize; ++j)
    457       {
    458         Index nextOldStart = m_outerIndex[j+1];
    459         Index offset = oldStart - m_outerIndex[j];
    460         if(offset>0)
    461         {
    462           for(Index k=0; k<m_innerNonZeros[j]; ++k)
    463           {
    464             m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
    465             m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
    466           }
    467         }
    468         m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
    469         oldStart = nextOldStart;
    470       }
    471       std::free(m_innerNonZeros);
    472       m_innerNonZeros = 0;
    473       m_data.resize(m_outerIndex[m_outerSize]);
    474       m_data.squeeze();
    475     }
    476 
     477     /** Turns the matrix into uncompressed mode */
    478     void uncompress()
    479     {
    480       if(m_innerNonZeros != 0)
    481         return;
    482       m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
    483       for (Index i = 0; i < m_outerSize; i++)
    484       {
    485         m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
    486       }
    487     }
    488 
     489     /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
    490     void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    491     {
    492       prune(default_prunning_func(reference,epsilon));
    493     }
    494 
    495     /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
    496       * The functor type \a KeepFunc must implement the following function:
    497       * \code
    498       * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
    499       * \endcode
    500       * \sa prune(Scalar,RealScalar)
    501       */
    502     template<typename KeepFunc>
    503     void prune(const KeepFunc& keep = KeepFunc())
    504     {
    505       // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
    506       // TODO also implement a unit test
    507       makeCompressed();
    508 
    509       Index k = 0;
    510       for(Index j=0; j<m_outerSize; ++j)
    511       {
    512         Index previousStart = m_outerIndex[j];
    513         m_outerIndex[j] = k;
    514         Index end = m_outerIndex[j+1];
    515         for(Index i=previousStart; i<end; ++i)
    516         {
    517           if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
    518           {
    519             m_data.value(k) = m_data.value(i);
    520             m_data.index(k) = m_data.index(i);
    521             ++k;
    522           }
    523         }
    524       }
    525       m_outerIndex[m_outerSize] = k;
    526       m_data.resize(k,0);
    527     }
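            // Sketch of a custom keep-predicate dropping the strictly upper triangular part,
            // assuming a SparseMatrix<double> `A` with the default int indices:
            //
            //   struct KeepLowerTriangle {
            //     bool operator()(const int& row, const int& col, const double&) const
            //     { return row >= col; }
            //   };
            //   A.prune(KeepLowerTriangle());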
    528 
    529     /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
    530       * \sa resizeNonZeros(Index), reserve(), setZero()
    531       */
    532     void conservativeResize(Index rows, Index cols)
    533     {
    534       // No change
    535       if (this->rows() == rows && this->cols() == cols) return;
    536 
     537       // If one dimension is zero, then there is nothing to be preserved
    538       if(rows==0 || cols==0) return resize(rows,cols);
    539 
    540       Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
    541       Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
    542       Index newInnerSize = IsRowMajor ? cols : rows;
    543 
    544       // Deals with inner non zeros
    545       if (m_innerNonZeros)
    546       {
    547         // Resize m_innerNonZeros
    548         Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
    549         if (!newInnerNonZeros) internal::throw_std_bad_alloc();
    550         m_innerNonZeros = newInnerNonZeros;
    551 
    552         for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
    553           m_innerNonZeros[i] = 0;
    554       }
    555       else if (innerChange < 0)
    556       {
    557         // Inner size decreased: allocate a new m_innerNonZeros
    558         m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
    559         if (!m_innerNonZeros) internal::throw_std_bad_alloc();
    560         for(Index i = 0; i < m_outerSize; i++)
    561           m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
    562       }
    563 
    564       // Change the m_innerNonZeros in case of a decrease of inner size
    565       if (m_innerNonZeros && innerChange < 0)
    566       {
    567         for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
    568         {
    569           Index &n = m_innerNonZeros[i];
    570           Index start = m_outerIndex[i];
    571           while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
    572         }
    573       }
    574 
    575       m_innerSize = newInnerSize;
    576 
    577       // Re-allocate outer index structure if necessary
    578       if (outerChange == 0)
    579         return;
    580 
    581       Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
    582       if (!newOuterIndex) internal::throw_std_bad_alloc();
    583       m_outerIndex = newOuterIndex;
    584       if (outerChange > 0)
    585       {
    586         Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
    587         for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
    588           m_outerIndex[i] = last;
    589       }
    590       m_outerSize += outerChange;
    591     }
    592 
    593     /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
    594       * \sa resizeNonZeros(Index), reserve(), setZero()
    595       */
    596     void resize(Index rows, Index cols)
    597     {
    598       const Index outerSize = IsRowMajor ? rows : cols;
    599       m_innerSize = IsRowMajor ? cols : rows;
    600       m_data.clear();
    601       if (m_outerSize != outerSize || m_outerSize==0)
    602       {
    603         std::free(m_outerIndex);
    604         m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
    605         if (!m_outerIndex) internal::throw_std_bad_alloc();
    606 
    607         m_outerSize = outerSize;
    608       }
    609       if(m_innerNonZeros)
    610       {
    611         std::free(m_innerNonZeros);
    612         m_innerNonZeros = 0;
    613       }
    614       memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    615     }
    616 
    617     /** \internal
    618       * Resize the nonzero vector to \a size */
    619     void resizeNonZeros(Index size)
    620     {
    621       // TODO remove this function
    622       m_data.resize(size);
    623     }
    624 
    625     /** \returns a const expression of the diagonal coefficients */
    626     const Diagonal<const SparseMatrix> diagonal() const { return *this; }
    627 
    628     /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    629     inline SparseMatrix()
    630       : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    631     {
    632       check_template_parameters();
    633       resize(0, 0);
    634     }
    635 
    636     /** Constructs a \a rows \c x \a cols empty matrix */
    637     inline SparseMatrix(Index rows, Index cols)
    638       : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    639     {
    640       check_template_parameters();
    641       resize(rows, cols);
    642     }
    643 
    644     /** Constructs a sparse matrix from the sparse expression \a other */
    645     template<typename OtherDerived>
    646     inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
    647       : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    648     {
    649       EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    650         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
    651       check_template_parameters();
    652       *this = other.derived();
    653     }
    654 
    655     /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    656     template<typename OtherDerived, unsigned int UpLo>
    657     inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
    658       : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    659     {
    660       check_template_parameters();
    661       *this = other;
    662     }
    663 
    664     /** Copy constructor (it performs a deep copy) */
    665     inline SparseMatrix(const SparseMatrix& other)
    666       : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    667     {
    668       check_template_parameters();
    669       *this = other.derived();
    670     }
    671 
    672     /** \brief Copy constructor with in-place evaluation */
    673     template<typename OtherDerived>
    674     SparseMatrix(const ReturnByValue<OtherDerived>& other)
    675       : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    676     {
    677       check_template_parameters();
    678       initAssignment(other);
    679       other.evalTo(*this);
    680     }
    681 
    682     /** Swaps the content of two sparse matrices of the same type.
    683       * This is a fast operation that simply swaps the underlying pointers and parameters. */
    684     inline void swap(SparseMatrix& other)
    685     {
    686       //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
    687       std::swap(m_outerIndex, other.m_outerIndex);
    688       std::swap(m_innerSize, other.m_innerSize);
    689       std::swap(m_outerSize, other.m_outerSize);
    690       std::swap(m_innerNonZeros, other.m_innerNonZeros);
    691       m_data.swap(other.m_data);
    692     }
    693 
    694     /** Sets *this to the identity matrix */
    695     inline void setIdentity()
    696     {
    697       eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
    698       this->m_data.resize(rows());
    699       Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
    700       Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
    701       Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
    702     }
    703     inline SparseMatrix& operator=(const SparseMatrix& other)
    704     {
    705       if (other.isRValue())
    706       {
    707         swap(other.const_cast_derived());
    708       }
    709       else if(this!=&other)
    710       {
    711         initAssignment(other);
    712         if(other.isCompressed())
    713         {
    714           memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
    715           m_data = other.m_data;
    716         }
    717         else
    718         {
    719           Base::operator=(other);
    720         }
    721       }
    722       return *this;
    723     }
    724 
    725     #ifndef EIGEN_PARSED_BY_DOXYGEN
    726     template<typename Lhs, typename Rhs>
    727     inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    728     { return Base::operator=(product); }
    729 
    730     template<typename OtherDerived>
    731     inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
    732     {
    733       initAssignment(other);
    734       return Base::operator=(other.derived());
    735     }
    736 
    737     template<typename OtherDerived>
    738     inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    739     { return Base::operator=(other.derived()); }
    740     #endif
    741 
    742     template<typename OtherDerived>
    743     EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
    744 
    745     friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    746     {
    747       EIGEN_DBG_SPARSE(
    748         s << "Nonzero entries:\n";
    749         if(m.isCompressed())
    750           for (Index i=0; i<m.nonZeros(); ++i)
    751             s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
    752         else
    753           for (Index i=0; i<m.outerSize(); ++i)
    754           {
    755             Index p = m.m_outerIndex[i];
    756             Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
    757             Index k=p;
    758             for (; k<pe; ++k)
    759               s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
    760             for (; k<m.m_outerIndex[i+1]; ++k)
    761               s << "(_,_) ";
    762           }
    763         s << std::endl;
    764         s << std::endl;
    765         s << "Outer pointers:\n";
    766         for (Index i=0; i<m.outerSize(); ++i)
    767           s << m.m_outerIndex[i] << " ";
    768         s << " $" << std::endl;
    769         if(!m.isCompressed())
    770         {
    771           s << "Inner non zeros:\n";
    772           for (Index i=0; i<m.outerSize(); ++i)
    773             s << m.m_innerNonZeros[i] << " ";
    774           s << " $" << std::endl;
    775         }
    776         s << std::endl;
    777       );
    778       s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
    779       return s;
    780     }
    781 
    782     /** Destructor */
    783     inline ~SparseMatrix()
    784     {
    785       std::free(m_outerIndex);
    786       std::free(m_innerNonZeros);
    787     }
    788 
    789 #ifndef EIGEN_PARSED_BY_DOXYGEN
    790     /** Overloaded for performance */
    791     Scalar sum() const;
    792 #endif
    793 
    794 #   ifdef EIGEN_SPARSEMATRIX_PLUGIN
    795 #     include EIGEN_SPARSEMATRIX_PLUGIN
    796 #   endif
    797 
    798 protected:
    799 
    800     template<typename Other>
    801     void initAssignment(const Other& other)
    802     {
    803       resize(other.rows(), other.cols());
    804       if(m_innerNonZeros)
    805       {
    806         std::free(m_innerNonZeros);
    807         m_innerNonZeros = 0;
    808       }
    809     }
    810 
    811     /** \internal
    812       * \sa insert(Index,Index) */
    813     EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
    814 
    815     /** \internal
     816       * A vector object that is equal to 0 everywhere except for the value v at position i */
    817     class SingletonVector
    818     {
    819         Index m_index;
    820         Index m_value;
    821       public:
    822         typedef Index value_type;
    823         SingletonVector(Index i, Index v)
    824           : m_index(i), m_value(v)
    825         {}
    826 
    827         Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    828     };
    829 
    830     /** \internal
    831       * \sa insert(Index,Index) */
    832     EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
    833 
    834 public:
    835     /** \internal
    836       * \sa insert(Index,Index) */
    837     EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    838     {
    839       const Index outer = IsRowMajor ? row : col;
    840       const Index inner = IsRowMajor ? col : row;
    841 
    842       eigen_assert(!isCompressed());
    843       eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
    844 
    845       Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
    846       m_data.index(p) = inner;
    847       return (m_data.value(p) = 0);
    848     }
    849 
    850 private:
    851   static void check_template_parameters()
    852   {
    853     EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
    854     EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    855   }
    856 
    857   struct default_prunning_func {
    858     default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
    859     inline bool operator() (const Index&, const Index&, const Scalar& value) const
    860     {
    861       return !internal::isMuchSmallerThan(value, reference, epsilon);
    862     }
    863     Scalar reference;
    864     RealScalar epsilon;
    865   };
    866 };
    867 
    868 template<typename Scalar, int _Options, typename _Index>
    869 class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
    870 {
    871   public:
    872     InnerIterator(const SparseMatrix& mat, Index outer)
    873       : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
    874     {
    875       if(mat.isCompressed())
    876         m_end = mat.m_outerIndex[outer+1];
    877       else
    878         m_end = m_id + mat.m_innerNonZeros[outer];
    879     }
    880 
    881     inline InnerIterator& operator++() { m_id++; return *this; }
    882 
    883     inline const Scalar& value() const { return m_values[m_id]; }
    884     inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
    885 
    886     inline Index index() const { return m_indices[m_id]; }
    887     inline Index outer() const { return m_outer; }
    888     inline Index row() const { return IsRowMajor ? m_outer : index(); }
    889     inline Index col() const { return IsRowMajor ? index() : m_outer; }
    890 
    891     inline operator bool() const { return (m_id < m_end); }
    892 
    893   protected:
    894     const Scalar* m_values;
    895     const Index* m_indices;
    896     const Index m_outer;
    897     Index m_id;
    898     Index m_end;
    899 };
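        // Iteration sketch over all stored entries of a SparseMatrix<double> `A`
        // (column-major by default, so the outer loop runs over columns); this works in
        // both compressed and uncompressed mode:
        //
        //   for(int k = 0; k < A.outerSize(); ++k)
        //     for(SparseMatrix<double>::InnerIterator it(A, k); it; ++it)
        //     {
        //       // it.value(), it.row(), it.col() and it.index() describe the current entry
        //     }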
    900 
    901 template<typename Scalar, int _Options, typename _Index>
    902 class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
    903 {
    904   public:
    905     ReverseInnerIterator(const SparseMatrix& mat, Index outer)
    906       : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
    907     {
    908       if(mat.isCompressed())
    909         m_id = mat.m_outerIndex[outer+1];
    910       else
    911         m_id = m_start + mat.m_innerNonZeros[outer];
    912     }
    913 
    914     inline ReverseInnerIterator& operator--() { --m_id; return *this; }
    915 
    916     inline const Scalar& value() const { return m_values[m_id-1]; }
    917     inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
    918 
    919     inline Index index() const { return m_indices[m_id-1]; }
    920     inline Index outer() const { return m_outer; }
    921     inline Index row() const { return IsRowMajor ? m_outer : index(); }
    922     inline Index col() const { return IsRowMajor ? index() : m_outer; }
    923 
    924     inline operator bool() const { return (m_id > m_start); }
    925 
    926   protected:
    927     const Scalar* m_values;
    928     const Index* m_indices;
    929     const Index m_outer;
    930     Index m_id;
    931     const Index m_start;
    932 };
    933 
    934 namespace internal {
    935 
    936 template<typename InputIterator, typename SparseMatrixType>
    937 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
    938 {
    939   EIGEN_UNUSED_VARIABLE(Options);
    940   enum { IsRowMajor = SparseMatrixType::IsRowMajor };
    941   typedef typename SparseMatrixType::Scalar Scalar;
    942   typedef typename SparseMatrixType::Index Index;
    943   SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
    944 
    945   if(begin!=end)
    946   {
    947     // pass 1: count the nnz per inner-vector
    948     Matrix<Index,Dynamic,1> wi(trMat.outerSize());
    949     wi.setZero();
    950     for(InputIterator it(begin); it!=end; ++it)
    951     {
    952       eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
    953       wi(IsRowMajor ? it->col() : it->row())++;
    954     }
    955 
    956     // pass 2: insert all the elements into trMat
    957     trMat.reserve(wi);
    958     for(InputIterator it(begin); it!=end; ++it)
    959       trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
    960 
    961     // pass 3:
    962     trMat.sumupDuplicates();
    963   }
    964 
    965   // pass 4: transposed copy -> implicit sorting
    966   mat = trMat;
    967 }
    968 
    969 }
    970 
    971 
    972 /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
    973   *
    974   * A \em triplet is a tuple (i,j,value) defining a non-zero element.
     975   * The input list of triplets does not have to be sorted, and can contain duplicated elements.
    976   * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
    977   * This is a \em O(n) operation, with \em n the number of triplet elements.
     978   * The initial contents of \c *this are destroyed.
    979   * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
    980   * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
    981   *
    982   * The \a InputIterators value_type must provide the following interface:
    983   * \code
    984   * Scalar value() const; // the value
     985   * Index row() const;    // the row index i
     986   * Index col() const;    // the column index j
    987   * \endcode
    988   * See for instance the Eigen::Triplet template class.
    989   *
    990   * Here is a typical usage example:
    991   * \code
    992     typedef Triplet<double> T;
    993     std::vector<T> tripletList;
     994     tripletList.reserve(estimation_of_entries);
    995     for(...)
    996     {
    997       // ...
    998       tripletList.push_back(T(i,j,v_ij));
    999     }
   1000     SparseMatrixType m(rows,cols);
   1001     m.setFromTriplets(tripletList.begin(), tripletList.end());
   1002     // m is ready to go!
   1003   * \endcode
   1004   *
   1005   * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
   1006   * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
    1007   * be explicitly stored in a std::vector, for instance.
   1008   */
   1009 template<typename Scalar, int _Options, typename _Index>
   1010 template<typename InputIterators>
   1011 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
   1012 {
   1013   internal::set_from_triplets(begin, end, *this);
   1014 }
   1015 
   1016 /** \internal */
   1017 template<typename Scalar, int _Options, typename _Index>
   1018 void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
   1019 {
   1020   eigen_assert(!isCompressed());
   1021   // TODO, in practice we should be able to use m_innerNonZeros for that task
   1022   Matrix<Index,Dynamic,1> wi(innerSize());
   1023   wi.fill(-1);
   1024   Index count = 0;
    1025   // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
   1026   for(Index j=0; j<outerSize(); ++j)
   1027   {
   1028     Index start   = count;
   1029     Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
   1030     for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
   1031     {
   1032       Index i = m_data.index(k);
   1033       if(wi(i)>=start)
   1034       {
    1035         // we have already met this entry => accumulate it
   1036         m_data.value(wi(i)) += m_data.value(k);
   1037       }
   1038       else
   1039       {
   1040         m_data.value(count) = m_data.value(k);
   1041         m_data.index(count) = m_data.index(k);
   1042         wi(i) = count;
   1043         ++count;
   1044       }
   1045     }
   1046     m_outerIndex[j] = start;
   1047   }
   1048   m_outerIndex[m_outerSize] = count;
   1049 
   1050   // turn the matrix into compressed form
   1051   std::free(m_innerNonZeros);
   1052   m_innerNonZeros = 0;
   1053   m_data.resize(m_outerIndex[m_outerSize]);
   1054 }
   1055 
   1056 template<typename Scalar, int _Options, typename _Index>
   1057 template<typename OtherDerived>
   1058 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
   1059 {
   1060   EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
   1061         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
   1062 
   1063   const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
   1064   if (needToTranspose)
   1065   {
    1066     // two-pass algorithm:
   1067     //  1 - compute the number of coeffs per dest inner vector
   1068     //  2 - do the actual copy/eval
   1069     // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
   1070     typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
   1071     typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
   1072     OtherCopy otherCopy(other.derived());
   1073 
   1074     SparseMatrix dest(other.rows(),other.cols());
   1075     Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
   1076 
   1077     // pass 1
   1078     // FIXME the above copy could be merged with that pass
   1079     for (Index j=0; j<otherCopy.outerSize(); ++j)
   1080       for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
   1081         ++dest.m_outerIndex[it.index()];
   1082 
   1083     // prefix sum
   1084     Index count = 0;
   1085     Matrix<Index,Dynamic,1> positions(dest.outerSize());
   1086     for (Index j=0; j<dest.outerSize(); ++j)
   1087     {
   1088       Index tmp = dest.m_outerIndex[j];
   1089       dest.m_outerIndex[j] = count;
   1090       positions[j] = count;
   1091       count += tmp;
   1092     }
   1093     dest.m_outerIndex[dest.outerSize()] = count;
   1094     // alloc
   1095     dest.m_data.resize(count);
   1096     // pass 2
   1097     for (Index j=0; j<otherCopy.outerSize(); ++j)
   1098     {
   1099       for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
   1100       {
   1101         Index pos = positions[it.index()]++;
   1102         dest.m_data.index(pos) = j;
   1103         dest.m_data.value(pos) = it.value();
   1104       }
   1105     }
   1106     this->swap(dest);
   1107     return *this;
   1108   }
   1109   else
   1110   {
   1111     if(other.isRValue())
   1112       initAssignment(other.derived());
   1113     // there is no special optimization
   1114     return Base::operator=(other.derived());
   1115   }
   1116 }
   1117 
   1118 template<typename _Scalar, int _Options, typename _Index>
   1119 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
   1120 {
   1121   eigen_assert(!isCompressed());
   1122 
   1123   const Index outer = IsRowMajor ? row : col;
   1124   const Index inner = IsRowMajor ? col : row;
   1125 
   1126   Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
   1127   Index innerNNZ = m_innerNonZeros[outer];
   1128   if(innerNNZ>=room)
   1129   {
   1130     // this inner vector is full, we need to reallocate the whole buffer :(
   1131     reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
   1132   }
   1133 
   1134   Index startId = m_outerIndex[outer];
   1135   Index p = startId + m_innerNonZeros[outer];
   1136   while ( (p > startId) && (m_data.index(p-1) > inner) )
   1137   {
   1138     m_data.index(p) = m_data.index(p-1);
   1139     m_data.value(p) = m_data.value(p-1);
   1140     --p;
   1141   }
    1142   eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
   1143 
   1144   m_innerNonZeros[outer]++;
   1145 
   1146   m_data.index(p) = inner;
   1147   return (m_data.value(p) = 0);
   1148 }
   1149 
   1150 template<typename _Scalar, int _Options, typename _Index>
   1151 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
   1152 {
   1153   eigen_assert(isCompressed());
   1154 
   1155   const Index outer = IsRowMajor ? row : col;
   1156   const Index inner = IsRowMajor ? col : row;
   1157 
   1158   Index previousOuter = outer;
   1159   if (m_outerIndex[outer+1]==0)
   1160   {
   1161     // we start a new inner vector
   1162     while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
   1163     {
   1164       m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
   1165       --previousOuter;
   1166     }
   1167     m_outerIndex[outer+1] = m_outerIndex[outer];
   1168   }
   1169 
   1170   // here we have to handle the tricky case where the outerIndex array
    1171   // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
   1172   // the 2nd inner vector...
   1173   bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
   1174                 && (size_t(m_outerIndex[outer+1]) == m_data.size());
   1175 
   1176   size_t startId = m_outerIndex[outer];
   1177   // FIXME let's make sure sizeof(long int) == sizeof(size_t)
   1178   size_t p = m_outerIndex[outer+1];
   1179   ++m_outerIndex[outer+1];
   1180 
   1181   double reallocRatio = 1;
   1182   if (m_data.allocatedSize()<=m_data.size())
   1183   {
   1184     // if there is no preallocated memory, let's reserve a minimum of 32 elements
   1185     if (m_data.size()==0)
   1186     {
   1187       m_data.reserve(32);
   1188     }
   1189     else
   1190     {
   1191       // we need to reallocate the data, to reduce multiple reallocations
   1192       // we use a smart resize algorithm based on the current filling ratio
    1193       // in addition, we use double to avoid integer overflows
   1194       double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
   1195       reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
   1196       // furthermore we bound the realloc ratio to:
    1197       //   1) reduce multiple minor reallocs when the matrix is almost filled
    1198       //   2) avoid allocating too much memory when the matrix is almost empty
   1199       reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
   1200     }
   1201   }
   1202   m_data.resize(m_data.size()+1,reallocRatio);
   1203 
   1204   if (!isLastVec)
   1205   {
   1206     if (previousOuter==-1)
   1207     {
   1208       // oops wrong guess.
   1209       // let's correct the outer offsets
   1210       for (Index k=0; k<=(outer+1); ++k)
   1211         m_outerIndex[k] = 0;
   1212       Index k=outer+1;
   1213       while(m_outerIndex[k]==0)
   1214         m_outerIndex[k++] = 1;
   1215       while (k<=m_outerSize && m_outerIndex[k]!=0)
   1216         m_outerIndex[k++]++;
   1217       p = 0;
   1218       --k;
   1219       k = m_outerIndex[k]-1;
   1220       while (k>0)
   1221       {
   1222         m_data.index(k) = m_data.index(k-1);
   1223         m_data.value(k) = m_data.value(k-1);
   1224         k--;
   1225       }
   1226     }
   1227     else
   1228     {
   1229       // we are not inserting into the last inner vec
   1230       // update outer indices:
   1231       Index j = outer+2;
   1232       while (j<=m_outerSize && m_outerIndex[j]!=0)
   1233         m_outerIndex[j++]++;
   1234       --j;
   1235       // shift data of last vecs:
   1236       Index k = m_outerIndex[j]-1;
   1237       while (k>=Index(p))
   1238       {
   1239         m_data.index(k) = m_data.index(k-1);
   1240         m_data.value(k) = m_data.value(k-1);
   1241         k--;
   1242       }
   1243     }
   1244   }
   1245 
   1246   while ( (p > startId) && (m_data.index(p-1) > inner) )
   1247   {
   1248     m_data.index(p) = m_data.index(p-1);
   1249     m_data.value(p) = m_data.value(p-1);
   1250     --p;
   1251   }
   1252 
   1253   m_data.index(p) = inner;
   1254   return (m_data.value(p) = 0);
   1255 }
   1256 
   1257 } // end namespace Eigen
   1258 
   1259 #endif // EIGEN_SPARSEMATRIX_H
   1260