// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
  */
template<typename _Scalar, typename _Index>
class AmbiVector
{
  public:
    typedef _Scalar Scalar;
    typedef _Index Index;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = start; m_end = end; }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = size;
    }

    Index size() const { return m_size; }

  protected:

    void reallocate(Index size)
    {
      // if the size of the matrix is not too large, let's allocate a bit more than needed so
      // that we can handle a dense vector even in sparse mode.
      delete[] m_buffer;
      if (size<1000)
      {
        Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
        m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
        m_buffer = new Scalar[allocSize];
        m_allocatedSize = allocSize; // record the capacity (in Scalars) so resize() can skip redundant reallocations
      }
      else
      {
        m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
        m_buffer = new Scalar[size];
        m_allocatedSize = size;
      }
      m_size = size;
      m_start = 0;
      m_end = m_size;
    }

    void reallocateSparse()
    {
      Index copyElements = m_allocatedElements;
      m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
      Scalar* newBuffer = new Scalar[allocSize];
      memcpy(newBuffer,  m_buffer,  copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }
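
    // Editor's note: reallocateSparse() grows the linked list by a factor of
    // 1.5 (capped at m_size elements) and rounds the required byte count up
    // to a whole number of Scalar slots; e.g. 10 allocated elements grow to
    // min(15, m_size).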

  protected:
    // element type of the linked list
    struct ListEl
    {
      Index next;
      Index index;
      Scalar value;
    };
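
    // Editor's note: with, e.g., Scalar=double and Index=int, sizeof(ListEl)
    // is typically 16 bytes (4+4+8), i.e. one ListEl occupies the space of
    // two Scalars in m_buffer; the capacity arithmetic in reallocate() and
    // reallocateSparse() above depends only on this sizeof ratio.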

    // used to store data in both modes
    Scalar* m_buffer;
    Scalar m_zero;
    Index m_size;
    Index m_start;
    Index m_end;
    Index m_allocatedSize;
    Index m_allocatedElements;
    Index m_mode;

    // linked list mode
    Index m_llStart;
    Index m_llCurrent;
    Index m_llSize;
};
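
// Editor's usage sketch (not part of the library): the expected write/read
// protocol, assuming a float/int instantiation, a hypothetical size n, and a
// hypothetical callback use_value():
//
//   AmbiVector<float,int> w(n);   // workspace, e.g. one column of a product
//   w.init(0.01);                 // low estimated density selects sparse mode
//   w.setZero();
//   w.restart();
//   w.coeffRef(5) += 1.f;         // indices may grow without restart()...
//   w.coeffRef(8) += 2.f;
//   w.restart();                  // ...but rewind before writing 2 < 8
//   w.coeffRef(2) += 3.f;
//   for (AmbiVector<float,int>::Iterator it(w); it; ++it)
//     use_value(it.index(), it.value());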

/** \returns the number of non-zeros in the current sub-vector */
template<typename _Scalar,typename _Index>
_Index AmbiVector<_Scalar,_Index>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
{
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}
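
// Editor's sketch (not part of the library): callers typically derive the
// density estimate from an expected non-zero count, e.g. with a hypothetical
// workspace w and count nnzEstimate:
//
//   w.init(double(nnzEstimate)/double(w.size()));  // > 0.1 selects dense mode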

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(int mode)
{
  m_mode = mode;
  if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::restart()
{
  m_llCurrent = m_llStart;
}
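
// Editor's illustration of the protocol above (w is a hypothetical
// AmbiVector in sparse mode):
//
//   w.coeffRef(7) = 1;
//   w.restart();        // required before writing a smaller index (3 < 7);
//   w.coeffRef(3) = 2;  // without restart() this would trip the assertion
//                       // in coeffRef()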

/** Set all coefficients of the current sub-vector to zero */
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = i;
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = i;
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
      Index nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = i;
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}
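
// Editor's illustration of the linked-list layout built by coeffRef():
// elements are appended to m_buffer in insertion order while the `next`
// links keep traversal sorted by index. Inserting indices 4, 9, then 2
// (with a restart() before 2) gives:
//
//   slot:   0    1    2
//   index:  4    9    2
//   next:   1   -1    0      // m_llStart == 2, traversal order: 2 -> 4 -> 9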

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      // guard against walking off the end of the list, and return the value
      // of the element we actually found (not the one at m_llCurrent)
      if (elid>=0 && llElements[elid].index==i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}

/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _Index>
class AmbiVector<_Scalar,_Index>::Iterator
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients.
      * In practice, all coefficients having a magnitude smaller than or
      * equal to \a epsilon are skipped.
      */
    Iterator(const AmbiVector& vec, RealScalar epsilon = 0)
      : m_vector(vec)
    {
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    Index index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    Index m_currentEl;          // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    Index m_cachedIndex;        // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};
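
// Editor's sketch of iterating with pruning (w and result are hypothetical;
// result stands for, e.g., a sparse column being filled from the workspace):
//
//   for (AmbiVector<float,int>::Iterator it(w, 1e-6f); it; ++it)
//     result.insert(it.index()) = it.value();  // skips |value| <= 1e-6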

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H