// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
  * \param MatrixType the type of the underlying sparse matrix storing the coefficients
  * \param UpLo can be either \c #Lower or \c #Upper
  *
  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
  * and most of the time this is the only way that it is used.
  *
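  * A minimal usage sketch (illustrative only; assumes a square SparseMatrix<double> A
  * whose lower triangular part has been filled):
  * \code
  * SparseMatrix<double> A;               // only the lower triangle of A needs to be stored
  * VectorXd x(A.cols()), y;
  * y = A.selfadjointView<Lower>() * x;   // multiply as if A were the full selfadjoint matrix
  * \endcode
  *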
  * \sa SparseMatrixBase::selfadjointView()
  */
template<typename Lhs, typename Rhs, int UpLo>
class SparseSelfAdjointTimeDenseProduct;

template<typename Lhs, typename Rhs, int UpLo>
class DenseTimeSparseSelfAdjointProduct;

namespace internal {

template<typename MatrixType, unsigned int UpLo>
struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
};

template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);

template<int UpLo,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);

}

template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
{
  public:

    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;
    typedef Matrix<Index,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \internal \returns a reference to the nested matrix */
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
      *
      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived>
    SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
    }

    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
      *
      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived> friend
    SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject >
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
    }

    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
    template<typename OtherDerived>
    SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
    operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
    }

    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
    template<typename OtherDerived> friend
    DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
    }

    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
      *
      * \returns a reference to \c *this
      *
      * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
      * call this function with u.adjoint().
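      *
      * A minimal sketch (illustrative only; assumes a SparseMatrix<double> A holding the
      * lower triangle and a sparse matrix \c u with A.rows() rows):
      * \code
      * A.selfadjointView<Lower>().rankUpdate(u);        // A += u * u.adjoint()
      * A.selfadjointView<Lower>().rankUpdate(u, -1.0);  // A -= u * u.adjoint()
      * \endcode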
      */
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

    /** \internal triggered by sparse_matrix = SparseSelfAdjointView; */
    template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
    {
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
    }

    template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
    {
      // TODO directly evaluate into _dest;
      SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
      _dest = tmp;
    }

    /** \returns an expression of the permuted selfadjoint matrix \f$ P H P^{-1} \f$ */
    SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
    }
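    // Illustrative usage sketch (assumes a PermutationMatrix P of matching size and a
    // square SparseMatrix<double> A storing its lower triangle):
    //   SparseMatrix<double> B(A.rows(), A.cols());
    //   B.selfadjointView<Lower>() = A.selfadjointView<Lower>().twistedBy(P);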

    template<typename SrcMatrixType,int SrcUpLo>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
    {
      permutedMatrix.evalTo(*this);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,Index> pnull;
      return *this = src.twistedBy(pnull);
    }

    template<typename SrcMatrixType,unsigned int SrcUpLo>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,Index> pnull;
      return *this = src.twistedBy(pnull);
    }

    // const SparseLLT<PlainObject, UpLo> llt() const;
    // const SparseLDLT<PlainObject, UpLo> ldlt() const;

  protected:

    typename MatrixType::Nested m_matrix;
    mutable VectorI m_countPerRow;
    mutable VectorI m_countPerCol;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
{
  return derived();
}

template<typename Derived>
template<unsigned int UpLo>
SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
{
  return derived();
}

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,UpLo>&
SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
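  // Evaluate u * u.adjoint() into a temporary sparse matrix with the same storage
  // order as the nested expression, then fold only its UpLo triangular part into *this.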
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
  else
    m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();

  return *this;
}

/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix
***************************************************************************/

namespace internal {
template<typename Lhs, typename Rhs, int UpLo>
struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
 : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
{
  typedef Dense StorageKind;
};
}

template<typename Lhs, typename Rhs, int UpLo>
class SparseSelfAdjointTimeDenseProduct
  : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
{
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)

    SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {}

    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
    {
      EIGEN_ONLY_USED_FOR_DEBUG(alpha);
      // TODO use alpha
      eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
      typedef typename internal::remove_all<Lhs>::type _Lhs;
      typedef typename _Lhs::InnerIterator LhsInnerIterator;
      enum {
        LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
        ProcessFirstHalf =
                 ((UpLo&(Upper|Lower))==(Upper|Lower))
              || ( (UpLo&Upper) && !LhsIsRowMajor)
              || ( (UpLo&Lower) && LhsIsRowMajor),
        ProcessSecondHalf = !ProcessFirstHalf
      };
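      // For each outer vector of the stored triangle, apply both the stored coefficient
      // and its conjugated mirror image, so that the full selfadjoint matrix is multiplied
      // even though only one triangular half is actually stored.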
      for (Index j=0; j<m_lhs.outerSize(); ++j)
      {
        LhsInnerIterator i(m_lhs,j);
        if (ProcessSecondHalf)
        {
          while (i && i.index()<j) ++i;
          if(i && i.index()==j)
          {
            dest.row(j) += i.value() * m_rhs.row(j);
            ++i;
          }
        }
        for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
        {
          Index a = LhsIsRowMajor ? j : i.index();
          Index b = LhsIsRowMajor ? i.index() : j;
          typename Lhs::Scalar v = i.value();
          dest.row(a) += (v) * m_rhs.row(b);
          dest.row(b) += numext::conj(v) * m_rhs.row(a);
        }
        if (ProcessFirstHalf && i && (i.index()==j))
          dest.row(j) += i.value() * m_rhs.row(j);
      }
    }

  private:
    SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
};

namespace internal {
template<typename Lhs, typename Rhs, int UpLo>
struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
 : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
{};
}

template<typename Lhs, typename Rhs, int UpLo>
class DenseTimeSparseSelfAdjointProduct
  : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
{
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)

    DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {}

    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
    {
      // TODO
    }

  private:
    DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
};

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<typename MatrixType, int UpLo>
struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
};

template<int UpLo,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
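  // Expands the UpLo triangular part of mat into a fully populated selfadjoint matrix
  // dest (both halves stored), optionally applying the symmetric permutation given by
  // perm (an index array, or 0 for the identity).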
  typedef typename MatrixType::Index Index;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
  typedef Matrix<Index,Dynamic,1> VectorI;

  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
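  // first pass: count the number of entries in each outer vector of the destination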
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(UpLo==(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data
  for(Index j = 0; j<size; ++j)
  {
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();

      Index jp = perm ? perm[j] : j;
      Index ip = perm ? perm[i] : i;

      if(UpLo==(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}

template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
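  // Copies the _SrcUpLo triangular part of mat into the _DstUpLo triangular part of dest,
  // optionally applying the symmetric permutation given by perm (0 means identity).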
  typedef typename MatrixType::Index Index;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
  typedef Matrix<Index,Dynamic,1> VectorI;
  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
    SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
  };

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
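  // first pass: count the entries of the source triangle that fall into each
  // destination outer vector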
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
        continue;

      Index ip = perm ? perm[i] : i;
      count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(Index j = 0; j<size; ++j)
  {
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
        continue;

      Index jp = perm ? perm[j] : j;
      Index ip = perm ? perm[i] : i;

      Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}

}

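/** \internal
  * Pseudo expression returned by SparseSelfAdjointView::twistedBy(), representing the
  * symmetric permutation P H P^-1 of a selfadjoint matrix H stored in one triangular part.
  */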
template<typename MatrixType,int UpLo>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
  public:
    typedef Matrix<Index,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    template<typename DestScalar, int Options, typename DstIndex>
    void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
    {
//       internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
      SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
      _dest = tmp;
    }

    template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
    {
      internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
    }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;

};

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H