// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
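
// Note: the specialization above relies on the identity (A*B)^T = B^T * A^T:
// writing a row-major result is the same as writing its transpose in
// column-major order, so the two operands are simply swapped and their
// storage orders flipped before forwarding to the column-major kernel below.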

/*  Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
  LhsMapper lhs(_lhs,lhsStride);
  RhsMapper rhs(_rhs,rhsStride);
  ResMapper res(_res, resStride);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction
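
  // Roughly speaking, the sequential path below implements the classic blocked
  // (Goto) loop nest:
  //
  //   for i2 in [0, rows) step mc:                 // mc x kc panel of the lhs
  //     for k2 in [0, depth) step kc:
  //       pack lhs(i2:i2+mc, k2:k2+kc) into blockA       (L2/L3 caching)
  //       for j2 in [0, cols) step nc:             // kc x nc block of the rhs
  //         pack rhs(k2:k2+kc, j2:j2+nc) into blockB     (L2 caching)
  //         res(i2:i2+mc, j2:j2+nc) += alpha * blockA * blockB   // gebp kernel
  //
  // with kc/mc/nc chosen by the level3_blocking object so that the packed
  // panels fit in the respective cache levels.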

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    int tid = omp_get_thread_num();
    int threads = omp_get_num_threads();

    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
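    // Note the asymmetry between the two packing buffers: blockA is a single
    // buffer shared by all threads (each thread packs, and then owns, its slice
    // A'_i of it), whereas each thread gets its own private blockB on the stack,
    // holding only its stripe of rhs columns (the rhs pointer passed to run()
    // is already offset to this thread's columns by the caller, see gemm_functor).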

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B' and cols of A'

      // In order to reduce the chance that a thread has to wait for the others,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}
      info[tid].users += threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;
      // Compute this thread's stripe of C, one block C_i += A'_i * B' at a time
      for(int shift=0; shift<threads; ++shift)
      {
        int i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by thread i,
        // so we busy-wait on info[i].sync until it reaches the current k.
        // However, there is no need to wait for the B' part, which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {}
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index i=0; i<threads; ++i)
        info[i].users -= 1;
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
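    // When kc==depth and nc==cols, the same packed B' is valid for every pass
    // over the row blocks, so it is packed only once (on the first i2 iteration
    // of the loop below); mc!=rows guarantees there is more than one such pass.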

    // For each horizontal panel of mc rows of the lhs (and of the result)...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, equal to the number of
          // micro horizontal panels of the packed lhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
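// When the three dimensions are all fixed at compile time (FiniteAtCompileTime),
// the packing buffers can be statically sized and live inside the blocking
// object; otherwise their sizes are computed at runtime and the buffers are
// heap-allocated. The two specializations below implement these two cases.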

template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
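    // In the fallback above, static alignment cannot be guaranteed, so the raw
    // char buffers are over-allocated by EIGEN_DEFAULT_ALIGN_BYTES-1 bytes and
    // the constructor below rounds the pointers up to the next aligned address.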

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }
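      // Note: in the "no l3 blocking" branch a local copy n is passed on purpose:
      // computeProductBlockingSizes may shrink its by-reference arguments, and
      // here only kc and mc should be blocked while this->m_nc keeps the full
      // column count, i.e., the rhs is not blocked along the N direction.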

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

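  // For small products (sum of the three dimensions below 20) the packing and
  // blocking machinery costs more than it saves, so the three helpers below
  // fall back to the coefficient-based (lazy) product in that case.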
  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};
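
// Note: a plain dense expression such as C.noalias() += s * A.adjoint() * B with
// large dynamic-size matrices dispatches to the specialization above; blas_traits
// extracts the scalar factor and the conjugate/transpose flags from the operands,
// so that only alpha, raw pointers and storage orders reach the blocked kernel.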

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
    492