// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud (at) inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H
#define EIGEN_SPARSELU_GEMM_KERNEL_H

namespace Eigen {

namespace internal {


/** \internal
  * A general matrix-matrix product kernel optimized for the SparseLU factorization.
  *  - A, B, and C must be column major
  *  - lda and ldc must be multiples of the respective packet size
  *  - C must have the same alignment as A
  */
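//
// Illustrative call, computing C += A*B (a minimal sketch, not part of the
// library; the concrete sizes and the use of MatrixXd below are assumptions,
// chosen so that the strides and alignment satisfy the contract above):
//
//   Index m = 256, n = 8, d = 4;
//   MatrixXd A(m,d), B(d,n), C(m,n);   // column major, aligned allocation
//   A.setRandom(); B.setRandom(); C.setZero();
//   internal::sparselu_gemm<double>(m, n, d, A.data(), A.outerStride(),
//                                   B.data(), B.outerStride(),
//                                   C.data(), C.outerStride());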
template<typename Scalar>
EIGEN_DONT_INLINE
void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
{
  using namespace Eigen::internal;

  typedef typename packet_traits<Scalar>::type Packet;
  enum {
    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    PacketSize = packet_traits<Scalar>::size,
    PM = 8,                             // peeling in M
    RN = 2,                             // register blocking
    RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
    BM = 4096/sizeof(Scalar),           // number of rows of A-C per chunk
    SM = PM*PacketSize                  // step along M
  };
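  // For concreteness (the numbers are architecture dependent): assuming
  // Scalar==double on an AVX target with 16 SIMD registers, PacketSize==4,
  // so RK==4, the peeled step is SM = 8*4 = 32 rows, and BM = 4096/8 = 512
  // rows, i.e. a chunk of one column of A or C spans exactly 4KB.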
  Index d_end = (d/RK)*RK;    // number of columns of A (rows of B) suitable for full register blocking
  Index n_end = (n/RN)*RN;    // number of columns of B-C suitable for processing RN columns at once
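  // i0 is the number of leading rows of A that precede the first address
  // suitable for aligned packet loads; the assertion below checks that C
  // shares this alignment and that both column strides preserve it.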
  Index i0 = internal::first_default_aligned(A,m);

  eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m)));

  // handle the non-aligned rows of A and C without any optimization:
  for(Index i=0; i<i0; ++i)
  {
    for(Index j=0; j<n; ++j)
    {
      Scalar c = C[i+j*ldc];
      for(Index k=0; k<d; ++k)
        c += B[k+j*ldb] * A[i+k*lda];
      C[i+j*ldc] = c;
    }
  }
  // process the remaining rows per chunk of BM rows
  for(Index ib=i0; ib<m; ib+=BM)
  {
    Index actual_b = std::min<Index>(BM, m-ib);                 // actual number of rows
    Index actual_b_end1 = (actual_b/SM)*SM;                   // actual number of rows suitable for peeling
    Index actual_b_end2 = (actual_b/PacketSize)*PacketSize;   // actual number of rows suitable for vectorization

    // Let's process two columns of B-C at once
    for(Index j=0; j<n_end; j+=RN)
    {
      const Scalar* Bc0 = B+(j+0)*ldb;
      const Scalar* Bc1 = B+(j+1)*ldb;

      for(Index k=0; k<d_end; k+=RK)
      {

        // load and expand an RN x RK block of B
        Packet b00, b10, b20, b30, b01, b11, b21, b31;
                  { b00 = pset1<Packet>(Bc0[0]); }
                  { b10 = pset1<Packet>(Bc0[1]); }
        if(RK==4) { b20 = pset1<Packet>(Bc0[2]); }
        if(RK==4) { b30 = pset1<Packet>(Bc0[3]); }
                  { b01 = pset1<Packet>(Bc1[0]); }
                  { b11 = pset1<Packet>(Bc1[1]); }
        if(RK==4) { b21 = pset1<Packet>(Bc1[2]); }
        if(RK==4) { b31 = pset1<Packet>(Bc1[3]); }

        Packet a0, a1, a2, a3, c0, c1, t0, t1;

        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;

        Scalar* C0 = C+ib+(j+0)*ldc;
        Scalar* C1 = C+ib+(j+1)*ldc;

                  a0 = pload<Packet>(A0);
                  a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }

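        // KMADD computes c += a*b with packet multiply/add, going through a
        // temporary so that the broadcast values b00..b31 stay live across
        // iterations. WORK(I) updates the I-th packet of the two current C
        // columns and already preloads the packets of A needed by the next
        // step, so that loads overlap with arithmetic; the if(RK==4) tests
        // are compile-time constants and are optimized away.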
#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
#define WORK(I)  \
                     c0 = pload<Packet>(C0+i+(I)*PacketSize);    \
                     c1 = pload<Packet>(C1+i+(I)*PacketSize);    \
                     KMADD(c0, a0, b00, t0)                      \
                     KMADD(c1, a0, b01, t1)                      \
                     a0 = pload<Packet>(A0+i+(I+1)*PacketSize);  \
                     KMADD(c0, a1, b10, t0)                      \
                     KMADD(c1, a1, b11, t1)                      \
                     a1 = pload<Packet>(A1+i+(I+1)*PacketSize);  \
          if(RK==4){ KMADD(c0, a2, b20, t0)                     }\
          if(RK==4){ KMADD(c1, a2, b21, t1)                     }\
          if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
          if(RK==4){ KMADD(c0, a3, b30, t0)                     }\
          if(RK==4){ KMADD(c1, a3, b31, t1)                     }\
          if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
                     pstore(C0+i+(I)*PacketSize, c0);            \
                     pstore(C1+i+(I)*PacketSize, c1)

        // process rows of A' - C' with aggressive vectorization and peeling
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
                    prefetch((A0+i+(5)*PacketSize));
                    prefetch((A1+i+(5)*PacketSize));
          if(RK==4) prefetch((A2+i+(5)*PacketSize));
          if(RK==4) prefetch((A3+i+(5)*PacketSize));

          WORK(0);
          WORK(1);
          WORK(2);
          WORK(3);
          WORK(4);
          WORK(5);
          WORK(6);
          WORK(7);
        }
        // process the remaining rows with vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
        {
          WORK(0);
        }
#undef WORK
        // process the remaining rows without vectorization
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
          {
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
          }
          else
          {
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
          }
        }

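        // advance to the next RK entries within the current two columns of B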
        Bc0 += RK;
        Bc1 += RK;
      } // peeled loop on k
    } // peeled loop on the columns j
    // process the last column (we now perform a matrix-vector product)
    if((n-n_end)>0)
    {
      const Scalar* Bc0 = B+(n-1)*ldb;

      for(Index k=0; k<d_end; k+=RK)
      {

        // load and expand a 1 x RK block of B
        Packet b00, b10, b20, b30;
                  b00 = pset1<Packet>(Bc0[0]);
                  b10 = pset1<Packet>(Bc0[1]);
        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
        if(RK==4) b30 = pset1<Packet>(Bc0[3]);

        Packet a0, a1, a2, a3, c0, t0/*, t1*/;

        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;

        Scalar* C0 = C+ib+(n_end)*ldc;

                  a0 = pload<Packet>(A0);
                  a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }

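        // Same scheme as above, specialized for a single column of C: each
        // WORK(I) now updates one packet of C0 only (a matrix-vector product).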
#define WORK(I) \
                   c0 = pload<Packet>(C0+i+(I)*PacketSize);     \
                   KMADD(c0, a0, b00, t0)                       \
                   a0 = pload<Packet>(A0+i+(I+1)*PacketSize);   \
                   KMADD(c0, a1, b10, t0)                       \
                   a1 = pload<Packet>(A1+i+(I+1)*PacketSize);   \
        if(RK==4){ KMADD(c0, a2, b20, t0)                      }\
        if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize);  }\
        if(RK==4){ KMADD(c0, a3, b30, t0)                      }\
        if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize);  }\
                   pstore(C0+i+(I)*PacketSize, c0);

        // aggressive vectorization and peeling
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
          WORK(0);
          WORK(1);
          WORK(2);
          WORK(3);
          WORK(4);
          WORK(5);
          WORK(6);
          WORK(7);
        }
        // vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
        {
          WORK(0);
        }
        // remaining scalars
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
          else
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
        }

        Bc0 += RK;
#undef WORK
      }
    }

    // process the last columns of A, corresponding to the last rows of B
    Index rd = d-d_end;
    if(rd>0)
    {
      for(Index j=0; j<n; ++j)
      {
        enum {
          Alignment = PacketSize>1 ? Aligned : 0
        };
        typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
        typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
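        // The aligned Maps are valid here: A+ib and C+ib are aligned (ib
        // differs from the aligned offset i0 by a multiple of BM), and both
        // strides are multiples of the packet size, so every mapped segment
        // starts at an aligned address.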
        if(rd==1)       MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);

        else if(rd==2)  MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);

        else            MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
                                                        + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
      }
    }

  } // blocking on the rows of A and C
}
#undef KMADD

} // namespace internal

} // namespace Eigen

#endif // EIGEN_SPARSELU_GEMM_KERNEL_H