// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud (at) inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H

namespace Eigen {

namespace internal {

/* Optimized selfadjoint matrix * vector product:
 * This algorithm processes 2 columns at once, which both halves the
 * number of loads/stores of the result and reduces instruction
 * dependencies.
 */
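/* For illustration, this kernel is what ultimately services selfadjoint
 * matrix * vector expressions written with the public API, e.g.
 * (a usage sketch, not part of this file):
 *
 *   Eigen::MatrixXf A(n,n);
 *   Eigen::VectorXf x(n), y(n);
 *   y.noalias() += 2.f * A.selfadjointView<Eigen::Lower>() * x;
 *
 * Only the referenced triangle of A is read; the other half is
 * reconstructed on the fly by (conjugate) symmetry.
 */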
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>
struct selfadjoint_matrix_vector_product;

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
struct selfadjoint_matrix_vector_product
{
  static EIGEN_DONT_INLINE void run(
    Index size,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs,
    Scalar* res,
    Scalar alpha);
};

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
  Index size,
  const Scalar* lhs, Index lhsStride,
  const Scalar* rhs,
  Scalar* res,
  Scalar alpha)
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);

  enum {
    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
    IsLower = UpLo == Lower ? 1 : 0,
    FirstTriangular = IsRowMajor == IsLower
  };

  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> cj0;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
  conj_helper<RealScalar,Scalar,false, ConjugateRhs> cjd;

  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0;
  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;

  Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;

  // Process the bulk of the columns two at a time; the remaining (shortest)
  // columns of the stored triangle are handled by the scalar loop at the end.
  Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
  if (FirstTriangular)
    bound = size - bound;

  for (Index j=FirstTriangular ? bound : 0;
       j<(FirstTriangular ? size : bound);j+=2)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
    const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;

    Scalar t0 = cjAlpha * rhs[j];
    Packet ptmp0 = pset1<Packet>(t0);
    Scalar t1 = cjAlpha * rhs[j+1];
    Packet ptmp1 = pset1<Packet>(t1);

    Scalar t2(0);
    Packet ptmp2 = pset1<Packet>(t2);
    Scalar t3(0);
    Packet ptmp3 = pset1<Packet>(t3);
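    // t0/t1 (and ptmp0/ptmp1) broadcast alpha*rhs[j] and alpha*rhs[j+1] for the
    // contribution of the stored triangle, while t2/t3 (and ptmp2/ptmp3)
    // accumulate the dot products of the two columns with rhs, i.e. the
    // contribution of the mirrored half that is never stored explicitly.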
    Index starti = FirstTriangular ? 0 : j+2;
    Index endi   = FirstTriangular ? j : size;
    Index alignedStart = (starti) + internal::first_default_aligned(&res[starti], endi-starti);
    Index alignedEnd   = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);

    res[j]   += cjd.pmul(numext::real(A0[j]), t0);
    res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
    if(FirstTriangular)
    {
      res[j] += cj0.pmul(A1[j], t1);
      t3 += cj1.pmul(A1[j], rhs[j]);
    }
    else
    {
      res[j+1] += cj0.pmul(A0[j+1],t0);
      t2 += cj1.pmul(A0[j+1], rhs[j+1]);
    }

    for (Index i=starti; i<alignedStart; ++i)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up)
    // gcc 4.2 does this optimization automatically.
    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;
    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;
    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
    Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
    for (Index i=alignedStart; i<alignedEnd; i+=PacketSize)
    {
      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;
      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;
      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
      Packet Xi  = pload <Packet>(resIt);

      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
      ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2);
      ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3);
      pstore(resIt,Xi); resIt += PacketSize;
    }
    for (Index i=alignedEnd; i<endi; i++)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }

    res[j]   += alpha * (t2 + predux(ptmp2));
    res[j+1] += alpha * (t3 + predux(ptmp3));
  }
  for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;

    Scalar t1 = cjAlpha * rhs[j];
    Scalar t2(0);
    res[j] += cjd.pmul(numext::real(A0[j]), t1);
    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
    {
      res[i] += cj0.pmul(A0[i], t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
    }
    res[j] += alpha * t2;
  }
}

} // end namespace internal

/***************************************************************************
* Wrapper to product_selfadjoint_vector
***************************************************************************/

namespace internal {

template<typename Lhs, int LhsMode, typename Rhs>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum { LhsUpLo = LhsMode&(Upper|Lower) };

  template<typename Dest>
  static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    typedef typename Dest::Scalar ResScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;

    eigen_assert(dest.rows()==a_lhs.rows() && dest.cols()==a_rhs.cols());

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    enum {
      EvalToDest = (Dest::InnerStrideAtCompileTime==1),
      UseRhs = (ActualRhsTypeCleaned::InnerStrideAtCompileTime==1)
    };

    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
    internal::gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!UseRhs> static_rhs;

    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                  EvalToDest ? dest.data() : static_dest.data());

    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
                                                  UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());

    if(!EvalToDest)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      Index size = dest.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      MappedDest(actualDestPtr, dest.size()) = dest;
    }

    if(!UseRhs)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      Index size = rhs.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
    }
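    // Both operands now sit in dense buffers with unit inner stride; invoke the
    // optimized kernel, resolving storage order and the stored triangle at
    // compile time from the unwrapped lhs.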
    internal::selfadjoint_matrix_vector_product<Scalar, Index, (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor,
                                                int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run
      (
        lhs.rows(),                             // size
        &lhs.coeffRef(0,0), lhs.outerStride(),  // lhs info
        actualRhsPtr,                           // rhs info
        actualDestPtr,                          // result info
        actualAlpha                             // scale factor
      );

    if(!EvalToDest)
      dest = MappedDest(actualDestPtr, dest.size());
  }
};

template<typename Lhs, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,0,true,Rhs,RhsMode,false>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  enum { RhsUpLo = RhsMode&(Upper|Lower) };

  template<typename Dest>
  static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    // let's simply transpose the product
    Transpose<Dest> destT(dest);
    selfadjoint_product_impl<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
                             Transpose<const Lhs>, 0, true>::run(destT, a_rhs.transpose(), a_lhs.transpose(), alpha);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H