// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud (at) inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

typedef __m128  Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;

template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)


template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasDiv  = 1,
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    HasLog  = 1,
    HasExp  = 1,
    HasSqrt = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,

    HasDiv = 1
  };
};
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet4i type;
  enum {
    // FIXME check the Has*
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
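// Rough usage sketch (illustration only, not part of this header): generic Eigen kernels
// compose the wrappers defined below roughly as follows, here for a hypothetical
// r = s*a + b loop with 16-byte aligned buffers and n a multiple of 4:
//   for(int i=0; i<n; i+=4) {
//     Packet4f pa = pload<Packet4f>(a+i);                  // aligned load of 4 floats
//     Packet4f pb = pload<Packet4f>(b+i);
//     pstore(r+i, padd(pmul(pset1<Packet4f>(s), pa), pb)); // s*a + b, aligned store
//   }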
#if defined(_MSC_VER) && (_MSC_VER==1500)
// Workaround MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode
// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)).
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(_mm_setr_epi32(0,0,0,0), a);
}

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}
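// Note on the SSE2 path of pmul<Packet4i>: _mm_mul_epu32 only multiplies the even-indexed
// 32-bit lanes (yielding 64-bit products), so the odd lanes are handled by a second multiply
// on operands swizzled by (1,0,3,2); the low 32 bits of both partial results are then
// interleaved back into a single Packet4i.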

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by SSE");
  return pset1<Packet4i>(0);
}

// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
  // after some bench, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
}

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
  // after some bench, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
}

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*    from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }
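// pload requires 'from' to be 16-byte aligned; the ploadu variants below accept arbitrarily
// aligned pointers at the cost of a potentially slower load sequence.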
#if defined(_MSC_VER)
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (_MSC_VER==1600)
    // NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps
    // (i.e., it does not generate an unaligned load!!).
    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
    // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate bad code when doing so.
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
#else
// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
// require casting pointers to incompatible pointer types and lead to invalid code
// because of the strict aliasing rule. The "dummy" stuff is required to enforce
// a correct instruction dependency.
// TODO: do the same for MSVC (ICC is compatible)
// NOTE: with the code below, MSVC's compiler crashes!

#if defined(__GNUC__) && defined(__i386__)
  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#elif defined(__clang__)
  // bug 201: Segfaults in _mm_loadh_pd with clang 2.8
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#else
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_ps(from);
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_ps(res);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_pd(from);
#else
  __m128d res;
  res = _mm_load_sd(from);
  res = _mm_loadh_pd(res,from+1);
  return res;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_si128(res);
#endif
}
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
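// ploaddup loads size/2 scalars and duplicates each of them, e.g. for Packet4f the result is
// {from[0], from[0], from[1], from[1]} (and simply {from[0], from[0]} for Packet2d).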

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
  EIGEN_DEBUG_UNALIGNED_STORE
  _mm_storel_pd((to), from);
  _mm_storeh_pd((to+1), from);
}
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*     to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }
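// The float and int pstoreu variants above simply reinterpret the packet as a Packet2d and
// forward to the double version; the bit-casts do not change the stored bytes.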

// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, vec4f_swizzle1(pa,0,0,0,0));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, vec2d_swizzle1(pa,0,0));
}

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }

#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }


template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
  #ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
  #else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
  #endif
}

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}

#ifdef EIGEN_VECTORIZE_SSE3
// TODO implement SSE2 versions as well as integer versions
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}
// SSSE3 version:
// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
// {
//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
// }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp0 = _mm_hadd_ps(a,a);
  return pfirst(_mm_hadd_ps(tmp0, tmp0));
}
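// Two successive horizontal adds reduce the four lanes to the total sum: the first _mm_hadd_ps
// yields {a0+a1, a2+a3, a0+a1, a2+a3}, and the second puts the full sum in every lane.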
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }

// SSSE3 version:
// EIGEN_STRONG_INLINE int predux(const Packet4i& a)
// {
//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
// }
#else
// SSE2 versions
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif  // SSE3

template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}

// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}
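// SSE2 has no packed 32x32->32 bit multiply (that is SSE4.1's _mm_mullo_epi32), hence the
// scalar fallback above: the packet is spilled to an aligned buffer and reduced in scalar code.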

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
}

#if (defined __GNUC__)
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif
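// palign_impl<Offset,Packet>::run(first, second) shifts the concatenation (first, second) left
// by Offset elements and writes the first resulting packet back into 'first', i.e.
// first = {first[Offset], ..., first[size-1], second[0], ..., second[Offset-1]}; it is used by
// Eigen's generic palign() helper to realign packet streams.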

#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_SSE_H