/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include "SkTypes.h"

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    #include <smmintrin.h>
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    #include <tmmintrin.h>
#else
    #include <emmintrin.h>
#endif

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.

namespace {  // NOLINT(google-build-namespaces)

// Emulate _mm_floor_ps() with SSE2:
//   - roundtrip through integers via truncation
//   - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31.
// Seems plenty big.
AI static __m128 emulate_mm_floor_ps(__m128 v) {
    __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
    __m128 too_big = _mm_cmpgt_ps(roundtrip, v);
    return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
}

template <>
class SkNx<2, float> {
public:
    AI SkNx(const __m128& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    AI static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
        const float* m = (const float*)ptr;
        *x = SkNx{m[0], m[2]};
        *y = SkNx{m[1], m[3]};
    }

    AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {
        auto vals = _mm_unpacklo_ps(a.fVec, b.fVec);
        _mm_storeu_ps((float*)dst, vals);
    }

    AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {
        auto lo = _mm_setr_ps(a[0], b[0], c[0], a[1]),
             hi = _mm_setr_ps(b[1], c[1],    0,    0);
        _mm_storeu_ps((float*)dst, lo);
        _mm_storel_pi(((__m64*)dst) + 2, hi);
    }

    AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
        auto lo = _mm_setr_ps(a[0], b[0], c[0], d[0]),
             hi = _mm_setr_ps(a[1], b[1], c[1], d[1]);
        _mm_storeu_ps((float*)dst, lo);
        _mm_storeu_ps(((float*)dst) + 4, hi);
    }

    AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    AI SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    AI SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    AI SkNx floor() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(fVec);
#else
        return emulate_mm_floor_ps(fVec);
#endif
    }

    AI SkNx   sqrt() const { return _mm_sqrt_ps (fVec); }
    AI SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    AI SkNx invert() const { return _mm_rcp_ps(fVec); }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

    AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    AI bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
#else
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
#endif
    }

    __m128 fVec;
};
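// Usage sketch (illustrative only, not part of the API): floor() matches
// _mm_floor_ps() within the domain noted above, on both code paths, e.g.
//   Sk2f v(1.5f, -1.5f);
//   Sk2f f = v.floor();   // { 1.0f, -2.0f }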
template <>
class SkNx<4, float> {
public:
    AI SkNx(const __m128& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
    AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
    AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
        SkNx lo = SkNx::Load((const float*)ptr+0),
             hi = SkNx::Load((const float*)ptr+4);
        *x = SkNx{lo[0], lo[2], hi[0], hi[2]};
        *y = SkNx{lo[1], lo[3], hi[1], hi[3]};
    }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        __m128 v0 = _mm_loadu_ps(((float*)ptr) +  0),
               v1 = _mm_loadu_ps(((float*)ptr) +  4),
               v2 = _mm_loadu_ps(((float*)ptr) +  8),
               v3 = _mm_loadu_ps(((float*)ptr) + 12);
        _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
        *r = v0;
        *g = v1;
        *b = v2;
        *a = v3;
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m128 v0 = r.fVec,
               v1 = g.fVec,
               v2 = b.fVec,
               v3 = a.fVec;
        _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
        _mm_storeu_ps(((float*) dst) +  0, v0);
        _mm_storeu_ps(((float*) dst) +  4, v1);
        _mm_storeu_ps(((float*) dst) +  8, v2);
        _mm_storeu_ps(((float*) dst) + 12, v3);
    }

    AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    AI SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    AI SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    AI SkNx floor() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(fVec);
#else
        return emulate_mm_floor_ps(fVec);
#endif
    }

    AI SkNx   sqrt() const { return _mm_sqrt_ps (fVec); }
    AI SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    AI SkNx invert() const { return _mm_rcp_ps(fVec); }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    AI float min() const {
        SkNx min = Min(*this, _mm_shuffle_ps(fVec, fVec, _MM_SHUFFLE(2,3,0,1)));
        min = Min(min, _mm_shuffle_ps(min.fVec, min.fVec, _MM_SHUFFLE(0,1,2,3)));
        return min[0];
    }

    AI float max() const {
        SkNx max = Max(*this, _mm_shuffle_ps(fVec, fVec, _MM_SHUFFLE(2,3,0,1)));
        max = Max(max, _mm_shuffle_ps(max.fVec, max.fVec, _MM_SHUFFLE(0,1,2,3)));
        return max[0];
    }

    AI bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    AI bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
#else
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
#endif
    }

    __m128 fVec;
};
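// Usage sketch (illustrative only): Load4()/Store4() transpose between
// interleaved RGBA floats and one vector per channel:
//   float px[16];                       // hypothetical buffer: r0 g0 b0 a0  r1 g1 b1 a1  ...
//   Sk4f r,g,b,a;
//   Sk4f::Load4(px, &r, &g, &b, &a);    // r = { r0, r1, r2, r3 }, etc.
//   Sk4f::Store4(px, r, g, b, a);       // interleaves the channels back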
AI static __m128i mullo32(__m128i a, __m128i b) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    return _mm_mullo_epi32(a, b);
#else
    __m128i mul20 = _mm_mul_epu32(a, b),
            mul31 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));
    return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                              _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
#endif
}
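// Note on the SSE2 path of mullo32() (explanatory note, added for clarity):
// _mm_mul_epu32 forms 64-bit products from lanes 0 and 2 only, so shifting both
// inputs right by 4 bytes exposes lanes 1 and 3 to a second multiply; the two
// shuffles then gather the low 32-bit halves of all four products back into lane
// order. The low 32 bits of a product are the same for signed and unsigned
// inputs, so SkNx<4, int32_t> below can share it, e.g.
//   Sk4i(3,-4,5,6) * Sk4i(2)   // == { 6, -8, 10, 12 } on either path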
template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    AI SkNx operator  < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
    AI SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }

    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
#endif
    }

    AI SkNx abs() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
        return _mm_abs_epi32(fVec);
#else
        SkNx mask = (*this) >> 31;
        return (mask ^ (*this)) - mask;
#endif
    }

    AI static SkNx Min(const SkNx& x, const SkNx& y) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_min_epi32(x.fVec, y.fVec);
#else
        return (x < y).thenElse(x, y);
#endif
    }

    AI static SkNx Max(const SkNx& x, const SkNx& y) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_max_epi32(x.fVec, y.fVec);
#else
        return (x > y).thenElse(x, y);
#endif
    }

    __m128i fVec;
};

template <>
class SkNx<2, uint32_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    AI SkNx(uint32_t a, uint32_t b) : fVec(_mm_setr_epi32(a,b,0,0)) {}

    AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return (*this == o) ^ 0xffffffff; }
    // operator < and > take a little extra fiddling to make them work for unsigned
    // ints (see the sketch after this class).

    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128i v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&1];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
#endif
    }

    AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(fVec) & 0xff); }

    __m128i fVec;
};
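// A sketch of that "extra fiddling" for the unsigned classes here and below
// (hypothetical helper, not part of this file): flipping the sign bit of both
// operands lets the signed compare order the values as unsigned:
//   AI static __m128i cmplt_epu32(__m128i a, __m128i b) {
//       const __m128i flip = _mm_set1_epi32((int)0x80000000);
//       return _mm_cmplt_epi32(_mm_xor_si128(a, flip), _mm_xor_si128(b, flip));
//   }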
template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return (*this == o) ^ 0xffffffff; }

    // operator < and > take a little extra fiddling to make them work for unsigned ints.

    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
#endif
    }

    AI SkNx mulHi(SkNx m) const {
        SkNx v20{_mm_mul_epu32(m.fVec, fVec)};
        SkNx v31{_mm_mul_epu32(_mm_srli_si128(m.fVec, 4), _mm_srli_si128(fVec, 4))};

        return SkNx{v20[1], v31[1], v20[3], v31[3]};
    }

    __m128i fVec;
};
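// Note on mulHi() above (explanatory note, added for clarity): each
// _mm_mul_epu32 yields two full 64-bit products, so v20 holds the products of
// lanes 0 and 2 and v31 those of lanes 1 and 3; indexing the odd 32-bit halves
// extracts the high words. Usage sketch:
//   Sk4u(0x80000000u).mulHi(Sk4u(4))   // == { 2, 2, 2, 2 }: (2^31 * 4) >> 32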
template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d)
        : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
                hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
        __m128i even = _mm_unpacklo_epi16(lo, hi),   // r0 r2 g0 g2 b0 b2 a0 a2
                 odd = _mm_unpackhi_epi16(lo, hi);   // r1 r3 ...
        __m128i rg = _mm_unpacklo_epi16(even, odd),  // r0 r1 r2 r3 g0 g1 g2 g3
                ba = _mm_unpackhi_epi16(even, odd);  // b0 b1 ... a0 a1 ...
        *r = rg;
        *g = _mm_srli_si128(rg, 8);
        *b = ba;
        *a = _mm_srli_si128(ba, 8);
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        // The idea here is to get 4 vectors that are R G B _ _ _ _ _.
        // The second load is at a funny location to make sure we don't read past
        // the bounds of memory.  This is fine, we just need to shift it a little bit.
        const uint8_t* ptr8 = (const uint8_t*) ptr;
        __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0));
        __m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
        __m128i rgb2 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 4*2)), 2*2);
        __m128i rgb3 = _mm_srli_si128(rgb2, 3*2);

        __m128i rrggbb01 = _mm_unpacklo_epi16(rgb0, rgb1);
        __m128i rrggbb23 = _mm_unpacklo_epi16(rgb2, rgb3);
        *r = _mm_unpacklo_epi32(rrggbb01, rrggbb23);
        *g = _mm_srli_si128(r->fVec, 4*2);
        *b = _mm_unpackhi_epi32(rrggbb01, rrggbb23);
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
        __m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
        __m128i lo = _mm_unpacklo_epi32(rg, ba);
        __m128i hi = _mm_unpackhi_epi32(rg, ba);
        _mm_storeu_si128(((__m128i*) dst) + 0, lo);
        _mm_storeu_si128(((__m128i*) dst) + 1, hi);
    }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};
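// Note on Load3() above (explanatory note, added for clarity): four RGB triplets
// occupy only 24 bytes, so a 16-byte load of the last triplet can't start at r2's
// natural offset (byte 12) without reading past a 24-byte buffer. Loading at
// ptr8 + 4*2 stays in bounds (bytes 8..23), and the extra 2*2-byte shift realigns
// the data so rgb2 begins at r2. The 8-wide Load3() below plays the same trick
// with its final load.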
template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h)
        : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        __m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
                _23 = _mm_loadu_si128(((__m128i*)ptr) + 1),
                _45 = _mm_loadu_si128(((__m128i*)ptr) + 2),
                _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);

        __m128i _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
                _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
                _46 = _mm_unpacklo_epi16(_45, _67),
                _57 = _mm_unpackhi_epi16(_45, _67);

        __m128i rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
                ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
                rg4567 = _mm_unpacklo_epi16(_46, _57),
                ba4567 = _mm_unpackhi_epi16(_46, _57);

        *r = _mm_unpacklo_epi64(rg0123, rg4567);
        *g = _mm_unpackhi_epi64(rg0123, rg4567);
        *b = _mm_unpacklo_epi64(ba0123, ba4567);
        *a = _mm_unpackhi_epi64(ba0123, ba4567);
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        const uint8_t* ptr8 = (const uint8_t*) ptr;
        __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 +  0*2));
        __m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
        __m128i rgb2 = _mm_loadu_si128((const __m128i*) (ptr8 +  6*2));
        __m128i rgb3 = _mm_srli_si128(rgb2, 3*2);
        __m128i rgb4 = _mm_loadu_si128((const __m128i*) (ptr8 + 12*2));
        __m128i rgb5 = _mm_srli_si128(rgb4, 3*2);
        __m128i rgb6 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 16*2)), 2*2);
        __m128i rgb7 = _mm_srli_si128(rgb6, 3*2);

        __m128i rgb01 = _mm_unpacklo_epi16(rgb0, rgb1);
        __m128i rgb23 = _mm_unpacklo_epi16(rgb2, rgb3);
        __m128i rgb45 = _mm_unpacklo_epi16(rgb4, rgb5);
        __m128i rgb67 = _mm_unpacklo_epi16(rgb6, rgb7);

        __m128i rg03 = _mm_unpacklo_epi32(rgb01, rgb23);
        __m128i bx03 = _mm_unpackhi_epi32(rgb01, rgb23);
        __m128i rg47 = _mm_unpacklo_epi32(rgb45, rgb67);
        __m128i bx47 = _mm_unpackhi_epi32(rgb45, rgb67);

        *r = _mm_unpacklo_epi64(rg03, rg47);
        *g = _mm_unpackhi_epi64(rg03, rg47);
        *b = _mm_unpacklo_epi64(bx03, bx47);
    }
    AI static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec),  // r0 g0 r1 g1 r2 g2 r3 g3
                rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec),  // r4 g4 r5 g5 r6 g6 r7 g7
                ba0123 = _mm_unpacklo_epi16(b.fVec, a.fVec),
                ba4567 = _mm_unpackhi_epi16(b.fVec, a.fVec);

        _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg0123, ba0123));
        _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg0123, ba0123));
        _mm_storeu_si128((__m128i*)ptr + 2, _mm_unpacklo_epi32(rg4567, ba4567));
        _mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567));
    }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
        const uint16_t top = 0x8000;  // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }

    AI SkNx mulHi(const SkNx& m) const {
        return _mm_mulhi_epu16(fVec, m.fVec);
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};
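// Note on Min() above (explanatory note, added for clarity): the 0x8000 bias is
// applied with byte-wise _mm_add_epi8/_mm_sub_epi8. That's safe because the bias
// only touches the top byte of each 16-bit lane, and adding or subtracting 0x80
// to a byte mod 256 simply flips its top bit, so no carry or borrow can cross
// into the neighboring byte.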
template <>
class SkNx<4, uint8_t> {
public:
    AI SkNx() {}
    AI SkNx(const __m128i& vec) : fVec(vec) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}

    AI static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    AI void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<8, uint8_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, 0,0,0,0, 0,0,0,0)) {}

    AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&7];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};
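// Usage sketch (illustrative only): comparisons yield 0x00/0xff per-lane masks
// that feed straight into thenElse(), e.g. a per-byte lower clamp:
//   Sk8b lo(16);
//   Sk8b v = Sk8b::Load(src);                 // src: any 8 bytes (hypothetical)
//   Sk8b clamped = (v < lo).thenElse(lo, v);  // per-byte max(v, 16)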
template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template <> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    // TODO: This seems to be causing code generation problems.  Investigate?
    return _mm_packus_epi32(src.fVec);
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
    __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
    return _mm_packs_epi32(x,x);
#endif
}
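// Note on the _mm_shuffle_epi8 paths above and below (explanatory note, added
// for clarity): pshufb selects source bytes by index, and any control byte with
// its top bit set (here ~0) zeroes the corresponding destination byte, so a
// single shuffle both narrows the lanes and clears the rest of the register.
// The SSE2 fallbacks get the same effect from pack instructions instead.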
template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
    auto _16 = _mm_packus_epi16(_32, _32);
    return _mm_packus_epi16(_16, _16);
#endif
}

template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
    return _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
    return SkNx_cast<uint32_t>(src).fVec;
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return _mm_cvtepi32_ps(SkNx_cast<int32_t>(src).fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
    Sk4i lo, hi;
    SkNx_split(src, &lo, &hi);

    auto t = _mm_packs_epi32(lo.fVec, hi.fVec);
    return _mm_packus_epi16(t, t);
}

template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
    return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
    return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return src.fVec;
}

AI static Sk4i Sk4f_round(const Sk4f& x) {
    return _mm_cvtps_epi32(x.fVec);
}

}  // namespace

#endif//SkNx_sse_DEFINED