/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
#endif

typedef double __v4df __attribute__ ((__vector_size__ (32)));
typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef long long __v4di __attribute__ ((__vector_size__ (32)));
typedef int __v8si __attribute__ ((__vector_size__ (32)));
typedef short __v16hi __attribute__ ((__vector_size__ (32)));
typedef char __v32qi __attribute__ ((__vector_size__ (32)));

typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));

/* Arithmetic */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_add_pd(__m256d __a, __m256d __b)
{
  return __a+__b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_add_ps(__m256 __a, __m256 __b)
{
  return __a+__b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_sub_pd(__m256d __a, __m256d __b)
{
  return __a-__b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_sub_ps(__m256 __a, __m256 __b)
{
  return __a-__b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_addsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_addsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_div_pd(__m256d __a, __m256d __b)
{
  return __a / __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_div_ps(__m256 __a, __m256 __b)
{
  return __a / __b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_max_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_max_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_min_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_min_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_mul_pd(__m256d __a, __m256d __b)
{
  return __a * __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_mul_ps(__m256 __a, __m256 __b)
{
  return __a * __b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_sqrt_pd(__m256d __a)
{
  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_sqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_rsqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_rcp_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
}

#define _mm256_round_pd(V, M) __extension__ ({ \
    __m256d __V = (V); \
    (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })

#define _mm256_round_ps(V, M) __extension__ ({ \
  __m256 __V = (V); \
  (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })

#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)

/* Logical */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_and_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a & (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_and_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a & (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
  return (__m256d)(~(__v4di)__a & (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
  return (__m256)(~(__v8si)__a & (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_or_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a | (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_or_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a | (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_xor_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a ^ (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_xor_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a ^ (__v8si)__b);
}

/* Horizontal arithmetic */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_hadd_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_hadd_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_hsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_hsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
}

/* Vector permutations */
static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_permutevar_pd(__m128d __a, __m128i __c)
{
  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_permutevar_pd(__m256d __a, __m256i __c)
{
  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_permutevar_ps(__m128 __a, __m128i __c)
{
  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_permutevar_ps(__m256 __a, __m256i __c)
{
  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}

#define _mm_permute_pd(A, C) __extension__ ({ \
  __m128d __A = (A); \
  (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1); })

#define _mm256_permute_pd(A, C) __extension__ ({ \
  __m256d __A = (A); \
  (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1, \
                                   2 + (((C) & 0x4) >> 2), \
                                   2 + (((C) & 0x8) >> 3)); })

#define _mm_permute_ps(A, C) __extension__ ({ \
  __m128 __A = (A); \
  (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
                                  (C) & 0x3, ((C) & 0xc) >> 2, \
                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })

#define _mm256_permute_ps(A, C) __extension__ ({ \
  __m256 __A = (A); \
  (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
                                  (C) & 0x3, ((C) & 0xc) >> 2, \
                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
                                  4 + (((C) & 0x03) >> 0), \
                                  4 + (((C) & 0x0c) >> 2), \
                                  4 + (((C) & 0x30) >> 4), \
                                  4 + (((C) & 0xc0) >> 6)); })

#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
  __m256d __V1 = (V1); \
  __m256d __V2 = (V2); \
  (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__V1, (__v4df)__V2, (M)); })

#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m256 __V2 = (V2); \
  (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
  __m256i __V1 = (V1); \
  __m256i __V2 = (V2); \
  (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__V1, (__v8si)__V2, (M)); })

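/* Usage sketch (illustrative only): the immediate for _mm256_permute2f128_*
 * selects one 128-bit lane per half of the result.  Bits [1:0] pick the low
 * half (0/1 = low/high lane of V1, 2/3 = low/high lane of V2), bits [5:4] do
 * the same for the high half, and bits 3 and 7 zero the corresponding half.
 * Assuming hypothetical inputs __x and __y:
 *
 *   __m256d __lo_lanes = _mm256_permute2f128_pd(__x, __y, 0x20);
 *     // { low lane of __x, low lane of __y }
 *   __m256d __swapped  = _mm256_permute2f128_pd(__x, __x, 0x01);
 *     // { high lane of __x, low lane of __x }
 */
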
/* Vector Blend */
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
  __m256d __V1 = (V1); \
  __m256d __V2 = (V2); \
  (__m256d)__builtin_ia32_blendpd256((__v4df)__V1, (__v4df)__V2, (M)); })

#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m256 __V2 = (V2); \
  (__m256)__builtin_ia32_blendps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
{
  return (__m256d)__builtin_ia32_blendvpd256(
    (__v4df)__a, (__v4df)__b, (__v4df)__c);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
{
  return (__m256)__builtin_ia32_blendvps256(
    (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
}

/* Vector Dot Product */
#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m256 __V2 = (V2); \
  (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

/* Vector shuffle */
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
  __m256 __a = (a); \
  __m256 __b = (b); \
  (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
                                  (mask) & 0x3, ((mask) & 0xc) >> 2, \
                                  (((mask) & 0x30) >> 4) + 8, \
                                  (((mask) & 0xc0) >> 6) + 8, \
                                  ((mask) & 0x3) + 4, \
                                  (((mask) & 0xc) >> 2) + 4, \
                                  (((mask) & 0x30) >> 4) + 12, \
                                  (((mask) & 0xc0) >> 6) + 12); })

#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
  __m256d __a = (a); \
  __m256d __b = (b); \
  (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
                                   (mask) & 0x1, \
                                   (((mask) & 0x2) >> 1) + 4, \
                                   (((mask) & 0x4) >> 2) + 2, \
                                   (((mask) & 0x8) >> 3) + 6); })

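/* Usage sketch (illustrative only): _mm256_shuffle_ps applies the same
 * 2-bit-per-element selector to both 128-bit lanes; the two low elements of
 * each result lane come from the first operand, the two high elements from
 * the second.  Assuming hypothetical inputs __a = {a0..a7}, __b = {b0..b7}:
 *
 *   __m256 __r = _mm256_shuffle_ps(__a, __b, _MM_SHUFFLE(3, 2, 1, 0));
 *     // __r = { a0, a1, b2, b3, a4, a5, b6, b7 }
 */
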
/* Compare */
#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling)  */
#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling)  */
#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling)  */
#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling)  */
#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling)  */
#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling)  */
#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling)  */
#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling)  */
#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling)  */
#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unord, signaling)  */
#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling)  */
#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling)  */
#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling)  */
#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling)  */
#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling)  */
#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling)  */
#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling)  */
#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling)  */
#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling)  */
#define _CMP_UNORD_S  0x13 /* Unordered (signaling)  */
#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling)  */
#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling)  */
#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unord, non-signaling)  */
#define _CMP_ORD_S    0x17 /* Ordered (signaling)  */
#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling)  */
#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unord, non-sign)  */
#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling)  */
#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling)  */
#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling)  */
#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling)  */
#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling)  */
#define _CMP_TRUE_US  0x1f /* True (unordered, signaling)  */

#define _mm_cmp_pd(a, b, c) __extension__ ({ \
  __m128d __a = (a); \
  __m128d __b = (b); \
  (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ps(a, b, c) __extension__ ({ \
  __m128 __a = (a); \
  __m128 __b = (b); \
  (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })

#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
  __m256d __a = (a); \
  __m256d __b = (b); \
  (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })

#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
  __m256 __a = (a); \
  __m256 __b = (b); \
  (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })

#define _mm_cmp_sd(a, b, c) __extension__ ({ \
  __m128d __a = (a); \
  __m128d __b = (b); \
  (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ss(a, b, c) __extension__ ({ \
  __m128 __a = (a); \
  __m128 __b = (b); \
  (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })

/* Vector extract */
#define _mm256_extractf128_pd(A, O) __extension__ ({ \
  __m256d __A = (A); \
  (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); })

#define _mm256_extractf128_ps(A, O) __extension__ ({ \
  __m256 __A = (A); \
  (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); })

#define _mm256_extractf128_si256(A, O) __extension__ ({ \
  __m256i __A = (A); \
  (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); })

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi32(__m256i __a, int const __imm)
{
  __v8si __b = (__v8si)__a;
  return __b[__imm];
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi16(__m256i __a, int const __imm)
{
  __v16hi __b = (__v16hi)__a;
  return __b[__imm];
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi8(__m256i __a, int const __imm)
{
  __v32qi __b = (__v32qi)__a;
  return __b[__imm];
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi64(__m256i __a, const int __imm)
{
  __v4di __b = (__v4di)__a;
  return __b[__imm];
}
#endif

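/* Usage sketch (illustrative only): the extractf128 macros above and the
 * insertf128 macros below move whole 128-bit lanes, which is the usual way to
 * mix 128-bit and 256-bit code.  Assuming a hypothetical input __v:
 *
 *   __m128d __hi = _mm256_extractf128_pd(__v, 1);      // upper two doubles
 *   __m128d __lo = _mm256_extractf128_pd(__v, 0);      // lower two doubles
 *   __m256d __swapped = _mm256_insertf128_pd(
 *       _mm256_castpd128_pd256(__hi), __lo, 1);        // lanes exchanged
 */
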
/* Vector insert */
#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
  __m256d __V1 = (V1); \
  __m128d __V2 = (V2); \
  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })

#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m128 __V2 = (V2); \
  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })

#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
  __m256i __V1 = (V1); \
  __m128i __V2 = (V2); \
  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
{
  __v8si __c = (__v8si)__a;
  __c[__imm & 7] = __b;
  return (__m256i)__c;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
{
  __v16hi __c = (__v16hi)__a;
  __c[__imm & 15] = __b;
  return (__m256i)__c;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
{
  __v32qi __c = (__v32qi)__a;
  __c[__imm & 31] = __b;
  return (__m256i)__c;
}

#ifdef __x86_64__
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
{
  __v4di __c = (__v4di)__a;
  __c[__imm & 3] = __b;
  return (__m256i)__c;
}
#endif

/* Conversion */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_cvtepi32_pd(__m128i __a)
{
  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_cvtepi32_ps(__m256i __a)
{
  return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm256_cvtpd_ps(__m256d __a)
{
  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_cvtps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_cvtps_pd(__m128 __a)
{
  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_cvttpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_cvtpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_cvttps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
}

/* Vector replicate */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_movehdup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_moveldup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_movedup_pd(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
}

/* Unpack and Interleave */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_unpackhi_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_unpacklo_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_unpackhi_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_unpacklo_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}

/* Bit Test */
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testz_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testz_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
}

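/* Usage sketch (illustrative only): the test helpers above reduce a pair of
 * vectors to a flag.  testz returns 1 when (a & b) is all zeros, testc
 * returns 1 when (~a & b) is all zeros, and testnzc returns 1 when neither
 * holds.  Assuming hypothetical inputs __x and __y:
 *
 *   __m256d __eq = _mm256_cmp_pd(__x, __y, _CMP_EQ_OQ);
 *   int __all_eq = _mm256_testc_si256(_mm256_castpd_si256(__eq),
 *                                     _mm256_set1_epi64x(-1LL));
 *   int __any_eq = !_mm256_testz_si256(_mm256_castpd_si256(__eq),
 *                                      _mm256_castpd_si256(__eq));
 */
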
/* Vector extract sign mask */
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_movemask_pd(__m256d __a)
{
  return __builtin_ia32_movmskpd256((__v4df)__a);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_movemask_ps(__m256 __a)
{
  return __builtin_ia32_movmskps256((__v8sf)__a);
}

/* Vector zero */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_zeroall(void)
{
  __builtin_ia32_vzeroall();
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_zeroupper(void)
{
  __builtin_ia32_vzeroupper();
}

/* Vector load with broadcast */
static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_broadcast_ss(float const *__a)
{
  return (__m128)__builtin_ia32_vbroadcastss(__a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_sd(double const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastsd256(__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_ss(float const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastss256(__a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_pd(__m128d const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_ps(__m128 const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
}

/* SIMD load ops */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_load_pd(double const *__p)
{
  return *(__m256d *)__p;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_load_ps(float const *__p)
{
  return *(__m256 *)__p;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_pd(double const *__p)
{
  struct __loadu_pd {
    __m256d __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_pd*)__p)->__v;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_ps(float const *__p)
{
  struct __loadu_ps {
    __m256 __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_ps*)__p)->__v;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_load_si256(__m256i const *__p)
{
  return *__p;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_si256(__m256i const *__p)
{
  struct __loadu_si256 {
    __m256i __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_si256*)__p)->__v;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_lddqu_si256(__m256i const *__p)
{
  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}

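/* Usage sketch (illustrative only): the _mm256_load_* forms above expect a
 * 32-byte-aligned pointer, while _mm256_loadu_* and _mm256_lddqu_si256 accept
 * any alignment.  Assuming a hypothetical buffer:
 *
 *   double __buf[4] __attribute__((aligned(32))) = { 0.0, 1.0, 2.0, 3.0 };
 *   __m256d __x = _mm256_load_pd(__buf);    // alignment required
 *   __m256d __y = _mm256_loadu_pd(__buf);   // no alignment assumed
 */
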
/* SIMD store ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_pd(double *__p, __m256d __a)
{
  *(__m256d *)__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_ps(float *__p, __m256 __a)
{
  *(__m256 *)__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_pd(double *__p, __m256d __a)
{
  __builtin_ia32_storeupd256(__p, (__v4df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_ps(float *__p, __m256 __a)
{
  __builtin_ia32_storeups256(__p, (__v8sf)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_si256(__m256i *__p, __m256i __a)
{
  *__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
  __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
}

/* Conditional load ops */
static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_maskload_pd(double const *__p, __m128d __m)
{
  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2df)__m);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_maskload_pd(double const *__p, __m256d __m)
{
  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
                                               (__v4df)__m);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_maskload_ps(float const *__p, __m128 __m)
{
  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4sf)__m);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_maskload_ps(float const *__p, __m256 __m)
{
  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8sf)__m);
}

/* Conditional store ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_maskstore_ps(float *__p, __m256 __m, __m256 __a)
{
  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8sf)__m, (__v8sf)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm_maskstore_pd(double *__p, __m128d __m, __m128d __a)
{
  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2df)__m, (__v2df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_maskstore_pd(double *__p, __m256d __m, __m256d __a)
{
  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4df)__m, (__v4df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm_maskstore_ps(float *__p, __m128 __m, __m128 __a)
{
  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4sf)__m, (__v4sf)__a);
}

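/* Usage sketch (illustrative only): the maskload/maskstore operations above
 * use only the sign bit of each mask element, so an all-ones element selects
 * a lane and a zero element suppresses it.  A typical use is handling a loop
 * remainder of __n < 4 doubles, assuming hypothetical __src, __dst and a
 * hypothetical mask table:
 *
 *   static const long long __tails[4][4] = {
 *     {  0,  0,  0,  0 }, { -1,  0,  0,  0 },
 *     { -1, -1,  0,  0 }, { -1, -1, -1,  0 } };
 *   __m256d __m = _mm256_castsi256_pd(
 *       _mm256_loadu_si256((const __m256i *)__tails[__n]));
 *   _mm256_maskstore_pd(__dst, __m, _mm256_maskload_pd(__src, __m));
 */
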
/* Cacheability support ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_si256(__m256i *__a, __m256i __b)
{
  __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_pd(double *__a, __m256d __b)
{
  __builtin_ia32_movntpd256(__a, (__v4df)__b);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_ps(float *__p, __m256 __a)
{
  __builtin_ia32_movntps256(__p, (__v8sf)__a);
}

/* Create vectors */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __d, __c, __b, __a };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_set_ps(float __a, float __b, float __c, float __d,
              float __e, float __f, float __g, float __h)
{
  return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
                 int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
                 short __w11, short __w10, short __w09, short __w08,
                 short __w07, short __w06, short __w05, short __w04,
                 short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
    __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
                char __b27, char __b26, char __b25, char __b24,
                char __b23, char __b22, char __b21, char __b20,
                char __b19, char __b18, char __b17, char __b16,
                char __b15, char __b14, char __b13, char __b12,
                char __b11, char __b10, char __b09, char __b08,
                char __b07, char __b06, char __b05, char __b04,
                char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
  };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __d, __c, __b, __a };
}

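/* Usage sketch (illustrative only): the _mm256_set_* constructors above take
 * arguments from the highest element down to element 0, while the
 * _mm256_setr_* forms below take them in memory order:
 *
 *   __m256d __x = _mm256_set_pd(3.0, 2.0, 1.0, 0.0);   // element 0 is 0.0
 *   __m256d __y = _mm256_setr_pd(0.0, 1.0, 2.0, 3.0);  // same vector
 */
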
/* Create vectors with elements in reverse order */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __a, __b, __c, __d };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_setr_ps(float __a, float __b, float __c, float __d,
               float __e, float __f, float __g, float __h)
{
  return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
                  int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
                  short __w11, short __w10, short __w09, short __w08,
                  short __w07, short __w06, short __w05, short __w04,
                  short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
    __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
                 char __b27, char __b26, char __b25, char __b24,
                 char __b23, char __b22, char __b21, char __b20,
                 char __b19, char __b18, char __b17, char __b16,
                 char __b15, char __b14, char __b13, char __b12,
                 char __b11, char __b10, char __b09, char __b08,
                 char __b07, char __b06, char __b05, char __b04,
                 char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
    __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
    __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
    __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __a, __b, __c, __d };
}

/* Create vectors with repeated elements */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_set1_pd(double __w)
{
  return (__m256d){ __w, __w, __w, __w };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_set1_ps(float __w)
{
  return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi32(int __i)
{
  return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi16(short __w)
{
  return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi8(char __b)
{
  return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi64x(long long __q)
{
  return (__m256i)(__v4di){ __q, __q, __q, __q };
}

/* Create zeroed vectors */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_pd(void)
{
  return (__m256d){ 0, 0, 0, 0 };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_ps(void)
{
  return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_si256(void)
{
  return (__m256i){ 0LL, 0LL, 0LL, 0LL };
}

/* Cast between vector types */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castpd_ps(__m256d __in)
{
  return (__m256)__in;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castpd_si256(__m256d __in)
{
  return (__m256i)__in;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castps_pd(__m256 __in)
{
  return (__m256d)__in;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castps_si256(__m256 __in)
{
  return (__m256i)__in;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_ps(__m256i __in)
{
  return (__m256)__in;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_pd(__m256i __in)
{
  return (__m256d)__in;
}

static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm256_castpd256_pd128(__m256d __in)
{
  return __builtin_shufflevector(__in, __in, 0, 1);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm256_castps256_ps128(__m256 __in)
{
  return __builtin_shufflevector(__in, __in, 0, 1, 2, 3);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_si128(__m256i __in)
{
  return __builtin_shufflevector(__in, __in, 0, 1);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castpd128_pd256(__m128d __in)
{
  __m128d __zero = _mm_setzero_pd();
  return __builtin_shufflevector(__in, __zero, 0, 1, 2, 2);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castps128_ps256(__m128 __in)
{
  __m128 __zero = _mm_setzero_ps();
  return __builtin_shufflevector(__in, __zero, 0, 1, 2, 3, 4, 4, 4, 4);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castsi128_si256(__m128i __in)
{
  __m128i __zero = _mm_setzero_si128();
  return __builtin_shufflevector(__in, __zero, 0, 1, 2, 2);
}

/* SIMD load ops (unaligned) */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
  struct __loadu_ps {
    __m128 __v;
  } __attribute__((__packed__, __may_alias__));

  __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
  return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
  struct __loadu_pd {
    __m128d __v;
  } __attribute__((__packed__, __may_alias__));

  __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
  return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
{
  struct __loadu_si128 {
    __m128i __v;
  } __attribute__((packed, may_alias));
  __m256i __v256 = _mm256_castsi128_si256(
    ((struct __loadu_si128*)__addr_lo)->__v);
  return _mm256_insertf128_si256(__v256,
                                 ((struct __loadu_si128*)__addr_hi)->__v, 1);
}

/* SIMD store ops (unaligned) */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
{
  __m128 __v128;

  __v128 = _mm256_castps256_ps128(__a);
  __builtin_ia32_storeups(__addr_lo, __v128);
  __v128 = _mm256_extractf128_ps(__a, 1);
  __builtin_ia32_storeups(__addr_hi, __v128);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
{
  __m128d __v128;

  __v128 = _mm256_castpd256_pd128(__a);
  __builtin_ia32_storeupd(__addr_lo, __v128);
  __v128 = _mm256_extractf128_pd(__a, 1);
  __builtin_ia32_storeupd(__addr_hi, __v128);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
{
  __m128i __v128;

  __v128 = _mm256_castsi256_si128(__a);
  __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
  __v128 = _mm256_extractf128_si256(__a, 1);
  __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
}
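
/* Usage sketch (illustrative only): a minimal scaled vector add built from
 * the intrinsics above, assuming hypothetical arrays whose length __n is a
 * multiple of 8 floats:
 *
 *   static void __axpy8(float *__y, const float *__x, float __a, int __n)
 *   {
 *     __m256 __va = _mm256_set1_ps(__a);
 *     for (int __i = 0; __i < __n; __i += 8) {
 *       __m256 __vx = _mm256_loadu_ps(__x + __i);
 *       __m256 __vy = _mm256_loadu_ps(__y + __i);
 *       _mm256_storeu_ps(__y + __i,
 *                        _mm256_add_ps(_mm256_mul_ps(__va, __vx), __vy));
 *     }
 *     _mm256_zeroupper();  // avoid AVX/SSE transition penalties afterwards
 *   }
 */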