/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVXINTRIN_H
#define __AVXINTRIN_H

typedef double __v4df __attribute__ ((__vector_size__ (32)));
typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef long long __v4di __attribute__ ((__vector_size__ (32)));
typedef int __v8si __attribute__ ((__vector_size__ (32)));
typedef short __v16hi __attribute__ ((__vector_size__ (32)));
typedef char __v32qi __attribute__ ((__vector_size__ (32)));

typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef double __m256d __attribute__ ((__vector_size__ (32)));
typedef long long __m256i __attribute__ ((__vector_size__ (32)));

/* Arithmetic */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_add_pd(__m256d __a, __m256d __b)
{
  return __a + __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_add_ps(__m256 __a, __m256 __b)
{
  return __a + __b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_sub_pd(__m256d __a, __m256d __b)
{
  return __a - __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_sub_ps(__m256 __a, __m256 __b)
{
  return __a - __b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_addsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_addsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_div_pd(__m256d __a, __m256d __b)
{
  return __a / __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_div_ps(__m256 __a, __m256 __b)
{
  return __a / __b;
}
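/* Usage sketch (illustrative, not part of the original header): each of the
 * arithmetic intrinsics above lowers to a single 256-bit AVX instruction.
 * Assuming <immintrin.h> has been included, averaging two float vectors looks
 * like this (the helper name is hypothetical):
 *
 *   static __m256 avg8(__m256 x, __m256 y)
 *   {
 *     __m256 sum = _mm256_add_ps(x, y);                 // vaddps
 *     return _mm256_mul_ps(sum, _mm256_set1_ps(0.5f));  // vmulps
 *   }
 */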
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_max_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_max_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_min_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_min_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_mul_pd(__m256d __a, __m256d __b)
{
  return __a * __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_mul_ps(__m256 __a, __m256 __b)
{
  return __a * __b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_sqrt_pd(__m256d __a)
{
  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_sqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_rsqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_rcp_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
}

#define _mm256_round_pd(V, M) __extension__ ({ \
    __m256d __V = (V); \
    (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })

#define _mm256_round_ps(V, M) __extension__ ({ \
    __m256 __V = (V); \
    (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })

#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)

/* Logical */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_and_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a & (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_and_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a & (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
  return (__m256d)(~(__v4di)__a & (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
  return (__m256)(~(__v8si)__a & (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_or_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a | (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_or_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a | (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_xor_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a ^ (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_xor_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a ^ (__v8si)__b);
}
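/* Usage sketch (illustrative): _mm256_andnot_pd computes ~a & b, so masking
 * off the sign bit of every lane gives a vectorized fabs(). The helper name
 * is hypothetical:
 *
 *   static __m256d fabs4(__m256d x)
 *   {
 *     return _mm256_andnot_pd(_mm256_set1_pd(-0.0), x);  // vandnpd
 *   }
 */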
/* Horizontal arithmetic */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_hadd_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_hadd_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_hsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_hsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
}

/* Vector permutations */
static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_permutevar_pd(__m128d __a, __m128i __c)
{
  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_permutevar_pd(__m256d __a, __m256i __c)
{
  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_permutevar_ps(__m128 __a, __m128i __c)
{
  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_permutevar_ps(__m256 __a, __m256i __c)
{
  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}

#define _mm_permute_pd(A, C) __extension__ ({ \
    __m128d __A = (A); \
    (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
                                     (C) & 0x1, ((C) & 0x2) >> 1); })

#define _mm256_permute_pd(A, C) __extension__ ({ \
    __m256d __A = (A); \
    (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
                                     (C) & 0x1, ((C) & 0x2) >> 1, \
                                     2 + (((C) & 0x4) >> 2), \
                                     2 + (((C) & 0x8) >> 3)); })

#define _mm_permute_ps(A, C) __extension__ ({ \
    __m128 __A = (A); \
    (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
                                    (C) & 0x3, ((C) & 0xc) >> 2, \
                                    ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })

#define _mm256_permute_ps(A, C) __extension__ ({ \
    __m256 __A = (A); \
    (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
                                    (C) & 0x3, ((C) & 0xc) >> 2, \
                                    ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
                                    4 + (((C) & 0x03) >> 0), \
                                    4 + (((C) & 0x0c) >> 2), \
                                    4 + (((C) & 0x30) >> 4), \
                                    4 + (((C) & 0xc0) >> 6)); })

#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
    __m256d __V1 = (V1); \
    __m256d __V2 = (V2); \
    (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__V1, (__v4df)__V2, (M)); })

#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
    __m256 __V1 = (V1); \
    __m256 __V2 = (V2); \
    (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
    __m256i __V1 = (V1); \
    __m256i __V2 = (V2); \
    (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__V1, (__v8si)__V2, (M)); })
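/* Usage sketch (illustrative): in the _mm256_permute2f128_* immediate, bits
 * 1:0 select which 128-bit half becomes the low lane and bits 5:4 which
 * becomes the high lane (values 2 and 3 select from V2; bit 3 of each nibble
 * zeroes that lane). Swapping the two halves of one vector, with a
 * hypothetical helper name:
 *
 *   static __m256d swap_lanes(__m256d x)
 *   {
 *     return _mm256_permute2f128_pd(x, x, 0x01);  // swap 128-bit halves
 *   }
 */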
/* Vector Blend */
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
    __m256d __V1 = (V1); \
    __m256d __V2 = (V2); \
    (__m256d)__builtin_ia32_blendpd256((__v4df)__V1, (__v4df)__V2, (M)); })

#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
    __m256 __V1 = (V1); \
    __m256 __V2 = (V2); \
    (__m256)__builtin_ia32_blendps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
{
  return (__m256d)__builtin_ia32_blendvpd256(
    (__v4df)__a, (__v4df)__b, (__v4df)__c);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
{
  return (__m256)__builtin_ia32_blendvps256(
    (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
}

/* Vector Dot Product */
#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
    __m256 __V1 = (V1); \
    __m256 __V2 = (V2); \
    (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

/* Vector shuffle */
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
    __m256 __a = (a); \
    __m256 __b = (b); \
    (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
        (mask) & 0x3, ((mask) & 0xc) >> 2, \
        (((mask) & 0x30) >> 4) + 8, (((mask) & 0xc0) >> 6) + 8, \
        ((mask) & 0x3) + 4, (((mask) & 0xc) >> 2) + 4, \
        (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); })

#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
    __m256d __a = (a); \
    __m256d __b = (b); \
    (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
                                     (mask) & 0x1, \
                                     (((mask) & 0x2) >> 1) + 4, \
                                     (((mask) & 0x4) >> 2) + 2, \
                                     (((mask) & 0x8) >> 3) + 6); })
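/* Usage sketch (illustrative): _mm256_shuffle_ps applies the classic 128-bit
 * SHUFPS selection pattern to both lanes at once, so _MM_SHUFFLE from the SSE
 * headers still builds the mask. Given two __m256 values a and b:
 *
 *   // In each 128-bit lane: elements 0,1 from a, elements 2,3 from b.
 *   __m256 r = _mm256_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
 */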
/* Compare */
#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling) */
#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling) */
#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling) */
#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling) */
#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling) */
#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling) */
#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling) */
#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling) */
#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling) */
#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unord, signaling) */
#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling) */
#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling) */
#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling) */
#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling) */
#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling) */
#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling) */
#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling) */
#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling) */
#define _CMP_UNORD_S  0x13 /* Unordered (signaling) */
#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling) */
#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling) */
#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unord, non-signaling) */
#define _CMP_ORD_S    0x17 /* Ordered (signaling) */
#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling) */
#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unord, non-sign) */
#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling) */
#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling) */
#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling) */
#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US  0x1f /* True (unordered, signaling) */

#define _mm_cmp_pd(a, b, c) __extension__ ({ \
    __m128d __a = (a); \
    __m128d __b = (b); \
    (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ps(a, b, c) __extension__ ({ \
    __m128 __a = (a); \
    __m128 __b = (b); \
    (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })

#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
    __m256d __a = (a); \
    __m256d __b = (b); \
    (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })

#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
    __m256 __a = (a); \
    __m256 __b = (b); \
    (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })

#define _mm_cmp_sd(a, b, c) __extension__ ({ \
    __m128d __a = (a); \
    __m128d __b = (b); \
    (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ss(a, b, c) __extension__ ({ \
    __m128 __a = (a); \
    __m128 __b = (b); \
    (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
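/* Usage sketch (illustrative): AVX folds the fixed SSE comparison ops into a
 * single predicated compare. The per-lane all-ones/all-zeros mask it produces
 * feeds blendv naturally. The helper name is hypothetical:
 *
 *   static __m256d min4(__m256d x, __m256d y)
 *   {
 *     __m256d mask = _mm256_cmp_pd(x, y, _CMP_LT_OQ);  // x < y ?
 *     return _mm256_blendv_pd(y, x, mask);             // pick x where true
 *   }
 */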
/* Vector extract */
#define _mm256_extractf128_pd(A, O) __extension__ ({ \
    __m256d __A = (A); \
    (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); })

#define _mm256_extractf128_ps(A, O) __extension__ ({ \
    __m256 __A = (A); \
    (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); })

#define _mm256_extractf128_si256(A, O) __extension__ ({ \
    __m256i __A = (A); \
    (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); })

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi32(__m256i __a, int const __imm)
{
  __v8si __b = (__v8si)__a;
  return __b[__imm];
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi16(__m256i __a, int const __imm)
{
  __v16hi __b = (__v16hi)__a;
  return __b[__imm];
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi8(__m256i __a, int const __imm)
{
  __v32qi __b = (__v32qi)__a;
  return __b[__imm];
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi64(__m256i __a, const int __imm)
{
  __v4di __b = (__v4di)__a;
  return __b[__imm];
}
#endif

/* Vector insert */
#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
    __m256d __V1 = (V1); \
    __m128d __V2 = (V2); \
    (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })

#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
    __m256 __V1 = (V1); \
    __m128 __V2 = (V2); \
    (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })

#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
    __m256i __V1 = (V1); \
    __m128i __V2 = (V2); \
    (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
{
  __v8si __c = (__v8si)__a;
  __c[__imm & 7] = __b;
  return (__m256i)__c;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
{
  __v16hi __c = (__v16hi)__a;
  __c[__imm & 15] = __b;
  return (__m256i)__c;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
{
  __v32qi __c = (__v32qi)__a;
  __c[__imm & 31] = __b;
  return (__m256i)__c;
}

#ifdef __x86_64__
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
{
  __v4di __c = (__v4di)__a;
  __c[__imm & 3] = __b;
  return (__m256i)__c;
}
#endif

/* Conversion */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_cvtepi32_pd(__m128i __a)
{
  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_cvtepi32_ps(__m256i __a)
{
  return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm256_cvtpd_ps(__m256d __a)
{
  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_cvtps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_cvtps_pd(__m128 __a)
{
  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_cvttpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_cvtpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_cvttps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
}
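/* Usage sketch (illustrative): _mm256_cvtps_epi32 rounds according to the
 * current rounding mode (round-to-nearest-even by default), while the
 * _mm256_cvttps_epi32 variant always truncates toward zero:
 *
 *   __m256 v = _mm256_set1_ps(2.7f);
 *   __m256i r = _mm256_cvtps_epi32(v);   // 3 in every lane (rounded)
 *   __m256i t = _mm256_cvttps_epi32(v);  // 2 in every lane (truncated)
 */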
/* Vector replicate */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_movehdup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_moveldup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_movedup_pd(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
}

/* Unpack and Interleave */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_unpackhi_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_unpacklo_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_unpackhi_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_unpacklo_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}

/* Bit Test */
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testz_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testz_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
}
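/* Usage sketch (illustrative): the VPTEST-style predicates above return
 * scalar flags rather than vectors. AVX1 has no 256-bit integer XOR, so a
 * bitwise equality test goes through the pd casts (the helper name is
 * hypothetical):
 *
 *   static int all_equal(__m256i x, __m256i y)
 *   {
 *     __m256i diff = _mm256_castpd_si256(
 *         _mm256_xor_pd(_mm256_castsi256_pd(x), _mm256_castsi256_pd(y)));
 *     return _mm256_testz_si256(diff, diff);  // 1 iff x == y bitwise
 *   }
 */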
/* Vector extract sign mask */
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_movemask_pd(__m256d __a)
{
  return __builtin_ia32_movmskpd256((__v4df)__a);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_movemask_ps(__m256 __a)
{
  return __builtin_ia32_movmskps256((__v8sf)__a);
}

/* Vector zero */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_zeroall(void)
{
  __builtin_ia32_vzeroall();
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_zeroupper(void)
{
  __builtin_ia32_vzeroupper();
}

/* Vector load with broadcast */
static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_broadcast_ss(float const *__a)
{
  return (__m128)__builtin_ia32_vbroadcastss(__a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_sd(double const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastsd256(__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_ss(float const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastss256(__a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_pd(__m128d const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_ps(__m128 const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
}

/* SIMD load ops */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_load_pd(double const *__p)
{
  return *(__m256d *)__p;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_load_ps(float const *__p)
{
  return *(__m256 *)__p;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_pd(double const *__p)
{
  struct __loadu_pd {
    __m256d __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_pd*)__p)->__v;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_ps(float const *__p)
{
  struct __loadu_ps {
    __m256 __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_ps*)__p)->__v;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_load_si256(__m256i const *__p)
{
  return *__p;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_si256(__m256i const *__p)
{
  struct __loadu_si256 {
    __m256i __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_si256*)__p)->__v;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_lddqu_si256(__m256i const *__p)
{
  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}
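/* Usage sketch (illustrative): _mm256_load_ps dereferences a __m256 pointer
 * and therefore requires 32-byte alignment, while _mm256_loadu_ps tolerates
 * any alignment via the packed/may_alias struct above:
 *
 *   float buf[16] __attribute__((aligned(32))) = {0};
 *   __m256 a = _mm256_load_ps(buf);       // needs 32-byte alignment
 *   __m256 u = _mm256_loadu_ps(buf + 1);  // unaligned is fine here
 */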
/* SIMD store ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_pd(double *__p, __m256d __a)
{
  *(__m256d *)__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_ps(float *__p, __m256 __a)
{
  *(__m256 *)__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_pd(double *__p, __m256d __a)
{
  __builtin_ia32_storeupd256(__p, (__v4df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_ps(float *__p, __m256 __a)
{
  __builtin_ia32_storeups256(__p, (__v8sf)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_si256(__m256i *__p, __m256i __a)
{
  *__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
  __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
}

/* Conditional load ops */
static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_maskload_pd(double const *__p, __m128d __m)
{
  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2df)__m);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_maskload_pd(double const *__p, __m256d __m)
{
  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
                                               (__v4df)__m);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_maskload_ps(float const *__p, __m128 __m)
{
  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4sf)__m);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_maskload_ps(float const *__p, __m256 __m)
{
  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8sf)__m);
}

/* Conditional store ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_maskstore_ps(float *__p, __m256 __m, __m256 __a)
{
  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8sf)__m, (__v8sf)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm_maskstore_pd(double *__p, __m128d __m, __m128d __a)
{
  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2df)__m, (__v2df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_maskstore_pd(double *__p, __m256d __m, __m256d __a)
{
  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4df)__m, (__v4df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm_maskstore_ps(float *__p, __m128 __m, __m128 __a)
{
  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4sf)__m, (__v4sf)__a);
}
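/* Usage sketch (illustrative): the conditional load/store ops consult only
 * the sign bit of each mask lane, and unselected elements are never touched
 * in memory. Writing just elements 0 and 2 of a vector:
 *
 *   double out[4] = {0};
 *   __m256d v = _mm256_set1_pd(1.0);
 *   __m256d m = _mm256_castsi256_pd(
 *       _mm256_set_epi64x(0, -1, 0, -1));  // enable lanes 0 and 2
 *   _mm256_maskstore_pd(out, m, v);        // out[1] and out[3] unchanged
 */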
/* Cacheability support ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_si256(__m256i *__a, __m256i __b)
{
  __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_pd(double *__a, __m256d __b)
{
  __builtin_ia32_movntpd256(__a, (__v4df)__b);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_ps(float *__p, __m256 __a)
{
  __builtin_ia32_movntps256(__p, (__v8sf)__a);
}

/* Create vectors */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __d, __c, __b, __a };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_set_ps(float __a, float __b, float __c, float __d,
              float __e, float __f, float __g, float __h)
{
  return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
                 int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
                 short __w11, short __w10, short __w09, short __w08,
                 short __w07, short __w06, short __w05, short __w04,
                 short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
    __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
                char __b27, char __b26, char __b25, char __b24,
                char __b23, char __b22, char __b21, char __b20,
                char __b19, char __b18, char __b17, char __b16,
                char __b15, char __b14, char __b13, char __b12,
                char __b11, char __b10, char __b09, char __b08,
                char __b07, char __b06, char __b05, char __b04,
                char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
  };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __d, __c, __b, __a };
}
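/* Usage sketch (illustrative): the _mm256_set_* constructors take arguments
 * from the highest element down to element 0, while the _mm256_setr_*
 * variants below take them in memory order:
 *
 *   __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
 *   __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
 *   // a and b hold identical values; element 0 is 0 in both.
 */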
/* Create vectors with elements in reverse order */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __a, __b, __c, __d };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_setr_ps(float __a, float __b, float __c, float __d,
               float __e, float __f, float __g, float __h)
{
  return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
                  int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
                  short __w11, short __w10, short __w09, short __w08,
                  short __w07, short __w06, short __w05, short __w04,
                  short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
    __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
                 char __b27, char __b26, char __b25, char __b24,
                 char __b23, char __b22, char __b21, char __b20,
                 char __b19, char __b18, char __b17, char __b16,
                 char __b15, char __b14, char __b13, char __b12,
                 char __b11, char __b10, char __b09, char __b08,
                 char __b07, char __b06, char __b05, char __b04,
                 char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
    __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
    __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
    __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __a, __b, __c, __d };
}

/* Create vectors with repeated elements */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_set1_pd(double __w)
{
  return (__m256d){ __w, __w, __w, __w };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_set1_ps(float __w)
{
  return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi32(int __i)
{
  return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi16(short __w)
{
  return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi8(char __b)
{
  return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi64x(long long __q)
{
  return (__m256i)(__v4di){ __q, __q, __q, __q };
}

/* Create zeroed vectors */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_pd(void)
{
  return (__m256d){ 0, 0, 0, 0 };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_ps(void)
{
  return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_si256(void)
{
  return (__m256i){ 0LL, 0LL, 0LL, 0LL };
}

/* Cast between vector types */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castpd_ps(__m256d __a)
{
  return (__m256)__a;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castpd_si256(__m256d __a)
{
  return (__m256i)__a;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castps_pd(__m256 __a)
{
  return (__m256d)__a;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castps_si256(__m256 __a)
{
  return (__m256i)__a;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_ps(__m256i __a)
{
  return (__m256)__a;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_pd(__m256i __a)
{
  return (__m256d)__a;
}
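/* Usage sketch (illustrative): the casts above reinterpret the bits of a
 * register without generating any instructions, unlike the cvt* conversions
 * earlier in this file:
 *
 *   __m256 f = _mm256_castsi256_ps(_mm256_set1_epi32(0x3f800000));
 *   // every lane of f is 1.0f: same bits, new type, zero cost
 */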
static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm256_castpd256_pd128(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm256_castps256_ps128(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_si128(__m256i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castpd128_pd256(__m128d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castps128_ps256(__m128 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castsi128_si256(__m128i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}
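/* Usage sketch (illustrative): after the 128-to-256-bit casts above, the
 * upper 128 bits are undefined (note the -1 shuffle indices), so define them
 * explicitly before use. Given hypothetical __m128 values lo128 and hi128:
 *
 *   __m256 wide = _mm256_castps128_ps256(lo128);  // upper lane undefined
 *   wide = _mm256_insertf128_ps(wide, hi128, 1);  // now fully defined
 */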
/* SIMD load ops (unaligned) */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
  struct __loadu_ps {
    __m128 __v;
  } __attribute__((__packed__, __may_alias__));

  __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
  return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
  struct __loadu_pd {
    __m128d __v;
  } __attribute__((__packed__, __may_alias__));

  __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
  return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
{
  struct __loadu_si128 {
    __m128i __v;
  } __attribute__((packed, may_alias));
  __m256i __v256 = _mm256_castsi128_si256(
    ((struct __loadu_si128*)__addr_lo)->__v);
  return _mm256_insertf128_si256(__v256,
                                 ((struct __loadu_si128*)__addr_hi)->__v, 1);
}

/* SIMD store ops (unaligned) */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
{
  __m128 __v128;

  __v128 = _mm256_castps256_ps128(__a);
  __builtin_ia32_storeups(__addr_lo, __v128);
  __v128 = _mm256_extractf128_ps(__a, 1);
  __builtin_ia32_storeups(__addr_hi, __v128);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
{
  __m128d __v128;

  __v128 = _mm256_castpd256_pd128(__a);
  __builtin_ia32_storeupd(__addr_lo, __v128);
  __v128 = _mm256_extractf128_pd(__a, 1);
  __builtin_ia32_storeupd(__addr_hi, __v128);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
{
  __m128i __v128;

  __v128 = _mm256_castsi256_si128(__a);
  __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
  __v128 = _mm256_extractf128_si256(__a, 1);
  __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
}

#endif /* __AVXINTRIN_H */