/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVXINTRIN_H
#define __AVXINTRIN_H

typedef double __v4df __attribute__ ((__vector_size__ (32)));
typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef long long __v4di __attribute__ ((__vector_size__ (32)));
typedef int __v8si __attribute__ ((__vector_size__ (32)));
typedef short __v16hi __attribute__ ((__vector_size__ (32)));
typedef char __v32qi __attribute__ ((__vector_size__ (32)));

/* We need an explicitly signed variant for char. Note that this shouldn't
 * appear in the interface though. */
typedef signed char __v32qs __attribute__((__vector_size__(32)));

typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))

/* Arithmetic */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_add_pd(__m256d __a, __m256d __b)
{
  return __a+__b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_add_ps(__m256 __a, __m256 __b)
{
  return __a+__b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_sub_pd(__m256d __a, __m256d __b)
{
  return __a-__b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_sub_ps(__m256 __a, __m256 __b)
{
  return __a-__b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_addsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_addsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_div_pd(__m256d __a, __m256d __b)
{
  return __a / __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_div_ps(__m256 __a, __m256 __b)
{
  return __a / __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_max_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_max_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_min_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_min_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_mul_pd(__m256d __a, __m256d __b)
{
  return __a * __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_mul_ps(__m256 __a, __m256 __b)
{
  return __a * __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_sqrt_pd(__m256d __a)
{
  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_sqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_rsqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_rcp_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
}

#define _mm256_round_pd(V, M) __extension__ ({ \
    (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })

#define _mm256_round_ps(V, M) __extension__ ({ \
  (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })

#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
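/* Illustrative usage (editorial note, not part of this header's interface):
 * a minimal sketch, assuming the translation unit includes <immintrin.h> and
 * is compiled with AVX enabled (e.g. -mavx); the variable names are
 * arbitrary.
 *
 *   __m256d a = _mm256_set1_pd(1.25);
 *   __m256d b = _mm256_set1_pd(2.50);
 *   __m256d sum  = _mm256_add_pd(a, b);      // each lane holds 3.75
 *   __m256d up   = _mm256_ceil_pd(sum);      // each lane holds 4.0
 *   __m256d down = _mm256_floor_pd(sum);     // each lane holds 3.0
 */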

/* Logical */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_and_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a & (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_and_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a & (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
  return (__m256d)(~(__v4di)__a & (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
  return (__m256)(~(__v8si)__a & (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_or_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a | (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_or_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a | (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_xor_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a ^ (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_xor_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a ^ (__v8si)__b);
}
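/* Illustrative usage: _mm256_andnot_pd computes ~a & b on the raw bit
 * patterns, which is commonly used to clear sign bits. A sketch, assuming
 * AVX is enabled; names are arbitrary.
 *
 *   __m256d sign = _mm256_set1_pd(-0.0);        // only the sign bits set
 *   __m256d x    = _mm256_set1_pd(-3.5);
 *   __m256d absx = _mm256_andnot_pd(sign, x);   // ~sign & x, i.e. fabs(x)
 */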

/* Horizontal arithmetic */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_hadd_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_hadd_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_hsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_hsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
}

/* Vector permutations */
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_permutevar_pd(__m128d __a, __m128i __c)
{
  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_permutevar_pd(__m256d __a, __m256i __c)
{
  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm_permutevar_ps(__m128 __a, __m128i __c)
{
  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_permutevar_ps(__m256 __a, __m256i __c)
{
  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}

#define _mm_permute_pd(A, C) __extension__ ({ \
  (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
                                   (__v2df)_mm_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1); })

#define _mm256_permute_pd(A, C) __extension__ ({ \
  (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
                                   (__v4df)_mm256_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1, \
                                   2 + (((C) & 0x4) >> 2), \
                                   2 + (((C) & 0x8) >> 3)); })

#define _mm_permute_ps(A, C) __extension__ ({ \
  (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
                                  (__v4sf)_mm_setzero_ps(), \
                                   (C) & 0x3, ((C) & 0xc) >> 2, \
                                   ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })

#define _mm256_permute_ps(A, C) __extension__ ({ \
  (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
                                  (__v8sf)_mm256_setzero_ps(), \
                                  (C) & 0x3, ((C) & 0xc) >> 2, \
                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
                                  4 + (((C) & 0x03) >> 0), \
                                  4 + (((C) & 0x0c) >> 2), \
                                  4 + (((C) & 0x30) >> 4), \
                                  4 + (((C) & 0xc0) >> 6)); })

#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
  (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
                                           (__v4df)(__m256d)(V2), (M)); })

#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
                                          (__v8sf)(__m256)(V2), (M)); })

#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
  (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
                                           (__v8si)(__m256i)(V2), (M)); })
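/* Illustrative usage: _mm256_permute2f128_pd builds each 128-bit half of the
 * result from one half of either source, selected by the immediate (low
 * nibble for the low half, high nibble for the high half). A sketch,
 * assuming AVX is enabled; names are arbitrary.
 *
 *   __m256d lo_halves = _mm256_permute2f128_pd(v1, v2, 0x20); // v1 low, then v2 low
 *   __m256d swapped   = _mm256_permute2f128_pd(v1, v1, 0x01); // swap v1's halves
 */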

/* Vector Blend */
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
  (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
                                   (__v4df)(__m256d)(V2), \
                                   (((M) & 0x01) ? 4 : 0), \
                                   (((M) & 0x02) ? 5 : 1), \
                                   (((M) & 0x04) ? 6 : 2), \
                                   (((M) & 0x08) ? 7 : 3)); })

#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
                                  (__v8sf)(__m256)(V2), \
                                  (((M) & 0x01) ?  8 : 0), \
                                  (((M) & 0x02) ?  9 : 1), \
                                  (((M) & 0x04) ? 10 : 2), \
                                  (((M) & 0x08) ? 11 : 3), \
                                  (((M) & 0x10) ? 12 : 4), \
                                  (((M) & 0x20) ? 13 : 5), \
                                  (((M) & 0x40) ? 14 : 6), \
                                  (((M) & 0x80) ? 15 : 7)); })

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
{
  return (__m256d)__builtin_ia32_blendvpd256(
    (__v4df)__a, (__v4df)__b, (__v4df)__c);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
{
  return (__m256)__builtin_ia32_blendvps256(
    (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
}

/* Vector Dot Product */
#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
                                 (__v8sf)(__m256)(V2), (M)); })
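/* Illustrative usage: the blend immediate picks lanes from the second
 * operand where a mask bit is set, and _mm256_blendv_ps does the same with a
 * runtime mask taken from the sign bit of each lane of its third operand.
 * A sketch, assuming AVX is enabled; names are arbitrary.
 *
 *   __m256 mixed  = _mm256_blend_ps(a, b, 0x0F);    // lanes 0-3 from b, 4-7 from a
 *   __m256 mask   = _mm256_cmp_ps(a, b, _CMP_LT_OS);
 *   __m256 minima = _mm256_blendv_ps(b, a, mask);   // a where a < b, else b
 */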

/* Vector shuffle */
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
        (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
                                        (__v8sf)(__m256)(b), \
                                        (mask) & 0x3, \
                                        ((mask) & 0xc) >> 2, \
                                        (((mask) & 0x30) >> 4) + 8, \
                                        (((mask) & 0xc0) >> 6) + 8, \
                                        ((mask) & 0x3) + 4, \
                                        (((mask) & 0xc) >> 2) + 4, \
                                        (((mask) & 0x30) >> 4) + 12, \
                                        (((mask) & 0xc0) >> 6) + 12); })

#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
        (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
                                         (__v4df)(__m256d)(b), \
                                         (mask) & 0x1, \
                                         (((mask) & 0x2) >> 1) + 4, \
                                         (((mask) & 0x4) >> 2) + 2, \
                                         (((mask) & 0x8) >> 3) + 6); })
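/* Illustrative usage: as with the 128-bit _mm_shuffle_ps, the immediate
 * selects two elements from the first operand and two from the second, but
 * here the pattern is applied to each 128-bit half independently. A sketch,
 * assuming AVX is enabled and _MM_SHUFFLE is available from <xmmintrin.h>.
 *
 *   // per 128-bit half, the result lanes are a[1], a[0], b[3], b[2]
 *   __m256 r = _mm256_shuffle_ps(a, b, _MM_SHUFFLE(2, 3, 0, 1));
 */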

/* Compare */
#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling)  */
#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling)  */
#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling)  */
#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling)  */
#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling)  */
#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling)  */
#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling)  */
#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling)  */
#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling)  */
#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unord, signaling)  */
#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling)  */
#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling)  */
#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling)  */
#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling)  */
#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling)  */
#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling)  */
#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling)  */
#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling)  */
#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling)  */
#define _CMP_UNORD_S  0x13 /* Unordered (signaling)  */
#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling)  */
#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling)  */
#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unord, non-signaling)  */
#define _CMP_ORD_S    0x17 /* Ordered (signaling)  */
#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling)  */
#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unord, non-sign)  */
#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling)  */
#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling)  */
#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling)  */
#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling)  */
#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling)  */
#define _CMP_TRUE_US  0x1f /* True (unordered, signaling)  */

#define _mm_cmp_pd(a, b, c) __extension__ ({ \
  (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
                                (__v2df)(__m128d)(b), (c)); })

#define _mm_cmp_ps(a, b, c) __extension__ ({ \
  (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
                               (__v4sf)(__m128)(b), (c)); })

#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
  (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
                                   (__v4df)(__m256d)(b), (c)); })

#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
  (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
                                  (__v8sf)(__m256)(b), (c)); })

#define _mm_cmp_sd(a, b, c) __extension__ ({ \
  (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
                                (__v2df)(__m128d)(b), (c)); })

#define _mm_cmp_ss(a, b, c) __extension__ ({ \
  (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
                               (__v4sf)(__m128)(b), (c)); })
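/* Illustrative usage: unlike the two-operand SSE comparisons, these take a
 * predicate from the _CMP_* table above and return all-ones or all-zero
 * lanes, which can then feed a blend or a movemask. A sketch, assuming AVX
 * is enabled; names are arbitrary.
 *
 *   __m256d lt = _mm256_cmp_pd(a, b, _CMP_LT_OS);    // a[i] < b[i] per lane
 *   int any_lt = _mm256_movemask_pd(lt) != 0;
 */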

static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi32(__m256i __a, const int __imm)
{
  __v8si __b = (__v8si)__a;
  return __b[__imm & 7];
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi16(__m256i __a, const int __imm)
{
  __v16hi __b = (__v16hi)__a;
  return __b[__imm & 15];
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi8(__m256i __a, const int __imm)
{
  __v32qi __b = (__v32qi)__a;
  return __b[__imm & 31];
}

#ifdef __x86_64__
static __inline long long  __DEFAULT_FN_ATTRS
_mm256_extract_epi64(__m256i __a, const int __imm)
{
  __v4di __b = (__v4di)__a;
  return __b[__imm & 3];
}
#endif

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
{
  __v8si __c = (__v8si)__a;
  __c[__imm & 7] = __b;
  return (__m256i)__c;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
{
  __v16hi __c = (__v16hi)__a;
  __c[__imm & 15] = __b;
  return (__m256i)__c;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
{
  __v32qi __c = (__v32qi)__a;
  __c[__imm & 31] = __b;
  return (__m256i)__c;
}

#ifdef __x86_64__
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
{
  __v4di __c = (__v4di)__a;
  __c[__imm & 3] = __b;
  return (__m256i)__c;
}
#endif
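/* Illustrative usage: extract/insert operate on the element selected by the
 * index (taken modulo the element count); insert returns a modified copy and
 * leaves its argument untouched. A sketch, assuming AVX is enabled; names
 * are arbitrary.
 *
 *   __m256i v  = _mm256_set1_epi32(7);
 *   int e2     = _mm256_extract_epi32(v, 2);      // 7
 *   __m256i v2 = _mm256_insert_epi32(v, 42, 2);   // lane 2 becomes 42
 */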

/* Conversion */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_cvtepi32_pd(__m128i __a)
{
  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_cvtepi32_ps(__m256i __a)
{
  return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm256_cvtpd_ps(__m256d __a)
{
  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_cvtps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_cvtps_pd(__m128 __a)
{
  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvttpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvtpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_cvttps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
}
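/* Illustrative usage: _mm256_cvtps_epi32 rounds according to the current
 * rounding mode (round-to-nearest-even by default), while the "tt" variant
 * truncates toward zero. A sketch, assuming AVX is enabled.
 *
 *   __m256  f = _mm256_set1_ps(2.7f);
 *   __m256i r = _mm256_cvtps_epi32(f);     // lanes hold 3
 *   __m256i t = _mm256_cvttps_epi32(f);    // lanes hold 2
 */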

/* Vector replicate */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_movehdup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_moveldup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_movedup_pd(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
}

/* Unpack and Interleave */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpackhi_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpacklo_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpackhi_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpacklo_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}

/* Bit Test */
static __inline int __DEFAULT_FN_ATTRS
_mm_testz_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testz_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
}

/* Vector extract sign mask */
static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_pd(__m256d __a)
{
  return __builtin_ia32_movmskpd256((__v4df)__a);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_ps(__m256 __a)
{
  return __builtin_ia32_movmskps256((__v8sf)__a);
}
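/* Illustrative usage: the test intrinsics above report on the AND (testz) or
 * AND-NOT (testc) of their operands without producing a vector result, and
 * movemask packs the per-lane sign bits into an ordinary integer. A sketch,
 * assuming AVX is enabled; names are arbitrary.
 *
 *   if (_mm256_testz_si256(v, v)) {
 *     // every bit of v is zero
 *   }
 *   int signs = _mm256_movemask_pd(x);   // bit i set if lane i has its sign bit set
 */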

/* Vector zero */
static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroall(void)
{
  __builtin_ia32_vzeroall();
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroupper(void)
{
  __builtin_ia32_vzeroupper();
}

/* Vector load with broadcast */
static __inline __m128 __DEFAULT_FN_ATTRS
_mm_broadcast_ss(float const *__a)
{
  float __f = *__a;
  return (__m128)(__v4sf){ __f, __f, __f, __f };
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
  double __d = *__a;
  return (__m256d)(__v4df){ __d, __d, __d, __d };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
  float __f = *__a;
  return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_pd(__m128d const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ps(__m128 const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
}
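/* Illustrative usage: the broadcast intrinsics load a scalar (or a 128-bit
 * block) from memory and replicate it across the destination. A sketch,
 * assuming AVX is enabled; names are arbitrary.
 *
 *   double scale = 0.5;
 *   __m256d s = _mm256_broadcast_sd(&scale);     // { 0.5, 0.5, 0.5, 0.5 }
 *   __m128d pair = _mm_set_pd(2.0, 1.0);
 *   __m256d both = _mm256_broadcast_pd(&pair);   // { 1.0, 2.0, 1.0, 2.0 }
 */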

/* SIMD load ops */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_load_pd(double const *__p)
{
  return *(__m256d *)__p;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_load_ps(float const *__p)
{
  return *(__m256 *)__p;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu_pd(double const *__p)
{
  struct __loadu_pd {
    __m256d __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_pd*)__p)->__v;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu_ps(float const *__p)
{
  struct __loadu_ps {
    __m256 __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_ps*)__p)->__v;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_load_si256(__m256i const *__p)
{
  return *__p;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu_si256(__m256i const *__p)
{
  struct __loadu_si256 {
    __m256i __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_si256*)__p)->__v;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_lddqu_si256(__m256i const *__p)
{
  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}

/* SIMD store ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_store_pd(double *__p, __m256d __a)
{
  *(__m256d *)__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_store_ps(float *__p, __m256 __a)
{
  *(__m256 *)__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_pd(double *__p, __m256d __a)
{
  __builtin_ia32_storeupd256(__p, (__v4df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_ps(float *__p, __m256 __a)
{
  __builtin_ia32_storeups256(__p, (__v8sf)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_store_si256(__m256i *__p, __m256i __a)
{
  *__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
  __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
}
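/* Illustrative usage: the load/store intrinsics without a "u" require the
 * pointer to be 32-byte aligned; the "u" forms accept any alignment. A
 * sketch, assuming AVX is enabled; names are arbitrary.
 *
 *   double buf[4] __attribute__((aligned(32))) = { 1, 2, 3, 4 };
 *   __m256d v = _mm256_load_pd(buf);     // aligned load
 *   double out[4];
 *   _mm256_storeu_pd(out, v);            // unaligned store works for any address
 */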

/* Conditional load ops */
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_maskload_pd(double const *__p, __m128i __m)
{
  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_maskload_pd(double const *__p, __m256i __m)
{
  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
                                               (__v4di)__m);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm_maskload_ps(float const *__p, __m128i __m)
{
  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_maskload_ps(float const *__p, __m256i __m)
{
  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
}

/* Conditional store ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
{
  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
{
  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
{
  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
{
  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
}
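/* Illustrative usage: the mask is taken from the sign bit of each integer
 * element; lanes whose mask bit is clear read as zero (maskload) or are left
 * untouched in memory (maskstore). A sketch, assuming AVX is enabled; ptr is
 * assumed to point at four doubles.
 *
 *   __m256i mask = _mm256_set_epi64x(0, -1, 0, -1);   // select lanes 0 and 2
 *   __m256d v = _mm256_maskload_pd(ptr, mask);        // other lanes are zero
 *   _mm256_maskstore_pd(ptr, mask, v);                // writes lanes 0 and 2 only
 */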

/* Cacheability support ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_si256(__m256i *__a, __m256i __b)
{
  __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_pd(double *__a, __m256d __b)
{
  __builtin_ia32_movntpd256(__a, (__v4df)__b);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_ps(float *__p, __m256 __a)
{
  __builtin_ia32_movntps256(__p, (__v8sf)__a);
}

/* Create vectors */
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_undefined_pd(void)
{
  return (__m256d)__builtin_ia32_undef256();
}

static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_undefined_ps(void)
{
  return (__m256)__builtin_ia32_undef256();
}

static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_undefined_si256(void)
{
  return (__m256i)__builtin_ia32_undef256();
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __d, __c, __b, __a };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_ps(float __a, float __b, float __c, float __d,
              float __e, float __f, float __g, float __h)
{
  return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
                 int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
                 short __w11, short __w10, short __w09, short __w08,
                 short __w07, short __w06, short __w05, short __w04,
                 short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
    __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
                char __b27, char __b26, char __b25, char __b24,
                char __b23, char __b22, char __b21, char __b20,
                char __b19, char __b18, char __b17, char __b16,
                char __b15, char __b14, char __b13, char __b12,
                char __b11, char __b10, char __b09, char __b08,
                char __b07, char __b06, char __b05, char __b04,
                char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
  };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __d, __c, __b, __a };
}

/* Create vectors with elements in reverse order */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __a, __b, __c, __d };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_ps(float __a, float __b, float __c, float __d,
               float __e, float __f, float __g, float __h)
{
  return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
                  int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
       short __w11, short __w10, short __w09, short __w08,
       short __w07, short __w06, short __w05, short __w04,
       short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
    __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
                 char __b27, char __b26, char __b25, char __b24,
                 char __b23, char __b22, char __b21, char __b20,
                 char __b19, char __b18, char __b17, char __b16,
                 char __b15, char __b14, char __b13, char __b12,
                 char __b11, char __b10, char __b09, char __b08,
                 char __b07, char __b06, char __b05, char __b04,
                 char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
    __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
    __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
    __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __a, __b, __c, __d };
}

/* Create vectors with repeated elements */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set1_pd(double __w)
{
  return (__m256d){ __w, __w, __w, __w };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set1_ps(float __w)
{
  return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi32(int __i)
{
  return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi16(short __w)
{
  return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi8(char __b)
{
  return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi64x(long long __q)
{
  return (__m256i)(__v4di){ __q, __q, __q, __q };
}
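/* Illustrative usage: the _mm256_set_* forms take their arguments with the
 * highest-indexed element first, the _mm256_setr_* forms take them in memory
 * order, and the _mm256_set1_* forms replicate one value. A sketch, assuming
 * AVX is enabled.
 *
 *   __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);   // element 0 is 0
 *   __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);  // same vector as a
 *   __m256i c = _mm256_set1_epi32(-1);                      // all bits set
 */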

/* Create zeroed vectors */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setzero_pd(void)
{
  return (__m256d){ 0, 0, 0, 0 };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setzero_ps(void)
{
  return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setzero_si256(void)
{
  return (__m256i){ 0LL, 0LL, 0LL, 0LL };
}

/* Cast between vector types */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castpd_ps(__m256d __a)
{
  return (__m256)__a;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castpd_si256(__m256d __a)
{
  return (__m256i)__a;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castps_pd(__m256 __a)
{
  return (__m256d)__a;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castps_si256(__m256 __a)
{
  return (__m256i)__a;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castsi256_ps(__m256i __a)
{
  return (__m256)__a;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castsi256_pd(__m256i __a)
{
  return (__m256d)__a;
}

static __inline __m128d __DEFAULT_FN_ATTRS
_mm256_castpd256_pd128(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm256_castps256_ps128(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_castsi256_si128(__m256i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}
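/* Illustrative usage: the casts are bit-pattern reinterpretations and
 * generate no instructions; the 256-to-128 forms return the low half, and
 * the upper half of the 128-to-256 results is undefined. A sketch, assuming
 * AVX is enabled; names are arbitrary.
 *
 *   __m256i bits = _mm256_castpd_si256(v);       // same 256 bits, integer type
 *   __m128d low  = _mm256_castpd256_pd128(v);    // lower two doubles of v
 */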

/*
   Vector insert.
   We use macros rather than inlines because we only want to accept
   invocations where the immediate M is a constant expression.
*/
#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_shufflevector( \
    (__v8sf)(__m256)(V1), \
    (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
    (((M) & 1) ?  0 :  8), \
    (((M) & 1) ?  1 :  9), \
    (((M) & 1) ?  2 : 10), \
    (((M) & 1) ?  3 : 11), \
    (((M) & 1) ?  8 :  4), \
    (((M) & 1) ?  9 :  5), \
    (((M) & 1) ? 10 :  6), \
    (((M) & 1) ? 11 :  7) );})

#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
  (__m256d)__builtin_shufflevector( \
    (__v4df)(__m256d)(V1), \
    (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
    (((M) & 1) ? 0 : 4), \
    (((M) & 1) ? 1 : 5), \
    (((M) & 1) ? 4 : 2), \
    (((M) & 1) ? 5 : 3) );})

#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
  (__m256i)__builtin_shufflevector( \
    (__v4di)(__m256i)(V1), \
    (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
    (((M) & 1) ? 0 : 4), \
    (((M) & 1) ? 1 : 5), \
    (((M) & 1) ? 4 : 2), \
    (((M) & 1) ? 5 : 3) );})

/*
   Vector extract.
   We use macros rather than inlines because we only want to accept
   invocations where the immediate M is a constant expression.
*/
#define _mm256_extractf128_ps(V, M) __extension__ ({ \
  (__m128)__builtin_shufflevector( \
    (__v8sf)(__m256)(V), \
    (__v8sf)(_mm256_setzero_ps()), \
    (((M) & 1) ? 4 : 0), \
    (((M) & 1) ? 5 : 1), \
    (((M) & 1) ? 6 : 2), \
    (((M) & 1) ? 7 : 3) );})

#define _mm256_extractf128_pd(V, M) __extension__ ({ \
  (__m128d)__builtin_shufflevector( \
    (__v4df)(__m256d)(V), \
    (__v4df)(_mm256_setzero_pd()), \
    (((M) & 1) ? 2 : 0), \
    (((M) & 1) ? 3 : 1) );})

#define _mm256_extractf128_si256(V, M) __extension__ ({ \
  (__m128i)__builtin_shufflevector( \
    (__v4di)(__m256i)(V), \
    (__v4di)(_mm256_setzero_si256()), \
    (((M) & 1) ? 2 : 0), \
    (((M) & 1) ? 3 : 1) );})
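/* Illustrative usage: the lane index must be a constant expression; 0 names
 * the low 128 bits and 1 the high 128 bits. A sketch, assuming AVX is
 * enabled; names are arbitrary.
 *
 *   __m128 hi = _mm256_extractf128_ps(v, 1);        // upper four floats of v
 *   __m256 w  = _mm256_insertf128_ps(v, x128, 0);   // replace the lower four
 */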

/* SIMD load ops (unaligned) */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
  struct __loadu_ps {
    __m128 __v;
  } __attribute__((__packed__, __may_alias__));

  __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
  return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
  struct __loadu_pd {
    __m128d __v;
  } __attribute__((__packed__, __may_alias__));

  __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
  return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
{
  struct __loadu_si128 {
    __m128i __v;
  } __attribute__((__packed__, __may_alias__));
  __m256i __v256 = _mm256_castsi128_si256(
    ((struct __loadu_si128*)__addr_lo)->__v);
  return _mm256_insertf128_si256(__v256,
                                 ((struct __loadu_si128*)__addr_hi)->__v, 1);
}

/* SIMD store ops (unaligned) */
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
{
  __m128 __v128;

  __v128 = _mm256_castps256_ps128(__a);
  __builtin_ia32_storeups(__addr_lo, __v128);
  __v128 = _mm256_extractf128_ps(__a, 1);
  __builtin_ia32_storeups(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
{
  __m128d __v128;

  __v128 = _mm256_castpd256_pd128(__a);
  __builtin_ia32_storeupd(__addr_lo, __v128);
  __v128 = _mm256_extractf128_pd(__a, 1);
  __builtin_ia32_storeupd(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
{
  __m128i __v128;

  __v128 = _mm256_castsi256_si128(__a);
  __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
  __v128 = _mm256_extractf128_si256(__a, 1);
  __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_m128 (__m128 __hi, __m128 __lo) {
  return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_m128d (__m128d __hi, __m128d __lo) {
  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_m128i (__m128i __hi, __m128i __lo) {
  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_m128 (__m128 __lo, __m128 __hi) {
  return _mm256_set_m128(__hi, __lo);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_m128d (__m128d __lo, __m128d __hi) {
  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_m128i (__m128i __lo, __m128i __hi) {
  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

#undef __DEFAULT_FN_ATTRS

#endif /* __AVXINTRIN_H */