/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __XMMINTRIN_H
#define __XMMINTRIN_H

#ifndef __SSE__
#error "SSE instruction set not enabled"
#else

#include <mmintrin.h>

typedef int __v4si __attribute__((__vector_size__(16)));
typedef float __v4sf __attribute__((__vector_size__(16)));
typedef float __m128 __attribute__((__vector_size__(16)));

// This header should only be included in a hosted environment as it depends on
// a standard library to provide allocation routines.
#if __STDC_HOSTED__
#include <mm_malloc.h>
#endif
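
/* Usage sketch (illustrative, not part of this header): in a hosted build,
 * <mm_malloc.h> supplies _mm_malloc and _mm_free, a convenient way to get
 * 16-byte-aligned storage for __m128 data:
 *
 *   float *buf = (float *)_mm_malloc(64 * sizeof(float), 16);
 *   if (buf) {
 *     ...
 *     _mm_free(buf);
 *   }
 */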

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_add_ss(__m128 __a, __m128 __b)
{
  __a[0] += __b[0];
  return __a;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_add_ps(__m128 __a, __m128 __b)
{
  return __a + __b;
}
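
/* Illustrative note: the _ss forms operate only on the lowest element and
 * pass the upper three lanes of __a through unchanged, while the _ps forms
 * operate on all four lanes. For example:
 *
 *   __m128 __x = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // lanes: 1, 2, 3, 4
 *   __m128 __y = _mm_set1_ps(10.0f);
 *   __m128 __s = _mm_add_ss(__x, __y);  // lanes: 11, 2, 3, 4
 *   __m128 __p = _mm_add_ps(__x, __y);  // lanes: 11, 12, 13, 14
 */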

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sub_ss(__m128 __a, __m128 __b)
{
  __a[0] -= __b[0];
  return __a;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sub_ps(__m128 __a, __m128 __b)
{
  return __a - __b;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_mul_ss(__m128 __a, __m128 __b)
{
  __a[0] *= __b[0];
  return __a;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_mul_ps(__m128 __a, __m128 __b)
{
  return __a * __b;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_div_ss(__m128 __a, __m128 __b)
{
  __a[0] /= __b[0];
  return __a;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_div_ps(__m128 __a, __m128 __b)
{
  return __a / __b;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_ss(__m128 __a)
{
  __m128 __c = __builtin_ia32_sqrtss(__a);
  return (__m128) { __c[0], __a[1], __a[2], __a[3] };
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_ps(__m128 __a)
{
  return __builtin_ia32_sqrtps(__a);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rcp_ss(__m128 __a)
{
  __m128 __c = __builtin_ia32_rcpss(__a);
  return (__m128) { __c[0], __a[1], __a[2], __a[3] };
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rcp_ps(__m128 __a)
{
  return __builtin_ia32_rcpps(__a);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rsqrt_ss(__m128 __a)
{
  __m128 __c = __builtin_ia32_rsqrtss(__a);
  return (__m128) { __c[0], __a[1], __a[2], __a[3] };
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rsqrt_ps(__m128 __a)
{
  return __builtin_ia32_rsqrtps(__a);
}
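
/* Note (hedged, per Intel's documented accuracy): _mm_rcp_* and _mm_rsqrt_*
 * return hardware approximations accurate to roughly 12 bits, not IEEE-exact
 * results. A common refinement idiom is one Newton-Raphson step, sketched
 * here for the reciprocal square root (__x is a hypothetical input):
 *
 *   __m128 __e  = _mm_rsqrt_ps(__x);                     // ~12-bit estimate
 *   __m128 __e2 = _mm_mul_ps(_mm_mul_ps(__x, __e), __e); // x*e*e ~= 1.0
 *   __e = _mm_mul_ps(_mm_mul_ps(_mm_set1_ps(0.5f), __e), // e' = e*(3 - x*e*e)/2
 *                    _mm_sub_ps(_mm_set1_ps(3.0f), __e2));
 */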

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_min_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_minss(__a, __b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_min_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_minps(__a, __b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_max_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_maxss(__a, __b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_max_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_maxps(__a, __b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_and_ps(__m128 __a, __m128 __b)
{
  return (__m128)((__v4si)__a & (__v4si)__b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_andnot_ps(__m128 __a, __m128 __b)
{
  return (__m128)(~(__v4si)__a & (__v4si)__b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_or_ps(__m128 __a, __m128 __b)
{
  return (__m128)((__v4si)__a | (__v4si)__b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_xor_ps(__m128 __a, __m128 __b)
{
  return (__m128)((__v4si)__a ^ (__v4si)__b);
}
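
/* Illustrative idiom (not part of this header): because these operate on the
 * raw bit patterns, they allow branch-free sign manipulation. With a
 * sign-bit mask (-0.0f is 0x80000000 in every lane):
 *
 *   __m128 __sign = _mm_set1_ps(-0.0f);
 *   __m128 __neg  = _mm_xor_ps(__v, __sign);     // negate all four lanes
 *   __m128 __abs  = _mm_andnot_ps(__sign, __v);  // clear sign bits: |v|
 */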

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 0);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 0);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 1);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 1);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmple_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 2);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmple_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 2);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_ss(__m128 __a, __m128 __b)
{
  /* The operands are swapped to express '>' as '<', but the upper 96 bits of
     the result must come from __a, not __b, so rebuild the vector around the
     scalar compare. */
  return (__m128)__builtin_shufflevector(__a,
                                         __builtin_ia32_cmpss(__b, __a, 1),
                                         4, 1, 2, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__b, __a, 1);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_ss(__m128 __a, __m128 __b)
{
  /* As with _mm_cmpgt_ss, keep the upper 96 bits of __a. */
  return (__m128)__builtin_shufflevector(__a,
                                         __builtin_ia32_cmpss(__b, __a, 2),
                                         4, 1, 2, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__b, __a, 2);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 4);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 4);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 5);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 5);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 6);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 6);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_ss(__m128 __a, __m128 __b)
{
  /* As with _mm_cmpgt_ss, keep the upper 96 bits of __a. */
  return (__m128)__builtin_shufflevector(__a,
                                         __builtin_ia32_cmpss(__b, __a, 5),
                                         4, 1, 2, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__b, __a, 5);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_ss(__m128 __a, __m128 __b)
{
  /* As with _mm_cmpgt_ss, keep the upper 96 bits of __a. */
  return (__m128)__builtin_shufflevector(__a,
                                         __builtin_ia32_cmpss(__b, __a, 6),
                                         4, 1, 2, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__b, __a, 6);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 7);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 7);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_ss(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpss(__a, __b, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_ps(__m128 __a, __m128 __b)
{
  return (__m128)__builtin_ia32_cmpps(__a, __b, 3);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comieq_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_comieq(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comilt_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_comilt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comile_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_comile(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comigt_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_comigt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comige_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_comige(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comineq_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_comineq(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomieq_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_ucomieq(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomilt_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_ucomilt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomile_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_ucomile(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomigt_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_ucomigt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomige_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_ucomige(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomineq_ss(__m128 __a, __m128 __b)
{
  return __builtin_ia32_ucomineq(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_si32(__m128 __a)
{
  return __builtin_ia32_cvtss2si(__a);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvt_ss2si(__m128 __a)
{
  return _mm_cvtss_si32(__a);
}

#ifdef __x86_64__

static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_si64(__m128 __a)
{
  return __builtin_ia32_cvtss2si64(__a);
}

#endif

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pi32(__m128 __a)
{
  return (__m64)__builtin_ia32_cvtps2pi(__a);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvt_ps2pi(__m128 __a)
{
  return _mm_cvtps_pi32(__a);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvttss_si32(__m128 __a)
{
  return __a[0];
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtt_ss2si(__m128 __a)
{
  return _mm_cvttss_si32(__a);
}

static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvttss_si64(__m128 __a)
{
  return __a[0];
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvttps_pi32(__m128 __a)
{
  return (__m64)__builtin_ia32_cvttps2pi(__a);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtt_ps2pi(__m128 __a)
{
  return _mm_cvttps_pi32(__a);
}
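
/* Note: the _mm_cvt* conversions above round according to the current MXCSR
 * rounding mode (round-to-nearest by default), while the _mm_cvtt*
 * ("truncate") forms always round toward zero. For example:
 *
 *   __m128 __v = _mm_set_ss(1.7f);
 *   int __r = _mm_cvtss_si32(__v);   // 2 under the default rounding mode
 *   int __t = _mm_cvttss_si32(__v);  // 1, always truncated
 */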

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_ss(__m128 __a, int __b)
{
  __a[0] = __b;
  return __a;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvt_si2ss(__m128 __a, int __b)
{
  return _mm_cvtsi32_ss(__a, __b);
}

#ifdef __x86_64__

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_ss(__m128 __a, long long __b)
{
  __a[0] = __b;
  return __a;
}

#endif

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi32_ps(__m128 __a, __m64 __b)
{
  return __builtin_ia32_cvtpi2ps(__a, (__v2si)__b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvt_pi2ps(__m128 __a, __m64 __b)
{
  return _mm_cvtpi32_ps(__a, __b);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_f32(__m128 __a)
{
  return __a[0];
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadh_pi(__m128 __a, const __m64 *__p)
{
  typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));
  struct __mm_loadh_pi_struct {
    __mm_loadh_pi_v2f32 __u;
  } __attribute__((__packed__, __may_alias__));
  __mm_loadh_pi_v2f32 __b = ((struct __mm_loadh_pi_struct*)__p)->__u;
  __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
  return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadl_pi(__m128 __a, const __m64 *__p)
{
  typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));
  struct __mm_loadl_pi_struct {
    __mm_loadl_pi_v2f32 __u;
  } __attribute__((__packed__, __may_alias__));
  __mm_loadl_pi_v2f32 __b = ((struct __mm_loadl_pi_struct*)__p)->__u;
  __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
  return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_load_ss(const float *__p)
{
  struct __mm_load_ss_struct {
    float __u;
  } __attribute__((__packed__, __may_alias__));
  float __u = ((struct __mm_load_ss_struct*)__p)->__u;
  return (__m128){ __u, 0, 0, 0 };
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_load1_ps(const float *__p)
{
  struct __mm_load1_ps_struct {
    float __u;
  } __attribute__((__packed__, __may_alias__));
  float __u = ((struct __mm_load1_ps_struct*)__p)->__u;
  return (__m128){ __u, __u, __u, __u };
}

#define _mm_load_ps1(p) _mm_load1_ps(p)

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_load_ps(const float *__p)
{
  return *(__m128*)__p;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadu_ps(const float *__p)
{
  struct __loadu_ps {
    __m128 __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_ps*)__p)->__v;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadr_ps(const float *__p)
{
  __m128 __a = _mm_load_ps(__p);
  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set_ss(float __w)
{
  return (__m128){ __w, 0, 0, 0 };
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set1_ps(float __w)
{
  return (__m128){ __w, __w, __w, __w };
}

// Microsoft specific.
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set_ps1(float __w)
{
  return _mm_set1_ps(__w);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set_ps(float __z, float __y, float __x, float __w)
{
  return (__m128){ __w, __x, __y, __z };
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_setr_ps(float __z, float __y, float __x, float __w)
{
  return (__m128){ __z, __y, __x, __w };
}
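
/* Illustrative note: _mm_set_ps takes its arguments from the highest lane
 * down, while _mm_setr_ps ("reversed") takes them from the lowest lane up,
 * matching memory order. These build the same vector:
 *
 *   __m128 __a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);   // lane 0 = 0.0f
 *   __m128 __b = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);  // lane 0 = 0.0f
 */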

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_setzero_ps(void)
{
  return (__m128){ 0, 0, 0, 0 };
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeh_pi(__m64 *__p, __m128 __a)
{
  __builtin_ia32_storehps((__v2si *)__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storel_pi(__m64 *__p, __m128 __a)
{
  __builtin_ia32_storelps((__v2si *)__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_ss(float *__p, __m128 __a)
{
  struct __mm_store_ss_struct {
    float __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_store_ss_struct*)__p)->__u = __a[0];
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeu_ps(float *__p, __m128 __a)
{
  __builtin_ia32_storeups(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store1_ps(float *__p, __m128 __a)
{
  __a = __builtin_shufflevector(__a, __a, 0, 0, 0, 0);
  _mm_storeu_ps(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_ps1(float *__p, __m128 __a)
{
  _mm_store1_ps(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_ps(float *__p, __m128 __a)
{
  *(__m128 *)__p = __a;
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storer_ps(float *__p, __m128 __a)
{
  __a = __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
  _mm_store_ps(__p, __a);
}

#define _MM_HINT_T0 3
#define _MM_HINT_T1 2
#define _MM_HINT_T2 1
#define _MM_HINT_NTA 0

/* FIXME: We have to #define this because "sel" must be a constant integer, and
   Sema doesn't do any form of constant propagation yet. */

#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
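
/* Usage sketch (illustrative): because the hint must be a compile-time
 * constant, _mm_prefetch is a macro. A typical use prefetches ahead of a
 * streaming loop; `src`, `dst`, `process`, and the distance of 16 are
 * hypothetical:
 *
 *   for (i = 0; i < n; ++i) {
 *     _mm_prefetch((const char *)&src[i + 16], _MM_HINT_T0);
 *     dst[i] = process(src[i]);
 *   }
 */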

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_pi(__m64 *__p, __m64 __a)
{
  __builtin_ia32_movntq(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_ps(float *__p, __m128 __a)
{
  __builtin_ia32_movntps(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_sfence(void)
{
  __builtin_ia32_sfence();
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_extract_pi16(__m64 __a, int __n)
{
  __v4hi __b = (__v4hi)__a;
  return (unsigned short)__b[__n & 3];
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_insert_pi16(__m64 __a, int __d, int __n)
{
  __v4hi __b = (__v4hi)__a;
  __b[__n & 3] = __d;
  return (__m64)__b;
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_max_pi16(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_max_pu8(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_min_pi16(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_min_pu8(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_pi8(__m64 __a)
{
  return __builtin_ia32_pmovmskb((__v8qi)__a);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_pu16(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
}

#define _mm_shuffle_pi16(a, n) __extension__ ({ \
  __m64 __a = (a); \
  (__m64)__builtin_ia32_pshufw((__v4hi)__a, (n)); })

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
{
  __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_avg_pu8(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_avg_pu16(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sad_pu8(__m64 __a, __m64 __b)
{
  return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_getcsr(void)
{
  return __builtin_ia32_stmxcsr();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_setcsr(unsigned int __i)
{
  __builtin_ia32_ldmxcsr(__i);
}

#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
  __m128 __a = (a); \
  __m128 __b = (b); \
  (__m128)__builtin_shufflevector((__v4sf)__a, (__v4sf)__b, \
                                  (mask) & 0x3, ((mask) & 0xc) >> 2, \
                                  (((mask) & 0x30) >> 4) + 4, \
                                  (((mask) & 0xc0) >> 6) + 4); })

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_ps(__m128 __a, __m128 __b)
{
  return __builtin_shufflevector(__a, __b, 2, 6, 3, 7);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_ps(__m128 __a, __m128 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 4, 1, 5);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_move_ss(__m128 __a, __m128 __b)
{
  return __builtin_shufflevector(__a, __b, 4, 1, 2, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_movehl_ps(__m128 __a, __m128 __b)
{
  return __builtin_shufflevector(__a, __b, 6, 7, 2, 3);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_movelh_ps(__m128 __a, __m128 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 1, 4, 5);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi16_ps(__m64 __a)
{
  __m64 __b, __c;
  __m128 __r;

  __b = _mm_setzero_si64();
  __b = _mm_cmpgt_pi16(__b, __a);
  __c = _mm_unpackhi_pi16(__a, __b);
  __r = _mm_setzero_ps();
  __r = _mm_cvtpi32_ps(__r, __c);
  __r = _mm_movelh_ps(__r, __r);
  __c = _mm_unpacklo_pi16(__a, __b);
  __r = _mm_cvtpi32_ps(__r, __c);

  return __r;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpu16_ps(__m64 __a)
{
  __m64 __b, __c;
  __m128 __r;

  __b = _mm_setzero_si64();
  __c = _mm_unpackhi_pi16(__a, __b);
  __r = _mm_setzero_ps();
  __r = _mm_cvtpi32_ps(__r, __c);
  __r = _mm_movelh_ps(__r, __r);
  __c = _mm_unpacklo_pi16(__a, __b);
  __r = _mm_cvtpi32_ps(__r, __c);

  return __r;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi8_ps(__m64 __a)
{
  __m64 __b;

  __b = _mm_setzero_si64();
  __b = _mm_cmpgt_pi8(__b, __a);
  __b = _mm_unpacklo_pi8(__a, __b);

  return _mm_cvtpi16_ps(__b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpu8_ps(__m64 __a)
{
  __m64 __b;

  __b = _mm_setzero_si64();
  __b = _mm_unpacklo_pi8(__a, __b);

  return _mm_cvtpi16_ps(__b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
{
  __m128 __c;

  __c = _mm_setzero_ps();
  __c = _mm_cvtpi32_ps(__c, __b);
  __c = _mm_movelh_ps(__c, __c);

  return _mm_cvtpi32_ps(__c, __a);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pi16(__m128 __a)
{
  __m64 __b, __c;

  __b = _mm_cvtps_pi32(__a);
  __a = _mm_movehl_ps(__a, __a);
  __c = _mm_cvtps_pi32(__a);

  /* The two halves hold 32-bit integers, so pack them down with
     _mm_packs_pi32 (packssdw), not _mm_packs_pi16. */
  return _mm_packs_pi32(__b, __c);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pi8(__m128 __a)
{
  __m64 __b, __c;

  __b = _mm_cvtps_pi16(__a);
  __c = _mm_setzero_si64();

  return _mm_packs_pi16(__b, __c);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_ps(__m128 __a)
{
  return __builtin_ia32_movmskps(__a);
}

#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
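
/* Usage sketch (illustrative): _MM_SHUFFLE packs four 2-bit lane selectors
 * into the immediate expected by _mm_shuffle_ps and _mm_shuffle_pi16, read
 * from the highest result lane down:
 *
 *   __m128 __r = _mm_shuffle_ps(__a, __b, _MM_SHUFFLE(3, 2, 1, 0));
 *   // __r = { __a[0], __a[1], __b[2], __b[3] }
 *
 *   __m128 __s = _mm_shuffle_ps(__a, __a, _MM_SHUFFLE(0, 0, 0, 0));
 *   // broadcast lane 0 of __a across all four lanes
 */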

#define _MM_EXCEPT_INVALID    (0x0001)
#define _MM_EXCEPT_DENORM     (0x0002)
#define _MM_EXCEPT_DIV_ZERO   (0x0004)
#define _MM_EXCEPT_OVERFLOW   (0x0008)
#define _MM_EXCEPT_UNDERFLOW  (0x0010)
#define _MM_EXCEPT_INEXACT    (0x0020)
#define _MM_EXCEPT_MASK       (0x003f)

#define _MM_MASK_INVALID      (0x0080)
#define _MM_MASK_DENORM       (0x0100)
#define _MM_MASK_DIV_ZERO     (0x0200)
#define _MM_MASK_OVERFLOW     (0x0400)
#define _MM_MASK_UNDERFLOW    (0x0800)
#define _MM_MASK_INEXACT      (0x1000)
#define _MM_MASK_MASK         (0x1f80)

#define _MM_ROUND_NEAREST     (0x0000)
#define _MM_ROUND_DOWN        (0x2000)
#define _MM_ROUND_UP          (0x4000)
#define _MM_ROUND_TOWARD_ZERO (0x6000)
#define _MM_ROUND_MASK        (0x6000)

#define _MM_FLUSH_ZERO_MASK   (0x8000)
#define _MM_FLUSH_ZERO_ON     (0x8000)
#define _MM_FLUSH_ZERO_OFF    (0x0000)

#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)

#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
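
/* Usage sketch (illustrative): these macros read-modify-write the MXCSR
 * control/status register via _mm_getcsr/_mm_setcsr. For example, to switch
 * the rounding mode temporarily and then restore it:
 *
 *   unsigned int __saved = _MM_GET_ROUNDING_MODE();
 *   _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
 *   ...
 *   _MM_SET_ROUNDING_MODE(__saved);
 */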

#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __m128 tmp3, tmp2, tmp1, tmp0; \
  tmp0 = _mm_unpacklo_ps((row0), (row1)); \
  tmp2 = _mm_unpacklo_ps((row2), (row3)); \
  tmp1 = _mm_unpackhi_ps((row0), (row1)); \
  tmp3 = _mm_unpackhi_ps((row2), (row3)); \
  (row0) = _mm_movelh_ps(tmp0, tmp2); \
  (row1) = _mm_movehl_ps(tmp2, tmp0); \
  (row2) = _mm_movelh_ps(tmp1, tmp3); \
  (row3) = _mm_movehl_ps(tmp3, tmp1); \
} while (0)
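
/* Usage sketch (illustrative): transpose a 4x4 matrix held in four row
 * vectors; the macro updates its arguments in place. `m` is a hypothetical
 * array of 16 floats:
 *
 *   __m128 __r0 = _mm_loadu_ps(&m[0]);
 *   __m128 __r1 = _mm_loadu_ps(&m[4]);
 *   __m128 __r2 = _mm_loadu_ps(&m[8]);
 *   __m128 __r3 = _mm_loadu_ps(&m[12]);
 *   _MM_TRANSPOSE4_PS(__r0, __r1, __r2, __r3);
 *   // __r0..__r3 now hold the columns of m
 */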

/* Aliases for compatibility. */
#define _m_pextrw _mm_extract_pi16
#define _m_pinsrw _mm_insert_pi16
#define _m_pmaxsw _mm_max_pi16
#define _m_pmaxub _mm_max_pu8
#define _m_pminsw _mm_min_pi16
#define _m_pminub _mm_min_pu8
#define _m_pmovmskb _mm_movemask_pi8
#define _m_pmulhuw _mm_mulhi_pu16
#define _m_pshufw _mm_shuffle_pi16
#define _m_maskmovq _mm_maskmove_si64
#define _m_pavgb _mm_avg_pu8
#define _m_pavgw _mm_avg_pu16
#define _m_psadbw _mm_sad_pu8
#define _m_ _mm_

#if !__has_feature(modules)
/* Ugly hack for backwards-compatibility (compatible with gcc) */
#ifdef __SSE2__
#include <emmintrin.h>
#endif
#endif

#endif /* __SSE__ */

#endif /* __XMMINTRIN_H */