/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
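
/* Usage sketch (editorial illustration, not part of the original API):
   each argument is a 2-bit element index, so broadcasting element 2 of
   a vector through _mm_shuffle_ps (defined below) looks like:

     __m128 __b = _mm_shuffle_ps (__v, __v, _MM_SHUFFLE (2, 2, 2, 2));

   Here _MM_SHUFFLE (2, 2, 2, 2) evaluates to 0xaa, i.e. the index 2 in
   each of the four 2-bit fields of the SHUFPS immediate.  */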

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000
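
/* Illustrative sketch (editorial addition): the masks above partition
   the MXCSR, so a saved value decodes with plain bitwise tests; e.g. to
   check whether a divide-by-zero has been raised and whether that
   exception is currently masked:

     unsigned int __csr = _mm_getcsr ();
     int __raised = (__csr & _MM_EXCEPT_DIV_ZERO) != 0;
     int __masked = (__csr & _MM_MASK_DIV_ZERO) != 0;

   (_mm_getcsr is declared later in this header.)  */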

/* Create a vector of zeros.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}
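
/* Illustrative sketch (editorial addition): the pass-through behavior
   described above means only element 0 is computed.  For example,

     __m128 __a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
     __m128 __b = _mm_set_ps (40.0f, 30.0f, 20.0f, 10.0f);
     __m128 __r = _mm_add_ss (__a, __b);

   leaves __r holding { 11.0f, 2.0f, 3.0f, 4.0f }: element 0 is the
   sum 1.0f + 10.0f, elements 1-3 come unchanged from __a.  */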

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
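
/* Illustrative sketch (editorial addition): because these comparisons
   return all-ones/all-zeros element masks, they combine with the
   logical operations above into a branchless per-element select,
   computing r[i] = a[i] < b[i] ? x[i] : y[i]:

     __m128 __mask = _mm_cmplt_ps (__a, __b);
     __m128 __r = _mm_or_ps (_mm_and_ps (__mask, __x),
                             _mm_andnot_ps (__mask, __y));
   */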

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int __attribute__((__always_inline__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}
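
/* Illustrative note (editorial addition): unlike the _mm_cmp*_ss family
   above, these return a plain int for direct use in branches:

     int __lt = _mm_comilt_ss (__a, __b);

   __lt is 1 when element 0 of __a is strictly less than element 0 of
   __b, and 0 otherwise.  If an operand is a quiet NaN the comparison is
   unordered; the comi forms then raise the invalid-operation exception,
   the ucomi forms do not.  */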

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int __attribute__((__always_inline__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int __attribute__((__always_inline__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}
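
/* Illustrative sketch (editorial addition): the _mm_cvt* forms honor
   the MXCSR rounding mode while the _mm_cvtt* forms always truncate
   toward zero:

     __m128 __v = _mm_set_ss (2.75f);
     int __r = _mm_cvtss_si32 (__v);
     int __t = _mm_cvttss_si32 (__v);

   Under the default round-to-nearest mode __r is 3; __t is 2
   regardless of the rounding mode.  */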

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128 __attribute__((__always_inline__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif
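
/* Usage sketch (editorial illustration): the two low result elements
   are selected from A and the two high ones from B, each by a 2-bit
   index in MASK.  For example,

     __m128 __a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
     __m128 __b = _mm_set_ps (8.0f, 7.0f, 6.0f, 5.0f);
     __m128 __r = _mm_shuffle_ps (__a, __b, _MM_SHUFFLE (3, 2, 1, 0));

   yields __r == { 1.0f, 2.0f, 7.0f, 8.0f }: elements 0-1 are A[0] and
   A[1], elements 2-3 are B[2] and B[3].  */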

/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64 bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64 bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void __attribute__((__always_inline__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
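
/* Illustrative sketch (editorial addition): a typical pattern is to
   save the rounding mode, force a specific one, and restore it:

     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     __m64 __q = _mm_cvtps_pi32 (__v);
     _MM_SET_ROUNDING_MODE (__saved);

   With round-toward-zero in force, _mm_cvtps_pi32 behaves like
   _mm_cvttps_pi32.  */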

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}
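
/* Illustrative sketch (editorial addition): _mm_load_ps requires a
   16-byte aligned address, _mm_loadu_ps accepts any address:

     float __buf[4] __attribute__ ((aligned (16))) = { 1, 2, 3, 4 };
     __m128 __v = _mm_load_ps (__buf);
     __m128 __u = _mm_loadu_ps (__buf);

   Both load { 1, 2, 3, 4 } here; only the unaligned form would also be
   safe on an arbitrarily aligned pointer, typically at some cost.  */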

/* Create the vector [Z Y X W].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

static __inline float __attribute__((__always_inline__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

static __inline void __attribute__((__always_inline__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

static __inline int __attribute__((__always_inline__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N)	__builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
#define _m_pextrw(A, N)		_mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N)	 _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int __attribute__((__always_inline__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N)		_mm_shuffle_pi16 ((A), (N))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void __attribute__((__always_inline__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void __attribute__((__always_inline__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void __attribute__((__always_inline__))
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif
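
/* Usage sketch (editorial illustration): prefetching a fixed distance
   ahead of a streaming loop, with _MM_HINT_T0 requesting the line in
   all cache levels.  The distance (64 here) is an assumption and is
   implementation specific; __process is a placeholder for real work:

     for (__i = 0; __i < __n; __i++)
       {
         _mm_prefetch ((char *) &__a[__i + 64], _MM_HINT_T0);
         __process (__a[__i]);
       }
   */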

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void __attribute__((__always_inline__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
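
/* Illustrative sketch (editorial addition): non-temporal stores are
   weakly ordered, so a writer that fills a buffer with _mm_stream_ps
   typically executes _mm_sfence before telling another thread the data
   is ready:

     for (__i = 0; __i < __n; __i += 4)
       _mm_stream_ps (&__dst[__i], _mm_load_ps (&__src[__i]));
     _mm_sfence ();

   Only after the fence is every streamed store guaranteed to be
   globally visible.  */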

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void __attribute__((__always_inline__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);			\
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);			\
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);			\
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);			\
  (row0) = __builtin_ia32_movlhps (__t0, __t1);				\
  (row1) = __builtin_ia32_movhlps (__t1, __t0);				\
  (row2) = __builtin_ia32_movlhps (__t2, __t3);				\
  (row3) = __builtin_ia32_movhlps (__t3, __t2);				\
} while (0)
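
/* Usage sketch (editorial illustration): transposing a 4x4 matrix held
   in four row vectors, in place (__m is a hypothetical float[16]):

     __m128 __row0 = _mm_load_ps (&__m[0]);
     __m128 __row1 = _mm_load_ps (&__m[4]);
     __m128 __row2 = _mm_load_ps (&__m[8]);
     __m128 __row3 = _mm_load_ps (&__m[12]);
     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);

   Afterwards __row0 holds the original column 0, and so on.  The macro
   both reads and writes each row argument, so pass simple lvalues.  */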

/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */