      1 /* Copyright (C) 2002-2013 Free Software Foundation, Inc.
      2 
      3    This file is part of GCC.
      4 
      5    GCC is free software; you can redistribute it and/or modify
      6    it under the terms of the GNU General Public License as published by
      7    the Free Software Foundation; either version 3, or (at your option)
      8    any later version.
      9 
     10    GCC is distributed in the hope that it will be useful,
     11    but WITHOUT ANY WARRANTY; without even the implied warranty of
     12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     13    GNU General Public License for more details.
     14 
     15    Under Section 7 of GPL version 3, you are granted additional
     16    permissions described in the GCC Runtime Library Exception, version
     17    3.1, as published by the Free Software Foundation.
     18 
     19    You should have received a copy of the GNU General Public License and
     20    a copy of the GCC Runtime Library Exception along with this program;
     21    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     22    <http://www.gnu.org/licenses/>.  */
     23 
     24 /* Implemented from the specification included in the Intel C++ Compiler
     25    User Guide and Reference, version 9.0.  */
     26 
     27 #ifndef _XMMINTRIN_H_INCLUDED
     28 #define _XMMINTRIN_H_INCLUDED
     29 
     30 #ifndef __SSE__
     31 # error "SSE instruction set not enabled"
     32 #else
     33 
     34 /* We need type definitions from the MMX header file.  */
     35 #include <mmintrin.h>
     36 
     37 /* Get _mm_malloc () and _mm_free ().  */
     38 #include <mm_malloc.h>
     39 
     40 /* The Intel API is flexible enough that we must allow aliasing with other
     41    vector types, and their scalar components.  */
     42 typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
     43 
     44 /* Internal data types for implementing the intrinsics.  */
     45 typedef float __v4sf __attribute__ ((__vector_size__ (16)));
     46 
     47 /* Create a selector for use with the SHUFPS instruction.  */
     48 #define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
     49  (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
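
/* Illustrative note: the selector packs four 2-bit element indices into a
   single byte, most significant field first; for example
   _MM_SHUFFLE (3, 2, 1, 0) == (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4,
   which selects every element from its original position.  */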
     50 
     51 /* Constants for use with _mm_prefetch.  */
     52 enum _mm_hint
     53 {
     54   _MM_HINT_T0 = 3,
     55   _MM_HINT_T1 = 2,
     56   _MM_HINT_T2 = 1,
     57   _MM_HINT_NTA = 0
     58 };
     59 
     60 /* Bits in the MXCSR.  */
     61 #define _MM_EXCEPT_MASK       0x003f
     62 #define _MM_EXCEPT_INVALID    0x0001
     63 #define _MM_EXCEPT_DENORM     0x0002
     64 #define _MM_EXCEPT_DIV_ZERO   0x0004
     65 #define _MM_EXCEPT_OVERFLOW   0x0008
     66 #define _MM_EXCEPT_UNDERFLOW  0x0010
     67 #define _MM_EXCEPT_INEXACT    0x0020
     68 
     69 #define _MM_MASK_MASK         0x1f80
     70 #define _MM_MASK_INVALID      0x0080
     71 #define _MM_MASK_DENORM       0x0100
     72 #define _MM_MASK_DIV_ZERO     0x0200
     73 #define _MM_MASK_OVERFLOW     0x0400
     74 #define _MM_MASK_UNDERFLOW    0x0800
     75 #define _MM_MASK_INEXACT      0x1000
     76 
     77 #define _MM_ROUND_MASK        0x6000
     78 #define _MM_ROUND_NEAREST     0x0000
     79 #define _MM_ROUND_DOWN        0x2000
     80 #define _MM_ROUND_UP          0x4000
     81 #define _MM_ROUND_TOWARD_ZERO 0x6000
     82 
     83 #define _MM_FLUSH_ZERO_MASK   0x8000
     84 #define _MM_FLUSH_ZERO_ON     0x8000
     85 #define _MM_FLUSH_ZERO_OFF    0x0000
     86 
     87 /* Create a vector of zeros.  */
     88 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     89 _mm_setzero_ps (void)
     90 {
     91   return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
     92 }
     93 
     94 /* Perform the respective operation on the lower SPFP (single-precision
     95    floating-point) values of A and B; the upper three SPFP values are
     96    passed through from A.  */
     97 
     98 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
     99 _mm_add_ss (__m128 __A, __m128 __B)
    100 {
    101   return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
    102 }
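
/* Illustrative sketch for the scalar (_ss) forms in this group: only element 0
   is computed, elements 1-3 come from the first operand.  With hypothetical
   inputs __a = { 2, 4, 6, 8 } and __b = { 1, 1, 1, 1 } (element 0 listed
   first), _mm_add_ss (__a, __b) yields { 3, 4, 6, 8 }.  */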
    103 
    104 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    105 _mm_sub_ss (__m128 __A, __m128 __B)
    106 {
    107   return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
    108 }
    109 
    110 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    111 _mm_mul_ss (__m128 __A, __m128 __B)
    112 {
    113   return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
    114 }
    115 
    116 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    117 _mm_div_ss (__m128 __A, __m128 __B)
    118 {
    119   return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
    120 }
    121 
    122 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    123 _mm_sqrt_ss (__m128 __A)
    124 {
    125   return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
    126 }
    127 
    128 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    129 _mm_rcp_ss (__m128 __A)
    130 {
    131   return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
    132 }
    133 
    134 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    135 _mm_rsqrt_ss (__m128 __A)
    136 {
    137   return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
    138 }
    139 
    140 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    141 _mm_min_ss (__m128 __A, __m128 __B)
    142 {
    143   return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
    144 }
    145 
    146 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    147 _mm_max_ss (__m128 __A, __m128 __B)
    148 {
    149   return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
    150 }
    151 
    152 /* Perform the respective operation on the four SPFP values in A and B.  */
    153 
    154 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    155 _mm_add_ps (__m128 __A, __m128 __B)
    156 {
    157   return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
    158 }
    159 
    160 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    161 _mm_sub_ps (__m128 __A, __m128 __B)
    162 {
    163   return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
    164 }
    165 
    166 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    167 _mm_mul_ps (__m128 __A, __m128 __B)
    168 {
    169   return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
    170 }
    171 
    172 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    173 _mm_div_ps (__m128 __A, __m128 __B)
    174 {
    175   return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
    176 }
    177 
    178 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    179 _mm_sqrt_ps (__m128 __A)
    180 {
    181   return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
    182 }
    183 
    184 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    185 _mm_rcp_ps (__m128 __A)
    186 {
    187   return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
    188 }
    189 
    190 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    191 _mm_rsqrt_ps (__m128 __A)
    192 {
    193   return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
    194 }
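
/* Illustrative note: _mm_rcp_ps / _mm_rsqrt_ps (and the _ss forms above)
   return hardware approximations with a relative error of at most 1.5*2^-12.
   When full single precision is required, a common refinement for the
   reciprocal is one Newton-Raphson step (sketch with hypothetical names):
     __r = _mm_mul_ps (__r, _mm_sub_ps (_mm_set1_ps (2.0f),
                                        _mm_mul_ps (__a, __r)));  */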
    195 
    196 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    197 _mm_min_ps (__m128 __A, __m128 __B)
    198 {
    199   return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
    200 }
    201 
    202 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    203 _mm_max_ps (__m128 __A, __m128 __B)
    204 {
    205   return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
    206 }
    207 
    208 /* Perform logical bit-wise operations on 128-bit values.  */
    209 
    210 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    211 _mm_and_ps (__m128 __A, __m128 __B)
    212 {
    213   return __builtin_ia32_andps (__A, __B);
    214 }
    215 
    216 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    217 _mm_andnot_ps (__m128 __A, __m128 __B)
    218 {
    219   return __builtin_ia32_andnps (__A, __B);
    220 }
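
/* Illustrative note: _mm_andnot_ps computes (~A) & B, i.e. the complement is
   applied to the first operand, matching the ANDNPS instruction.  With a
   compare result as the first operand, _mm_andnot_ps (__mask, __value) keeps
   the lanes of __value where __mask is all zeros (hypothetical names).  */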
    221 
    222 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    223 _mm_or_ps (__m128 __A, __m128 __B)
    224 {
    225   return __builtin_ia32_orps (__A, __B);
    226 }
    227 
    228 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    229 _mm_xor_ps (__m128 __A, __m128 __B)
    230 {
    231   return __builtin_ia32_xorps (__A, __B);
    232 }
    233 
    234 /* Perform a comparison on the lower SPFP values of A and B.  If the
    235    comparison is true, place a mask of all ones in the result, otherwise a
    236    mask of zeros.  The upper three SPFP values are passed through from A.  */
    237 
    238 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    239 _mm_cmpeq_ss (__m128 __A, __m128 __B)
    240 {
    241   return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
    242 }
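
/* Illustrative note: the "mask of all ones" produced by these comparisons is
   the bit pattern 0xFFFFFFFF (a NaN when read back as a float); it is meant
   to be fed to _mm_and_ps, _mm_andnot_ps and _mm_or_ps to blend values rather
   than to be used as an arithmetic result.  */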
    243 
    244 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    245 _mm_cmplt_ss (__m128 __A, __m128 __B)
    246 {
    247   return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
    248 }
    249 
    250 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    251 _mm_cmple_ss (__m128 __A, __m128 __B)
    252 {
    253   return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
    254 }
    255 
    256 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    257 _mm_cmpgt_ss (__m128 __A, __m128 __B)
    258 {
    259   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
    260 					(__v4sf)
    261 					__builtin_ia32_cmpltss ((__v4sf) __B,
    262 								(__v4sf)
    263 								__A));
    264 }
    265 
    266 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    267 _mm_cmpge_ss (__m128 __A, __m128 __B)
    268 {
    269   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
    270 					(__v4sf)
    271 					__builtin_ia32_cmpless ((__v4sf) __B,
    272 								(__v4sf)
    273 								__A));
    274 }
    275 
    276 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    277 _mm_cmpneq_ss (__m128 __A, __m128 __B)
    278 {
    279   return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
    280 }
    281 
    282 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    283 _mm_cmpnlt_ss (__m128 __A, __m128 __B)
    284 {
    285   return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
    286 }
    287 
    288 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    289 _mm_cmpnle_ss (__m128 __A, __m128 __B)
    290 {
    291   return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
    292 }
    293 
    294 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    295 _mm_cmpngt_ss (__m128 __A, __m128 __B)
    296 {
    297   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
    298 					(__v4sf)
    299 					__builtin_ia32_cmpnltss ((__v4sf) __B,
    300 								 (__v4sf)
    301 								 __A));
    302 }
    303 
    304 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    305 _mm_cmpnge_ss (__m128 __A, __m128 __B)
    306 {
    307   return (__m128) __builtin_ia32_movss ((__v4sf) __A,
    308 					(__v4sf)
    309 					__builtin_ia32_cmpnless ((__v4sf) __B,
    310 								 (__v4sf)
    311 								 __A));
    312 }
    313 
    314 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    315 _mm_cmpord_ss (__m128 __A, __m128 __B)
    316 {
    317   return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
    318 }
    319 
    320 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    321 _mm_cmpunord_ss (__m128 __A, __m128 __B)
    322 {
    323   return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
    324 }
    325 
    326 /* Perform a comparison on the four SPFP values of A and B.  For each
    327    element, if the comparison is true, place a mask of all ones in the
    328    result, otherwise a mask of zeros.  */
    329 
    330 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    331 _mm_cmpeq_ps (__m128 __A, __m128 __B)
    332 {
    333   return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
    334 }
    335 
    336 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    337 _mm_cmplt_ps (__m128 __A, __m128 __B)
    338 {
    339   return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
    340 }
    341 
    342 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    343 _mm_cmple_ps (__m128 __A, __m128 __B)
    344 {
    345   return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
    346 }
    347 
    348 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    349 _mm_cmpgt_ps (__m128 __A, __m128 __B)
    350 {
    351   return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
    352 }
    353 
    354 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    355 _mm_cmpge_ps (__m128 __A, __m128 __B)
    356 {
    357   return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
    358 }
    359 
    360 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    361 _mm_cmpneq_ps (__m128 __A, __m128 __B)
    362 {
    363   return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
    364 }
    365 
    366 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    367 _mm_cmpnlt_ps (__m128 __A, __m128 __B)
    368 {
    369   return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
    370 }
    371 
    372 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    373 _mm_cmpnle_ps (__m128 __A, __m128 __B)
    374 {
    375   return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
    376 }
    377 
    378 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    379 _mm_cmpngt_ps (__m128 __A, __m128 __B)
    380 {
    381   return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
    382 }
    383 
    384 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    385 _mm_cmpnge_ps (__m128 __A, __m128 __B)
    386 {
    387   return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
    388 }
    389 
    390 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    391 _mm_cmpord_ps (__m128 __A, __m128 __B)
    392 {
    393   return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
    394 }
    395 
    396 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    397 _mm_cmpunord_ps (__m128 __A, __m128 __B)
    398 {
    399   return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
    400 }
    401 
    402 /* Compare the lower SPFP values of A and B and return 1 if true
    403    and 0 if false.  */
    404 
    405 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    406 _mm_comieq_ss (__m128 __A, __m128 __B)
    407 {
    408   return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
    409 }
    410 
    411 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    412 _mm_comilt_ss (__m128 __A, __m128 __B)
    413 {
    414   return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
    415 }
    416 
    417 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    418 _mm_comile_ss (__m128 __A, __m128 __B)
    419 {
    420   return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
    421 }
    422 
    423 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    424 _mm_comigt_ss (__m128 __A, __m128 __B)
    425 {
    426   return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
    427 }
    428 
    429 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    430 _mm_comige_ss (__m128 __A, __m128 __B)
    431 {
    432   return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
    433 }
    434 
    435 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    436 _mm_comineq_ss (__m128 __A, __m128 __B)
    437 {
    438   return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
    439 }
    440 
    441 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    442 _mm_ucomieq_ss (__m128 __A, __m128 __B)
    443 {
    444   return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
    445 }
    446 
    447 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    448 _mm_ucomilt_ss (__m128 __A, __m128 __B)
    449 {
    450   return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
    451 }
    452 
    453 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    454 _mm_ucomile_ss (__m128 __A, __m128 __B)
    455 {
    456   return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
    457 }
    458 
    459 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    460 _mm_ucomigt_ss (__m128 __A, __m128 __B)
    461 {
    462   return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
    463 }
    464 
    465 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    466 _mm_ucomige_ss (__m128 __A, __m128 __B)
    467 {
    468   return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
    469 }
    470 
    471 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    472 _mm_ucomineq_ss (__m128 __A, __m128 __B)
    473 {
    474   return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
    475 }
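
/* Illustrative note: the _mm_comi*_ss and _mm_ucomi*_ss families differ only
   in NaN handling; COMISS signals the invalid-operation exception for quiet
   NaN operands, while UCOMISS signals it only for signaling NaNs.  With a NaN
   operand the comparison is unordered, so e.g. _mm_comilt_ss returns 0.  */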
    476 
    477 /* Convert the lower SPFP value to a 32-bit integer according to the current
    478    rounding mode.  */
    479 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    480 _mm_cvtss_si32 (__m128 __A)
    481 {
    482   return __builtin_ia32_cvtss2si ((__v4sf) __A);
    483 }
    484 
    485 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    486 _mm_cvt_ss2si (__m128 __A)
    487 {
    488   return _mm_cvtss_si32 (__A);
    489 }
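
/* Illustrative note: the result depends on the rounding mode in MXCSR (see
   _MM_SET_ROUNDING_MODE below).  Under the default round-to-nearest-even
   mode, 2.5f converts to 2 and 3.5f converts to 4; the _mm_cvtt* variants
   further down always truncate toward zero instead.  */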
    490 
    491 #ifdef __x86_64__
     492 /* Convert the lower SPFP value to a 64-bit integer according to the
     493    current rounding mode.  */
    494 
    495 /* Intel intrinsic.  */
    496 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    497 _mm_cvtss_si64 (__m128 __A)
    498 {
    499   return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
    500 }
    501 
    502 /* Microsoft intrinsic.  */
    503 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    504 _mm_cvtss_si64x (__m128 __A)
    505 {
    506   return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
    507 }
    508 #endif
    509 
    510 /* Convert the two lower SPFP values to 32-bit integers according to the
    511    current rounding mode.  Return the integers in packed form.  */
    512 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    513 _mm_cvtps_pi32 (__m128 __A)
    514 {
    515   return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
    516 }
    517 
    518 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    519 _mm_cvt_ps2pi (__m128 __A)
    520 {
    521   return _mm_cvtps_pi32 (__A);
    522 }
    523 
    524 /* Truncate the lower SPFP value to a 32-bit integer.  */
    525 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    526 _mm_cvttss_si32 (__m128 __A)
    527 {
    528   return __builtin_ia32_cvttss2si ((__v4sf) __A);
    529 }
    530 
    531 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    532 _mm_cvtt_ss2si (__m128 __A)
    533 {
    534   return _mm_cvttss_si32 (__A);
    535 }
    536 
    537 #ifdef __x86_64__
     538 /* Truncate the lower SPFP value to a 64-bit integer.  */
    539 
    540 /* Intel intrinsic.  */
    541 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    542 _mm_cvttss_si64 (__m128 __A)
    543 {
    544   return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
    545 }
    546 
    547 /* Microsoft intrinsic.  */
    548 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    549 _mm_cvttss_si64x (__m128 __A)
    550 {
    551   return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
    552 }
    553 #endif
    554 
    555 /* Truncate the two lower SPFP values to 32-bit integers.  Return the
    556    integers in packed form.  */
    557 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    558 _mm_cvttps_pi32 (__m128 __A)
    559 {
    560   return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
    561 }
    562 
    563 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    564 _mm_cvtt_ps2pi (__m128 __A)
    565 {
    566   return _mm_cvttps_pi32 (__A);
    567 }
    568 
    569 /* Convert B to a SPFP value and insert it as element zero in A.  */
    570 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    571 _mm_cvtsi32_ss (__m128 __A, int __B)
    572 {
    573   return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
    574 }
    575 
    576 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    577 _mm_cvt_si2ss (__m128 __A, int __B)
    578 {
    579   return _mm_cvtsi32_ss (__A, __B);
    580 }
    581 
    582 #ifdef __x86_64__
    583 /* Convert B to a SPFP value and insert it as element zero in A.  */
    584 
    585 /* Intel intrinsic.  */
    586 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    587 _mm_cvtsi64_ss (__m128 __A, long long __B)
    588 {
    589   return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
    590 }
    591 
    592 /* Microsoft intrinsic.  */
    593 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    594 _mm_cvtsi64x_ss (__m128 __A, long long __B)
    595 {
    596   return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
    597 }
    598 #endif
    599 
    600 /* Convert the two 32-bit values in B to SPFP form and insert them
    601    as the two lower elements in A.  */
    602 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    603 _mm_cvtpi32_ps (__m128 __A, __m64 __B)
    604 {
    605   return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
    606 }
    607 
    608 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    609 _mm_cvt_pi2ps (__m128 __A, __m64 __B)
    610 {
    611   return _mm_cvtpi32_ps (__A, __B);
    612 }
    613 
    614 /* Convert the four signed 16-bit values in A to SPFP form.  */
    615 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    616 _mm_cvtpi16_ps (__m64 __A)
    617 {
    618   __v4hi __sign;
    619   __v2si __hisi, __losi;
    620   __v4sf __zero, __ra, __rb;
    621 
    622   /* This comparison against zero gives us a mask that can be used to
    623      fill in the missing sign bits in the unpack operations below, so
    624      that we get signed values after unpacking.  */
    625   __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);
    626 
    627   /* Convert the four words to doublewords.  */
    628   __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
    629   __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
    630 
    631   /* Convert the doublewords to floating point two at a time.  */
    632   __zero = (__v4sf) _mm_setzero_ps ();
    633   __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
    634   __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);
    635 
    636   return (__m128) __builtin_ia32_movlhps (__ra, __rb);
    637 }
    638 
    639 /* Convert the four unsigned 16-bit values in A to SPFP form.  */
    640 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    641 _mm_cvtpu16_ps (__m64 __A)
    642 {
    643   __v2si __hisi, __losi;
    644   __v4sf __zero, __ra, __rb;
    645 
    646   /* Convert the four words to doublewords.  */
    647   __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
    648   __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
    649 
    650   /* Convert the doublewords to floating point two at a time.  */
    651   __zero = (__v4sf) _mm_setzero_ps ();
    652   __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
    653   __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);
    654 
    655   return (__m128) __builtin_ia32_movlhps (__ra, __rb);
    656 }
    657 
    658 /* Convert the low four signed 8-bit values in A to SPFP form.  */
    659 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    660 _mm_cvtpi8_ps (__m64 __A)
    661 {
    662   __v8qi __sign;
    663 
    664   /* This comparison against zero gives us a mask that can be used to
    665      fill in the missing sign bits in the unpack operations below, so
    666      that we get signed values after unpacking.  */
    667   __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);
    668 
    669   /* Convert the four low bytes to words.  */
    670   __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);
    671 
    672   return _mm_cvtpi16_ps(__A);
    673 }
    674 
    675 /* Convert the low four unsigned 8-bit values in A to SPFP form.  */
    676 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    677 _mm_cvtpu8_ps(__m64 __A)
    678 {
    679   __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
    680   return _mm_cvtpu16_ps(__A);
    681 }
    682 
    683 /* Convert the four signed 32-bit values in A and B to SPFP form.  */
    684 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    685 _mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
    686 {
    687   __v4sf __zero = (__v4sf) _mm_setzero_ps ();
    688   __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
    689   __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
    690   return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
    691 }
    692 
    693 /* Convert the four SPFP values in A to four signed 16-bit integers.  */
    694 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    695 _mm_cvtps_pi16(__m128 __A)
    696 {
    697   __v4sf __hisf = (__v4sf)__A;
    698   __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
    699   __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
    700   __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
    701   return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
    702 }
    703 
    704 /* Convert the four SPFP values in A to four signed 8-bit integers.  */
    705 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    706 _mm_cvtps_pi8(__m128 __A)
    707 {
    708   __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
    709   return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
    710 }
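
/* Illustrative note: the conversions above that take or return an __m64 use
   MMX instructions and leave the x87 register stack in MMX state, so code
   that mixes them with x87 floating point should call _mm_empty () from
   <mmintrin.h> before using x87 again.  */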
    711 
    712 /* Selects four specific SPFP values from A and B based on MASK.  */
    713 #ifdef __OPTIMIZE__
    714 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    715 _mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
    716 {
    717   return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
    718 }
    719 #else
    720 #define _mm_shuffle_ps(A, B, MASK)					\
    721   ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A),			\
    722 				   (__v4sf)(__m128)(B), (int)(MASK)))
    723 #endif
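
/* Usage sketch: the two low fields of MASK index into A and the two high
   fields index into B.  For example, with hypothetical vectors __a and __b,
     __m128 __r = _mm_shuffle_ps (__a, __b, _MM_SHUFFLE (3, 2, 1, 0));
   places A[0] and A[1] in the low half of the result and B[2] and B[3] in
   the high half.  */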
    724 
    725 /* Selects and interleaves the upper two SPFP values from A and B.  */
    726 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    727 _mm_unpackhi_ps (__m128 __A, __m128 __B)
    728 {
    729   return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
    730 }
    731 
    732 /* Selects and interleaves the lower two SPFP values from A and B.  */
    733 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    734 _mm_unpacklo_ps (__m128 __A, __m128 __B)
    735 {
    736   return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
    737 }
    738 
    739 /* Sets the upper two SPFP values with 64-bits of data loaded from P;
    740    the lower two values are passed through from A.  */
    741 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    742 _mm_loadh_pi (__m128 __A, __m64 const *__P)
    743 {
    744   return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
    745 }
    746 
    747 /* Stores the upper two SPFP values of A into P.  */
    748 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    749 _mm_storeh_pi (__m64 *__P, __m128 __A)
    750 {
    751   __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
    752 }
    753 
    754 /* Moves the upper two values of B into the lower two values of A.  */
    755 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    756 _mm_movehl_ps (__m128 __A, __m128 __B)
    757 {
    758   return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
    759 }
    760 
    761 /* Moves the lower two values of B into the upper two values of A.  */
    762 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    763 _mm_movelh_ps (__m128 __A, __m128 __B)
    764 {
    765   return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
    766 }
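
/* Illustrative note on the lane layout of the two moves above, listing
   elements from index 0 upward:
     _mm_movehl_ps (__a, __b) yields { B[2], B[3], A[2], A[3] }
     _mm_movelh_ps (__a, __b) yields { A[0], A[1], B[0], B[1] }  */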
    767 
    768 /* Sets the lower two SPFP values with 64-bits of data loaded from P;
    769    the upper two values are passed through from A.  */
    770 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    771 _mm_loadl_pi (__m128 __A, __m64 const *__P)
    772 {
    773   return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
    774 }
    775 
    776 /* Stores the lower two SPFP values of A into P.  */
    777 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    778 _mm_storel_pi (__m64 *__P, __m128 __A)
    779 {
    780   __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
    781 }
    782 
    783 /* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
    784 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    785 _mm_movemask_ps (__m128 __A)
    786 {
    787   return __builtin_ia32_movmskps ((__v4sf)__A);
    788 }
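
/* Usage sketch with hypothetical vectors: combining a packed compare with the
   mask tests all four lanes at once, e.g.
     if (_mm_movemask_ps (_mm_cmpgt_ps (__x, __limit)) != 0)
   holds whenever at least one element of __x exceeds its limit.  */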
    789 
    790 /* Return the contents of the control register.  */
    791 extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    792 _mm_getcsr (void)
    793 {
    794   return __builtin_ia32_stmxcsr ();
    795 }
    796 
    797 /* Read exception bits from the control register.  */
    798 extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    799 _MM_GET_EXCEPTION_STATE (void)
    800 {
    801   return _mm_getcsr() & _MM_EXCEPT_MASK;
    802 }
    803 
    804 extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    805 _MM_GET_EXCEPTION_MASK (void)
    806 {
    807   return _mm_getcsr() & _MM_MASK_MASK;
    808 }
    809 
    810 extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    811 _MM_GET_ROUNDING_MODE (void)
    812 {
    813   return _mm_getcsr() & _MM_ROUND_MASK;
    814 }
    815 
    816 extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    817 _MM_GET_FLUSH_ZERO_MODE (void)
    818 {
    819   return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
    820 }
    821 
    822 /* Set the control register to I.  */
    823 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    824 _mm_setcsr (unsigned int __I)
    825 {
    826   __builtin_ia32_ldmxcsr (__I);
    827 }
    828 
    829 /* Set exception bits in the control register.  */
    830 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    831 _MM_SET_EXCEPTION_STATE(unsigned int __mask)
    832 {
    833   _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
    834 }
    835 
    836 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    837 _MM_SET_EXCEPTION_MASK (unsigned int __mask)
    838 {
    839   _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
    840 }
    841 
    842 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    843 _MM_SET_ROUNDING_MODE (unsigned int __mode)
    844 {
    845   _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
    846 }
    847 
    848 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    849 _MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
    850 {
    851   _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
    852 }
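
/* Usage sketch: these helpers read-modify-write MXCSR.  Switching to
   truncation for a block of conversions and then restoring the previous mode
   could look like (hypothetical variable name):
     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     ...
     _MM_SET_ROUNDING_MODE (__saved);  */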
    853 
    854 /* Create a vector with element 0 as F and the rest zero.  */
    855 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    856 _mm_set_ss (float __F)
    857 {
    858   return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
    859 }
    860 
    861 /* Create a vector with all four elements equal to F.  */
    862 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    863 _mm_set1_ps (float __F)
    864 {
    865   return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
    866 }
    867 
    868 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    869 _mm_set_ps1 (float __F)
    870 {
    871   return _mm_set1_ps (__F);
    872 }
    873 
    874 /* Create a vector with element 0 as *P and the rest zero.  */
    875 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    876 _mm_load_ss (float const *__P)
    877 {
    878   return _mm_set_ss (*__P);
    879 }
    880 
    881 /* Create a vector with all four elements equal to *P.  */
    882 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    883 _mm_load1_ps (float const *__P)
    884 {
    885   return _mm_set1_ps (*__P);
    886 }
    887 
    888 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    889 _mm_load_ps1 (float const *__P)
    890 {
    891   return _mm_load1_ps (__P);
    892 }
    893 
    894 /* Load four SPFP values from P.  The address must be 16-byte aligned.  */
    895 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    896 _mm_load_ps (float const *__P)
    897 {
    898   return (__m128) *(__v4sf *)__P;
    899 }
    900 
    901 /* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
    902 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    903 _mm_loadu_ps (float const *__P)
    904 {
    905   return (__m128) __builtin_ia32_loadups (__P);
    906 }
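
/* Illustrative note: _mm_load_ps above requires a 16-byte aligned address and
   may fault otherwise, whereas _mm_loadu_ps accepts any float pointer, e.g.
     __m128 __v = _mm_loadu_ps (&__array[1]);
   for an ordinary float array __array (hypothetical name).  */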
    907 
    908 /* Load four SPFP values in reverse order.  The address must be aligned.  */
    909 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    910 _mm_loadr_ps (float const *__P)
    911 {
    912   __v4sf __tmp = *(__v4sf *)__P;
    913   return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
    914 }
    915 
    916 /* Create the vector [Z Y X W].  */
    917 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    918 _mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
    919 {
    920   return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
    921 }
    922 
    923 /* Create the vector [W X Y Z].  */
    924 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    925 _mm_setr_ps (float __Z, float __Y, float __X, float __W)
    926 {
    927   return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
    928 }
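
/* Illustrative note on the argument order of the two constructors above:
     _mm_set_ps  (3.0f, 2.0f, 1.0f, 0.0f) puts 0.0f in element 0, while
     _mm_setr_ps (3.0f, 2.0f, 1.0f, 0.0f) puts 3.0f in element 0,
   so _mm_setr_ps matches the in-memory order used by _mm_load_ps.  */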
    929 
    930 /* Stores the lower SPFP value.  */
    931 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    932 _mm_store_ss (float *__P, __m128 __A)
    933 {
    934   *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
    935 }
    936 
    937 extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    938 _mm_cvtss_f32 (__m128 __A)
    939 {
    940   return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
    941 }
    942 
    943 /* Store four SPFP values.  The address must be 16-byte aligned.  */
    944 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    945 _mm_store_ps (float *__P, __m128 __A)
    946 {
    947   *(__v4sf *)__P = (__v4sf)__A;
    948 }
    949 
    950 /* Store four SPFP values.  The address need not be 16-byte aligned.  */
    951 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    952 _mm_storeu_ps (float *__P, __m128 __A)
    953 {
    954   __builtin_ia32_storeups (__P, (__v4sf)__A);
    955 }
    956 
    957 /* Store the lower SPFP value across four words.  */
    958 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    959 _mm_store1_ps (float *__P, __m128 __A)
    960 {
    961   __v4sf __va = (__v4sf)__A;
    962   __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
    963   _mm_storeu_ps (__P, __tmp);
    964 }
    965 
    966 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    967 _mm_store_ps1 (float *__P, __m128 __A)
    968 {
    969   _mm_store1_ps (__P, __A);
    970 }
    971 
    972 /* Store four SPFP values in reverse order.  The address must be aligned.  */
    973 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    974 _mm_storer_ps (float *__P, __m128 __A)
    975 {
    976   __v4sf __va = (__v4sf)__A;
    977   __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
    978   _mm_store_ps (__P, __tmp);
    979 }
    980 
    981 /* Sets the low SPFP value of A from the low value of B.  */
    982 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    983 _mm_move_ss (__m128 __A, __m128 __B)
    984 {
    985   return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
    986 }
    987 
    988 /* Extracts one of the four words of A.  The selector N must be immediate.  */
    989 #ifdef __OPTIMIZE__
    990 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    991 _mm_extract_pi16 (__m64 const __A, int const __N)
    992 {
    993   return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
    994 }
    995 
    996 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    997 _m_pextrw (__m64 const __A, int const __N)
    998 {
    999   return _mm_extract_pi16 (__A, __N);
   1000 }
   1001 #else
   1002 #define _mm_extract_pi16(A, N)	\
   1003   ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))
   1004 
   1005 #define _m_pextrw(A, N) _mm_extract_pi16(A, N)
   1006 #endif
   1007 
   1008 /* Inserts word D into one of four words of A.  The selector N must be
   1009    immediate.  */
   1010 #ifdef __OPTIMIZE__
   1011 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1012 _mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
   1013 {
   1014   return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
   1015 }
   1016 
   1017 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1018 _m_pinsrw (__m64 const __A, int const __D, int const __N)
   1019 {
   1020   return _mm_insert_pi16 (__A, __D, __N);
   1021 }
   1022 #else
   1023 #define _mm_insert_pi16(A, D, N)				\
   1024   ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A),	\
   1025 					(int)(D), (int)(N)))
   1026 
   1027 #define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
   1028 #endif
   1029 
   1030 /* Compute the element-wise maximum of signed 16-bit values.  */
   1031 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1032 _mm_max_pi16 (__m64 __A, __m64 __B)
   1033 {
   1034   return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
   1035 }
   1036 
   1037 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1038 _m_pmaxsw (__m64 __A, __m64 __B)
   1039 {
   1040   return _mm_max_pi16 (__A, __B);
   1041 }
   1042 
   1043 /* Compute the element-wise maximum of unsigned 8-bit values.  */
   1044 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1045 _mm_max_pu8 (__m64 __A, __m64 __B)
   1046 {
   1047   return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
   1048 }
   1049 
   1050 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1051 _m_pmaxub (__m64 __A, __m64 __B)
   1052 {
   1053   return _mm_max_pu8 (__A, __B);
   1054 }
   1055 
   1056 /* Compute the element-wise minimum of signed 16-bit values.  */
   1057 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1058 _mm_min_pi16 (__m64 __A, __m64 __B)
   1059 {
   1060   return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
   1061 }
   1062 
   1063 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1064 _m_pminsw (__m64 __A, __m64 __B)
   1065 {
   1066   return _mm_min_pi16 (__A, __B);
   1067 }
   1068 
   1069 /* Compute the element-wise minimum of unsigned 8-bit values.  */
   1070 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1071 _mm_min_pu8 (__m64 __A, __m64 __B)
   1072 {
   1073   return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
   1074 }
   1075 
   1076 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1077 _m_pminub (__m64 __A, __m64 __B)
   1078 {
   1079   return _mm_min_pu8 (__A, __B);
   1080 }
   1081 
   1082 /* Create an 8-bit mask of the signs of 8-bit values.  */
   1083 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1084 _mm_movemask_pi8 (__m64 __A)
   1085 {
   1086   return __builtin_ia32_pmovmskb ((__v8qi)__A);
   1087 }
   1088 
   1089 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1090 _m_pmovmskb (__m64 __A)
   1091 {
   1092   return _mm_movemask_pi8 (__A);
   1093 }
   1094 
   1095 /* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   1096    in B and produce the high 16 bits of the 32-bit results.  */
   1097 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1098 _mm_mulhi_pu16 (__m64 __A, __m64 __B)
   1099 {
   1100   return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
   1101 }
   1102 
   1103 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1104 _m_pmulhuw (__m64 __A, __m64 __B)
   1105 {
   1106   return _mm_mulhi_pu16 (__A, __B);
   1107 }
   1108 
   1109 /* Return a combination of the four 16-bit values in A.  The selector
   1110    must be an immediate.  */
   1111 #ifdef __OPTIMIZE__
   1112 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1113 _mm_shuffle_pi16 (__m64 __A, int const __N)
   1114 {
   1115   return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
   1116 }
   1117 
   1118 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1119 _m_pshufw (__m64 __A, int const __N)
   1120 {
   1121   return _mm_shuffle_pi16 (__A, __N);
   1122 }
   1123 #else
   1124 #define _mm_shuffle_pi16(A, N) \
   1125   ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))
   1126 
   1127 #define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
   1128 #endif
   1129 
   1130 /* Conditionally store byte elements of A into P.  The high bit of each
   1131    byte in the selector N determines whether the corresponding byte from
   1132    A is stored.  */
   1133 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1134 _mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
   1135 {
   1136   __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
   1137 }
   1138 
   1139 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1140 _m_maskmovq (__m64 __A, __m64 __N, char *__P)
   1141 {
   1142   _mm_maskmove_si64 (__A, __N, __P);
   1143 }
   1144 
   1145 /* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
   1146 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1147 _mm_avg_pu8 (__m64 __A, __m64 __B)
   1148 {
   1149   return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
   1150 }
   1151 
   1152 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1153 _m_pavgb (__m64 __A, __m64 __B)
   1154 {
   1155   return _mm_avg_pu8 (__A, __B);
   1156 }
   1157 
   1158 /* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
   1159 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1160 _mm_avg_pu16 (__m64 __A, __m64 __B)
   1161 {
   1162   return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
   1163 }
   1164 
   1165 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1166 _m_pavgw (__m64 __A, __m64 __B)
   1167 {
   1168   return _mm_avg_pu16 (__A, __B);
   1169 }
   1170 
   1171 /* Compute the sum of the absolute differences of the unsigned 8-bit
   1172    values in A and B.  Return the value in the lower 16-bit word; the
   1173    upper words are cleared.  */
   1174 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1175 _mm_sad_pu8 (__m64 __A, __m64 __B)
   1176 {
   1177   return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
   1178 }
   1179 
   1180 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1181 _m_psadbw (__m64 __A, __m64 __B)
   1182 {
   1183   return _mm_sad_pu8 (__A, __B);
   1184 }
   1185 
   1186 /* Loads one cache line from address P to a location "closer" to the
   1187    processor.  The selector I specifies the type of prefetch operation.  */
   1188 #ifdef __OPTIMIZE__
   1189 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1190 _mm_prefetch (const void *__P, enum _mm_hint __I)
   1191 {
   1192   __builtin_prefetch (__P, 0, __I);
   1193 }
   1194 #else
   1195 #define _mm_prefetch(P, I) \
   1196   __builtin_prefetch ((P), 0, (I))
   1197 #endif
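
/* Usage sketch: prefetching ahead of a streaming loop, where the hint selects
   how high in the cache hierarchy the line is kept (hypothetical names):
     _mm_prefetch (&__data[__i + 16], _MM_HINT_T0);
   _MM_HINT_NTA requests a non-temporal prefetch that minimizes cache
   pollution.  */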
   1198 
   1199 /* Stores the data in A to the address P without polluting the caches.  */
   1200 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1201 _mm_stream_pi (__m64 *__P, __m64 __A)
   1202 {
   1203   __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
   1204 }
   1205 
   1206 /* Likewise.  The address must be 16-byte aligned.  */
   1207 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1208 _mm_stream_ps (float *__P, __m128 __A)
   1209 {
   1210   __builtin_ia32_movntps (__P, (__v4sf)__A);
   1211 }
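
/* Illustrative note: these non-temporal stores are weakly ordered, so a
   writer normally ends a streaming sequence with _mm_sfence () (defined next)
   before the data is read by another thread or device.  */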
   1212 
   1213 /* Guarantees that every preceding store is globally visible before
   1214    any subsequent store.  */
   1215 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1216 _mm_sfence (void)
   1217 {
   1218   __builtin_ia32_sfence ();
   1219 }
   1220 
    1221 /* The execution of the next instruction is delayed by an
    1222    implementation-specific amount of time.  The instruction does not
    1223    modify the architectural state.  */
   1224 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
   1225 _mm_pause (void)
   1226 {
   1227   __builtin_ia32_pause ();
   1228 }
   1229 
   1230 /* Transpose the 4x4 matrix composed of row[0-3].  */
   1231 #define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
   1232 do {									\
   1233   __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
   1234   __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);			\
   1235   __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);			\
   1236   __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);			\
   1237   __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);			\
   1238   (row0) = __builtin_ia32_movlhps (__t0, __t1);				\
   1239   (row1) = __builtin_ia32_movhlps (__t1, __t0);				\
   1240   (row2) = __builtin_ia32_movlhps (__t2, __t3);				\
   1241   (row3) = __builtin_ia32_movhlps (__t3, __t2);				\
   1242 } while (0)
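
/* Usage sketch, assuming four hypothetical __m128 rows loaded with
   _mm_load_ps:
     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);
   afterwards __row0 holds the original column 0, __row1 column 1, and so
   on.  */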
   1243 
   1244 /* For backward source compatibility.  */
   1245 #ifdef __SSE2__
   1246 # include <emmintrin.h>
   1247 #endif
   1248 
   1249 #endif /* __SSE__ */
   1250 #endif /* _XMMINTRIN_H_INCLUDED */
   1251