/* APPLE LOCAL file 5612787 mainline sse4 */
/* Copyright (C) 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0.  */

#ifndef _SMMINTRIN_H_INCLUDED
#define _SMMINTRIN_H_INCLUDED

#ifndef __SSE4_1__
# error "SSE4.1 instruction set not enabled"
#else

/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
   files.  */
#include <tmmintrin.h>

/* SSE4.1 */

/* Rounding mode macros.  */
#define _MM_FROUND_TO_NEAREST_INT	0x00
#define _MM_FROUND_TO_NEG_INF		0x01
#define _MM_FROUND_TO_POS_INF		0x02
#define _MM_FROUND_TO_ZERO		0x03
#define _MM_FROUND_CUR_DIRECTION	0x04

#define _MM_FROUND_RAISE_EXC		0x00
#define _MM_FROUND_NO_EXC		0x08

#define _MM_FROUND_NINT		\
  (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR	\
  (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL		\
  (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC	\
  (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT		\
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT	\
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)

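/* Usage sketch (illustrative only, not part of the original header):
   the rounding intrinsics declared below take an immediate composed
   from these macros, e.g.

     __m128 __v = _mm_set_ps (3.5f, -1.25f, 0.5f, 2.75f);
     __m128 __r = _mm_round_ps (__v, _MM_FROUND_TO_NEG_INF
                                     | _MM_FROUND_NO_EXC);

   rounds each element toward negative infinity without raising a
   precision exception; _MM_FROUND_FLOOR is the same mode with
   exceptions enabled.  */
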
/* APPLE LOCAL begin nodebug inline 4152603 */
#define __always_inline__ __always_inline__, __nodebug__
/* APPLE LOCAL end nodebug inline 4152603 */

/* APPLE LOCAL begin radar 5618945 */
#undef __STATIC_INLINE
#ifdef __GNUC_STDC_INLINE__
#define __STATIC_INLINE __inline
#else
#define __STATIC_INLINE static __inline
#endif
/* APPLE LOCAL end radar 5618945 */

/* Integer blend instructions - select data from 2 sources using
   constant/variable mask.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
					      (__v8hi)__Y,
					      __M);
}
#else
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M)))
#endif

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X,
					       (__v16qi)__Y,
					       (__v16qi)__M);
}

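/* Usage sketch (illustrative only): _mm_blend_epi16 takes each word
   from __Y where the corresponding bit of the 8-bit immediate is set
   and from __X otherwise; _mm_blendv_epi8 selects per byte, using the
   sign bit of each byte of the variable mask.  With two hypothetical
   vectors:

     __m128i __a = _mm_set1_epi16 (1);
     __m128i __b = _mm_set1_epi16 (2);
     __m128i __r = _mm_blend_epi16 (__a, __b, 0xAA);

   __r holds __b in words 1, 3, 5 and 7 and __a elsewhere.  */
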
/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128 __attribute__((__always_inline__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
					  (__v4sf)__Y,
					  __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M)))
#endif

__STATIC_INLINE __m128 __attribute__((__always_inline__))
_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__X,
					   (__v4sf)__Y,
					   (__v4sf)__M);
}

/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128d __attribute__((__always_inline__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
					   (__v2df)__Y,
					   __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M)))
#endif

__STATIC_INLINE __m128d __attribute__((__always_inline__))
_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X,
					    (__v2df)__Y,
					    (__v2df)__M);
}

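/* Usage sketch (illustrative only): for the variable-mask forms the
   selection is driven by the sign bit of each mask element, so a
   comparison result can serve as the mask directly:

     __m128 __x   = _mm_setr_ps (1.0f, -2.0f, 3.0f, -4.0f);
     __m128 __neg = _mm_cmplt_ps (__x, _mm_setzero_ps ());
     __m128 __abs = _mm_blendv_ps (__x,
				   _mm_sub_ps (_mm_setzero_ps (), __x),
				   __neg);

   picks the negated value wherever __x is negative, giving a
   branchless absolute value.  */
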
/* Dot product instructions; the mask defines which elements are
   multiplied and summed and which parts of the result are zeroed.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128 __attribute__((__always_inline__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
				       (__v4sf)__Y,
				       __M);
}

__STATIC_INLINE __m128d __attribute__((__always_inline__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
					(__v2df)__Y,
					__M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M)))
#endif

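/* Usage sketch (illustrative only): for _mm_dp_ps the high nibble of
   the mask selects which products enter the sum and the low nibble
   selects which result elements receive it (the rest are zeroed).
   With hypothetical vectors __x and __y:

     __m128 __dot4 = _mm_dp_ps (__x, __y, 0xFF);
     __m128 __dot3 = _mm_dp_ps (__x, __y, 0x71);

   __dot4 broadcasts the 4-element dot product to all lanes; __dot3
   sums only elements 0..2 and stores the result in lane 0.  */
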
/* Packed integer 64-bit comparison, zeroing or filling with ones the
   corresponding parts of the result.  */
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y);
}

/* Min/max packed integer instructions.  */

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_min_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_max_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_min_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_max_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_min_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_max_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_min_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_max_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication with truncation of upper
   halves of results.  */
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_mullo_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication of 2 pairs of operands
   with two 64-bit results.  */
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_mul_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
}

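/* Usage sketch (illustrative only): _mm_mullo_epi32 keeps the low 32
   bits of all four products, while _mm_mul_epi32 multiplies only the
   even-numbered elements (0 and 2) and returns the two full signed
   64-bit products.  With hypothetical vectors __a and __b:

     __m128i __low  = _mm_mullo_epi32 (__a, __b);
     __m128i __wide = _mm_mul_epi32 (__a, __b);
*/
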
/* Packed integer 128-bit bitwise comparison. Return 1 if
   (__V & __M) == 0.  */
__STATIC_INLINE int __attribute__((__always_inline__))
_mm_testz_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison. Return 1 if
   (__V & ~__M) == 0.  */
__STATIC_INLINE int __attribute__((__always_inline__))
_mm_testc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison. Return 1 if
   (__V & __M) != 0 && (__V & ~__M) != 0.  */
__STATIC_INLINE int __attribute__((__always_inline__))
_mm_testnzc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V);
}

/* Macros for packed integer 128-bit comparison intrinsics.  */
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))

#define _mm_test_all_ones(V) \
  _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))

#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))

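/* Usage sketch (illustrative only): a common pattern is testing
   whether a vector comparison matched anywhere, without extracting a
   movemask first:

     __m128i __eq = _mm_cmpeq_epi8 (__data, __needle);
     if (!_mm_test_all_zeros (__eq, __eq))
       {
	 ... at least one byte matched ...
       }

   __data and __needle are hypothetical vectors.  */
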
/* Insert single precision float into packed single precision array
   element selected by index N.  Bits [7:6] of N select the source
   element of S, bits [5:4] select the destination element of D, and
   bits [3:0] give the zeroing mask applied to the result.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128 __attribute__((__always_inline__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
					      (__v4sf)__S,
					      __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps.  */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))

/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE int __attribute__((__always_inline__))
_mm_extract_ps (__m128 __X, const int __N)
{
  union { int i; float f; } __tmp;
  __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__ 						\
   ({								\
      union { int i; float f; } __tmp;				\
      __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N));	\
      __tmp.i;							\
    })								\
   )
#endif

/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N.  */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }

/* Extract specified single precision float element into the lower
   part of __m128.  */
#define _MM_PICK_OUT_PS(X, N)				\
  _mm_insert_ps (_mm_setzero_ps (), (X), 		\
		 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))

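/* Usage sketch (illustrative only): copying element 2 of __s into
   element 1 of __d while zeroing element 3 of the result, then reading
   back the raw bit pattern of that lane:

     __m128 __r    = _mm_insert_ps (__d, __s,
				    _MM_MK_INSERTPS_NDX (2, 1, 0x8));
     int    __bits = _mm_extract_ps (__r, 1);

   __d and __s are hypothetical vectors; the index arguments must be
   compile-time constants.  */
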
/* Insert integer, S, into packed integer array element of D
   selected by index N.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
						 __S, __N);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
						 __S, __N);
}

#ifdef __x86_64__
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
						 __S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N)))
#endif
#endif

/* Extract integer from packed integer array element of X selected by
   index N.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE int __attribute__((__always_inline__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
   return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
   return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

#ifdef __x86_64__
__STATIC_INLINE long long __attribute__((__always_inline__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
#define _mm_extract_epi8(X, N) \
  __builtin_ia32_vec_ext_v16qi ((__v16qi)(X), (N))
#define _mm_extract_epi32(X, N) \
  __builtin_ia32_vec_ext_v4si ((__v4si)(X), (N))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N)))
#endif
#endif

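/* Usage sketch (illustrative only): replacing the third 32-bit lane of
   a vector and reading a byte lane back; the index arguments must be
   compile-time constants:

     __m128i __v  = _mm_setr_epi32 (10, 20, 30, 40);
     __m128i __w  = _mm_insert_epi32 (__v, 99, 2);
     int     __b0 = _mm_extract_epi8 (__w, 0);
*/
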
/* Return horizontal packed word minimum and its index in bits [15:0]
   and bits [18:16] respectively.  */
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_minpos_epu16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
}

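/* Usage sketch (illustrative only): the minimum value and its position
   can be pulled out of the result with the SSE2 word-extract intrinsic:

     __m128i __r      = _mm_minpos_epu16 (__v);
     int     __minval = _mm_extract_epi16 (__r, 0);
     int     __minidx = _mm_extract_epi16 (__r, 1) & 0x7;

   __v is a hypothetical vector of eight unsigned words.  */
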
/* Packed/scalar double precision floating point rounding.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128d __attribute__((__always_inline__))
_mm_round_pd (__m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
}

__STATIC_INLINE __m128d __attribute__((__always_inline__))
_mm_round_sd (__m128d __D, __m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
					   (__v2df)__V,
					   __M);
}
#else
#define _mm_round_pd(V, M) \
  ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M)))

#define _mm_round_sd(D, V, M) \
  ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M)))
#endif

/* Packed/scalar single precision floating point rounding.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128 __attribute__((__always_inline__))
_mm_round_ps (__m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
}

__STATIC_INLINE __m128 __attribute__((__always_inline__))
_mm_round_ss (__m128 __D, __m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
					  (__v4sf)__V,
					  __M);
}
#else
#define _mm_round_ps(V, M) \
  ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M)))

#define _mm_round_ss(D, V, M) \
  ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M)))
#endif

/* Macros for ceil/floor intrinsics.  */
#define _mm_ceil_pd(V)	   _mm_round_pd ((V), _MM_FROUND_CEIL)
#define _mm_ceil_sd(D, V)  _mm_round_sd ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_pd(V)	   _mm_round_pd ((V), _MM_FROUND_FLOOR)
#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)

#define _mm_ceil_ps(V)	   _mm_round_ps ((V), _MM_FROUND_CEIL)
#define _mm_ceil_ss(D, V)  _mm_round_ss ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_ps(V)	   _mm_round_ps ((V), _MM_FROUND_FLOOR)
#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)

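/* Usage sketch (illustrative only): the macros above expand to
   _mm_round_* with a fixed rounding mode, e.g.

     __m128d __v = _mm_setr_pd (-2.5, 2.5);
     __m128d __f = _mm_floor_pd (__v);
     __m128d __c = _mm_ceil_pd (__v);

   giving {-3.0, 2.0} in __f and {-2.0, 3.0} in __c.  */
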
/* Packed integer sign-extension.  */

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepi8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepi16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepi8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepi32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepi16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepi8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X);
}

/* Packed integer zero-extension.  */

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepu8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepu16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepu8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepu32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepu16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cvtepu8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X);
}

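/* Usage sketch (illustrative only): widening the low four unsigned
   bytes of a vector to 32-bit lanes, e.g. before converting pixel data
   to float:

     __m128i __bytes  = _mm_loadu_si128 ((const __m128i *) __src);
     __m128i __dwords = _mm_cvtepu8_epi32 (__bytes);
     __m128  __floats = _mm_cvtepi32_ps (__dwords);

   __src is a hypothetical pointer to at least 16 bytes; only the low
   four bytes of __bytes are consumed by the widening step.  */
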
/* Pack 8 double words from 2 operands into 8 words of result with
   unsigned saturation.  */
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_packus_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y);
}

/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands.  Starting offsets within
   operands are determined by the 3rd mask operand.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
					      (__v16qi)__Y, __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M)))
#endif

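/* Usage sketch (illustrative only): with mask 0x0 the instruction
   compares the 4-byte block at offset 0 of the second operand against
   the eight overlapping 4-byte blocks at offsets 0..7 of the first,
   producing eight 16-bit sums of absolute differences:

     __m128i __sads = _mm_mpsadbw_epu8 (__haystack, __needle, 0x0);
     __m128i __best = _mm_minpos_epu16 (__sads);

   __haystack and __needle are hypothetical vectors; this pairing is a
   common building block for small-block motion search.  */
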
/* Load double quadword using non-temporal aligned hint.  */
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_stream_load_si128 (__m128i *__X)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X);
}

#ifdef __SSE4_2__

/* These macros specify the source data format.  */
#define SIDD_UBYTE_OPS			0x00
#define SIDD_UWORD_OPS			0x01
#define SIDD_SBYTE_OPS			0x02
#define SIDD_SWORD_OPS			0x03

/* These macros specify the comparison operation.  */
#define SIDD_CMP_EQUAL_ANY		0x00
#define SIDD_CMP_RANGES			0x04
#define SIDD_CMP_EQUAL_EACH		0x08
#define SIDD_CMP_EQUAL_ORDERED		0x0c

/* These macros specify the polarity.  */
#define SIDD_POSITIVE_POLARITY		0x00
#define SIDD_NEGATIVE_POLARITY		0x10
#define SIDD_MASKED_POSITIVE_POLARITY	0x20
#define SIDD_MASKED_NEGATIVE_POLARITY	0x30

/* These macros specify the output selection in _mm_cmpXstri ().  */
#define SIDD_LEAST_SIGNIFICANT		0x00
#define SIDD_MOST_SIGNIFICANT		0x40

/* These macros specify the output selection in _mm_cmpXstrm ().  */
#define SIDD_BIT_MASK			0x00
#define SIDD_UNIT_MASK			0x40

/* Intrinsics for text/string processing.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
						(__v16qi)__Y,
						__M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
				      (__v16qi)__Y,
				      __M);
}

__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
						(__v16qi)__Y, __LY,
						__M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
				      (__v16qi)__Y, __LY,
				      __M);
}
#else
#define _mm_cmpistrm(X, Y, M) \
  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(X), (__v16qi)(Y), (M)))
#define _mm_cmpistri(X, Y, M) \
  __builtin_ia32_pcmpistri128 ((__v16qi)(X), (__v16qi)(Y), (M))

#define _mm_cmpestrm(X, LX, Y, LY, M) \
  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(X), (int)(LX), \
					  (__v16qi)(Y), (int)(LY), (M)))
#define _mm_cmpestri(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestri128 ((__v16qi)(X), (int)(LX), \
			       (__v16qi)(Y), (int)(LY), (M))
#endif

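/* Usage sketch (illustrative only): finding the first byte of __text
   that matches any byte of a small character set, both held in
   vectors and implicitly terminated by a zero byte:

     int __idx = _mm_cmpistri (__set, __text,
			       SIDD_UBYTE_OPS
			       | SIDD_CMP_EQUAL_ANY
			       | SIDD_LEAST_SIGNIFICANT);

   __idx is the index of the first match within __text, or 16 when no
   byte matched; __set and __text are hypothetical vectors.  */
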
/* Intrinsics for text/string processing and reading values of
   EFlags.  */

#ifdef __OPTIMIZE__
__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
				       (__v16qi)__Y,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}

__STATIC_INLINE int __attribute__((__always_inline__))
_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
				       (__v16qi)__Y, __LY,
				       __M);
}
#else
#define _mm_cmpistra(X, Y, M) \
  __builtin_ia32_pcmpistria128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrc(X, Y, M) \
  __builtin_ia32_pcmpistric128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistro(X, Y, M) \
  __builtin_ia32_pcmpistrio128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrs(X, Y, M) \
  __builtin_ia32_pcmpistris128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrz(X, Y, M) \
  __builtin_ia32_pcmpistriz128 ((__v16qi)(X), (__v16qi)(Y), (M))

#define _mm_cmpestra(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestria128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrc(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestric128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestro(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestrio128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrs(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestris128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrz(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestriz128 ((__v16qi)(X), (int)(LX), \
				(__v16qi)(Y), (int)(LY), (M))
#endif

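/* Usage sketch (illustrative only): the flag-reading forms are handy
   as loop controls when scanning a string 16 bytes at a time; CF
   (_mm_cmpistrc) reports a match and ZF (_mm_cmpistrz) reports that
   the chunk contained the terminating zero byte.  With hypothetical
   vectors __set and __chunk:

     int __has_match = _mm_cmpistrc (__set, __chunk,
				     SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY);
     int __at_end    = _mm_cmpistrz (__set, __chunk,
				     SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY);
*/
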
/* Packed integer 64-bit comparison, zeroing or filling with ones the
   corresponding parts of the result.  */
__STATIC_INLINE __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y);
}

/* Calculate the number of bits set to 1.  */
__STATIC_INLINE int __attribute__((__always_inline__))
_mm_popcnt_u32 (unsigned int __X)
{
  return __builtin_popcount (__X);
}

#ifdef __x86_64__
__STATIC_INLINE long long __attribute__((__always_inline__))
_mm_popcnt_u64 (unsigned long long __X)
{
  return __builtin_popcountll (__X);
}
#endif

/* Accumulate CRC32 (polynomial 0x11EDC6F41) value.  */
__STATIC_INLINE unsigned int __attribute__((__always_inline__))
_mm_crc32_u8 (unsigned int __C, unsigned char __V)
{
  return __builtin_ia32_crc32qi (__C, __V);
}

__STATIC_INLINE unsigned int __attribute__((__always_inline__))
_mm_crc32_u16 (unsigned int __C, unsigned short __V)
{
  return __builtin_ia32_crc32hi (__C, __V);
}

__STATIC_INLINE unsigned int __attribute__((__always_inline__))
_mm_crc32_u32 (unsigned int __C, unsigned int __V)
{
  return __builtin_ia32_crc32si (__C, __V);
}

#ifdef __x86_64__
__STATIC_INLINE unsigned long long __attribute__((__always_inline__))
_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
{
  return __builtin_ia32_crc32di (__C, __V);
}
#endif

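/* Usage sketch (illustrative only): accumulating CRC32-C over a byte
   buffer; each call takes the running CRC and returns the updated
   value, so the calls chain naturally:

     unsigned int __crc = 0xffffffff;
     for (size_t __i = 0; __i < __len; __i++)
       __crc = _mm_crc32_u8 (__crc, __buf[__i]);

   __buf and __len describe a hypothetical input buffer.  */
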
#endif /* __SSE4_2__ */

#endif /* __SSE4_1__ */

/* APPLE LOCAL begin nodebug inline 4152603 */
#undef __always_inline__
/* APPLE LOCAL end nodebug inline 4152603 */

#endif /* _SMMINTRIN_H_INCLUDED */