Home | History | Annotate | Download | only in common
      1 /****************************************************************************
      2 * Copyright (C) 2017 Intel Corporation.   All Rights Reserved.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice (including the next
     12 * paragraph) shall be included in all copies or substantial portions of the
     13 * Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21 * IN THE SOFTWARE.
     22 ****************************************************************************/
// Usage check: __SIMD_LIB_AVX2_HPP__ is expected to be defined by the umbrella
// header "simdlib.hpp" before it includes this file; a direct include fails here.
#if !defined(__SIMD_LIB_AVX2_HPP__)
#error Do not include this file directly, use "simdlib.hpp" instead.
#endif
     26 
     27 //============================================================================
     28 // SIMD256 AVX (2) implementation
     29 //
     30 // Since this implementation inherits from the AVX (1) implementation,
     31 // the only operations below ones that replace AVX (1) operations.
     32 // Mostly these are integer operations that are no longer emulated with SSE
     33 //============================================================================
     34 
     35 #define SIMD_IWRAPPER_1(op)  \
     36     static SIMDINLINE Integer SIMDCALL op(Integer const &a)   \
     37     {\
     38         return _mm256_##op(a);\
     39     }
     40 
     41 #define SIMD_IWRAPPER_1L(op)  \
     42     static SIMDINLINE Integer SIMDCALL op(Integer const &a)   \
     43     {\
     44         return _mm256_##op(_mm256_castsi256_si128(a));\
     45     }\
     46 
     47 #define SIMD_IWRAPPER_1I(op)  \
     48     template<int ImmT> \
     49     static SIMDINLINE Integer SIMDCALL op(Integer const &a)   \
     50     {\
     51         return _mm256_##op(a, ImmT);\
     52     }
     53 
     54 #define SIMD_IWRAPPER_1I_(op, intrin)  \
     55     template<int ImmT> \
     56     static SIMDINLINE Integer SIMDCALL op(Integer const &a)   \
     57     {\
     58         return _mm256_##intrin(a, ImmT);\
     59     }
     60 
     61 #define SIMD_IWRAPPER_2_(op, intrin)  \
     62     static SIMDINLINE Integer SIMDCALL op(Integer const &a, Integer const &b)   \
     63     {\
     64         return _mm256_##intrin(a, b);\
     65     }
     66 
     67 #define SIMD_IWRAPPER_2(op)  \
     68     static SIMDINLINE Integer SIMDCALL op(Integer const &a, Integer const &b)   \
     69     {\
     70         return _mm256_##op(a, b);\
     71     }
     72 
     73 #define SIMD_IWRAPPER_2I(op)  \
     74     template<int ImmT> \
     75     static SIMDINLINE Integer SIMDCALL op(Integer const &a, Integer const &b)   \
     76     {\
     77         return _mm256_##op(a, b, ImmT);\
     78     }
     79 
     80 #define SIMD_IWRAPPER_2I(op)  \
     81     template<int ImmT>\
     82     static SIMDINLINE Integer SIMDCALL op(Integer const &a, Integer const &b)   \
     83     {\
     84         return _mm256_##op(a, b, ImmT);\
     85     }
     86 
     87 //-----------------------------------------------------------------------
     88 // Floating point arithmetic operations
     89 //-----------------------------------------------------------------------
     90 static SIMDINLINE Float SIMDCALL fmadd_ps(Float const &a, Float const &b, Float const &c)   // return (a * b) + c
     91 {
     92     return _mm256_fmadd_ps(a, b, c);
     93 }
     94 
     95 //-----------------------------------------------------------------------
     96 // Integer (various width) arithmetic operations
     97 //-----------------------------------------------------------------------
SIMD_IWRAPPER_1(abs_epi32); // return absolute_value(a) (int32)
SIMD_IWRAPPER_2(add_epi32); // return a + b (int32)
SIMD_IWRAPPER_2(add_epi8);  // return a + b (int8)
SIMD_IWRAPPER_2(adds_epu8); // return ((a + b) > 0xff) ? 0xff : (a + b) (uint8, saturating)
SIMD_IWRAPPER_2(max_epi32); // return (a > b) ? a : b (int32)
SIMD_IWRAPPER_2(max_epu32); // return (a > b) ? a : b (uint32)
SIMD_IWRAPPER_2(min_epi32); // return (a < b) ? a : b (int32)
SIMD_IWRAPPER_2(min_epu32); // return (a < b) ? a : b (uint32)
SIMD_IWRAPPER_2(mul_epi32); // NOTE: per _mm256_mul_epi32 this multiplies only the low
                            // (signed) int32 of each 64-bit element, yielding int64
                            // products -- it is NOT a lane-wise int32 multiply.
                            // For a full lane-wise multiply use mullo_epi32 below.

// return (a * b) & 0xFFFFFFFF
//
// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers,
// and store the low 32 bits of the intermediate integers in dst.
SIMD_IWRAPPER_2(mullo_epi32);
SIMD_IWRAPPER_2(sub_epi32); // return a - b (int32)
SIMD_IWRAPPER_2(sub_epi64); // return a - b (int64)
SIMD_IWRAPPER_2(subs_epu8); // return (b > a) ? 0 : (a - b) (uint8, saturating)
    116 
//-----------------------------------------------------------------------
// Logical operations
//-----------------------------------------------------------------------
SIMD_IWRAPPER_2_(and_si,    and_si256);     // return a & b       (int)
SIMD_IWRAPPER_2_(andnot_si, andnot_si256);  // return (~a) & b    (int)
SIMD_IWRAPPER_2_(or_si,     or_si256);      // return a | b       (int)
SIMD_IWRAPPER_2_(xor_si,    xor_si256);     // return a ^ b       (int)


//-----------------------------------------------------------------------
// Shift operations
//-----------------------------------------------------------------------
SIMD_IWRAPPER_1I(slli_epi32);               // return a << ImmT   (immediate shift)
SIMD_IWRAPPER_2(sllv_epi32);                // return a << b      (uint32, per-lane variable shift)
SIMD_IWRAPPER_1I(srai_epi32);               // return a >> ImmT   (int32, arithmetic shift)
SIMD_IWRAPPER_1I(srli_epi32);               // return a >> ImmT   (uint32, logical shift)
SIMD_IWRAPPER_2(srlv_epi32);                // return a >> b      (uint32, per-lane variable shift)
SIMD_IWRAPPER_1I_(srli_si, srli_si256);     // return a >> (ImmT*8) (uint, whole-register byte shift)
    135 
    136 template<int ImmT>                          // same as srli_si, but with Float cast to int
    137 static SIMDINLINE Float SIMDCALL srlisi_ps(Float const &a)
    138 {
    139     return castsi_ps(srli_si<ImmT>(castps_si(a)));
    140 }
    141 
    142 
//-----------------------------------------------------------------------
// Conversion operations
// (each widens from the LOW 128 bits of the source -- see SIMD_IWRAPPER_1L)
//-----------------------------------------------------------------------
SIMD_IWRAPPER_1L(cvtepu8_epi16);    // return (int16)a    (uint8 --> int16)
SIMD_IWRAPPER_1L(cvtepu8_epi32);    // return (int32)a    (uint8 --> int32)
SIMD_IWRAPPER_1L(cvtepu16_epi32);   // return (int32)a    (uint16 --> int32)
SIMD_IWRAPPER_1L(cvtepu16_epi64);   // return (int64)a    (uint16 --> int64)
SIMD_IWRAPPER_1L(cvtepu32_epi64);   // return (int64)a    (uint32 --> int64)
    151 
//-----------------------------------------------------------------------
// Comparison operations
// (each lane is set to all-ones when the predicate holds, zero otherwise)
//-----------------------------------------------------------------------
SIMD_IWRAPPER_2(cmpeq_epi8);    // return a == b (int8)
SIMD_IWRAPPER_2(cmpeq_epi16);   // return a == b (int16)
SIMD_IWRAPPER_2(cmpeq_epi32);   // return a == b (int32)
SIMD_IWRAPPER_2(cmpeq_epi64);   // return a == b (int64)
SIMD_IWRAPPER_2(cmpgt_epi8);    // return a > b (int8)
SIMD_IWRAPPER_2(cmpgt_epi16);   // return a > b (int16)
SIMD_IWRAPPER_2(cmpgt_epi32);   // return a > b (int32)
SIMD_IWRAPPER_2(cmpgt_epi64);   // return a > b (int64)
    163 
    164 static SIMDINLINE Integer SIMDCALL cmplt_epi32(Integer const &a, Integer const &b)   // return a < b (int32)
    165 {
    166     return cmpgt_epi32(b, a);
    167 }
    168 
//-----------------------------------------------------------------------
// Blend / shuffle / permute operations
//-----------------------------------------------------------------------
SIMD_IWRAPPER_2I(blend_epi32);  // return ImmT ? b : a  (int32; bit i of ImmT selects lane i)
SIMD_IWRAPPER_2(packs_epi16);   // int16 --> int8 with signed saturation; see _mm256_packs_epi16 and _mm512_packs_epi16
SIMD_IWRAPPER_2(packs_epi32);   // int32 --> int16 with signed saturation; see _mm256_packs_epi32 and _mm512_packs_epi32
SIMD_IWRAPPER_2(packus_epi16);  // int16 --> uint8 with unsigned saturation; see _mm256_packus_epi16 and _mm512_packus_epi16
SIMD_IWRAPPER_2(packus_epi32);  // int32 --> uint16 with unsigned saturation; see _mm256_packus_epi32 and _mm512_packus_epi32
SIMD_IWRAPPER_2_(permute_epi32, permutevar8x32_epi32); // full-width 32-bit lane permute: a[b[i]] for each lane i
    178 
    179 static SIMDINLINE Float SIMDCALL permute_ps(Float const &a, Integer const &swiz)    // return a[swiz[i]] for each 32-bit lane i (float)
    180 {
    181     return _mm256_permutevar8x32_ps(a, swiz);
    182 }
    183 
SIMD_IWRAPPER_1I(shuffle_epi32); // 32-bit shuffle by immediate selector, applied independently to each 128-bit half (see _mm256_shuffle_epi32)
// return 64-bit lanes selected from a and b by ImmT; routed through the
// double-precision domain (shuffle_pd) and bit-cast back, since no direct
// epi64 shuffle wrapper is used here.
template<int ImmT>
static SIMDINLINE Integer SIMDCALL shuffle_epi64(Integer const &a, Integer const &b)
{
    return castpd_si(shuffle_pd<ImmT>(castsi_pd(a), castsi_pd(b)));
}
SIMD_IWRAPPER_2(shuffle_epi8);   // byte shuffle within each 128-bit half (see _mm256_shuffle_epi8)
SIMD_IWRAPPER_2(unpackhi_epi16); // interleave int16 from the high half of each 128-bit lane of a and b
SIMD_IWRAPPER_2(unpackhi_epi32); // interleave int32 from the high half of each 128-bit lane of a and b
SIMD_IWRAPPER_2(unpackhi_epi64); // interleave int64 from the high half of each 128-bit lane of a and b
SIMD_IWRAPPER_2(unpackhi_epi8);  // interleave int8  from the high half of each 128-bit lane of a and b
SIMD_IWRAPPER_2(unpacklo_epi16); // interleave int16 from the low  half of each 128-bit lane of a and b
SIMD_IWRAPPER_2(unpacklo_epi32); // interleave int32 from the low  half of each 128-bit lane of a and b
SIMD_IWRAPPER_2(unpacklo_epi64); // interleave int64 from the low  half of each 128-bit lane of a and b
SIMD_IWRAPPER_2(unpacklo_epi8);  // interleave int8  from the low  half of each 128-bit lane of a and b
    199 
    200 //-----------------------------------------------------------------------
    201 // Load / store operations
    202 //-----------------------------------------------------------------------
// Gather 8 floats: for each lane i, load *(float*)((char*)p + idx[i] * ScaleT).
template<ScaleFactor ScaleT>
static SIMDINLINE Float SIMDCALL i32gather_ps(float const* p, Integer const &idx) // return *(float*)(((int8*)p) + (idx * ScaleT))
{
    // ScaleT is passed through as the gather's compile-time scale operand
    // (the instruction encodes only scales of 1, 2, 4, or 8).
    return _mm256_i32gather_ps(p, idx, static_cast<int>(ScaleT));
}
    208 
// Masked gather: lanes whose mask sign bit is set load
// *(float*)((char*)p + idx[i] * ScaleT); other lanes keep the value from 'old'.
// for each element: (mask & (1 << 31)) ? (i32gather_ps<ScaleT>(p, idx), mask = 0) : old
template<ScaleFactor ScaleT>
static SIMDINLINE Float SIMDCALL mask_i32gather_ps(Float const &old, float const* p, Integer const &idx, Float const &mask)
{
	// g++ in debug mode needs the explicit .v suffix instead of relying on operator __m256()
	// Only for this intrinsic - not sure why. :(
    return _mm256_mask_i32gather_ps(old.v, p, idx.v, mask.v, static_cast<int>(ScaleT));
}
    217 
    218 static SIMDINLINE uint32_t SIMDCALL movemask_epi8(Integer const &a)
    219 {
    220     return static_cast<uint32_t>(_mm256_movemask_epi8(a));
    221 }
    222 
    223 //=======================================================================
    224 // Legacy interface (available only in SIMD256 width)
    225 //=======================================================================
    226 
    227 #undef SIMD_IWRAPPER_1
    228 #undef SIMD_IWRAPPER_1L
    229 #undef SIMD_IWRAPPER_1I
    230 #undef SIMD_IWRAPPER_1I_
    231 #undef SIMD_IWRAPPER_2_
    232 #undef SIMD_IWRAPPER_2
    233 #undef SIMD_IWRAPPER_2I
    234 #undef SIMD_IWRAPPER_2I
    235