/****************************************************************************
* Copyright (C) 2017 Intel Corporation.   All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
****************************************************************************/
#if !defined(__SIMD_LIB_AVX512_HPP__)
#error Do not include this file directly, use "simdlib.hpp" instead.
#endif

//============================================================================
// SIMD256 AVX (512) implementation for Core processors
//
// Since this implementation inherits from the AVX (2) implementation,
// the only operations below are those that replace AVX (2) operations.
// These use native AVX512 instructions with masking to enable a larger
// register set.
34 //============================================================================ 35 36 #define SIMD_DWRAPPER_1_(op, intrin, mask) \ 37 static SIMDINLINE Double SIMDCALL op(Double a) \ 38 {\ 39 return __conv(_mm512_maskz_##intrin((mask), __conv(a)));\ 40 } 41 #define SIMD_DWRAPPER_1(op) SIMD_DWRAPPER_1_(op, op, __mmask8(0xf)) 42 43 #define SIMD_DWRAPPER_1I_(op, intrin, mask) \ 44 template<int ImmT> \ 45 static SIMDINLINE Double SIMDCALL op(Double a) \ 46 {\ 47 return __conv(_mm512_maskz_##intrin((mask), __conv(a), ImmT));\ 48 } 49 #define SIMD_DWRAPPER_1I(op) SIMD_DWRAPPER_1I_(op, op, __mmask8(0xf)) 50 51 #define SIMD_DWRAPPER_2_(op, intrin, mask) \ 52 static SIMDINLINE Double SIMDCALL op(Double a, Double b) \ 53 {\ 54 return __conv(_mm512_maskz_##intrin((mask), __conv(a), __conv(b)));\ 55 } 56 #define SIMD_DWRAPPER_2(op) SIMD_DWRAPPER_2_(op, op, __mmask8(0xf)) 57 58 #define SIMD_IWRAPPER_1_(op, intrin, mask) \ 59 static SIMDINLINE Integer SIMDCALL op(Integer a) \ 60 {\ 61 return __conv(_mm512_maskz_##intrin((mask), __conv(a)));\ 62 } 63 #define SIMD_IWRAPPER_1_8(op) SIMD_IWRAPPER_1_(op, op, __mmask64(0xffffffffull)) 64 #define SIMD_IWRAPPER_1_16(op) SIMD_IWRAPPER_1_(op, op, __mmask32(0xffff)) 65 #define SIMD_IWRAPPER_1_64(op) SIMD_IWRAPPER_1_(op, op, __mmask8(0xf)) 66 67 #define SIMD_IWRAPPER_1I_(op, intrin, mask) \ 68 template<int ImmT> \ 69 static SIMDINLINE Integer SIMDCALL op(Integer a) \ 70 {\ 71 return __conv(_mm512_maskz_##intrin((mask), __conv(a), ImmT));\ 72 } 73 #define SIMD_IWRAPPER_1I_8(op) SIMD_IWRAPPER_1I_(op, op, __mmask64(0xffffffffull)) 74 #define SIMD_IWRAPPER_1I_16(op) SIMD_IWRAPPER_1I_(op, op, __mmask32(0xffff)) 75 #define SIMD_IWRAPPER_1I_64(op) SIMD_IWRAPPER_1I_(op, op, __mmask8(0xf)) 76 77 #define SIMD_IWRAPPER_2_(op, intrin, mask) \ 78 static SIMDINLINE Integer SIMDCALL op(Integer a, Integer b) \ 79 {\ 80 return __conv(_mm512_maskz_##intrin((mask), __conv(a), __conv(b)));\ 81 } 82 #define SIMD_IWRAPPER_2_8(op) SIMD_IWRAPPER_2_(op, op, 
__mmask64(0xffffffffull)) 83 #define SIMD_IWRAPPER_2_16(op) SIMD_IWRAPPER_2_(op, op, __mmask32(0xffff)) 84 #define SIMD_IWRAPPER_2_64(op) SIMD_IWRAPPER_2_(op, op, __mmask8(0xf)) 85 86 87 SIMD_IWRAPPER_2_8(add_epi8); // return a + b (int8) 88 SIMD_IWRAPPER_2_8(adds_epu8); // return ((a + b) > 0xff) ? 0xff : (a + b) (uint8) 89 SIMD_IWRAPPER_2_64(sub_epi64); // return a - b (int64) 90 SIMD_IWRAPPER_2_8(subs_epu8); // return (b > a) ? 0 : (a - b) (uint8) 91 SIMD_IWRAPPER_2_8(packs_epi16); // int16 --> int8 See documentation for _mm256_packs_epi16 and _mm512_packs_epi16 92 SIMD_IWRAPPER_2_16(packs_epi32); // int32 --> int16 See documentation for _mm256_packs_epi32 and _mm512_packs_epi32 93 SIMD_IWRAPPER_2_8(packus_epi16); // uint16 --> uint8 See documentation for _mm256_packus_epi16 and _mm512_packus_epi16 94 SIMD_IWRAPPER_2_16(packus_epi32); // uint32 --> uint16 See documentation for _mm256_packus_epi32 and _mm512_packus_epi32 95 SIMD_IWRAPPER_2_16(unpackhi_epi16); 96 SIMD_IWRAPPER_2_64(unpackhi_epi64); 97 SIMD_IWRAPPER_2_8(unpackhi_epi8); 98 SIMD_IWRAPPER_2_16(unpacklo_epi16); 99 SIMD_IWRAPPER_2_64(unpacklo_epi64); 100 SIMD_IWRAPPER_2_8(unpacklo_epi8); 101 102 static SIMDINLINE uint32_t SIMDCALL movemask_epi8(Integer a) 103 { 104 __mmask64 m = 0xffffffffull; 105 return static_cast<uint32_t>( 106 _mm512_mask_test_epi8_mask(m, __conv(a), _mm512_set1_epi8(0x80))); 107 } 108 109 #undef SIMD_DWRAPPER_1_ 110 #undef SIMD_DWRAPPER_1 111 #undef SIMD_DWRAPPER_1I_ 112 #undef SIMD_DWRAPPER_1I 113 #undef SIMD_DWRAPPER_2_ 114 #undef SIMD_DWRAPPER_2 115 #undef SIMD_DWRAPPER_2I 116 #undef SIMD_IWRAPPER_1_ 117 #undef SIMD_IWRAPPER_1_8 118 #undef SIMD_IWRAPPER_1_16 119 #undef SIMD_IWRAPPER_1_64 120 #undef SIMD_IWRAPPER_1I_ 121 #undef SIMD_IWRAPPER_1I_8 122 #undef SIMD_IWRAPPER_1I_16 123 #undef SIMD_IWRAPPER_1I_64 124 #undef SIMD_IWRAPPER_2_ 125 #undef SIMD_IWRAPPER_2_8 126 #undef SIMD_IWRAPPER_2_16 127 #undef SIMD_IWRAPPER_2_64 128