/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <emmintrin.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"

static uint64_t vpx_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
                                                int stride) {
  const __m128i v_val_0_w =
      _mm_loadl_epi64((const __m128i *)(src + 0 * stride));
  const __m128i v_val_1_w =
      _mm_loadl_epi64((const __m128i *)(src + 1 * stride));
  const __m128i v_val_2_w =
      _mm_loadl_epi64((const __m128i *)(src + 2 * stride));
  const __m128i v_val_3_w =
      _mm_loadl_epi64((const __m128i *)(src + 3 * stride));

  const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
  const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
  const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
  const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);

  const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
  const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
  const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);

  const __m128i v_sum_d =
      _mm_add_epi32(v_sum_0123_d, _mm_srli_epi64(v_sum_0123_d, 32));

  return (uint64_t)_mm_cvtsi128_si32(v_sum_d);
}
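
// Note on the 4x4 reduction above: each _mm_madd_epi16 squares one 4-sample
// row and pair-sums it into the two low 32-bit lanes (the upper lanes are
// zero because _mm_loadl_epi64 only fills the low 64 bits), so v_sum_0123_d
// carries two partial sums. The final shift-and-add folds them together, and
// the low lane then holds the full sum of the 16 squared samples.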

// TODO(jingning): Evaluate the performance impact here.
#ifdef __GNUC__
// This prevents GCC/Clang from inlining this function into
// vpx_sum_squares_2d_i16_sse2, which in turn saves some stack
// maintenance instructions in the common case of 4x4.
__attribute__((noinline))
#endif
static uint64_t
vpx_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int size) {
  int r, c;
  const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
  __m128i v_acc_q = _mm_setzero_si128();

  for (r = 0; r < size; r += 8) {
    __m128i v_acc_d = _mm_setzero_si128();

    for (c = 0; c < size; c += 8) {
      const int16_t *b = src + c;
      const __m128i v_val_0_w =
          _mm_load_si128((const __m128i *)(b + 0 * stride));
      const __m128i v_val_1_w =
          _mm_load_si128((const __m128i *)(b + 1 * stride));
      const __m128i v_val_2_w =
          _mm_load_si128((const __m128i *)(b + 2 * stride));
      const __m128i v_val_3_w =
          _mm_load_si128((const __m128i *)(b + 3 * stride));
      const __m128i v_val_4_w =
          _mm_load_si128((const __m128i *)(b + 4 * stride));
      const __m128i v_val_5_w =
          _mm_load_si128((const __m128i *)(b + 5 * stride));
      const __m128i v_val_6_w =
          _mm_load_si128((const __m128i *)(b + 6 * stride));
      const __m128i v_val_7_w =
          _mm_load_si128((const __m128i *)(b + 7 * stride));

      const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
      const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
      const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
      const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
      const __m128i v_sq_4_d = _mm_madd_epi16(v_val_4_w, v_val_4_w);
      const __m128i v_sq_5_d = _mm_madd_epi16(v_val_5_w, v_val_5_w);
      const __m128i v_sq_6_d = _mm_madd_epi16(v_val_6_w, v_val_6_w);
      const __m128i v_sq_7_d = _mm_madd_epi16(v_val_7_w, v_val_7_w);

      const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
      const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
      const __m128i v_sum_45_d = _mm_add_epi32(v_sq_4_d, v_sq_5_d);
      const __m128i v_sum_67_d = _mm_add_epi32(v_sq_6_d, v_sq_7_d);

      const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);
      const __m128i v_sum_4567_d = _mm_add_epi32(v_sum_45_d, v_sum_67_d);

      v_acc_d = _mm_add_epi32(v_acc_d, v_sum_0123_d);
      v_acc_d = _mm_add_epi32(v_acc_d, v_sum_4567_d);
    }

    v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q));
    v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32));

    src += 8 * stride;
  }

  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));

#if ARCH_X86_64
  return (uint64_t)_mm_cvtsi128_si64(v_acc_q);
#else
  {
    uint64_t tmp;
    _mm_storel_epi64((__m128i *)&tmp, v_acc_q);
    return tmp;
  }
#endif
}

uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t *src, int stride,
                                     int size) {
  // 4 elements per row only requires half an XMM register, so this
  // must be a special case, but also note that over 75% of all calls
  // are with size == 4, so it is also the common case.
  if (size == 4) {
    return vpx_sum_squares_2d_i16_4x4_sse2(src, stride);
  } else {
    // Generic case
    assert(size % 8 == 0);
    return vpx_sum_squares_2d_i16_nxn_sse2(src, stride, size);
  }
}
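
// A minimal plain-C sketch of what the SSE2 kernels above compute (the sum
// of squared 16-bit samples over a size x size block), handy as a baseline
// when checking the SIMD paths. The helper name below is illustrative only
// and is kept out of the build; libvpx's own C reference is
// vpx_sum_squares_2d_i16_c, dispatched through vpx_dsp_rtcd.h.
#if 0
static uint64_t sum_squares_2d_i16_ref(const int16_t *src, int stride,
                                       int size) {
  uint64_t acc = 0;
  int r, c;
  for (r = 0; r < size; ++r) {
    for (c = 0; c < size; ++c) {
      const int v = src[r * stride + c];
      acc += (uint64_t)(v * v);  // 32767^2 fits comfortably in an int.
    }
  }
  return acc;
}
#endif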