/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2

#include "./vpx_dsp_rtcd.h"

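// Each pair of rows below (32 bytes) holds one bilinear kernel, stored as
// interleaved weight pairs that sum to 16, laid out for
// _mm256_maddubs_epi16; the kernel for sub-pel offset k starts at byte
// k * 32, i.e. (offset << 5) in the loads further down.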
/* clang-format off */
DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = {
  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,
  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,  16, 0,
  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,
  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,
  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,
  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,
  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,
  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,
  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
  6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10,
  6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10, 6,  10,
  4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12,
  4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12, 4,  12,
  2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14,
  2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14, 2,  14,
};

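// With source and reference bytes interleaved as (src, ref) pairs, a
// _mm256_maddubs_epi16 against these (+1, -1) pairs produces the 16-bit
// differences src - ref.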
DECLARE_ALIGNED(32, static const int8_t, adjacent_sub_avx2[32]) = {
  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,
  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1,  1, -1
};
/* clang-format on */

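// Computes the sum of differences and the sum of squared differences over a
// 16x16 block:
//   *sum = sum(src - ref), *sse = sum((src - ref)^2)
// A minimal scalar sketch of the same computation (illustration only, not
// compiled):
//
//   int sum0 = 0;
//   unsigned int sse0 = 0;
//   for (int r = 0; r < 16; ++r) {
//     for (int c = 0; c < 16; ++c) {
//       const int d = src_ptr[r * source_stride + c] -
//                     ref_ptr[r * recon_stride + c];
//       sum0 += d;
//       sse0 += d * d;
//     }
//   }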
void vpx_get16x16var_avx2(const unsigned char *src_ptr, int source_stride,
                          const unsigned char *ref_ptr, int recon_stride,
                          unsigned int *sse, int *sum) {
  unsigned int i, src_2strides, ref_2strides;
  __m256i sum_reg = _mm256_setzero_si256();
  __m256i sse_reg = _mm256_setzero_si256();
  // process two 16-byte rows per iteration, one per 256-bit register
  src_2strides = source_stride << 1;
  ref_2strides = recon_stride << 1;
  for (i = 0; i < 8; ++i) {
    // zero-extend the 16 bytes of each row to 16-bit lanes across both
    // 128-bit lanes
    const __m256i src0 =
        _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i const *)(src_ptr)));
    const __m256i src1 = _mm256_cvtepu8_epi16(
        _mm_loadu_si128((__m128i const *)(src_ptr + source_stride)));
    const __m256i ref0 =
        _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i const *)(ref_ptr)));
    const __m256i ref1 = _mm256_cvtepu8_epi16(
        _mm_loadu_si128((__m128i const *)(ref_ptr + recon_stride)));
    const __m256i diff0 = _mm256_sub_epi16(src0, ref0);
    const __m256i diff1 = _mm256_sub_epi16(src1, ref1);
    const __m256i madd0 = _mm256_madd_epi16(diff0, diff0);
    const __m256i madd1 = _mm256_madd_epi16(diff1, diff1);

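    // Each 16-bit lane of sum_reg accumulates two differences per iteration,
    // 16 in total, each in [-255, 255], so the 16-bit additions below cannot
    // overflow.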
    // add to the running totals
    sum_reg = _mm256_add_epi16(sum_reg, _mm256_add_epi16(diff0, diff1));
    sse_reg = _mm256_add_epi32(sse_reg, _mm256_add_epi32(madd0, madd1));

    src_ptr += src_2strides;
    ref_ptr += ref_2strides;
  }
  {
    // extract the high lane and add it to the low lane
    const __m128i sum_reg_128 = _mm_add_epi16(
        _mm256_castsi256_si128(sum_reg), _mm256_extractf128_si256(sum_reg, 1));
    const __m128i sse_reg_128 = _mm_add_epi32(
        _mm256_castsi256_si128(sse_reg), _mm256_extractf128_si256(sse_reg, 1));

    // sum upper and lower 64 bits together and convert up to 32 bit values
    const __m128i sum_reg_64 =
        _mm_add_epi16(sum_reg_128, _mm_srli_si128(sum_reg_128, 8));
    const __m128i sum_int32 = _mm_cvtepi16_epi32(sum_reg_64);

    // unpack sse and sum registers and add
    const __m128i sse_sum_lo = _mm_unpacklo_epi32(sse_reg_128, sum_int32);
    const __m128i sse_sum_hi = _mm_unpackhi_epi32(sse_reg_128, sum_int32);
    const __m128i sse_sum = _mm_add_epi32(sse_sum_lo, sse_sum_hi);

    // perform the final summation and extract the results
    const __m128i res = _mm_add_epi32(sse_sum, _mm_srli_si128(sse_sum, 8));
    *((int *)sse) = _mm_cvtsi128_si32(res);
    *((int *)sum) = _mm_extract_epi32(res, 1);
  }
}

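// Same reduction for a 32x16 block, two full 32-byte rows per iteration:
// src and ref bytes are interleaved with unpack{lo,hi} and differenced in a
// single _mm256_maddubs_epi16 against the (+1, -1) pattern above.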
static void get32x16var_avx2(const unsigned char *src_ptr, int source_stride,
                             const unsigned char *ref_ptr, int recon_stride,
                             unsigned int *sse, int *sum) {
  unsigned int i, src_2strides, ref_2strides;
  const __m256i adj_sub = _mm256_load_si256((__m256i const *)adjacent_sub_avx2);
  __m256i sum_reg = _mm256_setzero_si256();
  __m256i sse_reg = _mm256_setzero_si256();

  // process 64 elements per iteration
  src_2strides = source_stride << 1;
  ref_2strides = recon_stride << 1;
  for (i = 0; i < 8; i++) {
    const __m256i src0 = _mm256_loadu_si256((__m256i const *)(src_ptr));
    const __m256i src1 =
        _mm256_loadu_si256((__m256i const *)(src_ptr + source_stride));
    const __m256i ref0 = _mm256_loadu_si256((__m256i const *)(ref_ptr));
    const __m256i ref1 =
        _mm256_loadu_si256((__m256i const *)(ref_ptr + recon_stride));

    // unpack into pairs of source and reference values
    const __m256i src_ref0 = _mm256_unpacklo_epi8(src0, ref0);
    const __m256i src_ref1 = _mm256_unpackhi_epi8(src0, ref0);
    const __m256i src_ref2 = _mm256_unpacklo_epi8(src1, ref1);
    const __m256i src_ref3 = _mm256_unpackhi_epi8(src1, ref1);

    // subtract adjacent elements using src*1 + ref*-1
    const __m256i diff0 = _mm256_maddubs_epi16(src_ref0, adj_sub);
    const __m256i diff1 = _mm256_maddubs_epi16(src_ref1, adj_sub);
    const __m256i diff2 = _mm256_maddubs_epi16(src_ref2, adj_sub);
    const __m256i diff3 = _mm256_maddubs_epi16(src_ref3, adj_sub);
    const __m256i madd0 = _mm256_madd_epi16(diff0, diff0);
    const __m256i madd1 = _mm256_madd_epi16(diff1, diff1);
    const __m256i madd2 = _mm256_madd_epi16(diff2, diff2);
    const __m256i madd3 = _mm256_madd_epi16(diff3, diff3);

    // add to the running totals
    sum_reg = _mm256_add_epi16(sum_reg, _mm256_add_epi16(diff0, diff1));
    sum_reg = _mm256_add_epi16(sum_reg, _mm256_add_epi16(diff2, diff3));
    sse_reg = _mm256_add_epi32(sse_reg, _mm256_add_epi32(madd0, madd1));
    sse_reg = _mm256_add_epi32(sse_reg, _mm256_add_epi32(madd2, madd3));

    src_ptr += src_2strides;
    ref_ptr += ref_2strides;
  }

  {
    // extract the high lane and add it to the low lane
    const __m128i sum_reg_128 = _mm_add_epi16(
        _mm256_castsi256_si128(sum_reg), _mm256_extractf128_si256(sum_reg, 1));
    const __m128i sse_reg_128 = _mm_add_epi32(
        _mm256_castsi256_si128(sse_reg), _mm256_extractf128_si256(sse_reg, 1));

    // sum upper and lower 64 bits together and convert up to 32 bit values
    const __m128i sum_reg_64 =
        _mm_add_epi16(sum_reg_128, _mm_srli_si128(sum_reg_128, 8));
    const __m128i sum_int32 = _mm_cvtepi16_epi32(sum_reg_64);

    // unpack sse and sum registers and add
    const __m128i sse_sum_lo = _mm_unpacklo_epi32(sse_reg_128, sum_int32);
    const __m128i sse_sum_hi = _mm_unpackhi_epi32(sse_reg_128, sum_int32);
    const __m128i sse_sum = _mm_add_epi32(sse_sum_lo, sse_sum_hi);

    // perform the final summation and extract the results
    const __m128i res = _mm_add_epi32(sse_sum, _mm_srli_si128(sse_sum, 8));
    *((int *)sse) = _mm_cvtsi128_si32(res);
    *((int *)sum) = _mm_extract_epi32(res, 1);
  }
}

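// FILTER_SRC computes the rounded bilinear interpolation
//   (a * (16 - k) + b * k + 8) >> 4
// in 16-bit lanes, with the weight pairs taken from bilinear_filters_avx2
// and (a, b) the interleaved source bytes.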
#define FILTER_SRC(filter)                               \
  /* filter the source */                                \
  exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter); \
  exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter); \
                                                         \
  /* add 8 to source */                                  \
  exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);        \
  exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);        \
                                                         \
  /* divide source by 16 */                              \
  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);         \
  exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);

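// CALC_SUM_SSE_INSIDE_LOOP accumulates the sum in 16-bit lanes; each lane
// takes two differences per row (one from the lo and one from the hi
// unpack), so for heights up to 64 the magnitude stays within
// 128 * 255 = 32640 and cannot overflow int16.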
#define CALC_SUM_SSE_INSIDE_LOOP                          \
  /* expand each byte to 2 bytes */                       \
  exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);   \
  exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);   \
  /* source - dest */                                     \
  exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);  \
  exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);  \
  /* calculate sum */                                     \
  *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_lo);      \
  exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo); \
  *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_hi);      \
  exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi); \
  /* calculate sse */                                     \
  *sse_reg = _mm256_add_epi32(*sse_reg, exp_src_lo);      \
  *sse_reg = _mm256_add_epi32(*sse_reg, exp_src_hi);

// final reduction of sum and sse
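// Comparing sum_reg against zero yields an all-ones 16-bit mask where the
// sum is negative; unpacking with that mask sign-extends each 16-bit sum to
// 32 bits before the horizontal reduction.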
#define CALC_SUM_AND_SSE                                                   \
  res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg);                         \
  sse_reg_hi = _mm256_srli_si256(sse_reg, 8);                              \
  sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp);                    \
  sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp);                    \
  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
  sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi);                      \
                                                                           \
  sse_reg_hi = _mm256_srli_si256(sse_reg, 4);                              \
  sum_reg_hi = _mm256_srli_si256(sum_reg, 8);                              \
                                                                           \
  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
  *((int *)sse) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) +     \
                  _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \
  sum_reg_hi = _mm256_srli_si256(sum_reg, 4);                              \
  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
  sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) +               \
        _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));

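// The spv32_* helpers below cover the nine sub-pel cases for 32-wide blocks.
// Each of x and y is 0 (copy), 4 (half-pel, a single _mm256_avg_epu8) or "b"
// (any other offset, the full bilinear filter); when do_sec is set, the
// filtered prediction is additionally averaged with the second predictor sec.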
static INLINE void spv32_x0_y0(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg) {
  const __m256i zero_reg = _mm256_setzero_si256();
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_reg = _mm256_loadu_si256((__m256i const *)src);
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
      const __m256i avg_reg = _mm256_avg_epu8(src_reg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      sec += sec_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src += src_stride;
    dst += dst_stride;
  }
}

// (x == 0, y == 4) or (x == 4, y == 0).  sstep determines the direction.
static INLINE void spv32_half_zero(const uint8_t *src, int src_stride,
                                   const uint8_t *dst, int dst_stride,
                                   const uint8_t *sec, int sec_stride,
                                   int do_sec, int height, __m256i *sum_reg,
                                   __m256i *sse_reg, int sstep) {
  const __m256i zero_reg = _mm256_setzero_si256();
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + sstep));
    const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
      const __m256i avg_reg = _mm256_avg_epu8(src_avg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      sec += sec_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(src_avg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(src_avg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src += src_stride;
    dst += dst_stride;
  }
}

static INLINE void spv32_x0_y4(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg) {
  spv32_half_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, sum_reg, sse_reg, src_stride);
}

static INLINE void spv32_x4_y0(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg) {
  spv32_half_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, sum_reg, sse_reg, 1);
}

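// For x == 4 and y == 4 the horizontal half-pel average of the previous row
// is carried in prev_src_avg, so each source row is loaded and averaged only
// once.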
static INLINE void spv32_x4_y4(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  src += src_stride;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)(src));
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
    const __m256i current_avg = _mm256_avg_epu8(prev_src_avg, src_avg);
    // save the current source average for the next row
    prev_src_avg = src_avg;

    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
      const __m256i avg_reg = _mm256_avg_epu8(current_avg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      sec += sec_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(current_avg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(current_avg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    dst += dst_stride;
    src += src_stride;
  }
}

// (x == 0, y == bil) or (x == 4, y == bil).  sstep determines the direction.
static INLINE void spv32_bilin_zero(const uint8_t *src, int src_stride,
                                    const uint8_t *dst, int dst_stride,
                                    const uint8_t *sec, int sec_stride,
                                    int do_sec, int height, __m256i *sum_reg,
                                    __m256i *sse_reg, int offset, int sstep) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i filter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (offset << 5)));
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + sstep));
    exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
    exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);

    FILTER_SRC(filter)
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
      const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
      const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
      sec += sec_stride;
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src += src_stride;
    dst += dst_stride;
  }
}

static INLINE void spv32_x0_yb(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg,
                               int y_offset) {
  spv32_bilin_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                   height, sum_reg, sse_reg, y_offset, src_stride);
}

static INLINE void spv32_xb_y0(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg,
                               int x_offset) {
  spv32_bilin_zero(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                   height, sum_reg, sse_reg, x_offset, 1);
}

static INLINE void spv32_x4_yb(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg,
                               int y_offset) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i filter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (y_offset << 5)));
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  src += src_stride;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
    exp_src_lo = _mm256_unpacklo_epi8(prev_src_avg, src_avg);
    exp_src_hi = _mm256_unpackhi_epi8(prev_src_avg, src_avg);
    prev_src_avg = src_avg;

    FILTER_SRC(filter)
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
      const __m256i exp_src_avg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
      const __m256i avg_reg = _mm256_avg_epu8(exp_src_avg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      sec += sec_stride;
    }
    CALC_SUM_SSE_INSIDE_LOOP
    dst += dst_stride;
    src += src_stride;
  }
}

static INLINE void spv32_xb_y4(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg,
                               int x_offset) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i filter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (x_offset << 5)));
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  __m256i src_reg, src_pack;
  int i;
  exp_src_lo = _mm256_unpacklo_epi8(src_a, src_b);
  exp_src_hi = _mm256_unpackhi_epi8(src_a, src_b);
  FILTER_SRC(filter)
  // pack the filtered 16-bit values back to bytes within each 128-bit lane
  src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);

  src += src_stride;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
    exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);

    FILTER_SRC(filter)

    src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
    // average the previous row's packed result with the current one
    src_pack = _mm256_avg_epu8(src_pack, src_reg);

    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
      const __m256i avg_pack = _mm256_avg_epu8(src_pack, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_pack, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_pack, zero_reg);
      sec += sec_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(src_pack, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(src_pack, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src_pack = src_reg;
    dst += dst_stride;
    src += src_stride;
  }
}

static INLINE void spv32_xb_yb(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *sec, int sec_stride, int do_sec,
                               int height, __m256i *sum_reg, __m256i *sse_reg,
                               int x_offset, int y_offset) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i xfilter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (x_offset << 5)));
  const __m256i yfilter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (y_offset << 5)));
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  __m256i prev_src_pack, src_pack;
  int i;
  exp_src_lo = _mm256_unpacklo_epi8(src_a, src_b);
  exp_src_hi = _mm256_unpackhi_epi8(src_a, src_b);
  FILTER_SRC(xfilter)
  // pack the filtered 16-bit values back to bytes within each 128-bit lane
  prev_src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
  src += src_stride;

  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
    exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);

    FILTER_SRC(xfilter)
    src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);

    // interleave the previous row's packed result with the current one
    exp_src_lo = _mm256_unpacklo_epi8(prev_src_pack, src_pack);
    exp_src_hi = _mm256_unpackhi_epi8(prev_src_pack, src_pack);

    FILTER_SRC(yfilter)
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)sec);
      const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
      const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      sec += sec_stride;
    }

    prev_src_pack = src_pack;

    CALC_SUM_SSE_INSIDE_LOOP
    dst += dst_stride;
    src += src_stride;
  }
}

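// Dispatch to the matching spv32_* kernel for the given sub-pel offsets,
// then reduce the vector accumulators to the scalar *sse and sum.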
static INLINE int sub_pix_var32xh(const uint8_t *src, int src_stride,
                                  int x_offset, int y_offset,
                                  const uint8_t *dst, int dst_stride,
                                  const uint8_t *sec, int sec_stride,
                                  int do_sec, int height, unsigned int *sse) {
  const __m256i zero_reg = _mm256_setzero_si256();
  __m256i sum_reg = _mm256_setzero_si256();
  __m256i sse_reg = _mm256_setzero_si256();
  __m256i sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
  int sum;
  // x_offset = 0 and y_offset = 0
  if (x_offset == 0) {
    if (y_offset == 0) {
      spv32_x0_y0(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg);
      // x_offset = 0 and y_offset = 4
    } else if (y_offset == 4) {
      spv32_x0_y4(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg);
      // x_offset = 0 and y_offset = bilin interpolation
    } else {
      spv32_x0_yb(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg, y_offset);
    }
    // x_offset = 4  and y_offset = 0
  } else if (x_offset == 4) {
    if (y_offset == 0) {
      spv32_x4_y0(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg);
      // x_offset = 4  and y_offset = 4
    } else if (y_offset == 4) {
      spv32_x4_y4(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg);
      // x_offset = 4  and y_offset = bilin interpolation
    } else {
      spv32_x4_yb(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg, y_offset);
    }
    // x_offset = bilin interpolation and y_offset = 0
  } else {
    if (y_offset == 0) {
      spv32_xb_y0(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg, x_offset);
      // x_offset = bilin interpolation and y_offset = 4
    } else if (y_offset == 4) {
      spv32_xb_y4(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg, x_offset);
      // x_offset = bilin interpolation and y_offset = bilin interpolation
    } else {
      spv32_xb_yb(src, src_stride, dst, dst_stride, sec, sec_stride, do_sec,
                  height, &sum_reg, &sse_reg, x_offset, y_offset);
    }
  }
  CALC_SUM_AND_SSE
  return sum;
}

static unsigned int sub_pixel_variance32xh_avx2(
    const uint8_t *src, int src_stride, int x_offset, int y_offset,
    const uint8_t *dst, int dst_stride, int height, unsigned int *sse) {
  return sub_pix_var32xh(src, src_stride, x_offset, y_offset, dst, dst_stride,
                         NULL, 0, 0, height, sse);
}

static unsigned int sub_pixel_avg_variance32xh_avx2(
    const uint8_t *src, int src_stride, int x_offset, int y_offset,
    const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride,
    int height, unsigned int *sse) {
  return sub_pix_var32xh(src, src_stride, x_offset, y_offset, dst, dst_stride,
                         sec, sec_stride, 1, height, sse);
}

typedef void (*get_var_avx2)(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             unsigned int *sse, int *sum);

static void variance_avx2(const uint8_t *src, int src_stride,
                          const uint8_t *ref, int ref_stride, int w, int h,
                          unsigned int *sse, int *sum, get_var_avx2 var_fn,
                          int block_size) {
  int i, j;

  *sse = 0;
  *sum = 0;

  for (i = 0; i < h; i += 16) {
    for (j = 0; j < w; j += block_size) {
      unsigned int sse0;
      int sum0;
      var_fn(&src[src_stride * i + j], src_stride, &ref[ref_stride * i + j],
             ref_stride, &sse0, &sum0);
      *sse += sse0;
      *sum += sum0;
    }
  }
}

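// For an N-pixel block, variance = sse - sum^2 / N; the shift below is
// log2(N), e.g. 16 * 16 = 256 = 2^8. A hypothetical scalar helper showing
// the equivalent final arithmetic (illustration only, not part of the
// library):
//
//   static uint32_t var_final(uint32_t sse, int sum, int log2_n) {
//     return sse - (uint32_t)(((int64_t)sum * sum) >> log2_n);
//   }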
unsigned int vpx_variance16x16_avx2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_avx2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum,
                vpx_get16x16var_avx2, 16);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 8);
}

unsigned int vpx_mse16x16_avx2(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
  int sum;
  vpx_get16x16var_avx2(src, src_stride, ref, ref_stride, sse, &sum);
  return *sse;
}

unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum,
                get32x16var_avx2, 32);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
}

unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum,
                get32x16var_avx2, 32);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 10);
}

unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum,
                get32x16var_avx2, 32);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 12);
}

unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum,
                get32x16var_avx2, 32);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
}

unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src,
                                              int src_stride, int x_offset,
                                              int y_offset, const uint8_t *dst,
                                              int dst_stride,
                                              unsigned int *sse) {
  unsigned int sse1;
  const int se1 = sub_pixel_variance32xh_avx2(
      src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1);
  unsigned int sse2;
  const int se2 =
      sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset,
                                  dst + 32, dst_stride, 64, &sse2);
  const int se = se1 + se2;
  *sse = sse1 + sse2;
  return *sse - (uint32_t)(((int64_t)se * se) >> 12);
}

unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src,
                                              int src_stride, int x_offset,
                                              int y_offset, const uint8_t *dst,
                                              int dst_stride,
                                              unsigned int *sse) {
  const int se = sub_pixel_variance32xh_avx2(
      src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse);
  return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}

unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
    const uint8_t *src, int src_stride, int x_offset, int y_offset,
    const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
  unsigned int sse1;
  const int se1 = sub_pixel_avg_variance32xh_avx2(
      src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1);
  unsigned int sse2;
  const int se2 = sub_pixel_avg_variance32xh_avx2(
      src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, sec + 32,
      64, 64, &sse2);
  const int se = se1 + se2;

  *sse = sse1 + sse2;

  return *sse - (uint32_t)(((int64_t)se * se) >> 12);
}

unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
    const uint8_t *src, int src_stride, int x_offset, int y_offset,
    const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) {
  // Process 32 elements in parallel.
  const int se = sub_pixel_avg_variance32xh_avx2(
      src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse);
  return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}