// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
//  Software License Agreement:  http://www.webmproject.org/license/software/
//  Additional IP Rights Grant:  http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
//
// Author: somnath (at) google.com (Somnath Banerjee)
//         cduvivier (at) google.com (Christian Duvivier)

#include "./dsp.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#if defined(WEBP_USE_SSE2)

#include <emmintrin.h>
#include "../dec/vp8i.h"

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 =  20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
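  //
  // For reference, a scalar sketch of the trick (assuming 16-bit signed
  // operands, as in the vectors below):
  //    int16_t mul_by_K(int16_t x, int16_t k) {  // computes (x * K) >> 16
  //      return ((x * k) >> 16) + x;             // since K = k + (1 << 16)
  //    }
  // which maps to one _mm_mulhi_epi16() followed by an add.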
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two transforms
  // in parallel). In the case of only one transform, the second half of the
  // vectors will just contain random values we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30 a01 a11 a21 a31
    // b00 b10 b20 b30 b01 b11 b21 b31
    // a02 a12 a22 a32 a03 a13 a23 a33
    // b02 b12 b22 b32 b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a =  _mm_add_epi16(dc, T2);
    const __m128i b =  _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30 a01 a11 a21 a31
    // b00 b10 b20 b30 b01 b11 b21 b31
    // a02 a12 a22 a32 a03 a13 a23 a33
    // b02 b12 b22 b32 b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i dst0, dst1, dst2, dst3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      dst0 = _mm_loadl_epi64((__m128i*)&dst[0 * BPS]);
      dst1 = _mm_loadl_epi64((__m128i*)&dst[1 * BPS]);
      dst2 = _mm_loadl_epi64((__m128i*)&dst[2 * BPS]);
      dst3 = _mm_loadl_epi64((__m128i*)&dst[3 * BPS]);
    } else {
      // Load four bytes/pixels per line.
      dst0 = _mm_cvtsi32_si128(*(int*)&dst[0 * BPS]);
      dst1 = _mm_cvtsi32_si128(*(int*)&dst[1 * BPS]);
      dst2 = _mm_cvtsi32_si128(*(int*)&dst[2 * BPS]);
      dst3 = _mm_cvtsi32_si128(*(int*)&dst[3 * BPS]);
    }
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)&dst[0 * BPS], dst0);
      _mm_storel_epi64((__m128i*)&dst[1 * BPS], dst1);
      _mm_storel_epi64((__m128i*)&dst[2 * BPS], dst2);
      _mm_storel_epi64((__m128i*)&dst[3 * BPS], dst3);
    } else {
      // Store four bytes/pixels per line.
      *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(dst0);
      *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(dst1);
      *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(dst2);
      *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(dst3);
    }
  }
}

//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)

// Compute abs(p - q) = subs(p - q) OR subs(q - p)
#define MM_ABS(p, q)  _mm_or_si128(                                            \
    _mm_subs_epu8((q), (p)),                                                   \
    _mm_subs_epu8((p), (q)))
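
// A quick check of the trick on a single byte pair: unsigned saturating
// subtraction clamps negative results to zero, so for p = 3 and q = 10,
//    subs(p - q) = 0,  subs(q - p) = 7,  and (0 | 7) = 7 = abs(3 - 10).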

// Shift each byte of "a" by N bits while preserving the sign bit.
//
// It first shifts the lower bytes of the words and then the upper bytes and
// then merges the results together.
#define SIGNED_SHIFT_N(a, N) {                                                 \
  __m128i t = a;                                                               \
  t = _mm_slli_epi16(t, 8);                                                    \
  t = _mm_srai_epi16(t, N);                                                    \
  t = _mm_srli_epi16(t, 8);                                                    \
                                                                               \
  a = _mm_srai_epi16(a, N + 8);                                                \
  a = _mm_slli_epi16(a, 8);                                                    \
                                                                               \
  a = _mm_or_si128(t, a);                                                      \
}
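
// SSE2 has no arithmetic shift on packed bytes, hence the macro above. The
// per-byte operation it emulates is simply (scalar sketch, assuming int8_t
// lanes):
//    int8_t signed_shift_n(int8_t a, int n) { return a >> n; }
// Low bytes are moved to the high half of each 16-bit word so the word-wide
// arithmetic shift sees their sign bit; high bytes are shifted down by N + 8
// and repositioned; the two halves are then OR'ed back together.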

#define FLIP_SIGN_BIT2(a, b) {                                                 \
  a = _mm_xor_si128(a, sign_bit);                                              \
  b = _mm_xor_si128(b, sign_bit);                                              \
}

#define FLIP_SIGN_BIT4(a, b, c, d) {                                           \
  FLIP_SIGN_BIT2(a, b);                                                        \
  FLIP_SIGN_BIT2(c, d);                                                        \
}

#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) {                      \
  const __m128i zero = _mm_setzero_si128();                                    \
  const __m128i t_1 = MM_ABS(p1, p0);                                          \
  const __m128i t_2 = MM_ABS(q1, q0);                                          \
                                                                               \
  const __m128i h = _mm_set1_epi8(hev_thresh);                                 \
  const __m128i t_3 = _mm_subs_epu8(t_1, h);  /* abs(p1 - p0) - hev_thresh */  \
  const __m128i t_4 = _mm_subs_epu8(t_2, h);  /* abs(q1 - q0) - hev_thresh */  \
                                                                               \
  not_hev = _mm_or_si128(t_3, t_4);                                            \
  not_hev = _mm_cmpeq_epi8(not_hev, zero);  /* both diffs <= hev_thresh */     \
}
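
// Scalar equivalent of the "high edge variance" test, per pixel:
//    not_hev = (abs(p1 - p0) <= hev_thresh) && (abs(q1 - q0) <= hev_thresh);
// i.e. not_hev is 0xFF exactly where the edge is NOT high-variance.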

#define GET_BASE_DELTA(p1, p0, q0, q1, o) {                                    \
  const __m128i qp0 = _mm_subs_epi8(q0, p0);  /* q0 - p0 */                    \
  o = _mm_subs_epi8(p1, q1);            /* p1 - q1 */                          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 1 * (q0 - p0) */          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 2 * (q0 - p0) */          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 3 * (q0 - p0) */          \
}
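
// Scalar sketch of the base filter amount: o = (p1 - q1) + 3 * (q0 - p0),
// with signed 8-bit saturation applied at every step (which is why the
// multiply by 3 is unrolled into three saturating additions).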

#define DO_SIMPLE_FILTER(p0, q0, fl) {                                         \
  const __m128i three = _mm_set1_epi8(3);                                      \
  const __m128i four = _mm_set1_epi8(4);                                       \
  __m128i v3 = _mm_adds_epi8(fl, three);                                       \
  __m128i v4 = _mm_adds_epi8(fl, four);                                        \
                                                                               \
  /* Do +4 side */                                                             \
  SIGNED_SHIFT_N(v4, 3);                /* v4 >> 3  */                         \
  q0 = _mm_subs_epi8(q0, v4);           /* q0 -= v4 */                         \
                                                                               \
  /* Now do +3 side */                                                         \
  SIGNED_SHIFT_N(v3, 3);                /* v3 >> 3  */                         \
  p0 = _mm_adds_epi8(p0, v3);           /* p0 += v3 */                         \
}
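
// Per pixel this is the standard VP8 simple-filter update, applied to the
// sign-flipped (i.e. signed) values:
//    q0 -= (fl + 4) >> 3;
//    p0 += (fl + 3) >> 3;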

// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) {                                   \
  const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7);                               \
  const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7);                               \
  const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7);                         \
  pi = _mm_adds_epi8(pi, delta);                                               \
  qi = _mm_subs_epi8(qi, delta);                                               \
}
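
// In DoFilter6() below, a_lo/a_hi hold (w * f + 63) as 16-bit words for the
// weights w = 9, 18 and 27, so per pixel the update amounts to:
//    delta = (w * f + 63) >> 7;
//    p += delta;  q -= delta;    // with signed 8-bit saturation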

static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
                        const __m128i* q1, int thresh, __m128i *mask) {
  __m128i t1 = MM_ABS(*p1, *q1);        // abs(p1 - q1)
  *mask = _mm_set1_epi8(0xFE);
  t1 = _mm_and_si128(t1, *mask);        // set lsb of each byte to zero
  t1 = _mm_srli_epi16(t1, 1);           // abs(p1 - q1) / 2

  *mask = MM_ABS(*p0, *q0);             // abs(p0 - q0)
  *mask = _mm_adds_epu8(*mask, *mask);  // abs(p0 - q0) * 2
  *mask = _mm_adds_epu8(*mask, t1);     // abs(p0 - q0) * 2 + abs(p1 - q1) / 2

  t1 = _mm_set1_epi8(thresh);
  *mask = _mm_subs_epu8(*mask, t1);     // mask <= thresh
  *mask = _mm_cmpeq_epi8(*mask, _mm_setzero_si128());
}
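
// Scalar sketch of the condition NeedsFilter() evaluates per pixel:
//    mask = (2 * abs(p0 - q0) + abs(p1 - q1) / 2) <= thresh;
// The lsb of each byte is cleared before the 16-bit shift so that a
// neighboring byte's bit cannot leak into abs(p1 - q1) / 2 (SSE2 has no
// 8-bit shift).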

//------------------------------------------------------------------------------
// Edge filtering functions

// Applies filter on 2 pixels (p0 and q0)
static WEBP_INLINE void DoFilter2(const __m128i* p1, __m128i* p0, __m128i* q0,
                                  const __m128i* q1, int thresh) {
  __m128i a, mask;
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
  const __m128i q1s = _mm_xor_si128(*q1, sign_bit);

  NeedsFilter(p1, p0, q0, q1, thresh, &mask);

  // convert to signed values
  FLIP_SIGN_BIT2(*p0, *q0);

  GET_BASE_DELTA(p1s, *p0, *q0, q1s, a);
  a = _mm_and_si128(a, mask);     // mask filter values we don't care about
  DO_SIMPLE_FILTER(*p0, *q0, a);

  // unoffset
  FLIP_SIGN_BIT2(*p0, *q0);
}

// Applies filter on 4 pixels (p1, p0, q0 and q1)
static WEBP_INLINE void DoFilter4(__m128i* p1, __m128i *p0,
                                  __m128i* q0, __m128i* q1,
                                  const __m128i* mask, int hev_thresh) {
  __m128i not_hev;
  __m128i t1, t2, t3;
  const __m128i sign_bit = _mm_set1_epi8(0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);

  t1 = _mm_subs_epi8(*p1, *q1);        // p1 - q1
  t1 = _mm_andnot_si128(not_hev, t1);  // hev(p1 - q1)
  t2 = _mm_subs_epi8(*q0, *p0);        // q0 - p0
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 1 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 2 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 3 * (q0 - p0)
  t1 = _mm_and_si128(t1, *mask);       // mask filter values we don't care about

  // Do +4 side
  t2 = _mm_set1_epi8(4);
  t2 = _mm_adds_epi8(t1, t2);        // 3 * (q0 - p0) + hev(p1 - q1) + 4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
  t3 = t2;                           // save t2
  *q0 = _mm_subs_epi8(*q0, t2);      // q0 -= t2

  // Now do +3 side
  t2 = _mm_set1_epi8(3);
  t2 = _mm_adds_epi8(t1, t2);        // +3 instead of +4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
  *p0 = _mm_adds_epi8(*p0, t2);      // p0 += t2

  t2 = _mm_set1_epi8(1);
  t3 = _mm_adds_epi8(t3, t2);
  SIGNED_SHIFT_N(t3, 1);             // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 4

  t3 = _mm_and_si128(not_hev, t3);   // if !hev
  *q1 = _mm_subs_epi8(*q1, t3);      // q1 -= t3
  *p1 = _mm_adds_epi8(*p1, t3);      // p1 += t3

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
}

// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
static WEBP_INLINE void DoFilter6(__m128i *p2, __m128i* p1, __m128i *p0,
                                  __m128i* q0, __m128i* q1, __m128i *q2,
                                  const __m128i* mask, int hev_thresh) {
  __m128i a, not_hev;
  const __m128i sign_bit = _mm_set1_epi8(0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);

  GET_BASE_DELTA(*p1, *p0, *q0, *q1, a);

  { // do simple filter on pixels with hev
    const __m128i m = _mm_andnot_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    DO_SIMPLE_FILTER(*p0, *q0, f);
  }
  { // do strong filter on pixels with not hev
    const __m128i zero = _mm_setzero_si128();
    const __m128i nine = _mm_set1_epi16(0x0900);
    const __m128i sixty_three = _mm_set1_epi16(63);

    const __m128i m = _mm_and_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
    const __m128i f_hi = _mm_unpackhi_epi8(zero, f);

    const __m128i f9_lo = _mm_mulhi_epi16(f_lo, nine);   // Filter (lo) * 9
    const __m128i f9_hi = _mm_mulhi_epi16(f_hi, nine);   // Filter (hi) * 9
    const __m128i f18_lo = _mm_add_epi16(f9_lo, f9_lo);  // Filter (lo) * 18
    const __m128i f18_hi = _mm_add_epi16(f9_hi, f9_hi);  // Filter (hi) * 18

    const __m128i a2_lo = _mm_add_epi16(f9_lo, sixty_three);  // Filter * 9 + 63
    const __m128i a2_hi = _mm_add_epi16(f9_hi, sixty_three);  // Filter * 9 + 63

    const __m128i a1_lo = _mm_add_epi16(f18_lo, sixty_three);  // F... * 18 + 63
    const __m128i a1_hi = _mm_add_epi16(f18_hi, sixty_three);  // F... * 18 + 63

    const __m128i a0_lo = _mm_add_epi16(f18_lo, a2_lo);  // Filter * 27 + 63
    const __m128i a0_hi = _mm_add_epi16(f18_hi, a2_hi);  // Filter * 27 + 63

    UPDATE_2PIXELS(*p2, *q2, a2_lo, a2_hi);
    UPDATE_2PIXELS(*p1, *q1, a1_lo, a1_hi);
    UPDATE_2PIXELS(*p0, *q0, a0_lo, a0_hi);
  }

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);
}

// Reads 8 rows across a vertical edge.
//
// TODO(somnath): Investigate _mm_shuffle*; also see if it can be broken into
// two Load4x4() to avoid code duplication.
static WEBP_INLINE void Load8x4(const uint8_t* b, int stride,
                                __m128i* p, __m128i* q) {
  __m128i t1, t2;

  // Load 0th, 1st, 4th and 5th rows
  __m128i r0 =  _mm_cvtsi32_si128(*((int*)&b[0 * stride]));  // 03 02 01 00
  __m128i r1 =  _mm_cvtsi32_si128(*((int*)&b[1 * stride]));  // 13 12 11 10
  __m128i r4 =  _mm_cvtsi32_si128(*((int*)&b[4 * stride]));  // 43 42 41 40
  __m128i r5 =  _mm_cvtsi32_si128(*((int*)&b[5 * stride]));  // 53 52 51 50

  r0 = _mm_unpacklo_epi32(r0, r4);               // 43 42 41 40 03 02 01 00
  r1 = _mm_unpacklo_epi32(r1, r5);               // 53 52 51 50 13 12 11 10

  // t1 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
  t1 = _mm_unpacklo_epi8(r0, r1);

  // Load 2nd, 3rd, 6th and 7th rows
  r0 =  _mm_cvtsi32_si128(*((int*)&b[2 * stride]));          // 23 22 21 20
  r1 =  _mm_cvtsi32_si128(*((int*)&b[3 * stride]));          // 33 32 31 30
  r4 =  _mm_cvtsi32_si128(*((int*)&b[6 * stride]));          // 63 62 61 60
  r5 =  _mm_cvtsi32_si128(*((int*)&b[7 * stride]));          // 73 72 71 70

  r0 = _mm_unpacklo_epi32(r0, r4);               // 63 62 61 60 23 22 21 20
  r1 = _mm_unpacklo_epi32(r1, r5);               // 73 72 71 70 33 32 31 30

  // t2 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
  t2 = _mm_unpacklo_epi8(r0, r1);

  // t1 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
  // t2 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
  r0 = t1;
  t1 = _mm_unpacklo_epi16(t1, t2);
  t2 = _mm_unpackhi_epi16(r0, t2);

  // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  *p = _mm_unpacklo_epi32(t1, t2);
  *q = _mm_unpackhi_epi32(t1, t2);
}

static WEBP_INLINE void Load16x4(const uint8_t* r0, const uint8_t* r8,
                                 int stride,
                                 __m128i* p1, __m128i* p0,
                                 __m128i* q0, __m128i* q1) {
  __m128i t1, t2;
  // Assume the pixels around the edge (|) are numbered as follows
  //                00 01 | 02 03
  //                10 11 | 12 13
  //                 ...  |  ...
  //                e0 e1 | e2 e3
  //                f0 f1 | f2 f3
  //
  // r0 is pointing to the 0th row (00)
  // r8 is pointing to the 8th row (80)

  // Load
  // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
  // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
  Load8x4(r0, stride, p1, q0);
  Load8x4(r8, stride, p0, q1);

  t1 = *p1;
  t2 = *q0;
  // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
  // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
  // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
  // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
  *p1 = _mm_unpacklo_epi64(t1, *p0);
  *p0 = _mm_unpackhi_epi64(t1, *p0);
  *q0 = _mm_unpacklo_epi64(t2, *q1);
  *q1 = _mm_unpackhi_epi64(t2, *q1);
}

static WEBP_INLINE void Store4x4(__m128i* x, uint8_t* dst, int stride) {
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    *((int32_t*)dst) = _mm_cvtsi128_si32(*x);
    *x = _mm_srli_si128(*x, 4);
  }
}

// Transpose back and store
static WEBP_INLINE void Store16x4(uint8_t* r0, uint8_t* r8, int stride,
                                  __m128i* p1, __m128i* p0,
                                  __m128i* q0, __m128i* q1) {
  __m128i t1;

  // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
  // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
  t1 = *p0;
  *p0 = _mm_unpacklo_epi8(*p1, t1);
  *p1 = _mm_unpackhi_epi8(*p1, t1);

  // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
  // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
  t1 = *q0;
  *q0 = _mm_unpacklo_epi8(t1, *q1);
  *q1 = _mm_unpackhi_epi8(t1, *q1);

  // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
  // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
  t1 = *p0;
  *p0 = _mm_unpacklo_epi16(t1, *q0);
  *q0 = _mm_unpackhi_epi16(t1, *q0);

  // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
  // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
  t1 = *p1;
  *p1 = _mm_unpacklo_epi16(t1, *q1);
  *q1 = _mm_unpackhi_epi16(t1, *q1);

  Store4x4(p0, r0, stride);
  r0 += 4 * stride;
  Store4x4(q0, r0, stride);

  Store4x4(p1, r8, stride);
  r8 += 4 * stride;
  Store4x4(q1, r8, stride);
}

//------------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static void SimpleVFilter16SSE2(uint8_t* p, int stride, int thresh) {
  // Load
  __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
  __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
  __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
  __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);

  DoFilter2(&p1, &p0, &q0, &q1, thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-stride], p0);
  _mm_storeu_si128((__m128i*)p, q0);
}

static void SimpleHFilter16SSE2(uint8_t* p, int stride, int thresh) {
  __m128i p1, p0, q0, q1;

  p -= 2;  // beginning of p1

  Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
  DoFilter2(&p1, &p0, &q0, &q1, thresh);
  Store16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
}

static void SimpleVFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16SSE2(p, stride, thresh);
  }
}

static void SimpleHFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16SSE2(p, stride, thresh);
  }
}

//------------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

#define MAX_DIFF1(p3, p2, p1, p0, m) {                                         \
  m = MM_ABS(p3, p2);                                                          \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));                                         \
}

#define MAX_DIFF2(p3, p2, p1, p0, m) {                                         \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));                                         \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));                                         \
}

#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) {                             \
  e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]);                            \
  e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]);                            \
  e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]);                            \
  e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]);                            \
}

#define LOADUV_H_EDGE(p, u, v, stride) {                                       \
  p = _mm_loadl_epi64((__m128i*)&(u)[(stride)]);                               \
  p = _mm_unpacklo_epi64(p, _mm_loadl_epi64((__m128i*)&(v)[(stride)]));        \
}

#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) {                        \
  LOADUV_H_EDGE(e1, u, v, 0 * stride);                                         \
  LOADUV_H_EDGE(e2, u, v, 1 * stride);                                         \
  LOADUV_H_EDGE(e3, u, v, 2 * stride);                                         \
  LOADUV_H_EDGE(e4, u, v, 3 * stride);                                         \
}

#define STOREUV(p, u, v, stride) {                                             \
  _mm_storel_epi64((__m128i*)&u[(stride)], p);                                 \
  p = _mm_srli_si128(p, 8);                                                    \
  _mm_storel_epi64((__m128i*)&v[(stride)], p);                                 \
}

#define COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask) {               \
  __m128i fl_yes;                                                              \
  const __m128i it = _mm_set1_epi8(ithresh);                                   \
  mask = _mm_subs_epu8(mask, it);                                              \
  mask = _mm_cmpeq_epi8(mask, _mm_setzero_si128());                            \
  NeedsFilter(&p1, &p0, &q0, &q1, thresh, &fl_yes);                            \
  mask = _mm_and_si128(mask, fl_yes);                                          \
}
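
// Scalar sketch of the resulting complex-filter mask, per pixel: the largest
// difference between neighboring interior pixels (accumulated into 'mask' by
// MAX_DIFF1/MAX_DIFF2 beforehand) must stay within ithresh, and the edge
// itself must need filtering:
//    mask = (max_interior_diff <= ithresh) && NeedsFilter(p1, p0, q0, q1);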

// on macroblock edges
static void VFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i t1;
  __m128i mask;
  __m128i p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
  _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
  _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
  _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
  _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  _mm_storeu_si128((__m128i*)&p[2 * stride], q2);
}

static void HFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const b = p - 4;
  Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);
  Store16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);
}

// on three inner edges
static void VFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    // Load p3, p2, p1, p0
    LOAD_H_EDGES4(p, stride, t2, t1, p1, p0);
    MAX_DIFF1(t2, t1, p1, p0, mask);

    p += 4 * stride;

    // Load q0, q1, q2, q3
    LOAD_H_EDGES4(p, stride, q0, q1, t1, t2);
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    // Store
    _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
    _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
    _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
    _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  }
}

static void HFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  uint8_t* b;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    b = p;
    Load16x4(b, b + 8 * stride, stride, &t2, &t1, &p1, &p0);  // p3, p2, p1, p0
    MAX_DIFF1(t2, t1, p1, p0, mask);

    b += 4;  // beginning of q0
    Load16x4(b, b + 8 * stride, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    b -= 2;  // beginning of p1
    Store16x4(b, b + 8 * stride, stride, &p1, &p0, &q0, &q1);

    p += 4;
  }
}

// 8-pixel-wide variant, for chroma filtering
static void VFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  STOREUV(p2, u, v, -3 * stride);
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
  STOREUV(q2, u, v, 2 * stride);
}

static void HFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const tu = u - 4;
  uint8_t* const tv = v - 4;
  Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(u, v, stride, &q0, &q1, &q2, &q3);    // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(tu, tv, stride, &p3, &p2, &p1, &p0);
  Store16x4(u, v, stride, &q0, &q1, &q2, &q3);
}

static void VFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4 * stride;
  v += 4 * stride;

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  // Store
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
}

static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;
  Load16x4(u, v, stride, &t2, &t1, &p1, &p0);   // p3, p2, p1, p0
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4;  // beginning of q0
  v += 4;
  Load16x4(u, v, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  u -= 2;  // beginning of p1
  v -= 2;
  Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}

#endif   // WEBP_USE_SSE2

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitSSE2(void);

void VP8DspInitSSE2(void) {
#if defined(WEBP_USE_SSE2)
  VP8Transform = TransformSSE2;

  VP8VFilter16 = VFilter16SSE2;
  VP8HFilter16 = HFilter16SSE2;
  VP8VFilter8 = VFilter8SSE2;
  VP8HFilter8 = HFilter8SSE2;
  VP8VFilter16i = VFilter16iSSE2;
  VP8HFilter16i = HFilter16iSSE2;
  VP8VFilter8i = VFilter8iSSE2;
  VP8HFilter8i = HFilter8iSSE2;

  VP8SimpleVFilter16 = SimpleVFilter16SSE2;
  VP8SimpleHFilter16 = SimpleHFilter16SSE2;
  VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
  VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
#endif   // WEBP_USE_SSE2
}
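
// Typical usage (a sketch; the actual call site lives in the generic dsp
// code and may differ by library version): VP8DspInit() is expected to call
// this entry point only once runtime CPU detection has reported SSE2, e.g.:
//    if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kSSE2)) {
//      VP8DspInitSSE2();
//    }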

#if defined(__cplusplus) || defined(c_plusplus)
}    // extern "C"
#endif