// Copyright 2011 Google Inc.
//
// This code is licensed under the same terms as WebM:
//  Software License Agreement:  http://www.webmproject.org/license/software/
//  Additional IP Rights Grant:  http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// SSE2 version of dsp functions and loop filtering.
//
// Author: somnath (at) google.com (Somnath Banerjee)
//         cduvivier (at) google.com (Christian Duvivier)

#if defined(__SSE2__) || defined(_MSC_VER)

#include <emmintrin.h>
#include "vp8i.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

//-----------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 =  20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
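  //   As a quick sanity check with plain integers (an added example, not
  //   part of the original derivation): for x = 1000 and K1 = 85627,
  //      (1000 * 85627) >> 16 = 1306
  //      ((1000 * 20091) >> 16) + 1000 = 306 + 1000 = 1306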
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two transforms
  // in parallel). In the case of only one transform, the second half of the
  // vectors will just contain random values we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30 a01 a11 a21 a31
    // b00 b10 b20 b30 b01 b11 b21 b31
    // a02 a12 a22 a32 a03 a13 a23 a33
    // b02 b12 b22 b32 b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a =  _mm_add_epi16(dc, T2);
    const __m128i b =  _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30 a01 a11 a21 a31
    // b00 b10 b20 b30 b01 b11 b21 b31
    // a02 a12 a22 a32 a03 a13 a23 a33
    // b02 b12 b22 b32 b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_set1_epi16(0);
    // Load the reference(s).
    __m128i dst0, dst1, dst2, dst3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      dst0 = _mm_loadl_epi64((__m128i*)&dst[0 * BPS]);
      dst1 = _mm_loadl_epi64((__m128i*)&dst[1 * BPS]);
      dst2 = _mm_loadl_epi64((__m128i*)&dst[2 * BPS]);
      dst3 = _mm_loadl_epi64((__m128i*)&dst[3 * BPS]);
    } else {
      // Load four bytes/pixels per line.
      dst0 = _mm_cvtsi32_si128(*(int*)&dst[0 * BPS]);
      dst1 = _mm_cvtsi32_si128(*(int*)&dst[1 * BPS]);
      dst2 = _mm_cvtsi32_si128(*(int*)&dst[2 * BPS]);
      dst3 = _mm_cvtsi32_si128(*(int*)&dst[3 * BPS]);
    }
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)&dst[0 * BPS], dst0);
      _mm_storel_epi64((__m128i*)&dst[1 * BPS], dst1);
      _mm_storel_epi64((__m128i*)&dst[2 * BPS], dst2);
      _mm_storel_epi64((__m128i*)&dst[3 * BPS], dst3);
    } else {
      // Store four bytes/pixels per line.
      *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(dst0);
      *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(dst1);
      *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(dst2);
      *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(dst3);
    }
  }
}
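
// For reference, a scalar sketch of one column of the first (vertical)
// pass above, written with the full-range constants K1 = 85627 and
// K2 = 35468 from the comment in TransformSSE2. This helper is
// illustrative only (not part of the original file, not called anywhere):
static inline void TransformColumnSketch(const int16_t* col, int* out) {
  // 'col' points at one column of the 4x4 coefficient block laid out
  // row-major, so the column entries are col[0], col[4], col[8], col[12].
  const int a = col[0] + col[8];
  const int b = col[0] - col[8];
  const int c = ((col[4] * 35468) >> 16) - ((col[12] * 85627) >> 16);
  const int d = ((col[4] * 85627) >> 16) + ((col[12] * 35468) >> 16);
  out[0] = a + d;   // the SIMD version keeps these in 16-bit lanes
  out[1] = b + c;
  out[2] = b - c;
  out[3] = a - d;
}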

//-----------------------------------------------------------------------------
// Loop Filter (Paragraph 15)

// Compute abs(p - q) = subs(p - q) OR subs(q - p)
#define MM_ABS(p, q)  _mm_or_si128(                                            \
    _mm_subs_epu8((q), (p)),                                                   \
    _mm_subs_epu8((p), (q)))

// Shift each byte of "a" by N bits while preserving the sign bit.
//
// It first shifts the lower bytes of the words and then the upper bytes and
// then merges the results together.
#define SIGNED_SHIFT_N(a, N) {                                                 \
  __m128i t = a;                                                               \
  t = _mm_slli_epi16(t, 8);                                                    \
  t = _mm_srai_epi16(t, N);                                                    \
  t = _mm_srli_epi16(t, 8);                                                    \
                                                                               \
  a = _mm_srai_epi16(a, N + 8);                                                \
  a = _mm_slli_epi16(a, 8);                                                    \
                                                                               \
  a = _mm_or_si128(t, a);                                                      \
}
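
// SSE2 has no arithmetic shift on 8-bit lanes, which is why the macro
// above goes through 16-bit shifts. Per byte it is equivalent to this
// scalar model (an illustrative sketch, not called by the filters):
static inline int8_t SignedShiftNSketch(int8_t v, int n) {
  return (int8_t)(v >> n);   // arithmetic shift, sign bit preserved
}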

#define FLIP_SIGN_BIT2(a, b) {                                                 \
  a = _mm_xor_si128(a, sign_bit);                                              \
  b = _mm_xor_si128(b, sign_bit);                                              \
}

#define FLIP_SIGN_BIT4(a, b, c, d) {                                           \
  FLIP_SIGN_BIT2(a, b);                                                        \
  FLIP_SIGN_BIT2(c, d);                                                        \
}

#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) {                      \
  const __m128i zero = _mm_setzero_si128();                                    \
  const __m128i t1 = MM_ABS(p1, p0);                                           \
  const __m128i t2 = MM_ABS(q1, q0);                                           \
                                                                               \
  const __m128i h = _mm_set1_epi8(hev_thresh);                                 \
  const __m128i t3 = _mm_subs_epu8(t1, h);  /* abs(p1 - p0) - hev_thresh */    \
  const __m128i t4 = _mm_subs_epu8(t2, h);  /* abs(q1 - q0) - hev_thresh */    \
                                                                               \
  not_hev = _mm_or_si128(t3, t4);                                              \
  not_hev = _mm_cmpeq_epi8(not_hev, zero);  /* t1 <= h && t2 <= h */           \
}
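
// Scalar model of the "high edge variance" test above, for one pixel
// position (an illustrative sketch, not called by the filters): the
// result byte is 0xFF when the edge is NOT high-variance.
static inline uint8_t GetNotHevSketch(uint8_t p1, uint8_t p0,
                                      uint8_t q0, uint8_t q1,
                                      int hev_thresh) {
  const int a_p1p0 = (p1 > p0) ? p1 - p0 : p0 - p1;   // abs(p1 - p0)
  const int a_q1q0 = (q1 > q0) ? q1 - q0 : q0 - q1;   // abs(q1 - q0)
  return (a_p1p0 <= hev_thresh && a_q1q0 <= hev_thresh) ? 0xFF : 0x00;
}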

#define GET_BASE_DELTA(p1, p0, q0, q1, o) {                                    \
  const __m128i qp0 = _mm_subs_epi8(q0, p0);  /* q0 - p0 */                    \
  o = _mm_subs_epi8(p1, q1);            /* p1 - q1 */                          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 1 * (q0 - p0) */          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 2 * (q0 - p0) */          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 3 * (q0 - p0) */          \
}

#define DO_SIMPLE_FILTER(p0, q0, fl) {                                         \
  const __m128i three = _mm_set1_epi8(3);                                      \
  const __m128i four = _mm_set1_epi8(4);                                       \
  __m128i v3 = _mm_adds_epi8(fl, three);                                       \
  __m128i v4 = _mm_adds_epi8(fl, four);                                        \
                                                                               \
  /* Do +4 side */                                                             \
  SIGNED_SHIFT_N(v4, 3);                /* v4 >> 3  */                         \
  q0 = _mm_subs_epi8(q0, v4);           /* q0 -= v4 */                         \
                                                                               \
  /* Now do +3 side */                                                         \
  SIGNED_SHIFT_N(v3, 3);                /* v3 >> 3  */                         \
  p0 = _mm_adds_epi8(p0, v3);           /* p0 += v3 */                         \
}
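
// Scalar model of DO_SIMPLE_FILTER for one pixel pair (an illustrative
// sketch, not called by the filters). 'p0' and 'q0' are sign-flipped
// pixels in [-128, 127] and 'fl' is the base delta from GET_BASE_DELTA:
static inline int ClampS8(int v) {   // signed 8-bit saturation
  return (v < -128) ? -128 : (v > 127) ? 127 : v;
}
static inline void DoSimpleFilterSketch(int8_t* p0, int8_t* q0, int fl) {
  *q0 = (int8_t)ClampS8(*q0 - (ClampS8(fl + 4) >> 3));  // q0 -= (fl + 4) >> 3
  *p0 = (int8_t)ClampS8(*p0 + (ClampS8(fl + 3) >> 3));  // p0 += (fl + 3) >> 3
}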

// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
// q = q - a and p = p + a; where a = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) {                                   \
  const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7);                               \
  const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7);                               \
  const __m128i a = _mm_packs_epi16(a_lo7, a_hi7);                             \
  pi = _mm_adds_epi8(pi, a);                                                   \
  qi = _mm_subs_epi8(qi, a);                                                   \
}
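
// Note that _mm_packs_epi16 above narrows the two 16-bit halves back to
// 16 signed bytes with saturation, so 'a' is the saturated per-pixel delta.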

static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
                        const __m128i* q1, int thresh, __m128i* mask) {
  __m128i t1 = MM_ABS(*p1, *q1);        // abs(p1 - q1)
  *mask = _mm_set1_epi8((char)0xFE);
  t1 = _mm_and_si128(t1, *mask);        // set lsb of each byte to zero
  t1 = _mm_srli_epi16(t1, 1);           // abs(p1 - q1) / 2

  *mask = MM_ABS(*p0, *q0);             // abs(p0 - q0)
  *mask = _mm_adds_epu8(*mask, *mask);  // abs(p0 - q0) * 2
  *mask = _mm_adds_epu8(*mask, t1);     // abs(p0 - q0) * 2 + abs(p1 - q1) / 2

  t1 = _mm_set1_epi8(thresh);
  *mask = _mm_subs_epu8(*mask, t1);     // saturates to 0 if mask <= thresh
  *mask = _mm_cmpeq_epi8(*mask, _mm_setzero_si128());
}
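
// Scalar model of the NeedsFilter() decision for one pixel position (an
// illustrative sketch, not called by the filters): filtering is enabled
// when 2 * abs(p0 - q0) + abs(p1 - q1) / 2 <= thresh.
static inline int NeedsFilterSketch(uint8_t p1, uint8_t p0,
                                    uint8_t q0, uint8_t q1, int thresh) {
  const int a_p0q0 = (p0 > q0) ? p0 - q0 : q0 - p0;   // abs(p0 - q0)
  const int a_p1q1 = (p1 > q1) ? p1 - q1 : q1 - p1;   // abs(p1 - q1)
  return (2 * a_p0q0 + (a_p1q1 >> 1)) <= thresh;
}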

//-----------------------------------------------------------------------------
// Edge filtering functions

// Applies filter on 2 pixels (p0 and q0)
static inline void DoFilter2(const __m128i* p1, __m128i* p0, __m128i* q0,
                             const __m128i* q1, int thresh) {
  __m128i a, mask;
  const __m128i sign_bit = _mm_set1_epi8((char)0x80);
  const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
  const __m128i q1s = _mm_xor_si128(*q1, sign_bit);

  NeedsFilter(p1, p0, q0, q1, thresh, &mask);

  // convert to signed values
  FLIP_SIGN_BIT2(*p0, *q0);

  GET_BASE_DELTA(p1s, *p0, *q0, q1s, a);
  a = _mm_and_si128(a, mask);     // mask filter values we don't care about
  DO_SIMPLE_FILTER(*p0, *q0, a);

  // unoffset
  FLIP_SIGN_BIT2(*p0, *q0);
}

// Applies filter on 4 pixels (p1, p0, q0 and q1)
static inline void DoFilter4(__m128i* p1, __m128i* p0, __m128i* q0, __m128i* q1,
                             const __m128i* mask, int hev_thresh) {
  __m128i not_hev;
  __m128i t1, t2, t3;
  const __m128i sign_bit = _mm_set1_epi8((char)0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);

  t1 = _mm_subs_epi8(*p1, *q1);        // p1 - q1
  t1 = _mm_andnot_si128(not_hev, t1);  // hev(p1 - q1)
  t2 = _mm_subs_epi8(*q0, *p0);        // q0 - p0
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 1 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 2 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 3 * (q0 - p0)
  t1 = _mm_and_si128(t1, *mask);       // mask filter values we don't care about

  // Do +4 side
  t2 = _mm_set1_epi8(4);
  t2 = _mm_adds_epi8(t1, t2);        // 3 * (q0 - p0) + hev(p1 - q1) + 4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
  t3 = t2;                           // save t2
  *q0 = _mm_subs_epi8(*q0, t2);      // q0 -= t2

  // Now do +3 side
  t2 = _mm_set1_epi8(3);
  t2 = _mm_adds_epi8(t1, t2);        // +3 instead of +4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
  *p0 = _mm_adds_epi8(*p0, t2);      // p0 += t2

  t2 = _mm_set1_epi8(1);
  t3 = _mm_adds_epi8(t3, t2);
  SIGNED_SHIFT_N(t3, 1);             // (t3 + 1) >> 1, i.e. the outer delta

  t3 = _mm_and_si128(not_hev, t3);   // if !hev
  *q1 = _mm_subs_epi8(*q1, t3);      // q1 -= t3
  *p1 = _mm_adds_epi8(*p1, t3);      // p1 += t3

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
}

// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
static inline void DoFilter6(__m128i* p2, __m128i* p1, __m128i* p0,
                             __m128i* q0, __m128i* q1, __m128i* q2,
                             const __m128i* mask, int hev_thresh) {
  __m128i a, not_hev;
  const __m128i sign_bit = _mm_set1_epi8((char)0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);

  GET_BASE_DELTA(*p1, *p0, *q0, *q1, a);

  { // do simple filter on pixels with hev
    const __m128i m = _mm_andnot_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    DO_SIMPLE_FILTER(*p0, *q0, f);
  }
  { // do strong filter on pixels with not hev
    const __m128i zero = _mm_setzero_si128();
    const __m128i nine = _mm_set1_epi16(0x0900);
    const __m128i sixty_three = _mm_set1_epi16(63);

    const __m128i m = _mm_and_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
    const __m128i f_hi = _mm_unpackhi_epi8(zero, f);

    const __m128i f9_lo = _mm_mulhi_epi16(f_lo, nine);   // Filter (lo) * 9
    const __m128i f9_hi = _mm_mulhi_epi16(f_hi, nine);   // Filter (hi) * 9
    const __m128i f18_lo = _mm_add_epi16(f9_lo, f9_lo);  // Filter (lo) * 18
    const __m128i f18_hi = _mm_add_epi16(f9_hi, f9_hi);  // Filter (hi) * 18

    const __m128i a2_lo = _mm_add_epi16(f9_lo, sixty_three);  // Filter * 9 + 63
    const __m128i a2_hi = _mm_add_epi16(f9_hi, sixty_three);  // Filter * 9 + 63

    const __m128i a1_lo = _mm_add_epi16(f18_lo, sixty_three);  // F... * 18 + 63
    const __m128i a1_hi = _mm_add_epi16(f18_hi, sixty_three);  // F... * 18 + 63

    const __m128i a0_lo = _mm_add_epi16(f18_lo, a2_lo);  // Filter * 27 + 63
    const __m128i a0_hi = _mm_add_epi16(f18_hi, a2_hi);  // Filter * 27 + 63

    UPDATE_2PIXELS(*p2, *q2, a2_lo, a2_hi);
    UPDATE_2PIXELS(*p1, *q1, a1_lo, a1_hi);
    UPDATE_2PIXELS(*p0, *q0, a0_lo, a0_hi);
  }

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);
}
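
// In scalar terms, the strong-filter branch of DoFilter6() applies (an
// illustrative summary, with f the masked base delta):
//   a2 = (9 * f + 63) >> 7;    p2 += a2;  q2 -= a2;
//   a1 = (18 * f + 63) >> 7;   p1 += a1;  q1 -= a1;
//   a0 = (27 * f + 63) >> 7;   p0 += a0;  q0 -= a0;
// all with signed 8-bit saturation, as in UPDATE_2PIXELS.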

// Reads 8 rows across a vertical edge.
//
// TODO(somnath): Investigate _mm_shuffle* also see if it can be broken into
// two Load4x4() to avoid code duplication.
static inline void Load8x4(const uint8_t* b, int stride,
                           __m128i* p, __m128i* q) {
  __m128i t1, t2;

  // Load 0th, 1st, 4th and 5th rows
  __m128i r0 =  _mm_cvtsi32_si128(*((int*)&b[0 * stride]));  // 03 02 01 00
  __m128i r1 =  _mm_cvtsi32_si128(*((int*)&b[1 * stride]));  // 13 12 11 10
  __m128i r4 =  _mm_cvtsi32_si128(*((int*)&b[4 * stride]));  // 43 42 41 40
  __m128i r5 =  _mm_cvtsi32_si128(*((int*)&b[5 * stride]));  // 53 52 51 50

  r0 = _mm_unpacklo_epi32(r0, r4);               // 43 42 41 40 03 02 01 00
  r1 = _mm_unpacklo_epi32(r1, r5);               // 53 52 51 50 13 12 11 10

  // t1 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
  t1 = _mm_unpacklo_epi8(r0, r1);

  // Load 2nd, 3rd, 6th and 7th rows
  r0 =  _mm_cvtsi32_si128(*((int*)&b[2 * stride]));          // 23 22 21 20
  r1 =  _mm_cvtsi32_si128(*((int*)&b[3 * stride]));          // 33 32 31 30
  r4 =  _mm_cvtsi32_si128(*((int*)&b[6 * stride]));          // 63 62 61 60
  r5 =  _mm_cvtsi32_si128(*((int*)&b[7 * stride]));          // 73 72 71 70

  r0 = _mm_unpacklo_epi32(r0, r4);               // 63 62 61 60 23 22 21 20
  r1 = _mm_unpacklo_epi32(r1, r5);               // 73 72 71 70 33 32 31 30

  // t2 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
  t2 = _mm_unpacklo_epi8(r0, r1);

  // t1 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
  // t2 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
  r0 = t1;
  t1 = _mm_unpacklo_epi16(t1, t2);
  t2 = _mm_unpackhi_epi16(r0, t2);

  // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  *p = _mm_unpacklo_epi32(t1, t2);
  *q = _mm_unpackhi_epi32(t1, t2);
}

static inline void Load16x4(const uint8_t* r0, const uint8_t* r8, int stride,
                            __m128i* p1, __m128i* p0,
                            __m128i* q0, __m128i* q1) {
  __m128i t1, t2;
  // Assume the pixels around the edge (|) are numbered as follows
  //                00 01 | 02 03
  //                10 11 | 12 13
  //                 ...  |  ...
  //                e0 e1 | e2 e3
  //                f0 f1 | f2 f3
  //
  // r0 is pointing to the 0th row (00)
  // r8 is pointing to the 8th row (80)

  // Load
  // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
  // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
  Load8x4(r0, stride, p1, q0);
  Load8x4(r8, stride, p0, q1);

  t1 = *p1;
  t2 = *q0;
  // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
  // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
  // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
  // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
  *p1 = _mm_unpacklo_epi64(t1, *p0);
  *p0 = _mm_unpackhi_epi64(t1, *p0);
  *q0 = _mm_unpacklo_epi64(t2, *q1);
  *q1 = _mm_unpackhi_epi64(t2, *q1);
}

static inline void Store4x4(__m128i* x, uint8_t* dst, int stride) {
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    *((int32_t*)dst) = _mm_cvtsi128_si32(*x);
    *x = _mm_srli_si128(*x, 4);
  }
}

// Transpose back and store
static inline void Store16x4(uint8_t* r0, uint8_t* r8, int stride, __m128i* p1,
                             __m128i* p0, __m128i* q0, __m128i* q1) {
  __m128i t1;

  // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
  // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
  t1 = *p0;
  *p0 = _mm_unpacklo_epi8(*p1, t1);
  *p1 = _mm_unpackhi_epi8(*p1, t1);

  // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
  // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
  t1 = *q0;
  *q0 = _mm_unpacklo_epi8(t1, *q1);
  *q1 = _mm_unpackhi_epi8(t1, *q1);

  // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
  // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
  t1 = *p0;
  *p0 = _mm_unpacklo_epi16(t1, *q0);
  *q0 = _mm_unpackhi_epi16(t1, *q0);

  // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
  // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
  t1 = *p1;
  *p1 = _mm_unpacklo_epi16(t1, *q1);
  *q1 = _mm_unpackhi_epi16(t1, *q1);

  Store4x4(p0, r0, stride);
  r0 += 4 * stride;
  Store4x4(q0, r0, stride);

  Store4x4(p1, r8, stride);
  r8 += 4 * stride;
  Store4x4(q1, r8, stride);
}

//-----------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static void SimpleVFilter16SSE2(uint8_t* p, int stride, int thresh) {
  // Load
  __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
  __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
  __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
  __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);

  DoFilter2(&p1, &p0, &q0, &q1, thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-stride], p0);
  _mm_storeu_si128((__m128i*)p, q0);
}

static void SimpleHFilter16SSE2(uint8_t* p, int stride, int thresh) {
  __m128i p1, p0, q0, q1;

  p -= 2;  // beginning of p1

  Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
  DoFilter2(&p1, &p0, &q0, &q1, thresh);
  Store16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
}

static void SimpleVFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16SSE2(p, stride, thresh);
  }
}

static void SimpleHFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16SSE2(p, stride, thresh);
  }
}

//-----------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

#define MAX_DIFF1(p3, p2, p1, p0, m) {                                         \
  m = MM_ABS(p3, p2);                                                          \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));                                         \
}

#define MAX_DIFF2(p3, p2, p1, p0, m) {                                         \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));                                         \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));                                         \
}

#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) {                             \
  e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]);                            \
  e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]);                            \
  e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]);                            \
  e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]);                            \
}

#define LOADUV_H_EDGE(p, u, v, stride) {                                       \
  p = _mm_loadl_epi64((__m128i*)&(u)[(stride)]);                               \
  p = _mm_unpacklo_epi64(p, _mm_loadl_epi64((__m128i*)&(v)[(stride)]));        \
}

#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) {                        \
  LOADUV_H_EDGE(e1, u, v, 0 * stride);                                         \
  LOADUV_H_EDGE(e2, u, v, 1 * stride);                                         \
  LOADUV_H_EDGE(e3, u, v, 2 * stride);                                         \
  LOADUV_H_EDGE(e4, u, v, 3 * stride);                                         \
}

#define STOREUV(p, u, v, stride) {                                             \
  _mm_storel_epi64((__m128i*)&u[(stride)], p);                                 \
  p = _mm_srli_si128(p, 8);                                                    \
  _mm_storel_epi64((__m128i*)&v[(stride)], p);                                 \
}

#define COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask) {               \
  __m128i fl_yes;                                                              \
  const __m128i it = _mm_set1_epi8(ithresh);                                   \
  mask = _mm_subs_epu8(mask, it);                                              \
  mask = _mm_cmpeq_epi8(mask, _mm_setzero_si128());                            \
  NeedsFilter(&p1, &p0, &q0, &q1, thresh, &fl_yes);                            \
  mask = _mm_and_si128(mask, fl_yes);                                          \
}
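
// In scalar terms, COMPLEX_FL_MASK enables filtering only where both
// conditions hold (an illustrative summary): the largest neighboring
// difference collected via MAX_DIFF1/MAX_DIFF2 is <= ithresh, and the
// NeedsFilter() edge test passes for 'thresh'.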

// on macroblock edges
static void VFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i t1;
  __m128i mask;
  __m128i p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
  _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
  _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
  _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
  _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  _mm_storeu_si128((__m128i*)&p[2 * stride], q2);
}

static void HFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const b = p - 4;
  Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);
  Store16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);
}

// on three inner edges
static void VFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    // Load p3, p2, p1, p0
    LOAD_H_EDGES4(p, stride, t2, t1, p1, p0);
    MAX_DIFF1(t2, t1, p1, p0, mask);

    p += 4 * stride;

    // Load q0, q1, q2, q3
    LOAD_H_EDGES4(p, stride, q0, q1, t1, t2);
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    // Store
    _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
    _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
    _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
    _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  }
}

static void HFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  uint8_t* b;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    b = p;
    Load16x4(b, b + 8 * stride, stride, &t2, &t1, &p1, &p0);  // p3, p2, p1, p0
    MAX_DIFF1(t2, t1, p1, p0, mask);

    b += 4;  // beginning of q0
    Load16x4(b, b + 8 * stride, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    b -= 2;  // beginning of p1
    Store16x4(b, b + 8 * stride, stride, &p1, &p0, &q0, &q1);

    p += 4;
  }
}

// 8-pixels wide variant, for chroma filtering
static void VFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  STOREUV(p2, u, v, -3 * stride);
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
  STOREUV(q2, u, v, 2 * stride);
}

static void HFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const tu = u - 4;
  uint8_t* const tv = v - 4;
  Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(u, v, stride, &q0, &q1, &q2, &q3);    // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(tu, tv, stride, &p3, &p2, &p1, &p0);
  Store16x4(u, v, stride, &q0, &q1, &q2, &q3);
}

static void VFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4 * stride;
  v += 4 * stride;

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  // Store
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
}

static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;
  Load16x4(u, v, stride, &t2, &t1, &p1, &p0);   // p3, p2, p1, p0
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4;  // beginning of q0
  v += 4;
  Load16x4(u, v, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  u -= 2;  // beginning of p1
  v -= 2;
  Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}

extern void VP8DspInitSSE2(void);

void VP8DspInitSSE2(void) {
  VP8Transform = TransformSSE2;

  VP8VFilter16 = VFilter16SSE2;
  VP8HFilter16 = HFilter16SSE2;
  VP8VFilter8 = VFilter8SSE2;
  VP8HFilter8 = HFilter8SSE2;
  VP8VFilter16i = VFilter16iSSE2;
  VP8HFilter16i = HFilter16iSSE2;
  VP8VFilter8i = VFilter8iSSE2;
  VP8HFilter8i = HFilter8iSSE2;

  VP8SimpleVFilter16 = SimpleVFilter16SSE2;
  VP8SimpleHFilter16 = SimpleHFilter16SSE2;
  VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
  VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
}

#if defined(__cplusplus) || defined(c_plusplus)
}    // extern "C"
#endif

#endif   // __SSE2__ || _MSC_VER