// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
//
// Author: somnath (at) google.com (Somnath Banerjee)
//         cduvivier (at) google.com (Christian Duvivier)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)

// The 3-coeff sparse transform in SSE2 is not really faster than the plain-C
// one it seems => disable it by default. Uncomment the following to enable:
// #define USE_TRANSFORM_AC3

#include <emmintrin.h>
#include "./common_sse2.h"
#include "../dec/vp8i_dec.h"
#include "../utils/utils.h"

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

static void Transform(const int16_t* in, uint8_t* dst, int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 = 20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two transforms
  // in parallel). In the case of only one transform, the second half of the
  // vectors will just contain random values we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((const __m128i*)&in[0]);
    in1 = _mm_loadl_epi64((const __m128i*)&in[4]);
    in2 = _mm_loadl_epi64((const __m128i*)&in[8]);
    in3 = _mm_loadl_epi64((const __m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((const __m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((const __m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((const __m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((const __m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&tmp0, &tmp1, &tmp2, &tmp3, &T0, &T1, &T2, &T3);
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&shifted0, &shifted1, &shifted2, &shifted3,
                           &T0, &T1, &T2, &T3);
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i dst0, dst1, dst2, dst3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS));
      dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS));
      dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS));
      dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
    } else {
      // Load four bytes/pixels per line.
      dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS));
      dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS));
      dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS));
      dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS));
    }
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0);
      _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1);
      _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2);
      _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
    } else {
      // Store four bytes/pixels per line.
      WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
      WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
      WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
      WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
    }
  }
}
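
// A scalar sketch (illustration only; 'MulByK1' is a name made up here, not
// part of the decoder) of the constant trick documented above: since K1 does
// not fit in a signed 16-bit lane, _mm_mulhi_epi16() multiplies by
// k1 = K1 - (1 << 16) and the variable is added back afterwards.
#if 0
static int MulByK1(int x) {
  // (x * 85627) >> 16 == ((x * 20091) >> 16) + x, because the x * (1 << 16)
  // part contributes exactly x after the shift.
  return ((x * 20091) >> 16) + x;
}
#endif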

#if defined(USE_TRANSFORM_AC3)
#define MUL(a, b) (((a) * (b)) >> 16)
static void TransformAC3(const int16_t* in, uint8_t* dst) {
  static const int kC1 = 20091 + (1 << 16);
  static const int kC2 = 35468;
  const __m128i A = _mm_set1_epi16(in[0] + 4);
  const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
  const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
  const int c1 = MUL(in[1], kC2);
  const int d1 = MUL(in[1], kC1);
  const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
  const __m128i B = _mm_adds_epi16(A, CD);
  const __m128i m0 = _mm_adds_epi16(B, d4);
  const __m128i m1 = _mm_adds_epi16(B, c4);
  const __m128i m2 = _mm_subs_epi16(B, c4);
  const __m128i m3 = _mm_subs_epi16(B, d4);
  const __m128i zero = _mm_setzero_si128();
  // Load the source pixels.
  __m128i dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS));
  __m128i dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS));
  __m128i dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS));
  __m128i dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS));
  // Convert to 16b.
  dst0 = _mm_unpacklo_epi8(dst0, zero);
  dst1 = _mm_unpacklo_epi8(dst1, zero);
  dst2 = _mm_unpacklo_epi8(dst2, zero);
  dst3 = _mm_unpacklo_epi8(dst3, zero);
  // Add the inverse transform.
  dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
  dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
  dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
  dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
  // Unsigned saturate to 8b.
  dst0 = _mm_packus_epi16(dst0, dst0);
  dst1 = _mm_packus_epi16(dst1, dst1);
  dst2 = _mm_packus_epi16(dst2, dst2);
  dst3 = _mm_packus_epi16(dst3, dst3);
  // Store the results.
  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
}
#undef MUL
#endif  // USE_TRANSFORM_AC3
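
// Illustration only: when just the DC (in[0]) and the two first-order AC
// coefficients (in[1] horizontal, in[4] vertical) are non-zero, the inverse
// transform separates into one per-column plus one per-row contribution,
// which is what TransformAC3() exploits. A scalar sketch (hypothetical
// helper, mirroring the arithmetic above):
#if 0
static void TransformAC3_ScalarSketch(const int16_t* in, uint8_t* dst) {
  const int kC1 = 20091 + (1 << 16), kC2 = 35468;
  const int a = in[0] + 4;
  const int c1 = (in[1] * kC2) >> 16, d1 = (in[1] * kC1) >> 16;
  const int c4 = (in[4] * kC2) >> 16, d4 = (in[4] * kC1) >> 16;
  const int col[4] = { d1, c1, -c1, -d1 };  // contribution of in[1], per x
  const int row[4] = { d4, c4, -c4, -d4 };  // contribution of in[4], per y
  int x, y;
  for (y = 0; y < 4; ++y) {
    for (x = 0; x < 4; ++x) {
      const int v = dst[x + y * BPS] + ((a + col[x] + row[y]) >> 3);
      dst[x + y * BPS] = (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t)v;
    }
  }
}
#endif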

//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)

// Compute abs(p - q) = subs(p - q) OR subs(q - p)
#define MM_ABS(p, q)  _mm_or_si128(                                            \
    _mm_subs_epu8((q), (p)),                                                   \
    _mm_subs_epu8((p), (q)))

// Shift each byte of "x" by 3 bits while preserving the sign bit.
static WEBP_INLINE void SignedShift8b(__m128i* const x) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i lo_0 = _mm_unpacklo_epi8(zero, *x);
  const __m128i hi_0 = _mm_unpackhi_epi8(zero, *x);
  const __m128i lo_1 = _mm_srai_epi16(lo_0, 3 + 8);
  const __m128i hi_1 = _mm_srai_epi16(hi_0, 3 + 8);
  *x = _mm_packs_epi16(lo_1, hi_1);
}

#define FLIP_SIGN_BIT2(a, b) {                                                 \
  a = _mm_xor_si128(a, sign_bit);                                              \
  b = _mm_xor_si128(b, sign_bit);                                              \
}

#define FLIP_SIGN_BIT4(a, b, c, d) {                                           \
  FLIP_SIGN_BIT2(a, b);                                                        \
  FLIP_SIGN_BIT2(c, d);                                                        \
}

// input/output is uint8_t
static WEBP_INLINE void GetNotHEV(const __m128i* const p1,
                                  const __m128i* const p0,
                                  const __m128i* const q0,
                                  const __m128i* const q1,
                                  int hev_thresh, __m128i* const not_hev) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i t_1 = MM_ABS(*p1, *p0);
  const __m128i t_2 = MM_ABS(*q1, *q0);

  const __m128i h = _mm_set1_epi8(hev_thresh);
  const __m128i t_max = _mm_max_epu8(t_1, t_2);

  const __m128i t_max_h = _mm_subs_epu8(t_max, h);
  *not_hev = _mm_cmpeq_epi8(t_max_h, zero);  // not_hev = (max(t_1, t_2) <= h)
}

// input pixels are int8_t
static WEBP_INLINE void GetBaseDelta(const __m128i* const p1,
                                     const __m128i* const p0,
                                     const __m128i* const q0,
                                     const __m128i* const q1,
                                     __m128i* const delta) {
  // beware of addition order, for saturation!
  const __m128i p1_q1 = _mm_subs_epi8(*p1, *q1);   // p1 - q1
  const __m128i q0_p0 = _mm_subs_epi8(*q0, *p0);   // q0 - p0
  const __m128i s1 = _mm_adds_epi8(p1_q1, q0_p0);  // p1 - q1 + 1 * (q0 - p0)
  const __m128i s2 = _mm_adds_epi8(q0_p0, s1);     // p1 - q1 + 2 * (q0 - p0)
  const __m128i s3 = _mm_adds_epi8(q0_p0, s2);     // p1 - q1 + 3 * (q0 - p0)
  *delta = s3;
}

// input and output are int8_t
static WEBP_INLINE void DoSimpleFilter(__m128i* const p0, __m128i* const q0,
                                       const __m128i* const fl) {
  const __m128i k3 = _mm_set1_epi8(3);
  const __m128i k4 = _mm_set1_epi8(4);
  __m128i v3 = _mm_adds_epi8(*fl, k3);
  __m128i v4 = _mm_adds_epi8(*fl, k4);

  SignedShift8b(&v4);            // v4 >> 3
  SignedShift8b(&v3);            // v3 >> 3
  *q0 = _mm_subs_epi8(*q0, v4);  // q0 -= v4
  *p0 = _mm_adds_epi8(*p0, v3);  // p0 += v3
}

// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
// Pixels 'pi' and 'qi' are int8_t on input, uint8_t on output (sign flip).
static WEBP_INLINE void Update2Pixels(__m128i* const pi, __m128i* const qi,
                                      const __m128i* const a0_lo,
                                      const __m128i* const a0_hi) {
  const __m128i a1_lo = _mm_srai_epi16(*a0_lo, 7);
  const __m128i a1_hi = _mm_srai_epi16(*a0_hi, 7);
  const __m128i delta = _mm_packs_epi16(a1_lo, a1_hi);
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  *pi = _mm_adds_epi8(*pi, delta);
  *qi = _mm_subs_epi8(*qi, delta);
  FLIP_SIGN_BIT2(*pi, *qi);
}

// input pixels are uint8_t
static WEBP_INLINE void NeedsFilter(const __m128i* const p1,
                                    const __m128i* const p0,
                                    const __m128i* const q0,
                                    const __m128i* const q1,
                                    int thresh, __m128i* const mask) {
  const __m128i m_thresh = _mm_set1_epi8(thresh);
  const __m128i t1 = MM_ABS(*p1, *q1);        // abs(p1 - q1)
  const __m128i kFE = _mm_set1_epi8(0xFE);
  const __m128i t2 = _mm_and_si128(t1, kFE);  // set lsb of each byte to zero
  const __m128i t3 = _mm_srli_epi16(t2, 1);   // abs(p1 - q1) / 2

  const __m128i t4 = MM_ABS(*p0, *q0);        // abs(p0 - q0)
  const __m128i t5 = _mm_adds_epu8(t4, t4);   // abs(p0 - q0) * 2
  const __m128i t6 = _mm_adds_epu8(t5, t3);   // abs(p0-q0)*2 + abs(p1-q1)/2

  const __m128i t7 = _mm_subs_epu8(t6, m_thresh);  // zero where t6 <= m_thresh
  *mask = _mm_cmpeq_epi8(t7, _mm_setzero_si128());
}
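
// Illustration only: the threshold tests above (GetNotHEV, NeedsFilter) rely
// on the same SSE2 idiom, since there is no unsigned 8-bit compare: "x <= t"
// is evaluated as "unsigned saturating (x - t) is zero". Per-byte scalar
// equivalent (hypothetical helper):
#if 0
static uint8_t LessOrEqualMask(uint8_t x, uint8_t t) {
  const uint8_t diff = (x > t) ? (uint8_t)(x - t) : 0;  // _mm_subs_epu8(x, t)
  return (uint8_t)((diff == 0) ? 0xff : 0x00);          // _mm_cmpeq_epi8(., 0)
}
#endif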

//------------------------------------------------------------------------------
// Edge filtering functions

// Applies filter on 2 pixels (p0 and q0)
static WEBP_INLINE void DoFilter2(__m128i* const p1, __m128i* const p0,
                                  __m128i* const q0, __m128i* const q1,
                                  int thresh) {
  __m128i a, mask;
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  // convert p1/q1 to int8_t (for GetBaseDelta)
  const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
  const __m128i q1s = _mm_xor_si128(*q1, sign_bit);

  NeedsFilter(p1, p0, q0, q1, thresh, &mask);

  FLIP_SIGN_BIT2(*p0, *q0);
  GetBaseDelta(&p1s, p0, q0, &q1s, &a);
  a = _mm_and_si128(a, mask);  // mask filter values we don't care about
  DoSimpleFilter(p0, q0, &a);
  FLIP_SIGN_BIT2(*p0, *q0);
}

// Applies filter on 4 pixels (p1, p0, q0 and q1)
static WEBP_INLINE void DoFilter4(__m128i* const p1, __m128i* const p0,
                                  __m128i* const q0, __m128i* const q1,
                                  const __m128i* const mask, int hev_thresh) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  const __m128i k64 = _mm_set1_epi8(64);
  const __m128i k3 = _mm_set1_epi8(3);
  const __m128i k4 = _mm_set1_epi8(4);
  __m128i not_hev;
  __m128i t1, t2, t3;

  // compute hev mask
  GetNotHEV(p1, p0, q0, q1, hev_thresh, &not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);

  t1 = _mm_subs_epi8(*p1, *q1);        // p1 - q1
  t1 = _mm_andnot_si128(not_hev, t1);  // hev(p1 - q1)
  t2 = _mm_subs_epi8(*q0, *p0);        // q0 - p0
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 1 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 2 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 3 * (q0 - p0)
  t1 = _mm_and_si128(t1, *mask);       // mask filter values we don't care about

  t2 = _mm_adds_epi8(t1, k3);          // 3 * (q0 - p0) + hev(p1 - q1) + 3
  t3 = _mm_adds_epi8(t1, k4);          // 3 * (q0 - p0) + hev(p1 - q1) + 4
  SignedShift8b(&t2);                  // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
  SignedShift8b(&t3);                  // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
  *p0 = _mm_adds_epi8(*p0, t2);        // p0 += t2
  *q0 = _mm_subs_epi8(*q0, t3);        // q0 -= t3
  FLIP_SIGN_BIT2(*p0, *q0);

  // this is equivalent to signed (a + 1) >> 1 calculation
  t2 = _mm_add_epi8(t3, sign_bit);
  t3 = _mm_avg_epu8(t2, zero);
  t3 = _mm_sub_epi8(t3, k64);

  t3 = _mm_and_si128(not_hev, t3);     // if !hev
  *q1 = _mm_subs_epi8(*q1, t3);        // q1 -= t3
  *p1 = _mm_adds_epi8(*p1, t3);        // p1 += t3
  FLIP_SIGN_BIT2(*p1, *q1);
}

// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
static WEBP_INLINE void DoFilter6(__m128i* const p2, __m128i* const p1,
                                  __m128i* const p0, __m128i* const q0,
                                  __m128i* const q1, __m128i* const q2,
                                  const __m128i* const mask, int hev_thresh) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  __m128i a, not_hev;

  // compute hev mask
  GetNotHEV(p1, p0, q0, q1, hev_thresh, &not_hev);

  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);
  GetBaseDelta(p1, p0, q0, q1, &a);

  { // do simple filter on pixels with hev
    const __m128i m = _mm_andnot_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    DoSimpleFilter(p0, q0, &f);
  }

  { // do strong filter on pixels with not hev
    const __m128i k9 = _mm_set1_epi16(0x0900);
    const __m128i k63 = _mm_set1_epi16(63);

    const __m128i m = _mm_and_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);

    const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
    const __m128i f_hi = _mm_unpackhi_epi8(zero, f);

    const __m128i f9_lo = _mm_mulhi_epi16(f_lo, k9);    // Filter (lo) * 9
    const __m128i f9_hi = _mm_mulhi_epi16(f_hi, k9);    // Filter (hi) * 9

    const __m128i a2_lo = _mm_add_epi16(f9_lo, k63);    // Filter * 9 + 63
    const __m128i a2_hi = _mm_add_epi16(f9_hi, k63);    // Filter * 9 + 63

    const __m128i a1_lo = _mm_add_epi16(a2_lo, f9_lo);  // Filter * 18 + 63
    const __m128i a1_hi = _mm_add_epi16(a2_hi, f9_hi);  // Filter * 18 + 63

    const __m128i a0_lo = _mm_add_epi16(a1_lo, f9_lo);  // Filter * 27 + 63
    const __m128i a0_hi = _mm_add_epi16(a1_hi, f9_hi);  // Filter * 27 + 63

    Update2Pixels(p2, q2, &a2_lo, &a2_hi);
    Update2Pixels(p1, q1, &a1_lo, &a1_hi);
    Update2Pixels(p0, q0, &a0_lo, &a0_hi);
  }
}
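
// Illustration only: stripped of the packing and the 8-bit saturations, the
// common filter applied by DoSimpleFilter() above updates one pixel pair as
// in this scalar sketch, where 'a' is the base delta from GetBaseDelta()
// (values are sign-flipped, i.e. int8_t-centered):
#if 0
static void SimpleFilterScalarSketch(int* p0, int* q0, int a) {
  const int v4 = (a + 4) >> 3;  // SignedShift8b() of (fl + 4)
  const int v3 = (a + 3) >> 3;  // SignedShift8b() of (fl + 3)
  *q0 -= v4;
  *p0 += v3;
}
#endif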

// reads 8 rows across a vertical edge.
static WEBP_INLINE void Load8x4(const uint8_t* const b, int stride,
                                __m128i* const p, __m128i* const q) {
  // A0 = 63 62 61 60 23 22 21 20 43 42 41 40 03 02 01 00
  // A1 = 73 72 71 70 33 32 31 30 53 52 51 50 13 12 11 10
  const __m128i A0 = _mm_set_epi32(
      WebPMemToUint32(&b[6 * stride]), WebPMemToUint32(&b[2 * stride]),
      WebPMemToUint32(&b[4 * stride]), WebPMemToUint32(&b[0 * stride]));
  const __m128i A1 = _mm_set_epi32(
      WebPMemToUint32(&b[7 * stride]), WebPMemToUint32(&b[3 * stride]),
      WebPMemToUint32(&b[5 * stride]), WebPMemToUint32(&b[1 * stride]));

  // B0 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
  // B1 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
  const __m128i B0 = _mm_unpacklo_epi8(A0, A1);
  const __m128i B1 = _mm_unpackhi_epi8(A0, A1);

  // C0 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
  // C1 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
  const __m128i C0 = _mm_unpacklo_epi16(B0, B1);
  const __m128i C1 = _mm_unpackhi_epi16(B0, B1);

  // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  *p = _mm_unpacklo_epi32(C0, C1);
  *q = _mm_unpackhi_epi32(C0, C1);
}

static WEBP_INLINE void Load16x4(const uint8_t* const r0,
                                 const uint8_t* const r8,
                                 int stride,
                                 __m128i* const p1, __m128i* const p0,
                                 __m128i* const q0, __m128i* const q1) {
  // Assume the pixels around the edge (|) are numbered as follows
  //                00 01 | 02 03
  //                10 11 | 12 13
  //                 ...  |  ...
  //                e0 e1 | e2 e3
  //                f0 f1 | f2 f3
  //
  // r0 is pointing to the 0th row (00)
  // r8 is pointing to the 8th row (80)

  // Load
  // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
  // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
  Load8x4(r0, stride, p1, q0);
  Load8x4(r8, stride, p0, q1);

  {
    // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
    // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
    // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
    // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
    const __m128i t1 = *p1;
    const __m128i t2 = *q0;
    *p1 = _mm_unpacklo_epi64(t1, *p0);
    *p0 = _mm_unpackhi_epi64(t1, *p0);
    *q0 = _mm_unpacklo_epi64(t2, *q1);
    *q1 = _mm_unpackhi_epi64(t2, *q1);
  }
}

static WEBP_INLINE void Store4x4(__m128i* const x, uint8_t* dst, int stride) {
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    WebPUint32ToMem(dst, _mm_cvtsi128_si32(*x));
    *x = _mm_srli_si128(*x, 4);
  }
}

// Transpose back and store
static WEBP_INLINE void Store16x4(const __m128i* const p1,
                                  const __m128i* const p0,
                                  const __m128i* const q0,
                                  const __m128i* const q1,
                                  uint8_t* r0, uint8_t* r8,
                                  int stride) {
  __m128i t1, p1_s, p0_s, q0_s, q1_s;

  // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
  // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
  t1 = *p0;
  p0_s = _mm_unpacklo_epi8(*p1, t1);
  p1_s = _mm_unpackhi_epi8(*p1, t1);

  // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
  // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
  t1 = *q0;
  q0_s = _mm_unpacklo_epi8(t1, *q1);
  q1_s = _mm_unpackhi_epi8(t1, *q1);

  // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
  // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
  t1 = p0_s;
  p0_s = _mm_unpacklo_epi16(t1, q0_s);
  q0_s = _mm_unpackhi_epi16(t1, q0_s);

  // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
  // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
  t1 = p1_s;
  p1_s = _mm_unpacklo_epi16(t1, q1_s);
  q1_s = _mm_unpackhi_epi16(t1, q1_s);

  Store4x4(&p0_s, r0, stride);
  r0 += 4 * stride;
  Store4x4(&q0_s, r0, stride);

  Store4x4(&p1_s, r8, stride);
  r8 += 4 * stride;
  Store4x4(&q1_s, r8, stride);
}
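
// Illustration only: Load8x4()/Store16x4() above implement a byte transpose
// purely with unpack (interleave) steps -- each _mm_unpacklo/hi pass doubles
// the run of consecutive bytes coming from the same source row, so two
// passes after the initial byte interleave complete the transpose. Scalar
// equivalent of what Load8x4() computes (hypothetical helper):
#if 0
static void Transpose8x4ScalarSketch(const uint8_t* b, int stride,
                                     uint8_t out[4][8]) {
  int row, col;
  for (row = 0; row < 8; ++row) {
    for (col = 0; col < 4; ++col) {
      out[col][row] = b[row * stride + col];  // column 'col' becomes a row
    }
  }
}
#endif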

//------------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static void SimpleVFilter16(uint8_t* p, int stride, int thresh) {
  // Load
  __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
  __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
  __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
  __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);

  DoFilter2(&p1, &p0, &q0, &q1, thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-stride], p0);
  _mm_storeu_si128((__m128i*)&p[0], q0);
}

static void SimpleHFilter16(uint8_t* p, int stride, int thresh) {
  __m128i p1, p0, q0, q1;

  p -= 2;  // beginning of p1

  Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
  DoFilter2(&p1, &p0, &q0, &q1, thresh);
  Store16x4(&p1, &p0, &q0, &q1, p, p + 8 * stride, stride);
}

static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16(p, stride, thresh);
  }
}

static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16(p, stride, thresh);
  }
}

//------------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

#define MAX_DIFF1(p3, p2, p1, p0, m) do {                                      \
  m = MM_ABS(p1, p0);                                                          \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));                                         \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
} while (0)

#define MAX_DIFF2(p3, p2, p1, p0, m) do {                                      \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));                                         \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));                                         \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
} while (0)

#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) {                             \
  e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]);                            \
  e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]);                            \
  e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]);                            \
  e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]);                            \
}

#define LOADUV_H_EDGE(p, u, v, stride) do {                                    \
  const __m128i U = _mm_loadl_epi64((__m128i*)&(u)[(stride)]);                 \
  const __m128i V = _mm_loadl_epi64((__m128i*)&(v)[(stride)]);                 \
  p = _mm_unpacklo_epi64(U, V);                                                \
} while (0)

#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) {                        \
  LOADUV_H_EDGE(e1, u, v, 0 * stride);                                         \
  LOADUV_H_EDGE(e2, u, v, 1 * stride);                                         \
  LOADUV_H_EDGE(e3, u, v, 2 * stride);                                         \
  LOADUV_H_EDGE(e4, u, v, 3 * stride);                                         \
}

#define STOREUV(p, u, v, stride) {                                             \
  _mm_storel_epi64((__m128i*)&u[(stride)], p);                                 \
  p = _mm_srli_si128(p, 8);                                                    \
  _mm_storel_epi64((__m128i*)&v[(stride)], p);                                 \
}

static WEBP_INLINE void ComplexMask(const __m128i* const p1,
                                    const __m128i* const p0,
                                    const __m128i* const q0,
                                    const __m128i* const q1,
                                    int thresh, int ithresh,
                                    __m128i* const mask) {
  const __m128i it = _mm_set1_epi8(ithresh);
  const __m128i diff = _mm_subs_epu8(*mask, it);
  const __m128i thresh_mask = _mm_cmpeq_epi8(diff, _mm_setzero_si128());
  __m128i filter_mask;
  NeedsFilter(p1, p0, q0, q1, thresh, &filter_mask);
  *mask = _mm_and_si128(thresh_mask, filter_mask);
}
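
// Illustration only: a scalar summary of the decision computed by the
// MAX_DIFF1()/MAX_DIFF2() macros plus ComplexMask(). On entry, 'mask' holds
// the running maximum of the interior differences (|p3-p2|, |p2-p1|, |p1-p0|,
// |q0-q1|, |q1-q2|, |q2-q3|); the combined mask enables filtering only where
// (hypothetical helper):
#if 0
static int ComplexMaskScalarSketch(int max_interior_diff,
                                   int p1, int p0, int q0, int q1,
                                   int thresh, int ithresh) {
  const int d0 = (p0 > q0) ? (p0 - q0) : (q0 - p0);      // |p0 - q0|
  const int d1 = (p1 > q1) ? (p1 - q1) : (q1 - p1);      // |p1 - q1|
  const int needs_filter = (d0 * 2 + d1 / 2) <= thresh;  // NeedsFilter()
  return (max_interior_diff <= ithresh) && needs_filter;
}
#endif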

// on macroblock edges
static void VFilter16(uint8_t* p, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i t1;
  __m128i mask;
  __m128i p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
  _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
  _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
  _mm_storeu_si128((__m128i*)&p[+0 * stride], q0);
  _mm_storeu_si128((__m128i*)&p[+1 * stride], q1);
  _mm_storeu_si128((__m128i*)&p[+2 * stride], q2);
}

static void HFilter16(uint8_t* p, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const b = p - 4;
  Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(&p3, &p2, &p1, &p0, b, b + 8 * stride, stride);
  Store16x4(&q0, &q1, &q2, &q3, p, p + 8 * stride, stride);
}

// on three inner edges
static void VFilter16i(uint8_t* p, int stride,
                       int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i p3, p2, p1, p0;  // loop invariants

  LOAD_H_EDGES4(p, stride, p3, p2, p1, p0);  // prologue

  for (k = 3; k > 0; --k) {
    __m128i mask, tmp1, tmp2;
    uint8_t* const b = p + 2 * stride;  // beginning of p1
    p += 4 * stride;

    MAX_DIFF1(p3, p2, p1, p0, mask);  // compute partial mask
    LOAD_H_EDGES4(p, stride, p3, p2, tmp1, tmp2);
    MAX_DIFF2(p3, p2, tmp1, tmp2, mask);

    // p3 and p2 are not just temporary variables here: they will be
    // re-used for next span. And q2/q3 will become p1/p0 accordingly.
    ComplexMask(&p1, &p0, &p3, &p2, thresh, ithresh, &mask);
    DoFilter4(&p1, &p0, &p3, &p2, &mask, hev_thresh);

    // Store
    _mm_storeu_si128((__m128i*)&b[0 * stride], p1);
    _mm_storeu_si128((__m128i*)&b[1 * stride], p0);
    _mm_storeu_si128((__m128i*)&b[2 * stride], p3);
    _mm_storeu_si128((__m128i*)&b[3 * stride], p2);

    // rotate samples
    p1 = tmp1;
    p0 = tmp2;
  }
}

static void HFilter16i(uint8_t* p, int stride,
                       int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i p3, p2, p1, p0;  // loop invariants

  Load16x4(p, p + 8 * stride, stride, &p3, &p2, &p1, &p0);  // prologue

  for (k = 3; k > 0; --k) {
    __m128i mask, tmp1, tmp2;
    uint8_t* const b = p + 2;  // beginning of p1

    p += 4;  // beginning of q0 (and next span)

    MAX_DIFF1(p3, p2, p1, p0, mask);  // compute partial mask
    Load16x4(p, p + 8 * stride, stride, &p3, &p2, &tmp1, &tmp2);
    MAX_DIFF2(p3, p2, tmp1, tmp2, mask);

    ComplexMask(&p1, &p0, &p3, &p2, thresh, ithresh, &mask);
    DoFilter4(&p1, &p0, &p3, &p2, &mask, hev_thresh);

    Store16x4(&p1, &p0, &p3, &p2, b, b + 8 * stride, stride);

    // rotate samples
    p1 = tmp1;
    p0 = tmp2;
  }
}

// 8-pixels wide variant, for chroma filtering
static void VFilter8(uint8_t* u, uint8_t* v, int stride,
                     int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  STOREUV(p2, u, v, -3 * stride);
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
  STOREUV(q2, u, v, 2 * stride);
}

static void HFilter8(uint8_t* u, uint8_t* v, int stride,
                     int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const tu = u - 4;
  uint8_t* const tv = v - 4;
  Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(u, v, stride, &q0, &q1, &q2, &q3);    // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(&p3, &p2, &p1, &p0, tu, tv, stride);
  Store16x4(&q0, &q1, &q2, &q3, u, v, stride);
}

static void VFilter8i(uint8_t* u, uint8_t* v, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4 * stride;
  v += 4 * stride;

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
  MAX_DIFF2(t2, t1, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  // Store
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
}

static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
                      int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;
  Load16x4(u, v, stride, &t2, &t1, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4;  // beginning of q0
  v += 4;
  Load16x4(u, v, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
  MAX_DIFF2(t2, t1, q1, q0, mask);

  ComplexMask(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  u -= 2;  // beginning of p1
  v -= 2;
  Store16x4(&p1, &p0, &q0, &q1, u, v, stride);
}

//------------------------------------------------------------------------------
// 4x4 predictions

#define DST(x, y) dst[(x) + (y) * BPS]
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

// We use the following 8b-arithmetic tricks:
//    (a + 2 * b + c + 2) >> 2 = (AC + b + 1) >> 1
//   where: AC = (a + c) >> 1 = [(a + c + 1) >> 1] - [(a^c) & 1]
// and:
//    (a + 2 * b + c + 2) >> 2 = (AB + BC + 1) >> 1 - (ab|bc)&lsb
//   where: AB = (a + b + 1) >> 1, BC = (b + c + 1) >> 1
//   and ab = a ^ b, bc = b ^ c, lsb = (AB^BC)&1
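
// Illustration only: scalar form of the first trick above, as used by the
// predictors below. _mm_avg_epu8(a, c) rounds up to (a + c + 1) >> 1;
// subtracting (a ^ c) & 1 turns it into the truncating (a + c) >> 1, and a
// final rounding average with b then equals AVG3(a, b, c) exactly:
#if 0
static uint8_t Avg3ScalarSketch(uint8_t a, uint8_t b, uint8_t c) {
  const int ac = ((a + c + 1) >> 1) - ((a ^ c) & 1);  // == (a + c) >> 1
  return (uint8_t)((ac + b + 1) >> 1);  // == (a + 2 * b + c + 2) >> 2
}
#endif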

static void VE4(uint8_t* dst) {    // vertical
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGH00), one);
  const __m128i b = _mm_subs_epu8(a, lsb);
  const __m128i avg = _mm_avg_epu8(b, BCDEFGH0);
  const uint32_t vals = _mm_cvtsi128_si32(avg);
  int i;
  for (i = 0; i < 4; ++i) {
    WebPUint32ToMem(dst + i * BPS, vals);
  }
}

static void LD4(uint8_t* dst) {    // Down-Left
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS));
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i CDEFGHH0 = _mm_insert_epi16(CDEFGH00, dst[-BPS + 7], 3);
  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGHH0), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);
  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               abcdefg    ));
  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
}

static void VR4(uint8_t* dst) {    // Vertical-Right
  const __m128i one = _mm_set1_epi8(1);
  const int I = dst[-1 + 0 * BPS];
  const int J = dst[-1 + 1 * BPS];
  const int K = dst[-1 + 2 * BPS];
  const int X = dst[-1 - BPS];
  const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
  const __m128i ABCD0 = _mm_srli_si128(XABCD, 1);
  const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0);
  const __m128i _XABCD = _mm_slli_si128(XABCD, 1);
  const __m128i IXABCD = _mm_insert_epi16(_XABCD, I | (X << 8), 0);
  const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(IXABCD, ABCD0), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i efgh = _mm_avg_epu8(avg2, XABCD);
  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               abcd    ));
  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(               efgh    ));
  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1)));
  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1)));

  // these two are hard to implement in SSE2, so we keep the C-version:
  DST(0, 2) = AVG3(J, I, X);
  DST(0, 3) = AVG3(K, J, I);
}

static void VL4(uint8_t* dst) {    // Vertical-Left
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS));
  const __m128i BCDEFGH_ = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH__ = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_);
  const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_);
  const __m128i avg3 = _mm_avg_epu8(avg1, avg2);
  const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one);
  const __m128i ab = _mm_xor_si128(ABCDEFGH, BCDEFGH_);
  const __m128i bc = _mm_xor_si128(CDEFGH__, BCDEFGH_);
  const __m128i abbc = _mm_or_si128(ab, bc);
  const __m128i lsb2 = _mm_and_si128(abbc, lsb1);
  const __m128i avg4 = _mm_subs_epu8(avg3, lsb2);
  const uint32_t extra_out = _mm_cvtsi128_si32(_mm_srli_si128(avg4, 4));
  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               avg1    ));
  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(               avg4    ));
  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1)));
  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1)));

  // these two are hard to get and irregular
  DST(3, 2) = (extra_out >> 0) & 0xff;
  DST(3, 3) = (extra_out >> 8) & 0xff;
}

static void RD4(uint8_t* dst) {    // Down-right
  const __m128i one = _mm_set1_epi8(1);
  const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
  const __m128i ____XABCD = _mm_slli_si128(XABCD, 4);
  const uint32_t I = dst[-1 + 0 * BPS];
  const uint32_t J = dst[-1 + 1 * BPS];
  const uint32_t K = dst[-1 + 2 * BPS];
  const uint32_t L = dst[-1 + 3 * BPS];
  const __m128i LKJI_____ =
      _mm_cvtsi32_si128(L | (K << 8) | (J << 16) | (I << 24));
  const __m128i LKJIXABCD = _mm_or_si128(LKJI_____, ____XABCD);
  const __m128i KJIXABCD_ = _mm_srli_si128(LKJIXABCD, 1);
  const __m128i JIXABCD__ = _mm_srli_si128(LKJIXABCD, 2);
  const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(JIXABCD__, LKJIXABCD), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_);
  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(               abcdefg    ));
  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
}

#undef DST
#undef AVG3

//------------------------------------------------------------------------------
// Luma 16x16

static WEBP_INLINE void TrueMotion(uint8_t* dst, int size) {
  const uint8_t* top = dst - BPS;
  const __m128i zero = _mm_setzero_si128();
  int y;
  if (size == 4) {
    const __m128i top_values = _mm_cvtsi32_si128(WebPMemToUint32(top));
    const __m128i top_base = _mm_unpacklo_epi8(top_values, zero);
    for (y = 0; y < 4; ++y, dst += BPS) {
      const int val = dst[-1] - top[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero);
      WebPUint32ToMem(dst, _mm_cvtsi128_si32(out));
    }
  } else if (size == 8) {
    const __m128i top_values = _mm_loadl_epi64((const __m128i*)top);
    const __m128i top_base = _mm_unpacklo_epi8(top_values, zero);
    for (y = 0; y < 8; ++y, dst += BPS) {
      const int val = dst[-1] - top[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero);
      _mm_storel_epi64((__m128i*)dst, out);
    }
  } else {
    const __m128i top_values = _mm_loadu_si128((const __m128i*)top);
    const __m128i top_base_0 = _mm_unpacklo_epi8(top_values, zero);
    const __m128i top_base_1 = _mm_unpackhi_epi8(top_values, zero);
    for (y = 0; y < 16; ++y, dst += BPS) {
      const int val = dst[-1] - top[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out_0 = _mm_add_epi16(base, top_base_0);
      const __m128i out_1 = _mm_add_epi16(base, top_base_1);
      const __m128i out = _mm_packus_epi16(out_0, out_1);
      _mm_storeu_si128((__m128i*)dst, out);
    }
  }
}

static void TM4(uint8_t* dst)   { TrueMotion(dst, 4); }
static void TM8uv(uint8_t* dst) { TrueMotion(dst, 8); }
static void TM16(uint8_t* dst)  { TrueMotion(dst, 16); }
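
// Illustration only: the scalar predictor that the TrueMotion() loops above
// vectorize is pred(x, y) = clip(left[y] + top[x] - top_left), with the
// clipping to [0, 255] performed by _mm_packus_epi16(). Hypothetical scalar
// equivalent:
#if 0
static void TrueMotionScalarSketch(uint8_t* dst, int size) {
  const uint8_t* const top = dst - BPS;
  int x, y;
  for (y = 0; y < size; ++y, dst += BPS) {
    const int base = dst[-1] - top[-1];  // left[y] - top_left
    for (x = 0; x < size; ++x) {
      const int v = base + top[x];
      dst[x] = (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t)v;
    }
  }
}
#endif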

static void VE16(uint8_t* dst) {
  const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS));
  int j;
  for (j = 0; j < 16; ++j) {
    _mm_storeu_si128((__m128i*)(dst + j * BPS), top);
  }
}

static void HE16(uint8_t* dst) {  // horizontal
  int j;
  for (j = 16; j > 0; --j) {
    const __m128i values = _mm_set1_epi8(dst[-1]);
    _mm_storeu_si128((__m128i*)dst, values);
    dst += BPS;
  }
}

static WEBP_INLINE void Put16(uint8_t v, uint8_t* dst) {
  int j;
  const __m128i values = _mm_set1_epi8(v);
  for (j = 0; j < 16; ++j) {
    _mm_storeu_si128((__m128i*)(dst + j * BPS), values);
  }
}

static void DC16(uint8_t* dst) {  // DC
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS));
  const __m128i sad8x2 = _mm_sad_epu8(top, zero);
  // sum the two sads: sad8x2[0:1] + sad8x2[8:9]
  const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2));
  int left = 0;
  int j;
  for (j = 0; j < 16; ++j) {
    left += dst[-1 + j * BPS];
  }
  {
    const int DC = _mm_cvtsi128_si32(sum) + left + 16;
    Put16(DC >> 5, dst);
  }
}

static void DC16NoTop(uint8_t* dst) {  // DC with top samples not available
  int DC = 8;
  int j;
  for (j = 0; j < 16; ++j) {
    DC += dst[-1 + j * BPS];
  }
  Put16(DC >> 4, dst);
}

static void DC16NoLeft(uint8_t* dst) {  // DC with left samples not available
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS));
  const __m128i sad8x2 = _mm_sad_epu8(top, zero);
  // sum the two sads: sad8x2[0:1] + sad8x2[8:9]
  const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2));
  const int DC = _mm_cvtsi128_si32(sum) + 8;
  Put16(DC >> 4, dst);
}

static void DC16NoTopLeft(uint8_t* dst) {  // DC with no top and left samples
  Put16(0x80, dst);
}
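
// Note on the DC predictors above: _mm_sad_epu8(top, zero) sums each group of
// eight bytes (|x - 0| == x) into the low word of the corresponding 64-bit
// half, and _mm_shuffle_epi32(sad8x2, 2) moves the upper half down so that a
// single add folds the two partial sums. Scalar equivalent of the top-row
// sum (illustration only):
#if 0
static int SumTop16ScalarSketch(const uint8_t* const top) {
  int i, sum = 0;
  for (i = 0; i < 16; ++i) sum += top[i];
  return sum;
}
#endif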

//------------------------------------------------------------------------------
// Chroma

static void VE8uv(uint8_t* dst) {  // vertical
  int j;
  const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS));
  for (j = 0; j < 8; ++j) {
    _mm_storel_epi64((__m128i*)(dst + j * BPS), top);
  }
}

static void HE8uv(uint8_t* dst) {  // horizontal
  int j;
  for (j = 0; j < 8; ++j) {
    const __m128i values = _mm_set1_epi8(dst[-1]);
    _mm_storel_epi64((__m128i*)dst, values);
    dst += BPS;
  }
}

// helper for chroma-DC predictions
static WEBP_INLINE void Put8x8uv(uint8_t v, uint8_t* dst) {
  int j;
  const __m128i values = _mm_set1_epi8(v);
  for (j = 0; j < 8; ++j) {
    _mm_storel_epi64((__m128i*)(dst + j * BPS), values);
  }
}

static void DC8uv(uint8_t* dst) {  // DC
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS));
  const __m128i sum = _mm_sad_epu8(top, zero);
  int left = 0;
  int j;
  for (j = 0; j < 8; ++j) {
    left += dst[-1 + j * BPS];
  }
  {
    const int DC = _mm_cvtsi128_si32(sum) + left + 8;
    Put8x8uv(DC >> 4, dst);
  }
}

static void DC8uvNoLeft(uint8_t* dst) {  // DC with no left samples
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS));
  const __m128i sum = _mm_sad_epu8(top, zero);
  const int DC = _mm_cvtsi128_si32(sum) + 4;
  Put8x8uv(DC >> 3, dst);
}

static void DC8uvNoTop(uint8_t* dst) {  // DC with no top samples
  int dc0 = 4;
  int i;
  for (i = 0; i < 8; ++i) {
    dc0 += dst[-1 + i * BPS];
  }
  Put8x8uv(dc0 >> 3, dst);
}

static void DC8uvNoTopLeft(uint8_t* dst) {  // DC with nothing
  Put8x8uv(0x80, dst);
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitSSE2(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitSSE2(void) {
  VP8Transform = Transform;
#if defined(USE_TRANSFORM_AC3)
  VP8TransformAC3 = TransformAC3;
#endif

  VP8VFilter16 = VFilter16;
  VP8HFilter16 = HFilter16;
  VP8VFilter8 = VFilter8;
  VP8HFilter8 = HFilter8;
  VP8VFilter16i = VFilter16i;
  VP8HFilter16i = HFilter16i;
  VP8VFilter8i = VFilter8i;
  VP8HFilter8i = HFilter8i;

  VP8SimpleVFilter16 = SimpleVFilter16;
  VP8SimpleHFilter16 = SimpleHFilter16;
  VP8SimpleVFilter16i = SimpleVFilter16i;
  VP8SimpleHFilter16i = SimpleHFilter16i;

  VP8PredLuma4[1] = TM4;
  VP8PredLuma4[2] = VE4;
  VP8PredLuma4[4] = RD4;
  VP8PredLuma4[5] = VR4;
  VP8PredLuma4[6] = LD4;
  VP8PredLuma4[7] = VL4;

  VP8PredLuma16[0] = DC16;
  VP8PredLuma16[1] = TM16;
  VP8PredLuma16[2] = VE16;
  VP8PredLuma16[3] = HE16;
  VP8PredLuma16[4] = DC16NoTop;
  VP8PredLuma16[5] = DC16NoLeft;
  VP8PredLuma16[6] = DC16NoTopLeft;

  VP8PredChroma8[0] = DC8uv;
  VP8PredChroma8[1] = TM8uv;
  VP8PredChroma8[2] = VE8uv;
  VP8PredChroma8[3] = HE8uv;
  VP8PredChroma8[4] = DC8uvNoTop;
  VP8PredChroma8[5] = DC8uvNoLeft;
  VP8PredChroma8[6] = DC8uvNoTopLeft;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(VP8DspInitSSE2)

#endif  // WEBP_USE_SSE2