// Copyright 2011 Google Inc.
//
// This code is licensed under the same terms as WebM:
//  Software License Agreement:  http://www.webmproject.org/license/software/
//  Additional IP Rights Grant:  http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// SSE2 version of speed-critical functions.
//
// Author: Christian Duvivier (cduvivier (at) google.com)

#if defined(__SSE2__) || defined(_MSC_VER)
#include <emmintrin.h>

#include "vp8enci.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

//-----------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.

static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
                                int start_block, int end_block) {
  int histo[MAX_COEFF_THRESH + 1] = { 0 };
  int16_t out[16];
  int j, k;
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  for (j = start_block; j < end_block; ++j) {
    VP8FTransform(ref + VP8Scan[j], pred + VP8Scan[j], out);

    // Convert coefficients to bin (within out[]).
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      // sign(out) = out >> 15  (0x0000 if positive, 0xffff if negative)
      const __m128i sign0 = _mm_srai_epi16(out0, 15);
      const __m128i sign1 = _mm_srai_epi16(out1, 15);
      // abs(out) = (out ^ sign) - sign
      const __m128i xor0 = _mm_xor_si128(out0, sign0);
      const __m128i xor1 = _mm_xor_si128(out1, sign1);
      const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
      const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
      // v = abs(out) >> 2
      const __m128i v0 = _mm_srai_epi16(abs0, 2);
      const __m128i v1 = _mm_srai_epi16(abs1, 2);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }

    // Use bin to update histogram.
    for (k = 0; k < 16; ++k) {
      histo[out[k]]++;
    }
  }

  return VP8GetAlpha(histo);
}
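
// A scalar sketch of the binning performed above, for reference only
// (same shift and clamp, one coefficient at a time):
//   for (k = 0; k < 16; ++k) {
//     const int v = abs(out[k]) >> 2;
//     histo[(v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v]++;
//   }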

//-----------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

// Does one or two inverse transforms.
static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
                           int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 = 20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two inverse
  // transforms in parallel). In the case of only one inverse transform, the
  // second half of the vectors will just contain random values we'll never
  // use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
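
    // For reference, the trick in scalar form (illustrative only):
    //   MUL(x, K1) = (x * 85627) >> 16
    //              = (x * (20091 + (1 << 16))) >> 16
    //              = ((x * 20091) >> 16) + x
    // which is what the _mm_mulhi_epi16() calls plus the extra adds compute,
    // e.g. for x = 100: (100 * 20091) >> 16 = 30, hence MUL(100, K1) = 130.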

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);
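    // For reference (sketch): folding 'four' into dc above gives every
    // second-pass output a +4 bias, so these >> 3 compute the rounded
    // (x + 4) >> 3, e.g. x = 11  ->  (11 + 4) >> 3 = 1.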

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'ref' and store.
  {
    const __m128i zero = _mm_set1_epi16(0);
    // Load the reference(s).
    __m128i ref0, ref1, ref2, ref3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]);
      ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]);
      ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]);
      ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]);
    } else {
      // Load four bytes/pixels per line.
      ref0 = _mm_cvtsi32_si128(*(int*)&ref[0 * BPS]);
      ref1 = _mm_cvtsi32_si128(*(int*)&ref[1 * BPS]);
      ref2 = _mm_cvtsi32_si128(*(int*)&ref[2 * BPS]);
      ref3 = _mm_cvtsi32_si128(*(int*)&ref[3 * BPS]);
    }
    // Convert to 16b.
    ref0 = _mm_unpacklo_epi8(ref0, zero);
    ref1 = _mm_unpacklo_epi8(ref1, zero);
    ref2 = _mm_unpacklo_epi8(ref2, zero);
    ref3 = _mm_unpacklo_epi8(ref3, zero);
    // Add the inverse transform(s).
    ref0 = _mm_add_epi16(ref0, T0);
    ref1 = _mm_add_epi16(ref1, T1);
    ref2 = _mm_add_epi16(ref2, T2);
    ref3 = _mm_add_epi16(ref3, T3);
    // Unsigned saturate to 8b.
    ref0 = _mm_packus_epi16(ref0, ref0);
    ref1 = _mm_packus_epi16(ref1, ref1);
    ref2 = _mm_packus_epi16(ref2, ref2);
    ref3 = _mm_packus_epi16(ref3, ref3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)&dst[0 * BPS], ref0);
      _mm_storel_epi64((__m128i*)&dst[1 * BPS], ref1);
      _mm_storel_epi64((__m128i*)&dst[2 * BPS], ref2);
      _mm_storel_epi64((__m128i*)&dst[3 * BPS], ref3);
    } else {
      // Store four bytes/pixels per line.
      *((int32_t*)&dst[0 * BPS]) = _mm_cvtsi128_si32(ref0);
      *((int32_t*)&dst[1 * BPS]) = _mm_cvtsi128_si32(ref1);
      *((int32_t*)&dst[2 * BPS]) = _mm_cvtsi128_si32(ref2);
      *((int32_t*)&dst[3 * BPS]) = _mm_cvtsi128_si32(ref3);
    }
  }
}
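
// A scalar reference of one inverse transform, for checking (sketch, using
// the full-precision constants K1 = 85627, K2 = 35468 and
// MUL(x, K) = (x * K) >> 16 in 32-bit arithmetic):
//   per column:   a = in0 + in2
//                 b = in0 - in2
//                 c = MUL(in1, K2) - MUL(in3, K1)
//                 d = MUL(in1, K1) + MUL(in3, K2)
//                 column out = { a + d, b + c, b - c, a - d }
//   then the same butterfly per row with the +4 bias and >> 3, and the
//   result added to 'ref' with unsigned saturation to [0, 255].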

static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
                           int16_t* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i seven = _mm_set1_epi16(7);
  const __m128i k7500 = _mm_set1_epi32(7500);
  const __m128i k14500 = _mm_set1_epi32(14500);
  const __m128i k51000 = _mm_set1_epi32(51000);
  const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
  const __m128i k5352_2217 = _mm_set_epi16(5352,  2217, 5352,  2217,
                                           5352,  2217, 5352,  2217);
  const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
                                           2217, -5352, 2217, -5352);

  __m128i v01, v32;

  // Difference between src and ref and initial transpose.
  {
    // Load src and convert to 16b.
    const __m128i src0 = _mm_loadl_epi64((__m128i*)&src[0 * BPS]);
    const __m128i src1 = _mm_loadl_epi64((__m128i*)&src[1 * BPS]);
    const __m128i src2 = _mm_loadl_epi64((__m128i*)&src[2 * BPS]);
    const __m128i src3 = _mm_loadl_epi64((__m128i*)&src[3 * BPS]);
    const __m128i src_0 = _mm_unpacklo_epi8(src0, zero);
    const __m128i src_1 = _mm_unpacklo_epi8(src1, zero);
    const __m128i src_2 = _mm_unpacklo_epi8(src2, zero);
    const __m128i src_3 = _mm_unpacklo_epi8(src3, zero);
    // Load ref and convert to 16b.
    const __m128i ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]);
    const __m128i ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]);
    const __m128i ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]);
    const __m128i ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]);
    const __m128i ref_0 = _mm_unpacklo_epi8(ref0, zero);
    const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
    const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
    const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
    // Compute difference.
    const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
    const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
    const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
    const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);

    // Transpose.
    // 00 01 02 03   0 0 0 0
    // 10 11 12 13   0 0 0 0
    // 20 21 22 23   0 0 0 0
    // 30 31 32 33   0 0 0 0
    const __m128i transpose0_0 = _mm_unpacklo_epi16(diff0, diff1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(diff2, diff3);
    // 00 10 01 11   02 12 03 13
    // 20 30 21 31   22 32 23 33
    const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
    // a02 a12 a22 a32   a03 a13 a23 a33
    // a00 a10 a20 a30   a01 a11 a21 a31
    // a03 a13 a23 a33   a02 a12 a22 a32
  }
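
  // For reference (sketch): the unpack-based 4x4 transpose used throughout
  // this file is the classic two-step interleave, e.g. for 16-bit rows:
  //   unpacklo_epi16(r0, r1)        ->  00 10 01 11 02 12 03 13
  //   unpacklo_epi16(r2, r3)        ->  20 30 21 31 22 32 23 33
  //   unpacklo_epi32(of those two)  ->  00 10 20 30 01 11 21 31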

  // First pass and subsequent transpose.
  {
    // Same operations are done on the (0,3) and (1,2) pairs.
    // b0 = (a0 + a3) << 3
    // b1 = (a1 + a2) << 3
    // b3 = (a0 - a3) << 3
    // b2 = (a1 - a2) << 3
    const __m128i a01 = _mm_add_epi16(v01, v32);
    const __m128i a32 = _mm_sub_epi16(v01, v32);
    const __m128i b01 = _mm_slli_epi16(a01, 3);
    const __m128i b32 = _mm_slli_epi16(a32, 3);
    const __m128i b11 = _mm_unpackhi_epi64(b01, b01);
    const __m128i b22 = _mm_unpackhi_epi64(b32, b32);

    // e0 = b0 + b1
    // e2 = b0 - b1
    const __m128i e0 = _mm_add_epi16(b01, b11);
    const __m128i e2 = _mm_sub_epi16(b01, b11);
    const __m128i e02 = _mm_unpacklo_epi64(e0, e2);

    // e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12
    // e3 = (b3 * 2217 - b2 * 5352 +  7500) >> 12
    const __m128i b23 = _mm_unpacklo_epi16(b22, b32);
    const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
    const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
    const __m128i d1 = _mm_add_epi32(c1, k14500);
    const __m128i d3 = _mm_add_epi32(c3, k7500);
    const __m128i e1 = _mm_srai_epi32(d1, 12);
    const __m128i e3 = _mm_srai_epi32(d3, 12);
    const __m128i e13 = _mm_packs_epi32(e1, e3);

    // Transpose.
    // 00 01 02 03   20 21 22 23
    // 10 11 12 13   30 31 32 33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(e02, e13);
    const __m128i transpose0_1 = _mm_unpackhi_epi16(e02, e13);
    // 00 10 01 11   02 12 03 13
    // 20 30 21 31   22 32 23 33
    const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
    // 02 12 22 32   03 13 23 33
    // 00 10 20 30   01 11 21 31
    // 03 13 23 33   02 12 22 32
  }

  // Second pass
  {
    // Same operations are done on the (0,3) and (1,2) pairs.
    // a0 = v0 + v3
    // a1 = v1 + v2
    // a3 = v0 - v3
    // a2 = v1 - v2
    const __m128i a01 = _mm_add_epi16(v01, v32);
    const __m128i a32 = _mm_sub_epi16(v01, v32);
    const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
    const __m128i a22 = _mm_unpackhi_epi64(a32, a32);

    // d0 = (a0 + a1 + 7) >> 4;
    // d2 = (a0 - a1 + 7) >> 4;
    const __m128i b0 = _mm_add_epi16(a01, a11);
    const __m128i b2 = _mm_sub_epi16(a01, a11);
    const __m128i c0 = _mm_add_epi16(b0, seven);
    const __m128i c2 = _mm_add_epi16(b2, seven);
    const __m128i d0 = _mm_srai_epi16(c0, 4);
    const __m128i d2 = _mm_srai_epi16(c2, 4);

    // f1 = ((b3 * 5352 + b2 * 2217 + 12000) >> 16)
    // f3 = ((b3 * 2217 - b2 * 5352 + 51000) >> 16)
    const __m128i b23 = _mm_unpacklo_epi16(a22, a32);
    const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
    const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
    const __m128i d1 = _mm_add_epi32(c1, k12000_plus_one);
    const __m128i d3 = _mm_add_epi32(c3, k51000);
    const __m128i e1 = _mm_srai_epi32(d1, 16);
    const __m128i e3 = _mm_srai_epi32(d3, 16);
    const __m128i f1 = _mm_packs_epi32(e1, e1);
    const __m128i f3 = _mm_packs_epi32(e3, e3);
    // f1 = f1 + (a3 != 0);
    // The compare will return (0xffff, 0) for (==0, !=0). To turn that into
    // the desired (0, 1), we add one earlier through k12000_plus_one.
    const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));
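    // For reference (sketch): _mm_cmpeq_epi16(a32, zero) is -1 where a3 == 0
    // and 0 elsewhere, so with the +1 already folded into k12000_plus_one:
    //   a3 != 0:   ((c1 + 12000 + 65536) >> 16)       = base + 1
    //   a3 == 0:   ((c1 + 12000 + 65536) >> 16) - 1   = base
    // which is exactly f1 + (a3 != 0).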

    _mm_storel_epi64((__m128i*)&out[ 0], d0);
    _mm_storel_epi64((__m128i*)&out[ 4], g1);
    _mm_storel_epi64((__m128i*)&out[ 8], d2);
    _mm_storel_epi64((__m128i*)&out[12], f3);
  }
}

//-----------------------------------------------------------------------------
// Metric

static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
  const __m128i zero = _mm_set1_epi16(0);

  // Load values.
  const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]);
  const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]);
  const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]);
  const __m128i a3 = _mm_loadl_epi64((__m128i*)&a[BPS * 3]);
  const __m128i b0 = _mm_loadl_epi64((__m128i*)&b[BPS * 0]);
  const __m128i b1 = _mm_loadl_epi64((__m128i*)&b[BPS * 1]);
  const __m128i b2 = _mm_loadl_epi64((__m128i*)&b[BPS * 2]);
  const __m128i b3 = _mm_loadl_epi64((__m128i*)&b[BPS * 3]);

  // Combine pair of lines and convert to 16b.
  const __m128i a01 = _mm_unpacklo_epi32(a0, a1);
  const __m128i a23 = _mm_unpacklo_epi32(a2, a3);
  const __m128i b01 = _mm_unpacklo_epi32(b0, b1);
  const __m128i b23 = _mm_unpacklo_epi32(b2, b3);
  const __m128i a01s = _mm_unpacklo_epi8(a01, zero);
  const __m128i a23s = _mm_unpacklo_epi8(a23, zero);
  const __m128i b01s = _mm_unpacklo_epi8(b01, zero);
  const __m128i b23s = _mm_unpacklo_epi8(b23, zero);

  // Compute differences; (a-b)^2 = (abs(a-b))^2 = (sat8(a-b) + sat8(b-a))^2
  // TODO(cduvivier): Disassemble and figure out why this is fastest. We don't
  //                  need absolute values, and there is no need to do the
  //                  calculation in 8 bits as we are already in 16 bits, ...
  //                  Yet this is what benchmarks the fastest!
  const __m128i d0 = _mm_subs_epu8(a01s, b01s);
  const __m128i d1 = _mm_subs_epu8(b01s, a01s);
  const __m128i d2 = _mm_subs_epu8(a23s, b23s);
  const __m128i d3 = _mm_subs_epu8(b23s, a23s);

  // Square and add them all together.
  const __m128i madd0 = _mm_madd_epi16(d0, d0);
  const __m128i madd1 = _mm_madd_epi16(d1, d1);
  const __m128i madd2 = _mm_madd_epi16(d2, d2);
  const __m128i madd3 = _mm_madd_epi16(d3, d3);
  const __m128i sum0 = _mm_add_epi32(madd0, madd1);
  const __m128i sum1 = _mm_add_epi32(madd2, madd3);
  const __m128i sum2 = _mm_add_epi32(sum0, sum1);
  int32_t tmp[4];
  _mm_storeu_si128((__m128i*)tmp, sum2);
  return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
}
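
// A scalar equivalent of the metric above, for reference (sketch):
//   int count = 0;
//   for (y = 0; y < 4; ++y) {
//     for (x = 0; x < 4; ++x) {
//       const int diff = a[x + y * BPS] - b[x + y * BPS];
//       count += diff * diff;
//     }
//   }
//   return count;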

//-----------------------------------------------------------------------------
// Texture distortion
//
// We try to match the spectral content (weighted) between source and
// reconstructed samples.

// Hadamard transform
// Returns the difference between the weighted sums of the absolute values of
// the transformed coefficients of inA and inB.
static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
                          const uint16_t* const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i three = _mm_set1_epi16(3);

  // Load, combine and transpose inputs.
  {
    const __m128i inA_0 = _mm_loadl_epi64((__m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadl_epi64((__m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadl_epi64((__m128i*)&inA[BPS * 2]);
    const __m128i inA_3 = _mm_loadl_epi64((__m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadl_epi64((__m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadl_epi64((__m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadl_epi64((__m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((__m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    const __m128i inAB_0 = _mm_unpacklo_epi8(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi8(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi8(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi8(inA_3, inB_3);
    // a00 b00 a01 b01 a02 b02 a03 b03   0 0 0 0 0 0 0 0
    // a10 b10 a11 b11 a12 b12 a13 b13   0 0 0 0 0 0 0 0
    // a20 b20 a21 b21 a22 b22 a23 b23   0 0 0 0 0 0 0 0
    // a30 b30 a31 b31 a32 b32 a33 b33   0 0 0 0 0 0 0 0

    // Transpose the two 4x4, discarding the filling zeroes.
    const __m128i transpose0_0 = _mm_unpacklo_epi8(inAB_0, inAB_2);
    const __m128i transpose0_1 = _mm_unpacklo_epi8(inAB_1, inAB_3);
    // a00 a20 b00 b20 a01 a21 b01 b21   a02 a22 b02 b22 a03 a23 b03 b23
    // a10 a30 b10 b30 a11 a31 b11 b31   a12 a32 b12 b32 a13 a33 b13 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi8(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpackhi_epi8(transpose0_0, transpose0_1);
    // a00 a10 a20 a30 b00 b10 b20 b30   a01 a11 a21 a31 b01 b11 b21 b31
    // a02 a12 a22 a32 b02 b12 b22 b32   a03 a13 a23 a33 b03 b13 b23 b33

    // Convert to 16b.
    tmp_0 = _mm_unpacklo_epi8(transpose1_0, zero);
    tmp_1 = _mm_unpackhi_epi8(transpose1_0, zero);
    tmp_2 = _mm_unpacklo_epi8(transpose1_1, zero);
    tmp_3 = _mm_unpackhi_epi8(transpose1_1, zero);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_slli_epi16(_mm_add_epi16(tmp_0, tmp_2), 2);
    const __m128i a1 = _mm_slli_epi16(_mm_add_epi16(tmp_1, tmp_3), 2);
    const __m128i a2 = _mm_slli_epi16(_mm_sub_epi16(tmp_1, tmp_3), 2);
    const __m128i a3 = _mm_slli_epi16(_mm_sub_epi16(tmp_0, tmp_2), 2);
    // b0_extra = (a0 != 0);
    const __m128i b0_extra = _mm_andnot_si128(_mm_cmpeq_epi16(a0, zero), one);
    const __m128i b0_base = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    const __m128i b0 = _mm_add_epi16(b0_base, b0_extra);
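    // For reference (sketch): _mm_cmpeq_epi16(a0, zero) is -1 where a0 == 0;
    // the andnot with 'one' leaves 1 exactly where a0 != 0, so
    // b0 = a0 + a1 + (a0 != 0).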
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33

    // Transpose the two 4x4.
    const __m128i transpose0_0 = _mm_unpacklo_epi16(b0, b1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(b2, b3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(b0, b1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(b2, b3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    tmp_0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    tmp_1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    tmp_2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    tmp_3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Vertical pass and difference of weighted sums.
  {
    // Load all inputs.
    // TODO(cduvivier): Make variable declarations and allocations aligned so
    //                  we can use _mm_load_si128 instead of _mm_loadu_si128.
    const __m128i w_0 = _mm_loadu_si128((__m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((__m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
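    // (Each vector holds block A in its low 8 bytes and block B in its high
    // 8 bytes, so the epi64 unpacks below pull the two transforms apart.)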
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    {
      // sign(b) = b >> 15  (0x0000 if positive, 0xffff if negative)
      const __m128i sign_A_b0 = _mm_srai_epi16(A_b0, 15);
      const __m128i sign_A_b2 = _mm_srai_epi16(A_b2, 15);
      const __m128i sign_B_b0 = _mm_srai_epi16(B_b0, 15);
      const __m128i sign_B_b2 = _mm_srai_epi16(B_b2, 15);

      // b = abs(b) = (b ^ sign) - sign
      A_b0 = _mm_xor_si128(A_b0, sign_A_b0);
      A_b2 = _mm_xor_si128(A_b2, sign_A_b2);
      B_b0 = _mm_xor_si128(B_b0, sign_B_b0);
      B_b2 = _mm_xor_si128(B_b2, sign_B_b2);
      A_b0 = _mm_sub_epi16(A_b0, sign_A_b0);
      A_b2 = _mm_sub_epi16(A_b2, sign_A_b2);
      B_b0 = _mm_sub_epi16(B_b0, sign_B_b0);
      B_b2 = _mm_sub_epi16(B_b2, sign_B_b2);
    }

    // b = abs(b) + 3
    A_b0 = _mm_add_epi16(A_b0, three);
    A_b2 = _mm_add_epi16(A_b2, three);
    B_b0 = _mm_add_epi16(B_b0, three);
    B_b2 = _mm_add_epi16(B_b2, three);

    // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
    // b = (abs(b) + 3) >> 3
    A_b0 = _mm_srai_epi16(A_b0, 3);
    A_b2 = _mm_srai_epi16(A_b2, 3);
    B_b0 = _mm_srai_epi16(B_b0, 3);
    B_b2 = _mm_srai_epi16(B_b2, 3);

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b0 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b0);
  }
  return sum[0] + sum[1] + sum[2] + sum[3];
}

static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b,
                        const uint16_t* const w) {
  const int diff_sum = TTransformSSE2(a, b, w);
  return (abs(diff_sum) + 8) >> 4;
}

static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
                          const uint16_t* const w) {
  int D = 0;
  int x, y;
  for (y = 0; y < 16 * BPS; y += 4 * BPS) {
    for (x = 0; x < 16; x += 4) {
      D += Disto4x4SSE2(a + x + y, b + x + y, w);
    }
  }
  return D;
}
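
// In scalar form, the functions above compute the following (reference
// sketch): each block's transformed coefficients h[k] are reduced to
//   w[k] * ((abs(h[k]) + 3) >> 3)
// and summed; TTransformSSE2 returns sum(A) - sum(B). Disto4x4SSE2 rounds
// the absolute difference with +8 before the >> 4 normalization, and
// Disto16x16SSE2 accumulates it over the sixteen 4x4 sub-blocks (x steps
// in pixels, y in whole scanlines of BPS bytes).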

//-----------------------------------------------------------------------------
// Quantization
//

// Simple quantization
static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
                             int n, const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(2047);
  const __m128i zero = _mm_set1_epi16(0);
  __m128i sign0, sign8;
  __m128i coeff0, coeff8;
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  // TODO(cduvivier): Make variable declarations and allocations aligned so
  //                  that we can use _mm_load_si128 instead of
  //                  _mm_loadu_si128.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i sharpen0 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[0]);
  const __m128i sharpen8 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[8]);
  const __m128i iq0 = _mm_loadu_si128((__m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((__m128i*)&mtx->iq_[8]);
  const __m128i bias0 = _mm_loadu_si128((__m128i*)&mtx->bias_[0]);
  const __m128i bias8 = _mm_loadu_si128((__m128i*)&mtx->bias_[8]);
  const __m128i q0 = _mm_loadu_si128((__m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((__m128i*)&mtx->q_[8]);
  const __m128i zthresh0 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[0]);
  const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]);

  // sign(in) = in >> 15  (0x0000 if positive, 0xffff if negative)
  sign0 = _mm_srai_epi16(in0, 15);
  sign8 = _mm_srai_epi16(in8, 15);

  // coeff = abs(in) = (in ^ sign) - sign
  coeff0 = _mm_xor_si128(in0, sign0);
  coeff8 = _mm_xor_si128(in8, sign8);
  coeff0 = _mm_sub_epi16(coeff0, sign0);
  coeff8 = _mm_sub_epi16(coeff8, sign8);

  // coeff = abs(in) + sharpen
  coeff0 = _mm_add_epi16(coeff0, sharpen0);
  coeff8 = _mm_add_epi16(coeff8, sharpen8);

  // if (coeff > 2047) coeff = 2047
  coeff0 = _mm_min_epi16(coeff0, max_coeff_2047);
  coeff8 = _mm_min_epi16(coeff8, max_coeff_2047);

  // out = (coeff * iQ + B) >> QFIX;
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ)
    __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // expand bias from 16b to 32b
    __m128i bias_00 = _mm_unpacklo_epi16(bias0, zero);
    __m128i bias_04 = _mm_unpackhi_epi16(bias0, zero);
    __m128i bias_08 = _mm_unpacklo_epi16(bias8, zero);
    __m128i bias_12 = _mm_unpackhi_epi16(bias8, zero);
    // out = (coeff * iQ + B)
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = (coeff * iQ + B) >> QFIX;
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);
    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);
  }

  // get sign back (if (sign[j]) out_n = -out_n)
  out0 = _mm_xor_si128(out0, sign0);
  out8 = _mm_xor_si128(out8, sign8);
  out0 = _mm_sub_epi16(out0, sign0);
  out8 = _mm_sub_epi16(out8, sign8);

  // in = out * Q
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  // if (coeff <= mtx->zthresh_) {in=0; out=0;}
  {
    __m128i cmp0 = _mm_cmpgt_epi16(coeff0, zthresh0);
    __m128i cmp8 = _mm_cmpgt_epi16(coeff8, zthresh8);
    in0 = _mm_and_si128(in0, cmp0);
    in8 = _mm_and_si128(in8, cmp8);
    _mm_storeu_si128((__m128i*)&in[0], in0);
    _mm_storeu_si128((__m128i*)&in[8], in8);
    out0 = _mm_and_si128(out0, cmp0);
    out8 = _mm_and_si128(out8, cmp8);
  }

  // zigzag the output before storing it.
  //
  // The zigzag pattern can almost be reproduced with a small sequence of
  // shuffles. After it, we only need to swap the 7th (ending up in third
  // position instead of twelfth) and 8th values.
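  // For reference (sketch), the target permutation is VP8's zigzag scan
  // order:
  //   0  1  4  8  5  2  3  6  9 12 13 10  7 11 14 15
  // i.e. out[i] takes the coefficient at raster position zigzag[i].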
  {
    __m128i outZ0, outZ8;
    outZ0 = _mm_shufflehi_epi16(out0,  _MM_SHUFFLE(2, 1, 3, 0));
    outZ0 = _mm_shuffle_epi32  (outZ0, _MM_SHUFFLE(3, 1, 2, 0));
    outZ0 = _mm_shufflehi_epi16(outZ0, _MM_SHUFFLE(3, 1, 0, 2));
    outZ8 = _mm_shufflelo_epi16(out8,  _MM_SHUFFLE(3, 0, 2, 1));
    outZ8 = _mm_shuffle_epi32  (outZ8, _MM_SHUFFLE(3, 1, 2, 0));
    outZ8 = _mm_shufflelo_epi16(outZ8, _MM_SHUFFLE(1, 3, 2, 0));
    _mm_storeu_si128((__m128i*)&out[0], outZ0);
    _mm_storeu_si128((__m128i*)&out[8], outZ8);
    packed_out = _mm_packs_epi16(outZ0, outZ8);
  }
  {
    const int16_t outZ_12 = out[12];
    const int16_t outZ_3 = out[3];
    out[3] = outZ_12;
    out[12] = outZ_3;
  }

  // detect if all 'out' values are zeroes or not
  {
    int32_t tmp[4];
    _mm_storeu_si128((__m128i*)tmp, packed_out);
    if (n) {
      tmp[0] &= ~0xff;
    }
    return (tmp[3] || tmp[2] || tmp[1] || tmp[0]);
  }
}

extern void VP8EncDspInitSSE2(void);
void VP8EncDspInitSSE2(void) {
  VP8CollectHistogram = CollectHistogramSSE2;
  VP8EncQuantizeBlock = QuantizeBlockSSE2;
  VP8ITransform = ITransformSSE2;
  VP8FTransform = FTransformSSE2;
  VP8SSE4x4 = SSE4x4SSE2;
  VP8TDisto4x4 = Disto4x4SSE2;
  VP8TDisto16x16 = Disto16x16SSE2;
}

#if defined(__cplusplus) || defined(c_plusplus)
}    // extern "C"
#endif

#endif   // defined(__SSE2__) || defined(_MSC_VER)
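
// Note: VP8EncDspInitSSE2() is expected to be invoked by the generic encoder
// DSP initializer only after a runtime check has confirmed SSE2 support,
// along the lines of (hypothetical dispatcher sketch; 'cpu_has_sse2' is not
// a real symbol here):
//   if (cpu_has_sse2()) VP8EncDspInitSSE2();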