/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapProcState_opts_SSSE3.h"
#include "SkColorPriv.h"
#include "SkPaint.h"
#include "SkUtils.h"

/* We always build the SSSE3 functions and let the caller determine at runtime
 * whether to use them; however, for compilers that do not support SSSE3 we
 * provide stub implementations instead.
 */
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3

#include <tmmintrin.h>  // SSSE3

// An anonymous namespace appears to make gcc inline the instantiations
// directly, instead of emitting S32_generic_D32_filter_DX_SSSE3<true> and
// S32_generic_D32_filter_DX_SSSE3<false> as separate functions that are then
// called by the external wrappers.
namespace {
// In this file, the alpha and non-alpha variations are implemented with a
// template, as it makes the code more compact and a bit easier to maintain,
// while making the compiler generate the exact same code as two functions
// that differ by only a few lines.


// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept to argument two of
//           S32_{opaque}_D32_filter_DX methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_x_result vector of 8 bit components that will contain
//                     (4x(x3), 4x(x2), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_x vector of 8 bit components, containing
//                        (4x(16 - x3), 4x(16 - x2), 4x(16 - x1), 4x(16 - x0)).
inline void PrepareConstantsTwoPixelPairs(const uint32_t* xy,
                                          const __m128i& mask_3FFF,
                                          const __m128i& mask_000F,
                                          const __m128i& sixteen_8bit,
                                          const __m128i& mask_dist_select,
                                          __m128i* all_x_result,
                                          __m128i* sixteen_minus_x,
                                          int* x0,
                                          int* x1) {
    const __m128i xx = _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // 4 delta X
    // (x03, x02, x01, x00)
    const __m128i x0_wide = _mm_srli_epi32(xx, 18);
    // (x13, x12, x11, x10)
    const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF);

    _mm_storeu_si128(reinterpret_cast<__m128i *>(x0), x0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x1), x1_wide);

    __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);

    // (4x(x3), 4x(x2), 4x(x1), 4x(x0))
    all_x = _mm_shuffle_epi8(all_x, mask_dist_select);

    *all_x_result = all_x;
    // (4x(16-x3), 4x(16-x2), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_x = _mm_sub_epi8(sixteen_8bit, all_x);
}
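// For illustration only (not part of the original code): a scalar sketch of
// the decode performed above. Each packed word holds x0 in the top 14 bits,
// the 4-bit filter fraction next, and x1 in the bottom 14 bits, matching the
// remainder loops below. The helper name is hypothetical.
inline void DecodeXScalar(uint32_t xx, unsigned* x0, unsigned* subX,
                          unsigned* x1) {
    *x0 = xx >> 18;            // integer column of the left sample
    *subX = (xx >> 14) & 0xF;  // filter coefficient in [0:16)
    *x1 = xx & 0x3FFF;         // integer column of the right sample
}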
// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept to argument two of
//           S32_{opaque}_D32_filter_DXDY methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_xy_result vector of 8 bit components that will contain
//                      (4x(y1), 4x(y0), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_xy vector of 8 bit components, containing
//                         (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0)).
inline void PrepareConstantsTwoPixelPairsDXDY(const uint32_t* xy,
                                              const __m128i& mask_3FFF,
                                              const __m128i& mask_000F,
                                              const __m128i& sixteen_8bit,
                                              const __m128i& mask_dist_select,
                                              __m128i* all_xy_result,
                                              __m128i* sixteen_minus_xy,
                                              int* xy0, int* xy1) {
    const __m128i xy_wide =
                        _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // (x10, y10, x00, y00)
    __m128i xy0_wide = _mm_srli_epi32(xy_wide, 18);
    // (y10, y00, x10, x00)
    xy0_wide = _mm_shuffle_epi32(xy0_wide, _MM_SHUFFLE(2, 0, 3, 1));
    // (x11, y11, x01, y01)
    __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF);
    // (y11, y01, x11, x01)
    xy1_wide = _mm_shuffle_epi32(xy1_wide, _MM_SHUFFLE(2, 0, 3, 1));

    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy0), xy0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy1), xy1_wide);

    // (x1, y1, x0, y0)
    __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F);
    // (y1, y0, x1, x0)
    all_xy = _mm_shuffle_epi32(all_xy, _MM_SHUFFLE(2, 0, 3, 1));
    // (4x(y1), 4x(y0), 4x(x1), 4x(x0))
    all_xy = _mm_shuffle_epi8(all_xy, mask_dist_select);

    *all_xy_result = all_xy;
    // (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_xy = _mm_sub_epi8(sixteen_8bit, all_xy);
}
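// For illustration only: a scalar sketch of the per-pixel DXDY decode,
// matching the remainder loop of S32_generic_D32_filter_DXDY_SSSE3 below.
// Each output pixel consumes two packed words, one for y and one for x; the
// helper name is hypothetical.
inline void DecodeXYScalar(const uint32_t* xy,
                           unsigned* y0, unsigned* subY, unsigned* y1,
                           unsigned* x0, unsigned* subX, unsigned* x1) {
    *y0 = xy[0] >> 18;            // row of the top samples
    *subY = (xy[0] >> 14) & 0xF;  // vertical filter coefficient in [0:16)
    *y1 = xy[0] & 0x3FFF;         // row of the bottom samples
    *x0 = xy[1] >> 18;            // column of the left samples
    *subX = (xy[1] >> 14) & 0xF;  // horizontal filter coefficient in [0:16)
    *x1 = xy[1] & 0x3FFF;         // column of the right samples
}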
// Helper function used when processing one pixel pair.
// @param pixel0..3 are the four input pixels
// @param scale_x vector of 8 bit components to multiply the pixel[0:3]. This
//                will contain (4x(x1, 16-x1), 4x(x0, 16-x0))
//                or (4x(x3, 16-x3), 4x(x2, 16-x2))
// @return a vector of 16 bit components containing:
//         (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
inline __m128i ProcessPixelPairHelper(uint32_t pixel0,
                                      uint32_t pixel1,
                                      uint32_t pixel2,
                                      uint32_t pixel3,
                                      const __m128i& scale_x) {
    __m128i a0, a1, a2, a3;
    // Load 2 pairs of pixels
    a0 = _mm_cvtsi32_si128(pixel0);
    a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave pixels.
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    a2 = _mm_cvtsi32_si128(pixel2);
    a3 = _mm_cvtsi32_si128(pixel3);
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2)
    a2 = _mm_unpacklo_epi8(a2, a3);

    // two pairs of pixel pairs, interleaved.
    // (Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2,
    //  Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi64(a0, a2);

    // multiply and sum to 16 bit components.
    // (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
    // At that point, we use up a bit less than 12 bits for each 16 bit
    // component:
    // All components are less than 255. So,
    // C0 * (16 - x) + C1 * x <= 255 * (16 - x) + 255 * x = 255 * 16.
    return _mm_maddubs_epi16(a0, scale_x);
}

// Scale back the results after multiplications to the [0:255] range, and scale
// by alpha when has_alpha is true.
// Depending on whether one set or two sets of multiplications had been applied,
// the results have to be shifted by four places (dividing by 16), or shifted
// by eight places (dividing by 256), since each multiplication is by a quantity
// in the range [0:16].
template<bool has_alpha, int scale>
inline __m128i ScaleFourPixels(__m128i* pixels,
                               const __m128i& alpha) {
    // Divide each 16 bit component by 16 (or 256 depending on scale).
    *pixels = _mm_srli_epi16(*pixels, scale);

    if (has_alpha) {
        // Multiply by alpha.
        *pixels = _mm_mullo_epi16(*pixels, alpha);

        // Divide each 16 bit component by 256.
        *pixels = _mm_srli_epi16(*pixels, 8);
    }
    return *pixels;
}
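// Scalar sketch (illustration only, hypothetical helper) of the alpha step
// in ScaleFourPixels for one 16-bit component: once the filtered value is
// back in [0:255], it is multiplied by fAlphaScale (at most 255 when
// has_alpha is true) and renormalized by a shift.
inline unsigned ScaleByAlphaScalar(unsigned component, unsigned alphaScale) {
    // component <= 255 and alphaScale <= 255, so
    // component * alphaScale <= 65025 < 65536: no 16 bit overflow.
    return (component * alphaScale) >> 8;
}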
// Wrapper to calculate two output pixels from four input pixels. The
// arguments are the same as ProcessPixelPairHelper. Technically, there are
// eight input pixels, but since sub_y == 0, the factors applied to half of the
// pixels are zero (sub_y), and those pixels are therefore omitted here to save
// on some processing.
// @param alpha when has_alpha is true, scale all resulting components by this
//              value.
// @return a vector of 16 bit components containing:
//     ((Aa2 * (16 - x1) + Aa3 * x1) * alpha, ...,
//      (Ra0 * (16 - x0) + Ra1 * x0) * alpha) (when has_alpha is true)
//     otherwise
//     (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
// In both cases, the results are renormalized (divided by 16) to match the
// expected formats when storing back the results into memory.
template<bool has_alpha>
inline __m128i ProcessPixelPairZeroSubY(uint32_t pixel0,
                                        uint32_t pixel1,
                                        uint32_t pixel2,
                                        uint32_t pixel3,
                                        const __m128i& scale_x,
                                        const __m128i& alpha) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);
    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Same as ProcessPixelPairZeroSubY, except processing one output pixel at a
// time instead of two. As in the above function, only two pixels are needed
// to generate a single output pixel since sub_y == 0.
// @return same as ProcessPixelPairZeroSubY, except that only the bottom 4
//     16 bit components are set.
template<bool has_alpha>
inline __m128i ProcessOnePixelZeroSubY(uint32_t pixel0,
                                       uint32_t pixel1,
                                       __m128i scale_x,
                                       __m128i alpha) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    __m128i sum = _mm_maddubs_epi16(a0, scale_x);

    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Methods when sub_y != 0


// Same as ProcessPixelPairHelper, except that the values are scaled by y.
// @param y vector of 16 bit components containing 'y' values. There are two
//     cases in practice, where y will contain the sub_y constant, or will
//     contain the 16 - sub_y constant.
// @return vector of 16 bit components containing:
//     (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
//      y * (Ra0 * (16 - x0) + Ra1 * x0))
inline __m128i ProcessPixelPair(uint32_t pixel0,
                                uint32_t pixel1,
                                uint32_t pixel2,
                                uint32_t pixel3,
                                const __m128i& scale_x,
                                const __m128i& y) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);

    // first row times 16-y or y depending on whether 'y' represents one or
    // the other.
    // Values will be up to 255 * 16 * 16 = 65280.
    // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
    //  y * (Ra0 * (16 - x0) + Ra1 * x0))
    sum = _mm_mullo_epi16(sum, y);

    return sum;
}

// Process two pixel pairs out of eight input pixels.
// In other methods, the distinct pixels are passed one by one; in this case,
// the rows and the index offsets into the rows are passed instead, and the
// eight pixels are fetched here.
// @param row0..1 top and bottom row where to find input pixels.
// @param x0..1 offsets into the rows for all eight input pixels.
// @param all_y vector of 16 bit components containing the constant sub_y
// @param neg_y vector of 16 bit components containing the constant 16 - sub_y
// @param alpha vector of 16 bit components containing the alpha value to scale
//     the results by, when has_alpha is true.
// @return
//     (alpha * ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
//               y * (Aa2' * (16-x1) + Aa3' * x1)),
//      ...
//      alpha * ((16-y) * (Ra0 * (16-x0) + Ra1 * x0) +
//               y * (Ra0' * (16-x0) + Ra1' * x0)))
// With the factor alpha removed when has_alpha is false.
// The values are scaled back to 16 bit components, but with only the bottom
// 8 bits being set.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairs(const uint32_t* row0,
                                    const uint32_t* row1,
                                    const int* x0,
                                    const int* x1,
                                    const __m128i& scale_x,
                                    const __m128i& all_y,
                                    const __m128i& neg_y,
                                    const __m128i& alpha) {
    __m128i sum0 = ProcessPixelPair(
        row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
        scale_x, neg_y);
    __m128i sum1 = ProcessPixelPair(
        row1[x0[0]], row1[x1[0]], row1[x0[1]], row1[x1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y * (Aa2' * (16-x1) + Aa3' * x1),
    //  ...
    //  (16-y) * (Ra0 * (16 - x0) + Ra1 * x0) +
    //  y * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}
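// A scalar reference (illustration only, hypothetical helper) for one color
// channel of the blend computed by ProcessTwoPixelPairs: a00/a01 are the two
// top-row samples, a10/a11 the bottom-row samples, and x/y are the 4-bit
// filter coefficients.
inline unsigned BilinearScalar(unsigned a00, unsigned a01,
                               unsigned a10, unsigned a11,
                               unsigned x, unsigned y) {
    unsigned top = a00 * (16 - x) + a01 * x;     // at most 255 * 16
    unsigned bottom = a10 * (16 - x) + a11 * x;  // at most 255 * 16
    // (16 - y) * top + y * bottom <= 255 * 16 * 16 = 65280 < 65536, so the
    // accumulation fits in the 16 bit lanes used by the vector code.
    return ((16 - y) * top + y * bottom) >> 8;   // renormalize to [0:255]
}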
// Similar to ProcessTwoPixelPairs, except that each output pixel comes from
// its own pair of rows and uses its own x and y coefficients (indexed via
// xy0/xy1).
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairsDXDY(const uint32_t* row00,
                                        const uint32_t* row01,
                                        const uint32_t* row10,
                                        const uint32_t* row11,
                                        const int* xy0,
                                        const int* xy1,
                                        const __m128i& scale_x,
                                        const __m128i& all_y,
                                        const __m128i& neg_y,
                                        const __m128i& alpha) {
    // first row
    __m128i sum0 = ProcessPixelPair(
        row00[xy0[0]], row00[xy1[0]], row10[xy0[1]], row10[xy1[1]],
        scale_x, neg_y);
    // second row
    __m128i sum1 = ProcessPixelPair(
        row01[xy0[0]], row01[xy1[0]], row11[xy0[1]], row11[xy1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y1) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y1 * (Aa2' * (16-x1) + Aa3' * x1),
    //  ...
    //  (16-y0) * (Ra0 * (16 - x0) + Ra1 * x0) +
    //  y0 * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}


// Same as ProcessPixelPair, except that it performs the math one output pixel
// at a time. This means that only the bottom four 16 bit components are set.
inline __m128i ProcessOnePixel(uint32_t pixel0, uint32_t pixel1,
                               const __m128i& scale_x, const __m128i& y) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    a0 = _mm_maddubs_epi16(a0, scale_x);

    // scale row by y
    return _mm_mullo_epi16(a0, y);
}

// Notes about the various tricks that are used in this implementation:
// - specialization for sub_y == 0.
// Statistically, 1/16th of the samples will have sub_y == 0. When this
// happens, the math goes from:
// (16 - x)*(16 - y)*a00 + x*(16 - y)*a01 + (16 - x)*y*a10 + x*y*a11
// to:
// 16*((16 - x)*a00 + x*a01)
// which is much simpler. The simplification makes for an easy boost in
// performance.
// - calculating 4 output pixels at a time.
// This allows loading the coefficients x0 and x1 and shuffling them to the
// optimum location only once per loop, instead of twice per loop.
// This also allows us to store the four pixels with a single store.
// - Use of 2 special SSSE3 instructions (compared with the SSE2 version):
// _mm_shuffle_epi8 : this allows us to spread the coefficients x[0-3] loaded
// in 32 bit values to 8 bit values repeated four times.
// _mm_maddubs_epi16 : this allows us to perform multiplications and additions
// in one pass over 8 bit values, storing the results in 16 bit values. This
// instruction is actually crucial for the speed of the implementation since,
// as one can see in the SSE2 implementation, all inputs have to be widened to
// 16 bits because the results are 16 bits. This basically allows us to process
// twice as many pixel components per iteration.
//
// As a result, this method is faster than the traditional SSE2 version. The
// actual boost varies greatly with the underlying architecture.
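// A minimal, self-contained sketch (illustration only, hypothetical helper)
// of the _mm_maddubs_epi16 trick described above: interleaved unsigned 8-bit
// pixels multiply against interleaved weights, and the byte products are
// pair-wise added into 16 bit lanes, computing a0*(16-x) + a1*x per channel
// in a single instruction.
inline __m128i WeighPixelPairSketch(uint32_t p0, uint32_t p1, int x) {
    // (..., Ga1, Ga0, Ra1, Ra0) as bytes.
    __m128i pixels = _mm_unpacklo_epi8(_mm_cvtsi32_si128(p0),
                                       _mm_cvtsi32_si128(p1));
    // Each 16 bit lane holds the byte pair (x, 16-x).
    __m128i weights = _mm_set1_epi16((short)((x << 8) | (16 - x)));
    // 16 bit lanes now hold C0 * (16-x) + C1 * x for each channel C.
    return _mm_maddubs_epi16(pixels, weights);
}
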
template<bool has_alpha>
void S32_generic_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != kNone_SkFilterQuality);
    SkASSERT(kN32_SkColorType == s.fBitmap->colorType());
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
            static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();
    const uint32_t XY = *xy++;
    const unsigned y0 = XY >> 14;
    const uint32_t* row0 =
            reinterpret_cast<const uint32_t*>(src_addr + (y0 >> 4) * rb);
    const uint32_t* row1 =
            reinterpret_cast<const uint32_t*>(src_addr + (XY & 0x3FFF) * rb);
    const unsigned sub_y = y0 & 0xF;

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);
    // (0, 0, 0, 0, 0, 0, 0, 0)
    const __m128i zero = _mm_setzero_si128();

    __m128i alpha = _mm_setzero_si128();
    if (has_alpha) {
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);
    }

    if (sub_y == 0) {
        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs.
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
                scale_x, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[2]], row0[x1[2]], row0[x0[3]], row0[x1[3]],
                scale_x, alpha);

            // Pack the eight 16 bit values from both sums into 8 bit values.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store four pixels with a single store.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // handle remainder
        while (count-- > 0) {
            uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            unsigned x0 = xx >> 18;
            unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // (16x(16-x))
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            __m128i sum = ProcessOnePixelZeroSubY<has_alpha>(
                row0[x0], row0[x1],
                scale_x, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum = _mm_packus_epi16(sum, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum);
        }
    } else {  // more general case, sub_y != 0
        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(sub_y);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0, x1,
                scale_x, all_y, neg_y, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0 + 2, x1 + 2,
                scale_x, all_y, neg_y, alpha);

            // Do the final packing of the two results

            // Pack the eight 16 bit values from both sums into 8 bit values.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store four pixels with a single store.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // Left over.
        while (count-- > 0) {
            const uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            const unsigned x0 = xx >> 18;
            const unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // 16x (16-x)
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            // (8x (x, 16-x))
            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            // first row.
            __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
            // second row.
            __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

            // Add both rows for full sample
            sum0 = _mm_add_epi16(sum0, sum1);

            sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum0);
        }
    }
}

/*
 * Similar to S32_generic_D32_filter_DX_SSSE3, except that there is no need to
 * handle the special case sub_y == 0, since sub_y changes on every iteration.
 */
template<bool has_alpha>
void S32_generic_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                       const uint32_t* xy,
                                       int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != kNone_SkFilterQuality);
    SkASSERT(kN32_SkColorType == s.fBitmap->colorType());
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
            static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);

    // Zero-initialized to avoid reading an uninitialized value when
    // has_alpha is false (the value is unused in that case).
    __m128i alpha = _mm_setzero_si128();
    if (has_alpha) {
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);
    }

    // Unroll 2x, interleave bytes, use pmaddubsw (all_x is small)
    while (count >= 2) {
        int xy0[4];
        int xy1[4];
        __m128i all_xy, sixteen_minus_xy;
        PrepareConstantsTwoPixelPairsDXDY(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_xy, &sixteen_minus_xy,
                                          xy0, xy1);

        // (4x(x1, 16-x1), 4x(x0, 16-x0))
        __m128i scale_x = _mm_unpacklo_epi8(sixteen_minus_xy, all_xy);
        // (4x(0, y1), 4x(0, y0))
        __m128i all_y = _mm_unpackhi_epi8(all_xy, _mm_setzero_si128());
        __m128i neg_y = _mm_sub_epi16(_mm_set1_epi16(16), all_y);

        const uint32_t* row00 =
                reinterpret_cast<const uint32_t*>(src_addr + xy0[2] * rb);
        const uint32_t* row01 =
                reinterpret_cast<const uint32_t*>(src_addr + xy1[2] * rb);
        const uint32_t* row10 =
                reinterpret_cast<const uint32_t*>(src_addr + xy0[3] * rb);
        const uint32_t* row11 =
                reinterpret_cast<const uint32_t*>(src_addr + xy1[3] * rb);

        __m128i sum0 = ProcessTwoPixelPairsDXDY<has_alpha>(
            row00, row01, row10, row11, xy0, xy1,
            scale_x, all_y, neg_y, alpha);

        // Pack the 16 bit values of sum into 8 bit values.
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Store the lower 64 bits (two pixels).
        _mm_storel_epi64(reinterpret_cast<__m128i *>(colors), sum0);

        xy += 4;
        colors += 2;
        count -= 2;
    }

    // Handle the remainder
    while (count-- > 0) {
        uint32_t data = *xy++;
        unsigned y0 = data >> 14;
        unsigned y1 = data & 0x3FFF;
        unsigned subY = y0 & 0xF;
        y0 >>= 4;

        data = *xy++;
        unsigned x0 = data >> 14;
        unsigned x1 = data & 0x3FFF;
        unsigned subX = x0 & 0xF;
        x0 >>= 4;

        const uint32_t* row0 =
                reinterpret_cast<const uint32_t*>(src_addr + y0 * rb);
        const uint32_t* row1 =
                reinterpret_cast<const uint32_t*>(src_addr + y1 * rb);

        // 16x(x)
        const __m128i all_x = _mm_set1_epi8(subX);

        // 16x (16-x)
        __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

        // (8x (x, 16-x))
        scale_x = _mm_unpacklo_epi8(scale_x, all_x);

        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(subY);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // first row.
        __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
        // second row.
        __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

        // Add both rows for full sample
        sum0 = _mm_add_epi16(sum0, sum1);

        sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

        // Pack lower 4 16 bit values of sum into lower 4 bytes.
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Extract low int and store.
        *colors++ = _mm_cvtsi128_si32(sum0);
    }
}
}  // namespace

void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<true>(s, xy, count, colors);
}

void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<true>(s, xy, count, colors);
}

void S32_D16_filter_DX_SSSE3(const SkBitmapProcState& s,
                             const uint32_t* xy,
                             int count, uint16_t* colors) {
    SkASSERT(254 >= count);
    SkAutoSTMalloc<254, uint32_t> colors32(count);
    S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors32);
    for (int i = 0; i < count; i++) {
        *colors++ = SkPixel32ToPixel16(colors32[i]);
    }
}

void S32_D16_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                               const uint32_t* xy,
                               int count, uint16_t* colors) {
    SkASSERT(64 >= count);
    SkAutoSTMalloc<64, uint32_t> colors32(count);
    S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors32);
    for (int i = 0; i < count; i++) {
        *colors++ = SkPixel32ToPixel16(colors32[i]);
    }
}

#else  // SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3

void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    sk_throw();
}

void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    sk_throw();
}

void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    sk_throw();
}

void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    sk_throw();
}

void S32_D16_filter_DX_SSSE3(const SkBitmapProcState& s,
                             const uint32_t* xy,
                             int count, uint16_t* colors) {
    sk_throw();
}

void S32_D16_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                               const uint32_t* xy,
                               int count, uint16_t* colors) {
    sk_throw();
}

#endif