/*
 * Copyright 2014 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkColor_opts_SSE2_DEFINED
#define SkColor_opts_SSE2_DEFINED

#include <emmintrin.h>

#define ASSERT_EQ(a,b) SkASSERT(0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8((a), (b))))

// Because SSE2 has no _mm_mul_epi32(), we emulate it here.
// Multiplies 4 32-bit integers from a by 4 32-bit integers from b.
// Each of the 4 products must fit in a 32-bit integer, otherwise it overflows.
static inline __m128i Multiply32_SSE2(const __m128i& a, const __m128i& b) {
    // Calculate results of a0 * b0 and a2 * b2.
    __m128i r1 = _mm_mul_epu32(a, b);
    // Calculate results of a1 * b1 and a3 * b3.
    __m128i r2 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));
    // Shuffle the low 32 bits of each 64-bit product into place and interleave the results.
    __m128i r = _mm_unpacklo_epi32(_mm_shuffle_epi32(r1, _MM_SHUFFLE(0,0,2,0)),
                                   _mm_shuffle_epi32(r2, _MM_SHUFFLE(0,0,2,0)));
    return r;
}

static inline __m128i SkAlpha255To256_SSE2(const __m128i& alpha) {
    return _mm_add_epi32(alpha, _mm_set1_epi32(1));
}

// See #define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b) in SkXfermode.cpp.
static inline __m128i SkAlphaMulAlpha_SSE2(const __m128i& a,
                                           const __m128i& b) {
    __m128i prod = _mm_mullo_epi16(a, b);
    prod = _mm_add_epi32(prod, _mm_set1_epi32(128));
    prod = _mm_add_epi32(prod, _mm_srli_epi32(prod, 8));
    prod = _mm_srli_epi32(prod, 8);

    return prod;
}

// Portable version SkAlphaMulQ is in SkColorPriv.h.
static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const __m128i& scale) {
    const __m128i mask = _mm_set1_epi32(0xFF00FF);
    __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);

    // uint32_t rb = ((c & mask) * scale) >> 8
    __m128i rb = _mm_and_si128(mask, c);
    rb = _mm_mullo_epi16(rb, s);
    rb = _mm_srli_epi16(rb, 8);

    // uint32_t ag = ((c >> 8) & mask) * scale
    __m128i ag = _mm_srli_epi16(c, 8);
    ASSERT_EQ(ag, _mm_and_si128(mask, ag));  // ag = _mm_srli_epi16(c, 8) did this for us.
    ag = _mm_mullo_epi16(ag, s);

    // (rb & mask) | (ag & ~mask)
    ASSERT_EQ(rb, _mm_and_si128(mask, rb));  // rb = _mm_srli_epi16(rb, 8) did this for us.
    ag = _mm_andnot_si128(mask, ag);
    return _mm_or_si128(rb, ag);
}

// Fast path for SkAlphaMulQ_SSE2 with a constant scale factor.
static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const unsigned scale) {
    const __m128i mask = _mm_set1_epi32(0xFF00FF);
    __m128i s = _mm_set1_epi16(scale << 8);  // Move scale factor to upper byte of word.

    // With mulhi, red and blue values are already in the right place and
    // don't need to be divided by 256.
    __m128i rb = _mm_and_si128(mask, c);
    rb = _mm_mulhi_epu16(rb, s);

    __m128i ag = _mm_andnot_si128(mask, c);
    ag = _mm_mulhi_epu16(ag, s);  // Alpha and green values are in the higher byte of each word.
    ag = _mm_andnot_si128(mask, ag);

    return _mm_or_si128(rb, ag);
}

static inline __m128i SkGetPackedA32_SSE2(const __m128i& src) {
#if SK_A32_SHIFT == 24                // It's very common (universal?) that alpha is the top byte.
    return _mm_srli_epi32(src, 24);   // You'd hope the compiler would remove the left shift then,
#else                                 // but I've seen Clang just do a dumb left shift of zero. :(
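    // Shift the alpha byte up to the top of each 32-bit lane, then back down to the
    // bottom byte, which also clears the other channels.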
    __m128i a = _mm_slli_epi32(src, (24 - SK_A32_SHIFT));
    return _mm_srli_epi32(a, 24);
#endif
}

static inline __m128i SkGetPackedR32_SSE2(const __m128i& src) {
    __m128i r = _mm_slli_epi32(src, (24 - SK_R32_SHIFT));
    return _mm_srli_epi32(r, 24);
}

static inline __m128i SkGetPackedG32_SSE2(const __m128i& src) {
    __m128i g = _mm_slli_epi32(src, (24 - SK_G32_SHIFT));
    return _mm_srli_epi32(g, 24);
}

static inline __m128i SkGetPackedB32_SSE2(const __m128i& src) {
    __m128i b = _mm_slli_epi32(src, (24 - SK_B32_SHIFT));
    return _mm_srli_epi32(b, 24);
}

static inline __m128i SkMul16ShiftRound_SSE2(const __m128i& a,
                                             const __m128i& b, int shift) {
    __m128i prod = _mm_mullo_epi16(a, b);
    prod = _mm_add_epi16(prod, _mm_set1_epi16(1 << (shift - 1)));
    prod = _mm_add_epi16(prod, _mm_srli_epi16(prod, shift));
    prod = _mm_srli_epi16(prod, shift);

    return prod;
}

static inline __m128i SkPackRGB16_SSE2(const __m128i& r,
                                       const __m128i& g, const __m128i& b) {
    __m128i dr = _mm_slli_epi16(r, SK_R16_SHIFT);
    __m128i dg = _mm_slli_epi16(g, SK_G16_SHIFT);
    __m128i db = _mm_slli_epi16(b, SK_B16_SHIFT);

    __m128i c = _mm_or_si128(dr, dg);
    return _mm_or_si128(c, db);
}

static inline __m128i SkPackARGB32_SSE2(const __m128i& a, const __m128i& r,
                                        const __m128i& g, const __m128i& b) {
    __m128i da = _mm_slli_epi32(a, SK_A32_SHIFT);
    __m128i dr = _mm_slli_epi32(r, SK_R32_SHIFT);
    __m128i dg = _mm_slli_epi32(g, SK_G32_SHIFT);
    __m128i db = _mm_slli_epi32(b, SK_B32_SHIFT);

    __m128i c = _mm_or_si128(da, dr);
    c = _mm_or_si128(c, dg);
    return _mm_or_si128(c, db);
}

static inline __m128i SkPacked16ToR32_SSE2(const __m128i& src) {
    __m128i r = _mm_srli_epi32(src, SK_R16_SHIFT);
    r = _mm_and_si128(r, _mm_set1_epi32(SK_R16_MASK));
    r = _mm_or_si128(_mm_slli_epi32(r, (8 - SK_R16_BITS)),
                     _mm_srli_epi32(r, (2 * SK_R16_BITS - 8)));

    return r;
}

static inline __m128i SkPacked16ToG32_SSE2(const __m128i& src) {
    __m128i g = _mm_srli_epi32(src, SK_G16_SHIFT);
    g = _mm_and_si128(g, _mm_set1_epi32(SK_G16_MASK));
    g = _mm_or_si128(_mm_slli_epi32(g, (8 - SK_G16_BITS)),
                     _mm_srli_epi32(g, (2 * SK_G16_BITS - 8)));

    return g;
}

static inline __m128i SkPacked16ToB32_SSE2(const __m128i& src) {
    __m128i b = _mm_srli_epi32(src, SK_B16_SHIFT);
    b = _mm_and_si128(b, _mm_set1_epi32(SK_B16_MASK));
    b = _mm_or_si128(_mm_slli_epi32(b, (8 - SK_B16_BITS)),
                     _mm_srli_epi32(b, (2 * SK_B16_BITS - 8)));

    return b;
}

static inline __m128i SkPixel16ToPixel32_SSE2(const __m128i& src) {
    __m128i r = SkPacked16ToR32_SSE2(src);
    __m128i g = SkPacked16ToG32_SSE2(src);
    __m128i b = SkPacked16ToB32_SSE2(src);

    return SkPackARGB32_SSE2(_mm_set1_epi32(0xFF), r, g, b);
}

static inline __m128i SkPixel32ToPixel16_ToU16_SSE2(const __m128i& src_pixel1,
                                                    const __m128i& src_pixel2) {
    // Calculate result r.
    __m128i r1 = _mm_srli_epi32(src_pixel1,
                                SK_R32_SHIFT + (8 - SK_R16_BITS));
    r1 = _mm_and_si128(r1, _mm_set1_epi32(SK_R16_MASK));
    __m128i r2 = _mm_srli_epi32(src_pixel2,
                                SK_R32_SHIFT + (8 - SK_R16_BITS));
    r2 = _mm_and_si128(r2, _mm_set1_epi32(SK_R16_MASK));
    __m128i r = _mm_packs_epi32(r1, r2);

    // Calculate result g.
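    // Same pattern as r: keep the top SK_G16_BITS bits of each green channel,
    // then pack both vectors down to eight 16-bit lanes.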
    __m128i g1 = _mm_srli_epi32(src_pixel1,
                                SK_G32_SHIFT + (8 - SK_G16_BITS));
    g1 = _mm_and_si128(g1, _mm_set1_epi32(SK_G16_MASK));
    __m128i g2 = _mm_srli_epi32(src_pixel2,
                                SK_G32_SHIFT + (8 - SK_G16_BITS));
    g2 = _mm_and_si128(g2, _mm_set1_epi32(SK_G16_MASK));
    __m128i g = _mm_packs_epi32(g1, g2);

    // Calculate result b.
    __m128i b1 = _mm_srli_epi32(src_pixel1,
                                SK_B32_SHIFT + (8 - SK_B16_BITS));
    b1 = _mm_and_si128(b1, _mm_set1_epi32(SK_B16_MASK));
    __m128i b2 = _mm_srli_epi32(src_pixel2,
                                SK_B32_SHIFT + (8 - SK_B16_BITS));
    b2 = _mm_and_si128(b2, _mm_set1_epi32(SK_B16_MASK));
    __m128i b = _mm_packs_epi32(b1, b2);

    // Store 8 16-bit colors in dst.
    __m128i d_pixel = SkPackRGB16_SSE2(r, g, b);

    return d_pixel;
}

// Portable version is SkPMSrcOver in SkColorPriv.h.
static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) {
    return _mm_add_epi32(src,
                         SkAlphaMulQ_SSE2(dst, _mm_sub_epi32(_mm_set1_epi32(256),
                                                             SkGetPackedA32_SSE2(src))));
}

// Portable version is SkBlendARGB32 in SkColorPriv.h.
static inline __m128i SkBlendARGB32_SSE2(const __m128i& src, const __m128i& dst,
                                         const __m128i& aa) {
    __m128i src_scale = SkAlpha255To256_SSE2(aa);
    // SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale))
    __m128i dst_scale = SkGetPackedA32_SSE2(src);
    dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
    dst_scale = _mm_srli_epi16(dst_scale, 8);
    dst_scale = _mm_sub_epi32(_mm_set1_epi32(256), dst_scale);

    __m128i result = SkAlphaMulQ_SSE2(src, src_scale);
    return _mm_add_epi8(result, SkAlphaMulQ_SSE2(dst, dst_scale));
}

// Fast path for SkBlendARGB32_SSE2 with a constant alpha factor.
static inline __m128i SkBlendARGB32_SSE2(const __m128i& src, const __m128i& dst,
                                         const unsigned aa) {
    unsigned alpha = SkAlpha255To256(aa);
    __m128i src_scale = _mm_set1_epi32(alpha);
    // SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale))
    __m128i dst_scale = SkGetPackedA32_SSE2(src);
    dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
    dst_scale = _mm_srli_epi16(dst_scale, 8);
    dst_scale = _mm_sub_epi32(_mm_set1_epi32(256), dst_scale);

    __m128i result = SkAlphaMulQ_SSE2(src, alpha);
    return _mm_add_epi8(result, SkAlphaMulQ_SSE2(dst, dst_scale));
}

#undef ASSERT_EQ
#endif  // SkColor_opts_SSE2_DEFINED
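
/*
 * Illustrative usage sketch (not part of the original header): one plausible way
 * to drive SkPMSrcOver_SSE2 over a row of premultiplied 32-bit pixels, four at a
 * time, with unaligned loads/stores. The function and its name are hypothetical;
 * Skia's real blitters handle alignment, tails, and special cases (e.g. fully
 * opaque or fully transparent source) differently.
 *
 *   static void srcover_row_sketch(SkPMColor* dst, const SkPMColor* src, int count) {
 *       int i = 0;
 *       for (; i + 4 <= count; i += 4) {
 *           __m128i s = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src + i));
 *           __m128i d = _mm_loadu_si128(reinterpret_cast<const __m128i*>(dst + i));
 *           _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + i), SkPMSrcOver_SSE2(s, d));
 *       }
 *       for (; i < count; i++) {   // Scalar tail for the last 0-3 pixels.
 *           dst[i] = SkPMSrcOver(src[i], dst[i]);
 *       }
 *   }
 */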