/*
 * Copyright (C) 2006 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SkColorPriv_DEFINED
#define SkColorPriv_DEFINED

// turn this on for extra debug checking when blending onto 565
#ifdef SK_DEBUG
    #define CHECK_FOR_565_OVERFLOW
#endif

#include "SkColor.h"
#include "SkMath.h"

/** Turn 0..255 into 0..256 by adding 1 at the half-way point. Used to turn a
    byte into a scale value, so that we can say scale * value >> 8 instead of
    alpha * value / 255.

    In debugging, asserts that alpha is 0..255
*/
static inline unsigned SkAlpha255To256(U8CPU alpha) {
    SkASSERT(SkToU8(alpha) == alpha);
    // this one assumes that blending on top of an opaque dst keeps it that way
    // even though it is less accurate than a+(a>>7) for non-opaque dsts
    return alpha + 1;
}

/** Multiply value by 0..256, and shift the result down 8
    (i.e. return (value * alpha256) >> 8)
*/
#define SkAlphaMul(value, alpha256)     (SkMulS16(value, alpha256) >> 8)

// The caller may want negative values, so keep all params signed (int)
// so we don't accidentally slip into unsigned math and lose the sign
// extension when we shift (in SkAlphaMul)
static inline int SkAlphaBlend(int src, int dst, int scale256) {
    SkASSERT((unsigned)scale256 <= 256);
    return dst + SkAlphaMul(src - dst, scale256);
}

#define SK_R16_BITS     5
#define SK_G16_BITS     6
#define SK_B16_BITS     5

#define SK_R16_SHIFT    (SK_B16_BITS + SK_G16_BITS)
#define SK_G16_SHIFT    (SK_B16_BITS)
#define SK_B16_SHIFT    0

#define SK_R16_MASK     ((1 << SK_R16_BITS) - 1)
#define SK_G16_MASK     ((1 << SK_G16_BITS) - 1)
#define SK_B16_MASK     ((1 << SK_B16_BITS) - 1)

#define SkGetPackedR16(color)   (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
#define SkGetPackedG16(color)   (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
#define SkGetPackedB16(color)   (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)

#define SkR16Assert(r)  SkASSERT((unsigned)(r) <= SK_R16_MASK)
#define SkG16Assert(g)  SkASSERT((unsigned)(g) <= SK_G16_MASK)
#define SkB16Assert(b)  SkASSERT((unsigned)(b) <= SK_B16_MASK)

static inline uint16_t SkPackRGB16(unsigned r, unsigned g, unsigned b) {
    SkASSERT(r <= SK_R16_MASK);
    SkASSERT(g <= SK_G16_MASK);
    SkASSERT(b <= SK_B16_MASK);

    return SkToU16((r << SK_R16_SHIFT) | (g << SK_G16_SHIFT) | (b << SK_B16_SHIFT));
}

#define SK_R16_MASK_IN_PLACE        (SK_R16_MASK << SK_R16_SHIFT)
#define SK_G16_MASK_IN_PLACE        (SK_G16_MASK << SK_G16_SHIFT)
#define SK_B16_MASK_IN_PLACE        (SK_B16_MASK << SK_B16_SHIFT)
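/*  Illustrative sketch (not part of the original header; the helper name below
    is hypothetical): the 565 field layout implied by the shifts above, and a
    pack/unpack round trip.

        bits 15..11  red   (5 bits)
        bits 10..5   green (6 bits)
        bits  4..0   blue  (5 bits)
*/
#if 0
static inline bool rgb16_roundtrip_example(void) {
    uint16_t c = SkPackRGB16(0x1F, 0x2A, 0x07);
    return SkGetPackedR16(c) == 0x1F &&
           SkGetPackedG16(c) == 0x2A &&
           SkGetPackedB16(c) == 0x07;
}
#endif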
/** Expand the 16bit color into a 32bit value that can be scaled all at once
    by a value up to 32. Used in conjunction with SkCompact_rgb_16.
*/
static inline uint32_t SkExpand_rgb_16(U16CPU c) {
    SkASSERT(c == (uint16_t)c);

    return ((c & SK_G16_MASK_IN_PLACE) << 16) | (c & ~SK_G16_MASK_IN_PLACE);
}

/** Compress an expanded value (from SkExpand_rgb_16) back down to a 16bit
    color value. The computation yields only 16bits of valid data, but we claim
    to return 32bits, so that the compiler won't generate extra instructions to
    "clean" the top 16bits. However, the top 16 can contain garbage, so it is
    up to the caller to safely ignore them.
*/
static inline U16CPU SkCompact_rgb_16(uint32_t c) {
    return ((c >> 16) & SK_G16_MASK_IN_PLACE) | (c & ~SK_G16_MASK_IN_PLACE);
}

/** Scale the 16bit color value by the 0..256 scale parameter.
    The computation yields only 16bits of valid data, but we claim
    to return 32bits, so that the compiler won't generate extra instructions to
    "clean" the top 16bits.
*/
static inline U16CPU SkAlphaMulRGB16(U16CPU c, unsigned scale) {
    return SkCompact_rgb_16(SkExpand_rgb_16(c) * (scale >> 3) >> 5);
}

// this helper explicitly returns a clean 16bit value (but slower)
#define SkAlphaMulRGB16_ToU16(c, s)     (uint16_t)SkAlphaMulRGB16(c, s)

/** Blend src and dst 16bit colors by the 0..256 scale parameter.
    The computation yields only 16bits of valid data, but we claim
    to return 32bits, so that the compiler won't generate extra instructions to
    "clean" the top 16bits.
*/
static inline U16CPU SkBlendRGB16(U16CPU src, U16CPU dst, int srcScale) {
    SkASSERT((unsigned)srcScale <= 256);

    srcScale >>= 3;

    uint32_t src32 = SkExpand_rgb_16(src);
    uint32_t dst32 = SkExpand_rgb_16(dst);
    return SkCompact_rgb_16(dst32 + ((src32 - dst32) * srcScale >> 5));
}

static inline void SkBlendRGB16(const uint16_t src[], uint16_t dst[],
                                int srcScale, int count) {
    SkASSERT(count > 0);
    SkASSERT((unsigned)srcScale <= 256);

    srcScale >>= 3;

    do {
        uint32_t src32 = SkExpand_rgb_16(*src++);
        uint32_t dst32 = SkExpand_rgb_16(*dst);
        *dst++ = SkCompact_rgb_16(dst32 + ((src32 - dst32) * srcScale >> 5));
    } while (--count > 0);
}

#ifdef SK_DEBUG
static inline U16CPU SkRGB16Add(U16CPU a, U16CPU b) {
    SkASSERT(SkGetPackedR16(a) + SkGetPackedR16(b) <= SK_R16_MASK);
    SkASSERT(SkGetPackedG16(a) + SkGetPackedG16(b) <= SK_G16_MASK);
    SkASSERT(SkGetPackedB16(a) + SkGetPackedB16(b) <= SK_B16_MASK);

    return a + b;
}
#else
    #define SkRGB16Add(a, b)    ((a) + (b))
#endif
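/*  Illustrative sketch (not part of the original header; the helper name below
    is hypothetical): why SkExpand_rgb_16 lets a 565 pixel be scaled with one
    multiply. The expand moves green into the high half-word, so each of R, G
    and B has spare zero bits above it and a 0..32 scale cannot carry from one
    component into the next.

        565 pixel          rrrrrGGG GGGbbbbb
        expanded (32 bit)  00000GGG GGG00000 rrrrr000 000bbbbb
*/
#if 0
static inline uint16_t half_565_white_example(void) {
    // Scale opaque 565 white to ~50%: each field is halved without carrying
    // into its neighbour.
    uint16_t c = SkPackRGB16(SK_R16_MASK, SK_G16_MASK, SK_B16_MASK);
    return SkAlphaMulRGB16_ToU16(c, 128);   // == SkPackRGB16(15, 31, 15)
}
#endif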
///////////////////////////////////////////////////////////////////////////////

#define SK_A32_BITS     8
#define SK_R32_BITS     8
#define SK_G32_BITS     8
#define SK_B32_BITS     8

#define SK_A32_MASK     ((1 << SK_A32_BITS) - 1)
#define SK_R32_MASK     ((1 << SK_R32_BITS) - 1)
#define SK_G32_MASK     ((1 << SK_G32_BITS) - 1)
#define SK_B32_MASK     ((1 << SK_B32_BITS) - 1)

#define SkGetPackedA32(packed)      ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
#define SkGetPackedR32(packed)      ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
#define SkGetPackedG32(packed)      ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
#define SkGetPackedB32(packed)      ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)

#define SkA32Assert(a)  SkASSERT((unsigned)(a) <= SK_A32_MASK)
#define SkR32Assert(r)  SkASSERT((unsigned)(r) <= SK_R32_MASK)
#define SkG32Assert(g)  SkASSERT((unsigned)(g) <= SK_G32_MASK)
#define SkB32Assert(b)  SkASSERT((unsigned)(b) <= SK_B32_MASK)

#ifdef SK_DEBUG
static inline void SkPMColorAssert(SkPMColor c) {
    unsigned a = SkGetPackedA32(c);
    unsigned r = SkGetPackedR32(c);
    unsigned g = SkGetPackedG32(c);
    unsigned b = SkGetPackedB32(c);

    SkA32Assert(a);
    SkASSERT(r <= a);
    SkASSERT(g <= a);
    SkASSERT(b <= a);
}
#else
    #define SkPMColorAssert(c)
#endif

/**
 *  Pack the components into a SkPMColor, checking (in the debug version) that
 *  the components are 0..255, and are already premultiplied (i.e. alpha >= color)
 */
static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    SkA32Assert(a);
    SkASSERT(r <= a);
    SkASSERT(g <= a);
    SkASSERT(b <= a);

    return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
           (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
}

/**
 *  Same as SkPackARGB32, but this version guarantees to not check that the
 *  values are premultiplied in the debug version.
 */
static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
           (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
}

SK_API extern const uint32_t gMask_00FF00FF;

static inline uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
    uint32_t mask = gMask_00FF00FF;
//    uint32_t mask = 0xFF00FF;

    uint32_t rb = ((c & mask) * scale) >> 8;
    uint32_t ag = ((c >> 8) & mask) * scale;
    return (rb & mask) | (ag & ~mask);
}

static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
    return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
}

static inline SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa) {
    SkASSERT((unsigned)aa <= 255);

    unsigned src_scale = SkAlpha255To256(aa);
    unsigned dst_scale = SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale));

    return SkAlphaMulQ(src, src_scale) + SkAlphaMulQ(dst, dst_scale);
}
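/*  Illustrative sketch (not part of the original header; the helper name below
    is hypothetical): SkAlphaMulQ scales all four premultiplied components with
    two multiplies by splitting the pixel into the R|B and A|G byte pairs using
    the 0x00FF00FF mask; SkPMSrcOver then computes src + dst * (1 - srcAlpha).
*/
#if 0
static inline SkPMColor srcover_example(void) {
    SkPMColor src = SkPackARGB32(0x80, 0x80, 0x40, 0x20);  // half-transparent, premultiplied
    SkPMColor dst = SkPackARGB32(0xFF, 0xFF, 0xFF, 0xFF);  // opaque white
    return SkPMSrcOver(src, dst);   // src plus dst scaled by 128/256 per component
}
#endif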
////////////////////////////////////////////////////////////////////////////////////////////
// Convert a 32bit pixel to a 16bit pixel (no dither)

#define SkR32ToR16_MACRO(r)   ((unsigned)(r) >> (SK_R32_BITS - SK_R16_BITS))
#define SkG32ToG16_MACRO(g)   ((unsigned)(g) >> (SK_G32_BITS - SK_G16_BITS))
#define SkB32ToB16_MACRO(b)   ((unsigned)(b) >> (SK_B32_BITS - SK_B16_BITS))

#ifdef SK_DEBUG
static inline unsigned SkR32ToR16(unsigned r) {
    SkR32Assert(r);
    return SkR32ToR16_MACRO(r);
}
static inline unsigned SkG32ToG16(unsigned g) {
    SkG32Assert(g);
    return SkG32ToG16_MACRO(g);
}
static inline unsigned SkB32ToB16(unsigned b) {
    SkB32Assert(b);
    return SkB32ToB16_MACRO(b);
}
#else
    #define SkR32ToR16(r)   SkR32ToR16_MACRO(r)
    #define SkG32ToG16(g)   SkG32ToG16_MACRO(g)
    #define SkB32ToB16(b)   SkB32ToB16_MACRO(b)
#endif

#define SkPacked32ToR16(c)  (((unsigned)(c) >> (SK_R32_SHIFT + SK_R32_BITS - SK_R16_BITS)) & SK_R16_MASK)
#define SkPacked32ToG16(c)  (((unsigned)(c) >> (SK_G32_SHIFT + SK_G32_BITS - SK_G16_BITS)) & SK_G16_MASK)
#define SkPacked32ToB16(c)  (((unsigned)(c) >> (SK_B32_SHIFT + SK_B32_BITS - SK_B16_BITS)) & SK_B16_MASK)

static inline U16CPU SkPixel32ToPixel16(SkPMColor c) {
    unsigned r = ((c >> (SK_R32_SHIFT + (8 - SK_R16_BITS))) & SK_R16_MASK) << SK_R16_SHIFT;
    unsigned g = ((c >> (SK_G32_SHIFT + (8 - SK_G16_BITS))) & SK_G16_MASK) << SK_G16_SHIFT;
    unsigned b = ((c >> (SK_B32_SHIFT + (8 - SK_B16_BITS))) & SK_B16_MASK) << SK_B16_SHIFT;
    return r | g | b;
}

static inline U16CPU SkPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
    return  (SkR32ToR16(r) << SK_R16_SHIFT) |
            (SkG32ToG16(g) << SK_G16_SHIFT) |
            (SkB32ToB16(b) << SK_B16_SHIFT);
}

#define SkPixel32ToPixel16_ToU16(src)   SkToU16(SkPixel32ToPixel16(src))

/////////////////////////////////////////////////////////////////////////////////////////
// Fast dither from 32->16

#define SkShouldDitherXY(x, y)  (((x) ^ (y)) & 1)

static inline uint16_t SkDitherPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
    r = ((r << 1) - ((r >> (8 - SK_R16_BITS) << (8 - SK_R16_BITS)) | (r >> SK_R16_BITS))) >> (8 - SK_R16_BITS);
    g = ((g << 1) - ((g >> (8 - SK_G16_BITS) << (8 - SK_G16_BITS)) | (g >> SK_G16_BITS))) >> (8 - SK_G16_BITS);
    b = ((b << 1) - ((b >> (8 - SK_B16_BITS) << (8 - SK_B16_BITS)) | (b >> SK_B16_BITS))) >> (8 - SK_B16_BITS);

    return SkPackRGB16(r, g, b);
}

static inline uint16_t SkDitherPixel32ToPixel16(SkPMColor c) {
    return SkDitherPack888ToRGB16(SkGetPackedR32(c), SkGetPackedG32(c), SkGetPackedB32(c));
}

/*  Return c in expanded_rgb_16 format, but also scaled up by 32 (5 bits).
    It is now suitable for combining with a scaled expanded_rgb_16 color
    as in SkSrcOver32To16().
    We must do this 565 high-bit replication, in order for the subsequent add
    to saturate properly (and not overflow). If we take the 8 bits as is, it is
    possible to overflow.
*/
static inline uint32_t SkPMColorToExpanded16x5(SkPMColor c) {
    unsigned sr = SkPacked32ToR16(c);
    unsigned sg = SkPacked32ToG16(c);
    unsigned sb = SkPacked32ToB16(c);

    sr = (sr << 5) | sr;
    sg = (sg << 5) | (sg >> 1);
    sb = (sb << 5) | sb;
    return (sr << 11) | (sg << 21) | (sb << 0);
}
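/*  Illustrative sketch (not part of the original header; the helper name below
    is hypothetical): how the dither helpers above typically combine in a
    32 -> 16 blit, dithering on a 2x2 checkerboard chosen by SkShouldDitherXY.
*/
#if 0
static void blit_row_32_to_16_example(uint16_t dst[], const SkPMColor src[],
                                      int count, int x, int y) {
    for (int i = 0; i < count; i++) {
        dst[i] = SkShouldDitherXY(x + i, y) ? SkDitherPixel32ToPixel16(src[i])
                                            : SkPixel32ToPixel16_ToU16(src[i]);
    }
}
#endif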
/*  SrcOver the 32bit src color with the 16bit dst, returning a 16bit value
    (with dirt in the high 16bits, so caller beware).
*/
static inline U16CPU SkSrcOver32To16(SkPMColor src, uint16_t dst) {
    unsigned sr = SkGetPackedR32(src);
    unsigned sg = SkGetPackedG32(src);
    unsigned sb = SkGetPackedB32(src);

    unsigned dr = SkGetPackedR16(dst);
    unsigned dg = SkGetPackedG16(dst);
    unsigned db = SkGetPackedB16(dst);

    unsigned isa = 255 - SkGetPackedA32(src);

    dr = (sr + SkMul16ShiftRound(dr, isa, SK_R16_BITS)) >> (8 - SK_R16_BITS);
    dg = (sg + SkMul16ShiftRound(dg, isa, SK_G16_BITS)) >> (8 - SK_G16_BITS);
    db = (sb + SkMul16ShiftRound(db, isa, SK_B16_BITS)) >> (8 - SK_B16_BITS);

    return SkPackRGB16(dr, dg, db);
}

////////////////////////////////////////////////////////////////////////////////////////////
// Convert a 16bit pixel to a 32bit pixel

static inline unsigned SkR16ToR32(unsigned r) {
    return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
}

static inline unsigned SkG16ToG32(unsigned g) {
    return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
}

static inline unsigned SkB16ToB32(unsigned b) {
    return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
}

#define SkPacked16ToR32(c)      SkR16ToR32(SkGetPackedR16(c))
#define SkPacked16ToG32(c)      SkG16ToG32(SkGetPackedG16(c))
#define SkPacked16ToB32(c)      SkB16ToB32(SkGetPackedB16(c))

static inline SkPMColor SkPixel16ToPixel32(U16CPU src) {
    SkASSERT(src == SkToU16(src));

    unsigned r = SkPacked16ToR32(src);
    unsigned g = SkPacked16ToG32(src);
    unsigned b = SkPacked16ToB32(src);

    SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
    SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
    SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));

    return SkPackARGB32(0xFF, r, g, b);
}

// similar to SkPixel16ToPixel32, but returns SkColor instead of SkPMColor
static inline SkColor SkPixel16ToColor(U16CPU src) {
    SkASSERT(src == SkToU16(src));

    unsigned r = SkPacked16ToR32(src);
    unsigned g = SkPacked16ToG32(src);
    unsigned b = SkPacked16ToB32(src);

    SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
    SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
    SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));

    return SkColorSetRGB(r, g, b);
}

///////////////////////////////////////////////////////////////////////////////

typedef uint16_t SkPMColor16;

// Put in OpenGL order (r g b a)
#define SK_A4444_SHIFT    0
#define SK_R4444_SHIFT    12
#define SK_G4444_SHIFT    8
#define SK_B4444_SHIFT    4

#define SkA32To4444(a)  ((unsigned)(a) >> 4)
#define SkR32To4444(r)  ((unsigned)(r) >> 4)
#define SkG32To4444(g)  ((unsigned)(g) >> 4)
#define SkB32To4444(b)  ((unsigned)(b) >> 4)

static inline U8CPU SkReplicateNibble(unsigned nib) {
    SkASSERT(nib <= 0xF);
    return (nib << 4) | nib;
}

#define SkA4444ToA32(a)     SkReplicateNibble(a)
#define SkR4444ToR32(r)     SkReplicateNibble(r)
#define SkG4444ToG32(g)     SkReplicateNibble(g)
#define SkB4444ToB32(b)     SkReplicateNibble(b)

#define SkGetPackedA4444(c)     (((unsigned)(c) >> SK_A4444_SHIFT) & 0xF)
#define SkGetPackedR4444(c)     (((unsigned)(c) >> SK_R4444_SHIFT) & 0xF)
#define SkGetPackedG4444(c)     (((unsigned)(c) >> SK_G4444_SHIFT) & 0xF)
#define SkGetPackedB4444(c)     (((unsigned)(c) >> SK_B4444_SHIFT) & 0xF)
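/*  Illustrative sketch (not part of the original header; the helper name below
    is hypothetical): a 4444 nibble expands to 8 bits by replication, e.g.
    0xA -> 0xAA, so 0xF maps to 0xFF and 0x0 to 0x00 with no bias.
*/
#if 0
static inline unsigned replicate_nibble_example(void) {
    SkPMColor16 c4 = 0xA84F;    // r=0xA g=0x8 b=0x4 a=0xF (legal premultiplied)
    return SkR4444ToR32(SkGetPackedR4444(c4));  // SkReplicateNibble(0xA) == 0xAA
}
#endif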
#define SkPacked4444ToA32(c)    SkReplicateNibble(SkGetPackedA4444(c))
#define SkPacked4444ToR32(c)    SkReplicateNibble(SkGetPackedR4444(c))
#define SkPacked4444ToG32(c)    SkReplicateNibble(SkGetPackedG4444(c))
#define SkPacked4444ToB32(c)    SkReplicateNibble(SkGetPackedB4444(c))

#ifdef SK_DEBUG
static inline void SkPMColor16Assert(U16CPU c) {
    unsigned a = SkGetPackedA4444(c);
    unsigned r = SkGetPackedR4444(c);
    unsigned g = SkGetPackedG4444(c);
    unsigned b = SkGetPackedB4444(c);

    SkASSERT(a <= 0xF);
    SkASSERT(r <= a);
    SkASSERT(g <= a);
    SkASSERT(b <= a);
}
#else
    #define SkPMColor16Assert(c)
#endif

static inline unsigned SkAlpha15To16(unsigned a) {
    SkASSERT(a <= 0xF);
    return a + (a >> 3);
}

#ifdef SK_DEBUG
static inline int SkAlphaMul4(int value, int scale) {
    SkASSERT((unsigned)scale <= 0x10);
    return value * scale >> 4;
}
#else
    #define SkAlphaMul4(value, scale)   ((value) * (scale) >> 4)
#endif

static inline unsigned SkR4444ToR565(unsigned r) {
    SkASSERT(r <= 0xF);
    return (r << (SK_R16_BITS - 4)) | (r >> (8 - SK_R16_BITS));
}

static inline unsigned SkG4444ToG565(unsigned g) {
    SkASSERT(g <= 0xF);
    return (g << (SK_G16_BITS - 4)) | (g >> (8 - SK_G16_BITS));
}

static inline unsigned SkB4444ToB565(unsigned b) {
    SkASSERT(b <= 0xF);
    return (b << (SK_B16_BITS - 4)) | (b >> (8 - SK_B16_BITS));
}

static inline SkPMColor16 SkPackARGB4444(unsigned a, unsigned r,
                                         unsigned g, unsigned b) {
    SkASSERT(a <= 0xF);
    SkASSERT(r <= a);
    SkASSERT(g <= a);
    SkASSERT(b <= a);

    return (SkPMColor16)((a << SK_A4444_SHIFT) | (r << SK_R4444_SHIFT) |
                         (g << SK_G4444_SHIFT) | (b << SK_B4444_SHIFT));
}

extern const uint16_t gMask_0F0F;

static inline U16CPU SkAlphaMulQ4(U16CPU c, unsigned scale) {
    SkASSERT(scale <= 16);

    const unsigned mask = 0xF0F;    //gMask_0F0F;

#if 0
    unsigned rb = ((c & mask) * scale) >> 4;
    unsigned ag = ((c >> 4) & mask) * scale;
    return (rb & mask) | (ag & ~mask);
#else
    c = (c & mask) | ((c & (mask << 4)) << 12);
    c = c * scale >> 4;
    return (c & mask) | ((c >> 12) & (mask << 4));
#endif
}

/** Expand the SkPMColor16 color into a 32bit value that can be scaled all at
    once by a value up to 16. Used in conjunction with SkCompact_4444.
*/
static inline uint32_t SkExpand_4444(U16CPU c) {
    SkASSERT(c == (uint16_t)c);

    const unsigned mask = 0xF0F;    //gMask_0F0F;
    return (c & mask) | ((c & ~mask) << 12);
}
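/*  Illustrative sketch (not part of the original header; the helper name below
    is hypothetical): like the 565 and 8888 helpers above, SkAlphaMulQ4 splits
    the nibbles into two groups with the 0x0F0F mask so one multiply by a 0..16
    scale touches all four components without cross-component carries.
*/
#if 0
static inline U16CPU half_4444_white_example(void) {
    SkPMColor16 white = SkPackARGB4444(0xF, 0xF, 0xF, 0xF);
    return SkAlphaMulQ4(white, 8);  // == 0x7777, i.e. SkPackARGB4444(7, 7, 7, 7)
}
#endif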
/** Compress an expanded value (from SkExpand_4444) back down to a SkPMColor16.
    NOTE: this explicitly does not clean the top 16 bits (which may be garbage).
    It does this for speed, since if it is being written directly to 16bits of
    memory, the top 16bits will be ignored. Casting the result to uint16_t here
    would add 2 more instructions, slowing us down. It is up to the caller to
    perform the cast if needed.
*/
static inline U16CPU SkCompact_4444(uint32_t c) {
    const unsigned mask = 0xF0F;    //gMask_0F0F;
    return (c & mask) | ((c >> 12) & ~mask);
}

static inline uint16_t SkSrcOver4444To16(SkPMColor16 s, uint16_t d) {
    unsigned sa = SkGetPackedA4444(s);
    unsigned sr = SkR4444ToR565(SkGetPackedR4444(s));
    unsigned sg = SkG4444ToG565(SkGetPackedG4444(s));
    unsigned sb = SkB4444ToB565(SkGetPackedB4444(s));

    // To avoid overflow, we have to clear the low bit of the synthetic sg
    // if the src alpha is <= 7.
    // to see why, try blending 0x4444 on top of 565-white and watch green
    // overflow (sum == 64)
    sg &= ~(~(sa >> 3) & 1);

    unsigned scale = SkAlpha15To16(15 - sa);
    unsigned dr = SkAlphaMul4(SkGetPackedR16(d), scale);
    unsigned dg = SkAlphaMul4(SkGetPackedG16(d), scale);
    unsigned db = SkAlphaMul4(SkGetPackedB16(d), scale);

#if 0
    if (sg + dg > 63) {
        SkDebugf("---- SkSrcOver4444To16 src=%x dst=%x scale=%d, sg=%d dg=%d\n", s, d, scale, sg, dg);
    }
#endif
    return SkPackRGB16(sr + dr, sg + dg, sb + db);
}

static inline uint16_t SkBlend4444To16(SkPMColor16 src, uint16_t dst, int scale16) {
    SkASSERT((unsigned)scale16 <= 16);

    return SkSrcOver4444To16(SkAlphaMulQ4(src, scale16), dst);
}

static inline uint16_t SkBlend4444(SkPMColor16 src, SkPMColor16 dst, int scale16) {
    SkASSERT((unsigned)scale16 <= 16);

    uint32_t src32 = SkExpand_4444(src) * scale16;
    // the scaled srcAlpha is the bottom byte
#ifdef SK_DEBUG
    {
        unsigned srcA = SkGetPackedA4444(src) * scale16;
        SkASSERT(srcA == (src32 & 0xFF));
    }
#endif
    unsigned dstScale = SkAlpha255To256(255 - (src32 & 0xFF)) >> 4;
    uint32_t dst32 = SkExpand_4444(dst) * dstScale;
    return SkCompact_4444((src32 + dst32) >> 4);
}

static inline SkPMColor SkPixel4444ToPixel32(U16CPU c) {
    uint32_t d = (SkGetPackedA4444(c) << SK_A32_SHIFT) |
                 (SkGetPackedR4444(c) << SK_R32_SHIFT) |
                 (SkGetPackedG4444(c) << SK_G32_SHIFT) |
                 (SkGetPackedB4444(c) << SK_B32_SHIFT);
    return d | (d << 4);
}

static inline SkPMColor16 SkPixel32ToPixel4444(SkPMColor c) {
    return  (((c >> (SK_A32_SHIFT + 4)) & 0xF) << SK_A4444_SHIFT) |
            (((c >> (SK_R32_SHIFT + 4)) & 0xF) << SK_R4444_SHIFT) |
            (((c >> (SK_G32_SHIFT + 4)) & 0xF) << SK_G4444_SHIFT) |
            (((c >> (SK_B32_SHIFT + 4)) & 0xF) << SK_B4444_SHIFT);
}

// cheap 2x2 dither
static inline SkPMColor16 SkDitherARGB32To4444(U8CPU a, U8CPU r,
                                               U8CPU g, U8CPU b) {
    // to ensure that we stay a legal premultiplied color, we take the max()
    // of the truncated and dithered alpha values. If we didn't, cases like
    // SkDitherARGB32To4444(0x31, 0x2E, ...) would generate SkPackARGB4444(2, 3, ...)
    // which is not legal premultiplied, since a < color
    unsigned dithered_a = ((a << 1) - ((a >> 4 << 4) | (a >> 4))) >> 4;
    a = SkMax32(a >> 4, dithered_a);
    // these we just dither in place
    r = ((r << 1) - ((r >> 4 << 4) | (r >> 4))) >> 4;
    g = ((g << 1) - ((g >> 4 << 4) | (g >> 4))) >> 4;
    b = ((b << 1) - ((b >> 4 << 4) | (b >> 4))) >> 4;

    return SkPackARGB4444(a, r, g, b);
}

static inline SkPMColor16 SkDitherPixel32To4444(SkPMColor c) {
    return SkDitherARGB32To4444(SkGetPackedA32(c), SkGetPackedR32(c),
                                SkGetPackedG32(c), SkGetPackedB32(c));
}
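/*  Illustrative sketch (not part of the original header; the helper and
    parameter names below are hypothetical): blending a 4444 source onto a 565
    span with a per-pixel 0..16 coverage value, as an anti-aliased blitter
    might do using SkBlend4444To16.
*/
#if 0
static void blit_aa_4444_onto_565_example(uint16_t dst[], SkPMColor16 src,
                                          const uint8_t coverage16[], int count) {
    for (int i = 0; i < count; i++) {
        dst[i] = SkBlend4444To16(src, dst[i], coverage16[i]);   // coverage16[i] in 0..16
    }
}
#endif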
/*  Assumes 16bit is in standard RGBA order.
    Transforms a normal ARGB_8888 into the same byte order as
    expanded ARGB_4444, but keeps each component 8bits
*/
static inline uint32_t SkExpand_8888(SkPMColor c) {
    return  (((c >> SK_R32_SHIFT) & 0xFF) << 24) |
            (((c >> SK_G32_SHIFT) & 0xFF) <<  8) |
            (((c >> SK_B32_SHIFT) & 0xFF) << 16) |
            (((c >> SK_A32_SHIFT) & 0xFF) <<  0);
}

/*  Undo the operation of SkExpand_8888, turning the argument back into
    a SkPMColor.
*/
static inline SkPMColor SkCompact_8888(uint32_t c) {
    return  (((c >> 24) & 0xFF) << SK_R32_SHIFT) |
            (((c >>  8) & 0xFF) << SK_G32_SHIFT) |
            (((c >> 16) & 0xFF) << SK_B32_SHIFT) |
            (((c >>  0) & 0xFF) << SK_A32_SHIFT);
}

/*  Like SkExpand_8888, this transforms a pmcolor into the expanded 4444 format,
    but this routine just keeps the high 4bits of each component in the low
    4bits of the result (just like a newly expanded PMColor16).
*/
static inline uint32_t SkExpand32_4444(SkPMColor c) {
    return  (((c >> (SK_R32_SHIFT + 4)) & 0xF) << 24) |
            (((c >> (SK_G32_SHIFT + 4)) & 0xF) <<  8) |
            (((c >> (SK_B32_SHIFT + 4)) & 0xF) << 16) |
            (((c >> (SK_A32_SHIFT + 4)) & 0xF) <<  0);
}

// takes two values and alternates them as part of a memset16
// used for cheap 2x2 dithering when the colors are opaque
void sk_dither_memset16(uint16_t dst[], uint16_t value, uint16_t other, int n);

#endif
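/*  Illustrative sketch (not part of the original header; the helper and
    parameter names are hypothetical): sk_dither_memset16 fills a 565 scanline
    by alternating two precomputed values; swapping the two arguments on odd
    scanlines keeps the 2x2 dither pattern aligned.

        static void fill_dithered_rect_example(uint16_t* dst, size_t rowWords,
                                               uint16_t even, uint16_t odd,
                                               int width, int height) {
            for (int y = 0; y < height; y++) {
                if (y & 1) {
                    sk_dither_memset16(dst, odd, even, width);
                } else {
                    sk_dither_memset16(dst, even, odd, width);
                }
                dst += rowWords;
            }
        }
*/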