1 /* 2 * Copyright 2016 The Android Open Source Project 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8 #include "SkAnalyticEdge.h" 9 #include "SkAntiRun.h" 10 #include "SkAutoMalloc.h" 11 #include "SkBlitter.h" 12 #include "SkEdge.h" 13 #include "SkEdgeBuilder.h" 14 #include "SkGeometry.h" 15 #include "SkPath.h" 16 #include "SkQuadClipper.h" 17 #include "SkRasterClip.h" 18 #include "SkRegion.h" 19 #include "SkScan.h" 20 #include "SkScanPriv.h" 21 #include "SkTSort.h" 22 #include "SkTemplates.h" 23 #include "SkUtils.h" 24 25 /////////////////////////////////////////////////////////////////////////////// 26 27 /* 28 29 The following is a high-level overview of our analytic anti-aliasing 30 algorithm. We consider a path as a collection of line segments, as 31 quadratic/cubic curves are converted to small line segments. Without loss of 32 generality, let's assume that the draw region is [0, W] x [0, H]. 33 34 Our algorithm is based on horizontal scan lines (y = c_i) as the previous 35 sampling-based algorithm did. However, our algorithm uses non-equal-spaced 36 scan lines, while the previous method always uses equal-spaced scan lines, 37 such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm, 38 and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous 39 16-supersampling AA algorithm. 40 41 Our algorithm contains scan lines y = c_i for c_i that is either: 42 43 1. an integer between [0, H] 44 45 2. the y value of a line segment endpoint 46 47 3. the y value of an intersection of two line segments 48 49 For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically computes 50 the coverage of this horizontal strip of our path on each pixel. This can be 51 done very efficiently because the strip of our path now only consists of 52 trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes 53 rectangles and triangles as special cases). 54 55 We now describe how the coverage of single pixel is computed against such a 56 trapezoid. That coverage is essentially the intersection area of a rectangle 57 (e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection 58 could be complicated, as shown in the example region A below: 59 60 +-----------\----+ 61 | \ C| 62 | \ | 63 \ \ | 64 |\ A \| 65 | \ \ 66 | \ | 67 | B \ | 68 +----\-----------+ 69 70 However, we don't have to compute the area of A directly. Instead, we can 71 compute the excluded area, which are B and C, quite easily, because they're 72 just triangles. In fact, we can prove that an excluded region (take B as an 73 example) is either itself a simple trapezoid (including rectangles, triangles, 74 and empty regions), or its opposite (the opposite of B is A + C) is a simple 75 trapezoid. In any case, we can compute its area efficiently. 76 77 In summary, our algorithm has a higher quality because it generates ground- 78 truth coverages analytically. It is also faster because it has much fewer 79 unnessasary horizontal scan lines. For example, given a triangle path, the 80 number of scan lines in our algorithm is only about 3 + H while the 81 16-supersampling algorithm has about 4H scan lines. 82 83 */ 84 85 /////////////////////////////////////////////////////////////////////////////// 86 87 static inline void addAlpha(SkAlpha* alpha, SkAlpha delta) { 88 SkASSERT(*alpha + (int)delta <= 256); 89 *alpha = SkAlphaRuns::CatchOverflow(*alpha + (int)delta); 90 } 91 92 static inline void safelyAddAlpha(SkAlpha* alpha, SkAlpha delta) { 93 *alpha = SkTMin(0xFF, *alpha + (int)delta); 94 } 95 96 class AdditiveBlitter : public SkBlitter { 97 public: 98 ~AdditiveBlitter() override {} 99 100 virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0; 101 102 virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0; 103 virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0; 104 virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0; 105 106 void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override { 107 SkDEBUGFAIL("Please call real blitter's blitAntiH instead."); 108 } 109 110 void blitV(int x, int y, int height, SkAlpha alpha) override { 111 SkDEBUGFAIL("Please call real blitter's blitV instead."); 112 } 113 114 void blitH(int x, int y, int width) override { 115 SkDEBUGFAIL("Please call real blitter's blitH instead."); 116 } 117 118 void blitRect(int x, int y, int width, int height) override { 119 SkDEBUGFAIL("Please call real blitter's blitRect instead."); 120 } 121 122 void blitAntiRect(int x, int y, int width, int height, 123 SkAlpha leftAlpha, SkAlpha rightAlpha) override { 124 SkDEBUGFAIL("Please call real blitter's blitAntiRect instead."); 125 } 126 127 virtual int getWidth() = 0; 128 129 // Flush the additive alpha cache if floor(y) and floor(nextY) is different 130 // (i.e., we'll start working on a new pixel row). 131 virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0; 132 }; 133 134 // We need this mask blitter because it significantly accelerates small path filling. 135 class MaskAdditiveBlitter : public AdditiveBlitter { 136 public: 137 MaskAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, 138 bool isInverse); 139 ~MaskAdditiveBlitter() override { 140 fRealBlitter->blitMask(fMask, fClipRect); 141 } 142 143 // Most of the time, we still consider this mask blitter as the real blitter 144 // so we can accelerate blitRect and others. But sometimes we want to return 145 // the absolute real blitter (e.g., when we fall back to the old code path). 146 SkBlitter* getRealBlitter(bool forceRealBlitter) override { 147 return forceRealBlitter ? fRealBlitter : this; 148 } 149 150 // Virtual function is slow. So don't use this. Directly add alpha to the mask instead. 151 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override; 152 153 // Allowing following methods are used to blit rectangles during aaa_walk_convex_edges 154 // Since there aren't many rectangles, we can still bear the slow speed of virtual functions. 155 void blitAntiH(int x, int y, const SkAlpha alpha) override; 156 void blitAntiH(int x, int y, int width, const SkAlpha alpha) override; 157 void blitV(int x, int y, int height, SkAlpha alpha) override; 158 void blitRect(int x, int y, int width, int height) override; 159 void blitAntiRect(int x, int y, int width, int height, 160 SkAlpha leftAlpha, SkAlpha rightAlpha) override; 161 162 // The flush is only needed for RLE (RunBasedAdditiveBlitter) 163 void flush_if_y_changed(SkFixed y, SkFixed nextY) override {} 164 165 int getWidth() override { return fClipRect.width(); } 166 167 static bool canHandleRect(const SkIRect& bounds) { 168 int width = bounds.width(); 169 if (width > MaskAdditiveBlitter::kMAX_WIDTH) { 170 return false; 171 } 172 int64_t rb = SkAlign4(width); 173 // use 64bits to detect overflow 174 int64_t storage = rb * bounds.height(); 175 176 return (width <= MaskAdditiveBlitter::kMAX_WIDTH) && 177 (storage <= MaskAdditiveBlitter::kMAX_STORAGE); 178 } 179 180 // Return a pointer where pointer[x] corresonds to the alpha of (x, y) 181 inline uint8_t* getRow(int y) { 182 if (y != fY) { 183 fY = y; 184 fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft; 185 } 186 return fRow; 187 } 188 189 private: 190 // so we don't try to do very wide things, where the RLE blitter would be faster 191 static const int kMAX_WIDTH = 32; 192 static const int kMAX_STORAGE = 1024; 193 194 SkBlitter* fRealBlitter; 195 SkMask fMask; 196 SkIRect fClipRect; 197 // we add 2 because we can write 1 extra byte at either end due to precision error 198 uint32_t fStorage[(kMAX_STORAGE >> 2) + 2]; 199 200 uint8_t* fRow; 201 int fY; 202 }; 203 204 MaskAdditiveBlitter::MaskAdditiveBlitter( 205 SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) { 206 SkASSERT(canHandleRect(ir)); 207 SkASSERT(!isInverse); 208 209 fRealBlitter = realBlitter; 210 211 fMask.fImage = (uint8_t*)fStorage + 1; // There's 1 extra byte at either end of fStorage 212 fMask.fBounds = ir; 213 fMask.fRowBytes = ir.width(); 214 fMask.fFormat = SkMask::kA8_Format; 215 216 fY = ir.fTop - 1; 217 fRow = nullptr; 218 219 fClipRect = ir; 220 if (!fClipRect.intersect(clipBounds)) { 221 SkASSERT(0); 222 fClipRect.setEmpty(); 223 } 224 225 memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2); 226 } 227 228 void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) { 229 SK_ABORT("Don't use this; directly add alphas to the mask."); 230 } 231 232 void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) { 233 SkASSERT(x >= fMask.fBounds.fLeft -1); 234 addAlpha(&this->getRow(y)[x], alpha); 235 } 236 237 void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) { 238 SkASSERT(x >= fMask.fBounds.fLeft -1); 239 uint8_t* row = this->getRow(y); 240 for (int i = 0; i < width; ++i) { 241 addAlpha(&row[x + i], alpha); 242 } 243 } 244 245 void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) { 246 if (alpha == 0) { 247 return; 248 } 249 SkASSERT(x >= fMask.fBounds.fLeft -1); 250 // This must be called as if this is a real blitter. 251 // So we directly set alpha rather than adding it. 252 uint8_t* row = this->getRow(y); 253 for (int i = 0; i < height; ++i) { 254 row[x] = alpha; 255 row += fMask.fRowBytes; 256 } 257 } 258 259 void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) { 260 SkASSERT(x >= fMask.fBounds.fLeft -1); 261 // This must be called as if this is a real blitter. 262 // So we directly set alpha rather than adding it. 263 uint8_t* row = this->getRow(y); 264 for (int i = 0; i < height; ++i) { 265 memset(row + x, 0xFF, width); 266 row += fMask.fRowBytes; 267 } 268 } 269 270 void MaskAdditiveBlitter::blitAntiRect(int x, int y, int width, int height, 271 SkAlpha leftAlpha, SkAlpha rightAlpha) { 272 blitV(x, y, height, leftAlpha); 273 blitV(x + 1 + width, y, height, rightAlpha); 274 blitRect(x + 1, y, width, height); 275 } 276 277 class RunBasedAdditiveBlitter : public AdditiveBlitter { 278 public: 279 RunBasedAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, 280 bool isInverse); 281 ~RunBasedAdditiveBlitter() override; 282 283 SkBlitter* getRealBlitter(bool forceRealBlitter) override; 284 285 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override; 286 void blitAntiH(int x, int y, const SkAlpha alpha) override; 287 void blitAntiH(int x, int y, int width, const SkAlpha alpha) override; 288 289 int getWidth() override; 290 291 void flush_if_y_changed(SkFixed y, SkFixed nextY) override { 292 if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) { 293 this->flush(); 294 } 295 } 296 297 protected: 298 SkBlitter* fRealBlitter; 299 300 /// Current y coordinate 301 int fCurrY; 302 /// Widest row of region to be blitted 303 int fWidth; 304 /// Leftmost x coordinate in any row 305 int fLeft; 306 /// Initial y coordinate (top of bounds). 307 int fTop; 308 309 // The next three variables are used to track a circular buffer that 310 // contains the values used in SkAlphaRuns. These variables should only 311 // ever be updated in advanceRuns(), and fRuns should always point to 312 // a valid SkAlphaRuns... 313 int fRunsToBuffer; 314 void* fRunsBuffer; 315 int fCurrentRun; 316 SkAlphaRuns fRuns; 317 318 int fOffsetX; 319 320 inline bool check(int x, int width) const { 321 #ifdef SK_DEBUG 322 if (x < 0 || x + width > fWidth) { 323 // SkDebugf("Ignore x = %d, width = %d\n", x, width); 324 } 325 #endif 326 return (x >= 0 && x + width <= fWidth); 327 } 328 329 // extra one to store the zero at the end 330 inline int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); } 331 332 // This function updates the fRuns variable to point to the next buffer space 333 // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun 334 // and resets fRuns to point to an empty scanline. 335 inline void advanceRuns() { 336 const size_t kRunsSz = this->getRunsSz(); 337 fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer; 338 fRuns.fRuns = reinterpret_cast<int16_t*>( 339 reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz); 340 fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1); 341 fRuns.reset(fWidth); 342 } 343 344 // Blitting 0xFF and 0 is much faster so we snap alphas close to them 345 inline SkAlpha snapAlpha(SkAlpha alpha) { 346 return alpha > 247 ? 0xFF : alpha < 8 ? 0 : alpha; 347 } 348 349 inline void flush() { 350 if (fCurrY >= fTop) { 351 SkASSERT(fCurrentRun < fRunsToBuffer); 352 for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) { 353 // It seems that blitting 255 or 0 is much faster than blitting 254 or 1 354 fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]); 355 } 356 if (!fRuns.empty()) { 357 // SkDEBUGCODE(fRuns.dump();) 358 fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns); 359 this->advanceRuns(); 360 fOffsetX = 0; 361 } 362 fCurrY = fTop - 1; 363 } 364 } 365 366 inline void checkY(int y) { 367 if (y != fCurrY) { 368 this->flush(); 369 fCurrY = y; 370 } 371 } 372 }; 373 374 RunBasedAdditiveBlitter::RunBasedAdditiveBlitter( 375 SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) { 376 fRealBlitter = realBlitter; 377 378 SkIRect sectBounds; 379 if (isInverse) { 380 // We use the clip bounds instead of the ir, since we may be asked to 381 //draw outside of the rect when we're a inverse filltype 382 sectBounds = clipBounds; 383 } else { 384 if (!sectBounds.intersect(ir, clipBounds)) { 385 sectBounds.setEmpty(); 386 } 387 } 388 389 const int left = sectBounds.left(); 390 const int right = sectBounds.right(); 391 392 fLeft = left; 393 fWidth = right - left; 394 fTop = sectBounds.top(); 395 fCurrY = fTop - 1; 396 397 fRunsToBuffer = realBlitter->requestRowsPreserved(); 398 fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz()); 399 fCurrentRun = -1; 400 401 this->advanceRuns(); 402 403 fOffsetX = 0; 404 } 405 406 RunBasedAdditiveBlitter::~RunBasedAdditiveBlitter() { 407 this->flush(); 408 } 409 410 SkBlitter* RunBasedAdditiveBlitter::getRealBlitter(bool forceRealBlitter) { 411 return fRealBlitter; 412 } 413 414 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) { 415 checkY(y); 416 x -= fLeft; 417 418 if (x < 0) { 419 len += x; 420 antialias -= x; 421 x = 0; 422 } 423 len = SkTMin(len, fWidth - x); 424 SkASSERT(check(x, len)); 425 426 if (x < fOffsetX) { 427 fOffsetX = 0; 428 } 429 430 fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run 431 for (int i = 0; i < len; i += fRuns.fRuns[x + i]) { 432 for (int j = 1; j < fRuns.fRuns[x + i]; j++) { 433 fRuns.fRuns[x + i + j] = 1; 434 fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i]; 435 } 436 fRuns.fRuns[x + i] = 1; 437 } 438 for (int i = 0; i < len; ++i) { 439 addAlpha(&fRuns.fAlpha[x + i], antialias[i]); 440 } 441 } 442 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) { 443 checkY(y); 444 x -= fLeft; 445 446 if (x < fOffsetX) { 447 fOffsetX = 0; 448 } 449 450 if (this->check(x, 1)) { 451 fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX); 452 } 453 } 454 455 void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) { 456 checkY(y); 457 x -= fLeft; 458 459 if (x < fOffsetX) { 460 fOffsetX = 0; 461 } 462 463 if (this->check(x, width)) { 464 fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX); 465 } 466 } 467 468 int RunBasedAdditiveBlitter::getWidth() { return fWidth; } 469 470 // This exists specifically for concave path filling. 471 // In those cases, we can easily accumulate alpha greater than 0xFF. 472 class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter { 473 public: 474 SafeRLEAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, 475 bool isInverse) : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {} 476 477 void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override; 478 void blitAntiH(int x, int y, const SkAlpha alpha) override; 479 void blitAntiH(int x, int y, int width, const SkAlpha alpha) override; 480 }; 481 482 void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) { 483 checkY(y); 484 x -= fLeft; 485 486 if (x < 0) { 487 len += x; 488 antialias -= x; 489 x = 0; 490 } 491 len = SkTMin(len, fWidth - x); 492 SkASSERT(check(x, len)); 493 494 if (x < fOffsetX) { 495 fOffsetX = 0; 496 } 497 498 fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run 499 for (int i = 0; i < len; i += fRuns.fRuns[x + i]) { 500 for (int j = 1; j < fRuns.fRuns[x + i]; j++) { 501 fRuns.fRuns[x + i + j] = 1; 502 fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i]; 503 } 504 fRuns.fRuns[x + i] = 1; 505 } 506 for (int i = 0; i < len; ++i) { 507 safelyAddAlpha(&fRuns.fAlpha[x + i], antialias[i]); 508 } 509 } 510 511 void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) { 512 checkY(y); 513 x -= fLeft; 514 515 if (x < fOffsetX) { 516 fOffsetX = 0; 517 } 518 519 if (check(x, 1)) { 520 // Break the run 521 fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX); 522 safelyAddAlpha(&fRuns.fAlpha[x], alpha); 523 } 524 } 525 526 void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) { 527 checkY(y); 528 x -= fLeft; 529 530 if (x < fOffsetX) { 531 fOffsetX = 0; 532 } 533 534 if (check(x, width)) { 535 // Break the run 536 fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX); 537 for(int i = x; i < x + width; i += fRuns.fRuns[i]) { 538 safelyAddAlpha(&fRuns.fAlpha[i], alpha); 539 } 540 } 541 } 542 543 /////////////////////////////////////////////////////////////////////////////// 544 545 // Return the alpha of a trapezoid whose height is 1 546 static inline SkAlpha trapezoidToAlpha(SkFixed l1, SkFixed l2) { 547 SkASSERT(l1 >= 0 && l2 >= 0); 548 return (l1 + l2) >> 9; 549 } 550 551 // The alpha of right-triangle (a, a*b), in 16 bits 552 static inline SkFixed partialTriangleToAlpha16(SkFixed a, SkFixed b) { 553 SkASSERT(a <= SK_Fixed1); 554 // SkFixedMul(SkFixedMul(a, a), b) >> 1 555 // return ((((a >> 8) * (a >> 8)) >> 8) * (b >> 8)) >> 1; 556 return (a >> 11) * (a >> 11) * (b >> 11); 557 } 558 559 // The alpha of right-triangle (a, a*b) 560 static inline SkAlpha partialTriangleToAlpha(SkFixed a, SkFixed b) { 561 return partialTriangleToAlpha16(a, b) >> 8; 562 } 563 564 static inline SkAlpha getPartialAlpha(SkAlpha alpha, SkFixed partialHeight) { 565 return SkToU8(SkFixedRoundToInt(alpha * partialHeight)); 566 } 567 568 static inline SkAlpha getPartialAlpha(SkAlpha alpha, SkAlpha fullAlpha) { 569 return ((uint16_t)alpha * fullAlpha) >> 8; 570 } 571 572 // For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right. 573 // For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255. 574 // This is rarely the problem so we'll only use this for blitting rectangles. 575 static inline SkAlpha f2a(SkFixed f) { 576 SkASSERT(f <= SK_Fixed1); 577 return getPartialAlpha(0xFF, f); 578 } 579 580 // Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1), 581 // approximate (very coarsely) the x coordinate of the intersection. 582 static inline SkFixed approximateIntersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) { 583 if (l1 > r1) { SkTSwap(l1, r1); } 584 if (l2 > r2) { SkTSwap(l2, r2); } 585 return (SkTMax(l1, l2) + SkTMin(r1, r2)) >> 1; 586 } 587 588 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0] 589 static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r, 590 SkFixed dY, SkAlpha fullAlpha) { 591 SkASSERT(l <= r); 592 SkASSERT(l >> 16 == 0); 593 int R = SkFixedCeilToInt(r); 594 if (R == 0) { 595 return; 596 } else if (R == 1) { 597 alphas[0] = getPartialAlpha(((R << 17) - l - r) >> 9, fullAlpha); 598 } else { 599 SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle 600 SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle 601 SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle 602 alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha 603 SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle 604 for (int i = 1; i < R - 1; ++i) { 605 alphas[i] = alpha16 >> 8; 606 alpha16 += dY; 607 } 608 alphas[R - 1] = fullAlpha - partialTriangleToAlpha(last, dY); 609 } 610 } 611 612 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0] 613 static inline void computeAlphaBelowLine( 614 SkAlpha* alphas, SkFixed l, SkFixed r, SkFixed dY, SkAlpha fullAlpha) { 615 SkASSERT(l <= r); 616 SkASSERT(l >> 16 == 0); 617 int R = SkFixedCeilToInt(r); 618 if (R == 0) { 619 return; 620 } else if (R == 1) { 621 alphas[0] = getPartialAlpha(trapezoidToAlpha(l, r), fullAlpha); 622 } else { 623 SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle 624 SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle 625 SkFixed lastH = SkFixedMul(last, dY); // vertical edge of the right-most triangle 626 alphas[R-1] = SkFixedMul(last, lastH) >> 9; // triangle alpha 627 SkFixed alpha16 = lastH + (dY >> 1); // rectangle plus triangle 628 for (int i = R - 2; i > 0; i--) { 629 alphas[i] = alpha16 >> 8; 630 alpha16 += dY; 631 } 632 alphas[0] = fullAlpha - partialTriangleToAlpha(first, dY); 633 } 634 } 635 636 // Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha 637 static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter, int y, int x, 638 SkAlpha alpha, SkAlpha fullAlpha, SkAlpha* maskRow, 639 bool isUsingMask, bool noRealBlitter, bool needSafeCheck) { 640 if (isUsingMask) { 641 if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths 642 maskRow[x] = alpha; 643 } else if (needSafeCheck) { 644 safelyAddAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha)); 645 } else { 646 addAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha)); 647 } 648 } else { 649 if (fullAlpha == 0xFF && !noRealBlitter) { 650 blitter->getRealBlitter()->blitV(x, y, 1, alpha); 651 } else { 652 blitter->blitAntiH(x, y, getPartialAlpha(alpha, fullAlpha)); 653 } 654 } 655 } 656 657 static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter, int y, int x, 658 SkAlpha a1, SkAlpha a2, SkAlpha fullAlpha, SkAlpha* maskRow, 659 bool isUsingMask, bool noRealBlitter, bool needSafeCheck) { 660 if (isUsingMask) { 661 if (needSafeCheck) { 662 safelyAddAlpha(&maskRow[x], a1); 663 safelyAddAlpha(&maskRow[x + 1], a2); 664 } else { 665 addAlpha(&maskRow[x], a1); 666 addAlpha(&maskRow[x + 1], a2); 667 } 668 } else { 669 if (fullAlpha == 0xFF && !noRealBlitter) { 670 blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2); 671 } else { 672 blitter->blitAntiH(x, y, a1); 673 blitter->blitAntiH(x + 1, y, a2); 674 } 675 } 676 } 677 678 // It's important that this is inline. Otherwise it'll be much slower. 679 static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter, int y, int x, int len, 680 SkAlpha fullAlpha, SkAlpha* maskRow, bool isUsingMask, 681 bool noRealBlitter, bool needSafeCheck) { 682 if (isUsingMask) { 683 for (int i = 0; i < len; ++i) { 684 if (needSafeCheck) { 685 safelyAddAlpha(&maskRow[x + i], fullAlpha); 686 } else { 687 addAlpha(&maskRow[x + i], fullAlpha); 688 } 689 } 690 } else { 691 if (fullAlpha == 0xFF && !noRealBlitter) { 692 blitter->getRealBlitter()->blitH(x, y, len); 693 } else { 694 blitter->blitAntiH(x, y, len, fullAlpha); 695 } 696 } 697 } 698 699 static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y, 700 SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr, 701 SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha, SkAlpha* maskRow, 702 bool isUsingMask, bool noRealBlitter, bool needSafeCheck) { 703 int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr); 704 int len = R - L; 705 706 if (len == 1) { 707 SkAlpha alpha = trapezoidToAlpha(ur - ul, lr - ll); 708 blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, isUsingMask, noRealBlitter, 709 needSafeCheck); 710 return; 711 } 712 713 // SkDebugf("y = %d, len = %d, ul = %f, ur = %f, ll = %f, lr = %f\n", y, len, 714 // SkFixedToFloat(ul), SkFixedToFloat(ur), SkFixedToFloat(ll), SkFixedToFloat(lr)); 715 716 const int kQuickLen = 31; 717 // This is faster than SkAutoSMalloc<1024> 718 char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)]; 719 SkAlpha* alphas; 720 721 if (len <= kQuickLen) { 722 alphas = (SkAlpha*)quickMemory; 723 } else { 724 alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))]; 725 } 726 727 SkAlpha* tempAlphas = alphas + len + 1; 728 int16_t* runs = (int16_t*)(alphas + (len + 1) * 2); 729 730 for (int i = 0; i < len; ++i) { 731 runs[i] = 1; 732 alphas[i] = fullAlpha; 733 } 734 runs[len] = 0; 735 736 int uL = SkFixedFloorToInt(ul); 737 int lL = SkFixedCeilToInt(ll); 738 if (uL + 2 == lL) { // We only need to compute two triangles, accelerate this special case 739 SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul; 740 SkFixed second = ll - ul - first; 741 SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, lDY); 742 SkAlpha a2 = partialTriangleToAlpha(second, lDY); 743 alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0; 744 alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0; 745 } else { 746 computeAlphaBelowLine(tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), 747 lDY, fullAlpha); 748 for (int i = uL; i < lL; ++i) { 749 if (alphas[i - L] > tempAlphas[i - L]) { 750 alphas[i - L] -= tempAlphas[i - L]; 751 } else { 752 alphas[i - L] = 0; 753 } 754 } 755 } 756 757 int uR = SkFixedFloorToInt(ur); 758 int lR = SkFixedCeilToInt(lr); 759 if (uR + 2 == lR) { // We only need to compute two triangles, accelerate this special case 760 SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur; 761 SkFixed second = lr - ur - first; 762 SkAlpha a1 = partialTriangleToAlpha(first, rDY); 763 SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, rDY); 764 alphas[len-2] = alphas[len-2] > a1 ? alphas[len-2] - a1 : 0; 765 alphas[len-1] = alphas[len-1] > a2 ? alphas[len-1] - a2 : 0; 766 } else { 767 computeAlphaAboveLine(tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), 768 rDY, fullAlpha); 769 for (int i = uR; i < lR; ++i) { 770 if (alphas[i - L] > tempAlphas[i - L]) { 771 alphas[i - L] -= tempAlphas[i - L]; 772 } else { 773 alphas[i - L] = 0; 774 } 775 } 776 } 777 778 if (isUsingMask) { 779 for (int i = 0; i < len; ++i) { 780 if (needSafeCheck) { 781 safelyAddAlpha(&maskRow[L + i], alphas[i]); 782 } else { 783 addAlpha(&maskRow[L + i], alphas[i]); 784 } 785 } 786 } else { 787 if (fullAlpha == 0xFF && !noRealBlitter) { 788 // Real blitter is faster than RunBasedAdditiveBlitter 789 blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs); 790 } else { 791 blitter->blitAntiH(L, y, alphas, len); 792 } 793 } 794 795 if (len > kQuickLen) { 796 delete [] alphas; 797 } 798 } 799 800 static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter, int y, 801 SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr, 802 SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha, 803 SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter = false, 804 bool needSafeCheck = false) { 805 SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolte value 806 807 if (ul > ur) { 808 #ifdef SK_DEBUG 809 // SkDebugf("ul = %f > ur = %f!\n", SkFixedToFloat(ul), SkFixedToFloat(ur)); 810 #endif 811 return; 812 } 813 814 // Edge crosses. Approximate it. This should only happend due to precision limit, 815 // so the approximation could be very coarse. 816 if (ll > lr) { 817 #ifdef SK_DEBUG 818 // SkDebugf("approximate intersection: %d %f %f\n", y, 819 // SkFixedToFloat(ll), SkFixedToFloat(lr)); 820 #endif 821 ll = lr = approximateIntersection(ul, ll, ur, lr); 822 } 823 824 if (ul == ur && ll == lr) { 825 return; // empty trapzoid 826 } 827 828 // We're going to use the left line ul-ll and the rite line ur-lr 829 // to exclude the area that's not covered by the path. 830 // Swapping (ul, ll) or (ur, lr) won't affect that exclusion 831 // so we'll do that for simplicity. 832 if (ul > ll) { SkTSwap(ul, ll); } 833 if (ur > lr) { SkTSwap(ur, lr); } 834 835 SkFixed joinLeft = SkFixedCeilToFixed(ll); 836 SkFixed joinRite = SkFixedFloorToFixed(ur); 837 if (joinLeft <= joinRite) { // There's a rect from joinLeft to joinRite that we can blit 838 if (ul < joinLeft) { 839 int len = SkFixedCeilToInt(joinLeft - ul); 840 if (len == 1) { 841 SkAlpha alpha = trapezoidToAlpha(joinLeft - ul, joinLeft - ll); 842 blit_single_alpha(blitter, y, ul >> 16, alpha, fullAlpha, maskRow, isUsingMask, 843 noRealBlitter, needSafeCheck); 844 } else if (len == 2) { 845 SkFixed first = joinLeft - SK_Fixed1 - ul; 846 SkFixed second = ll - ul - first; 847 SkAlpha a1 = partialTriangleToAlpha(first, lDY); 848 SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, lDY); 849 blit_two_alphas(blitter, y, ul >> 16, a1, a2, fullAlpha, maskRow, isUsingMask, 850 noRealBlitter, needSafeCheck); 851 } else { 852 blit_aaa_trapezoid_row(blitter, y, ul, joinLeft, ll, joinLeft, lDY, SK_MaxS32, 853 fullAlpha, maskRow, isUsingMask, noRealBlitter, 854 needSafeCheck); 855 } 856 } 857 // SkAAClip requires that we blit from left to right. 858 // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite] 859 if (joinLeft < joinRite) { 860 blit_full_alpha(blitter, y, SkFixedFloorToInt(joinLeft), 861 SkFixedFloorToInt(joinRite - joinLeft), 862 fullAlpha, maskRow, isUsingMask, noRealBlitter, needSafeCheck); 863 } 864 if (lr > joinRite) { 865 int len = SkFixedCeilToInt(lr - joinRite); 866 if (len == 1) { 867 SkAlpha alpha = trapezoidToAlpha(ur - joinRite, lr - joinRite); 868 blit_single_alpha(blitter, y, joinRite >> 16, alpha, fullAlpha, maskRow, 869 isUsingMask, noRealBlitter, needSafeCheck); 870 } else if (len == 2) { 871 SkFixed first = joinRite + SK_Fixed1 - ur; 872 SkFixed second = lr - ur - first; 873 SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, rDY); 874 SkAlpha a2 = partialTriangleToAlpha(second, rDY); 875 blit_two_alphas(blitter, y, joinRite >> 16, a1, a2, fullAlpha, maskRow, 876 isUsingMask, noRealBlitter, needSafeCheck); 877 } else { 878 blit_aaa_trapezoid_row(blitter, y, joinRite, ur, joinRite, lr, SK_MaxS32, rDY, 879 fullAlpha, maskRow, isUsingMask, noRealBlitter, 880 needSafeCheck); 881 } 882 } 883 } else { 884 blit_aaa_trapezoid_row(blitter, y, ul, ur, ll, lr, lDY, rDY, fullAlpha, maskRow, 885 isUsingMask, noRealBlitter, needSafeCheck); 886 } 887 } 888 889 /////////////////////////////////////////////////////////////////////////////// 890 891 static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) { 892 int valuea = a.fUpperY; 893 int valueb = b.fUpperY; 894 895 if (valuea == valueb) { 896 valuea = a.fX; 897 valueb = b.fX; 898 } 899 900 if (valuea == valueb) { 901 valuea = a.fDX; 902 valueb = b.fDX; 903 } 904 905 return valuea < valueb; 906 } 907 908 static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) { 909 SkTQSort(list, list + count - 1); 910 911 // now make the edges linked in sorted order 912 for (int i = 1; i < count; ++i) { 913 list[i - 1]->fNext = list[i]; 914 list[i]->fPrev = list[i - 1]; 915 } 916 917 *last = list[count - 1]; 918 return list[0]; 919 } 920 921 #ifdef SK_DEBUG 922 static void validate_sort(const SkAnalyticEdge* edge) { 923 SkFixed y = SkIntToFixed(-32768); 924 925 while (edge->fUpperY != SK_MaxS32) { 926 edge->validate(); 927 SkASSERT(y <= edge->fUpperY); 928 929 y = edge->fUpperY; 930 edge = (SkAnalyticEdge*)edge->fNext; 931 } 932 } 933 #else 934 #define validate_sort(edge) 935 #endif 936 937 // For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough 938 // For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are 939 // relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy 940 static inline bool isSmoothEnough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) { 941 if (thisEdge->fCurveCount < 0) { 942 const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge; 943 int ddshift = cEdge.fCurveShift; 944 return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift && 945 SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift && 946 // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift 947 (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1; 948 } else if (thisEdge->fCurveCount > 0) { 949 const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge; 950 return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) && 951 SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) && 952 // current Dy is (fQDy - fQDDy) >> shift 953 (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift 954 >= SK_Fixed1; 955 } 956 return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 && // DDx should be small 957 nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1; // Dy should be large 958 } 959 960 // Check if the leftE and riteE are changing smoothly in terms of fDX. 961 // If yes, we can later skip the fractional y and directly jump to integer y. 962 static inline bool isSmoothEnough(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE, 963 SkAnalyticEdge* currE, int stop_y) { 964 if (currE->fUpperY >= stop_y << 16) { 965 return false; // We're at the end so we won't skip anything 966 } 967 if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) { 968 return isSmoothEnough(leftE, currE, stop_y); // Only leftE is changing 969 } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) { 970 return isSmoothEnough(riteE, currE, stop_y); // Only riteE is changing 971 } 972 973 // Now both edges are changing, find the second next edge 974 SkAnalyticEdge* nextCurrE = currE->fNext; 975 if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end 976 return false; 977 } 978 // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not. 979 if (nextCurrE->fUpperX < currE->fUpperX) { 980 SkTSwap(currE, nextCurrE); 981 } 982 return isSmoothEnough(leftE, currE, stop_y) && isSmoothEnough(riteE, nextCurrE, stop_y); 983 } 984 985 static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, 986 AdditiveBlitter* blitter, int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound, 987 bool isUsingMask) { 988 validate_sort((SkAnalyticEdge*)prevHead->fNext); 989 990 SkAnalyticEdge* leftE = (SkAnalyticEdge*) prevHead->fNext; 991 SkAnalyticEdge* riteE = (SkAnalyticEdge*) leftE->fNext; 992 SkAnalyticEdge* currE = (SkAnalyticEdge*) riteE->fNext; 993 994 SkFixed y = SkTMax(leftE->fUpperY, riteE->fUpperY); 995 996 #ifdef SK_DEBUG 997 int frac_y_cnt = 0; 998 int total_y_cnt = 0; 999 #endif 1000 1001 for (;;) { 1002 // We have to check fLowerY first because some edges might be alone (e.g., there's only 1003 // a left edge but no right edge in a given y scan line) due to precision limit. 1004 while (leftE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges 1005 if (!leftE->update(y)) { 1006 if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) { 1007 goto END_WALK; 1008 } 1009 leftE = currE; 1010 currE = (SkAnalyticEdge*)currE->fNext; 1011 } 1012 } 1013 while (riteE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges 1014 if (!riteE->update(y)) { 1015 if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) { 1016 goto END_WALK; 1017 } 1018 riteE = currE; 1019 currE = (SkAnalyticEdge*)currE->fNext; 1020 } 1021 } 1022 1023 SkASSERT(leftE); 1024 SkASSERT(riteE); 1025 1026 // check our bottom clip 1027 if (SkFixedFloorToInt(y) >= stop_y) { 1028 break; 1029 } 1030 1031 SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y); 1032 SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y); 1033 1034 leftE->goY(y); 1035 riteE->goY(y); 1036 1037 if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && 1038 leftE->fDX > riteE->fDX)) { 1039 SkTSwap(leftE, riteE); 1040 } 1041 1042 SkFixed local_bot_fixed = SkMin32(leftE->fLowerY, riteE->fLowerY); 1043 if (isSmoothEnough(leftE, riteE, currE, stop_y)) { 1044 local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed); 1045 } 1046 local_bot_fixed = SkMin32(local_bot_fixed, SkIntToFixed(stop_y)); 1047 1048 SkFixed left = SkTMax(leftBound, leftE->fX); 1049 SkFixed dLeft = leftE->fDX; 1050 SkFixed rite = SkTMin(riteBound, riteE->fX); 1051 SkFixed dRite = riteE->fDX; 1052 if (0 == (dLeft | dRite)) { 1053 int fullLeft = SkFixedCeilToInt(left); 1054 int fullRite = SkFixedFloorToInt(rite); 1055 SkFixed partialLeft = SkIntToFixed(fullLeft) - left; 1056 SkFixed partialRite = rite - SkIntToFixed(fullRite); 1057 int fullTop = SkFixedCeilToInt(y); 1058 int fullBot = SkFixedFloorToInt(local_bot_fixed); 1059 SkFixed partialTop = SkIntToFixed(fullTop) - y; 1060 SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot); 1061 if (fullTop > fullBot) { // The rectangle is within one pixel height... 1062 partialTop -= (SK_Fixed1 - partialBot); 1063 partialBot = 0; 1064 } 1065 1066 if (fullRite >= fullLeft) { 1067 if (partialTop > 0) { // blit first partial row 1068 if (partialLeft > 0) { 1069 blitter->blitAntiH(fullLeft - 1, fullTop - 1, 1070 f2a(SkFixedMul(partialTop, partialLeft))); 1071 } 1072 blitter->blitAntiH(fullLeft, fullTop - 1, fullRite - fullLeft, 1073 f2a(partialTop)); 1074 if (partialRite > 0) { 1075 blitter->blitAntiH(fullRite, fullTop - 1, 1076 f2a(SkFixedMul(partialTop, partialRite))); 1077 } 1078 blitter->flush_if_y_changed(y, y + partialTop); 1079 } 1080 1081 // Blit all full-height rows from fullTop to fullBot 1082 if (fullBot > fullTop && 1083 // SkAAClip cannot handle the empty rect so check the non-emptiness here 1084 // (bug chromium:662800) 1085 (fullRite > fullLeft || f2a(partialLeft) > 0 || f2a(partialRite) > 0)) { 1086 blitter->getRealBlitter()->blitAntiRect(fullLeft - 1, fullTop, 1087 fullRite - fullLeft, fullBot - fullTop, 1088 f2a(partialLeft), f2a(partialRite)); 1089 } 1090 1091 if (partialBot > 0) { // blit last partial row 1092 if (partialLeft > 0) { 1093 blitter->blitAntiH(fullLeft - 1, fullBot, 1094 f2a(SkFixedMul(partialBot, partialLeft))); 1095 } 1096 blitter->blitAntiH(fullLeft, fullBot, fullRite - fullLeft, f2a(partialBot)); 1097 if (partialRite > 0) { 1098 blitter->blitAntiH(fullRite, fullBot, 1099 f2a(SkFixedMul(partialBot, partialRite))); 1100 } 1101 } 1102 } else { // left and rite are within the same pixel 1103 if (partialTop > 0) { 1104 blitter->blitAntiH(fullLeft - 1, fullTop - 1, 1, 1105 f2a(SkFixedMul(partialTop, rite - left))); 1106 blitter->flush_if_y_changed(y, y + partialTop); 1107 } 1108 if (fullBot > fullTop) { 1109 blitter->getRealBlitter()->blitV(fullLeft - 1, fullTop, fullBot - fullTop, 1110 f2a(rite - left)); 1111 } 1112 if (partialBot > 0) { 1113 blitter->blitAntiH(fullLeft - 1, fullBot, 1, 1114 f2a(SkFixedMul(partialBot, rite - left))); 1115 } 1116 } 1117 1118 y = local_bot_fixed; 1119 } else { 1120 // The following constant are used to snap X 1121 // We snap X mainly for speedup (no tiny triangle) and 1122 // avoiding edge cases caused by precision errors 1123 const SkFixed kSnapDigit = SK_Fixed1 >> 4; 1124 const SkFixed kSnapHalf = kSnapDigit >> 1; 1125 const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1)); 1126 left += kSnapHalf; rite += kSnapHalf; // For fast rounding 1127 1128 // Number of blit_trapezoid_row calls we'll have 1129 int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y); 1130 #ifdef SK_DEBUG 1131 total_y_cnt += count; 1132 frac_y_cnt += ((int)(y & 0xFFFF0000) != y); 1133 if ((int)(y & 0xFFFF0000) != y) { 1134 // SkDebugf("frac_y = %f\n", SkFixedToFloat(y)); 1135 } 1136 #endif 1137 1138 // If we're using mask blitter, we advance the mask row in this function 1139 // to save some "if" condition checks. 1140 SkAlpha* maskRow = nullptr; 1141 if (isUsingMask) { 1142 maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16); 1143 } 1144 1145 // Instead of writing one loop that handles both partial-row blit_trapezoid_row 1146 // and full-row trapezoid_row together, we use the following 3-stage flow to 1147 // handle partial-row blit and full-row blit separately. It will save us much time 1148 // on changing y, left, and rite. 1149 if (count > 1) { 1150 if ((int)(y & 0xFFFF0000) != y) { // There's a partial-row on the top 1151 count--; 1152 SkFixed nextY = SkFixedCeilToFixed(y + 1); 1153 SkFixed dY = nextY - y; 1154 SkFixed nextLeft = left + SkFixedMul(dLeft, dY); 1155 SkFixed nextRite = rite + SkFixedMul(dRite, dY); 1156 SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound && 1157 (nextLeft & kSnapMask) >= leftBound && 1158 (nextRite & kSnapMask) <= riteBound); 1159 blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask, 1160 nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY, 1161 getPartialAlpha(0xFF, dY), maskRow, isUsingMask); 1162 blitter->flush_if_y_changed(y, nextY); 1163 left = nextLeft; rite = nextRite; y = nextY; 1164 } 1165 1166 while (count > 1) { // Full rows in the middle 1167 count--; 1168 if (isUsingMask) { 1169 maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16); 1170 } 1171 SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite; 1172 SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound && 1173 (nextLeft & kSnapMask) >= leftBound && 1174 (nextRite & kSnapMask) <= riteBound); 1175 blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask, 1176 nextLeft & kSnapMask, nextRite & kSnapMask, 1177 leftE->fDY, riteE->fDY, 0xFF, maskRow, isUsingMask); 1178 blitter->flush_if_y_changed(y, nextY); 1179 left = nextLeft; rite = nextRite; y = nextY; 1180 } 1181 } 1182 1183 if (isUsingMask) { 1184 maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16); 1185 } 1186 1187 SkFixed dY = local_bot_fixed - y; // partial-row on the bottom 1188 SkASSERT(dY <= SK_Fixed1); 1189 // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound. 1190 // Take them back into the bound here. 1191 // Note that we substract kSnapHalf later so we have to add them to leftBound/riteBound 1192 SkFixed nextLeft = SkTMax(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf); 1193 SkFixed nextRite = SkTMin(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf); 1194 SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound && 1195 (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound); 1196 blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask, 1197 nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY, 1198 getPartialAlpha(0xFF, dY), maskRow, isUsingMask); 1199 blitter->flush_if_y_changed(y, local_bot_fixed); 1200 left = nextLeft; rite = nextRite; y = local_bot_fixed; 1201 left -= kSnapHalf; rite -= kSnapHalf; 1202 } 1203 1204 leftE->fX = left; 1205 riteE->fX = rite; 1206 leftE->fY = riteE->fY = y; 1207 } 1208 1209 END_WALK: 1210 ; 1211 #ifdef SK_DEBUG 1212 // SkDebugf("frac_y_cnt = %d, total_y_cnt = %d\n", frac_y_cnt, total_y_cnt); 1213 #endif 1214 } 1215 1216 /////////////////////////////////////////////////////////////////////////////// 1217 1218 static inline void updateNextNextY(SkFixed y, SkFixed nextY, SkFixed* nextNextY) { 1219 *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY; 1220 } 1221 1222 static inline void checkIntersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) 1223 { 1224 if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) { 1225 *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy); 1226 } 1227 } 1228 1229 static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) { 1230 if (newEdge->fUpperY > y) { 1231 updateNextNextY(newEdge->fUpperY, y, nextNextY); 1232 return; 1233 } 1234 SkAnalyticEdge* prev = newEdge->fPrev; 1235 if (prev->fX <= newEdge->fX) { 1236 while (newEdge->fUpperY <= y) { 1237 checkIntersection(newEdge, y, nextNextY); 1238 updateNextNextY(newEdge->fLowerY, y, nextNextY); 1239 newEdge = newEdge->fNext; 1240 } 1241 updateNextNextY(newEdge->fUpperY, y, nextNextY); 1242 return; 1243 } 1244 // find first x pos to insert 1245 SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX); 1246 //insert the lot, fixing up the links as we go 1247 do { 1248 SkAnalyticEdge* next = newEdge->fNext; 1249 do { 1250 if (start->fNext == newEdge) { 1251 goto nextEdge; 1252 } 1253 SkAnalyticEdge* after = start->fNext; 1254 if (after->fX >= newEdge->fX) { 1255 break; 1256 } 1257 SkASSERT(start != after); 1258 start = after; 1259 } while (true); 1260 remove_edge(newEdge); 1261 insert_edge_after(newEdge, start); 1262 nextEdge: 1263 checkIntersection(newEdge, y, nextNextY); 1264 updateNextNextY(newEdge->fLowerY, y, nextNextY); 1265 start = newEdge; 1266 newEdge = next; 1267 } while (newEdge->fUpperY <= y); 1268 updateNextNextY(newEdge->fUpperY, y, nextNextY); 1269 } 1270 1271 static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) { 1272 #ifdef SK_DEBUG 1273 while (edge->fUpperY <= y) { 1274 SkASSERT(edge->fPrev && edge->fNext); 1275 SkASSERT(edge->fPrev->fNext == edge); 1276 SkASSERT(edge->fNext->fPrev == edge); 1277 SkASSERT(edge->fUpperY <= edge->fLowerY); 1278 SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX); 1279 edge = edge->fNext; 1280 } 1281 #endif 1282 } 1283 1284 // Return true if prev->fX, next->fX are too close in the current pixel row. 1285 static inline bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) { 1286 // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false 1287 // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1, 1288 // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two 1289 // edges prev and next are within one pixel. 1290 constexpr SkFixed SLACK = 1291 #ifdef SK_SUPPORT_LEGACY_AA_BEHAVIOR 1292 0; 1293 #else 1294 SK_Fixed1; 1295 #endif 1296 1297 // Note that even if the following test failed, the edges might still be very close to each 1298 // other at some point within the current pixel row because of prev->fDX and next->fDX. 1299 // However, to handle that case, we have to sacrafice more performance. 1300 // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg) 1301 // so I'll ignore fDX for performance tradeoff. 1302 return next && prev && next->fUpperY < lowerY && prev->fX + SLACK >= 1303 next->fX - SkAbs32(next->fDX); 1304 // The following is more accurate but also slower. 1305 // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY && 1306 // prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX)); 1307 } 1308 1309 // This function exists for the case where the previous rite edge is removed because 1310 // its fLowerY <= nextY 1311 static inline bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) { 1312 return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll); 1313 } 1314 1315 static inline void blit_saved_trapezoid(SkAnalyticEdge* leftE, SkFixed lowerY, 1316 SkFixed lowerLeft, SkFixed lowerRite, 1317 AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter, 1318 SkFixed leftClip, SkFixed rightClip) { 1319 SkAnalyticEdge* riteE = leftE->fRiteE; 1320 SkASSERT(riteE); 1321 SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY); 1322 SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY)); 1323 int y = SkFixedFloorToInt(leftE->fSavedY); 1324 // Instead of using f2a(lowerY - leftE->fSavedY), we use the following fullAlpha 1325 // to elimiate cumulative error: if there are many fractional y scan lines within the 1326 // same row, the former may accumulate the rounding error while the later won't. 1327 SkAlpha fullAlpha = f2a(lowerY - SkIntToFixed(y)) - f2a(leftE->fSavedY - SkIntToFixed(y)); 1328 // We need fSavedDY because the (quad or cubic) edge might be updated 1329 blit_trapezoid_row(blitter, y, 1330 SkTMax(leftE->fSavedX, leftClip), SkTMin(riteE->fSavedX, rightClip), 1331 SkTMax(lowerLeft, leftClip), SkTMin(lowerRite, rightClip), 1332 leftE->fSavedDY, riteE->fSavedDY, fullAlpha, maskRow, isUsingMask, 1333 noRealBlitter || 1334 (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY) 1335 || edges_too_close(riteE, riteE->fNext, lowerY))), 1336 true); 1337 leftE->fRiteE = nullptr; 1338 } 1339 1340 static inline void deferred_blit(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE, 1341 SkFixed left, SkFixed leftDY, // don't save leftE->fX/fDY as they may have been updated 1342 SkFixed y, SkFixed nextY, bool isIntegralNextY, bool leftEnds, bool riteEnds, 1343 AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter, 1344 SkFixed leftClip, SkFixed rightClip, int yShift) { 1345 if (leftE->fRiteE && leftE->fRiteE != riteE) { 1346 // leftE's right edge changed. Blit the saved trapezoid. 1347 SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y); 1348 blit_saved_trapezoid(leftE, y, left, leftE->fRiteE->fX, 1349 blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip); 1350 } 1351 if (!leftE->fRiteE) { 1352 // Save and defer blitting the trapezoid 1353 SkASSERT(riteE->fRiteE == nullptr); 1354 SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY); 1355 SkASSERT(riteE->fNext == nullptr || riteE->fY == y); 1356 leftE->saveXY(left, y, leftDY); 1357 riteE->saveXY(riteE->fX, y, riteE->fDY); 1358 leftE->fRiteE = riteE; 1359 } 1360 SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY); 1361 riteE->goY(nextY, yShift); 1362 // Always blit when edges end or nextY is integral 1363 if (isIntegralNextY || leftEnds || riteEnds) { 1364 blit_saved_trapezoid(leftE, nextY, leftE->fX, riteE->fX, 1365 blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip); 1366 } 1367 } 1368 1369 static void aaa_walk_edges(SkAnalyticEdge* prevHead, SkAnalyticEdge* nextTail, 1370 SkPath::FillType fillType, AdditiveBlitter* blitter, int start_y, int stop_y, 1371 SkFixed leftClip, SkFixed rightClip, bool isUsingMask, bool forceRLE, bool useDeferred, 1372 bool skipIntersect) { 1373 prevHead->fX = prevHead->fUpperX = leftClip; 1374 nextTail->fX = nextTail->fUpperX = rightClip; 1375 SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y)); 1376 SkFixed nextNextY = SK_MaxS32; 1377 1378 { 1379 SkAnalyticEdge* edge; 1380 for(edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) { 1381 edge->goY(y); 1382 updateNextNextY(edge->fLowerY, y, &nextNextY); 1383 } 1384 updateNextNextY(edge->fUpperY, y, &nextNextY); 1385 } 1386 1387 // returns 1 for evenodd, -1 for winding, regardless of inverse-ness 1388 int windingMask = (fillType & 1) ? 1 : -1; 1389 1390 bool isInverse = SkPath::IsInverseFillType(fillType); 1391 1392 if (isInverse && SkIntToFixed(start_y) != y) { 1393 int width = SkFixedFloorToInt(rightClip - leftClip); 1394 if (SkFixedFloorToInt(y) != start_y) { 1395 blitter->getRealBlitter()->blitRect(SkFixedFloorToInt(leftClip), start_y, 1396 width, SkFixedFloorToInt(y) - start_y); 1397 start_y = SkFixedFloorToInt(y); 1398 } 1399 SkAlpha* maskRow = isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) 1400 : nullptr; 1401 blit_full_alpha(blitter, start_y, SkFixedFloorToInt(leftClip), width, 1402 f2a(y - SkIntToFixed(start_y)), maskRow, isUsingMask, false, false); 1403 } 1404 1405 while (true) { 1406 int w = 0; 1407 bool in_interval = isInverse; 1408 SkFixed prevX = prevHead->fX; 1409 SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1)); 1410 bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0; 1411 SkAnalyticEdge* currE = prevHead->fNext; 1412 SkAnalyticEdge* leftE = prevHead; 1413 SkFixed left = leftClip; 1414 SkFixed leftDY = 0; 1415 bool leftEnds = false; 1416 int prevRite = SkFixedFloorToInt(leftClip); 1417 1418 nextNextY = SK_MaxS32; 1419 1420 SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0); 1421 int yShift = 0; 1422 if ((nextY - y) & (SK_Fixed1 >> 2)) { 1423 yShift = 2; 1424 nextY = y + (SK_Fixed1 >> 2); 1425 } else if ((nextY - y) & (SK_Fixed1 >> 1)) { 1426 yShift = 1; 1427 SkASSERT(nextY == y + (SK_Fixed1 >> 1)); 1428 } 1429 1430 SkAlpha fullAlpha = f2a(nextY - y); 1431 1432 // If we're using mask blitter, we advance the mask row in this function 1433 // to save some "if" condition checks. 1434 SkAlpha* maskRow = nullptr; 1435 if (isUsingMask) { 1436 maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y)); 1437 } 1438 1439 SkASSERT(currE->fPrev == prevHead); 1440 validate_edges_for_y(currE, y); 1441 1442 // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement 1443 // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges) 1444 bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1); 1445 1446 while (currE->fUpperY <= y) { 1447 SkASSERT(currE->fLowerY >= nextY); 1448 SkASSERT(currE->fY == y); 1449 1450 w += currE->fWinding; 1451 bool prev_in_interval = in_interval; 1452 in_interval = !(w & windingMask) == isInverse; 1453 1454 bool isLeft = in_interval && !prev_in_interval; 1455 bool isRite = !in_interval && prev_in_interval; 1456 bool currEnds = currE->fLowerY == nextY; 1457 1458 if (useDeferred) { 1459 if (currE->fRiteE && !isLeft) { 1460 // currE is a left edge previously, but now it's not. 1461 // Blit the trapezoid between fSavedY and y. 1462 SkASSERT(currE->fRiteE->fY == y); 1463 blit_saved_trapezoid(currE, y, currE->fX, currE->fRiteE->fX, 1464 blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip); 1465 } 1466 if (leftE->fRiteE == currE && !isRite) { 1467 // currE is a right edge previously, but now it's not. 1468 // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it 1469 // in the previous if clause). Hence we blit the trapezoid. 1470 blit_saved_trapezoid(leftE, y, left, currE->fX, 1471 blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip); 1472 } 1473 } 1474 1475 if (isRite) { 1476 if (useDeferred) { 1477 deferred_blit(leftE, currE, left, leftDY, y, nextY, isIntegralNextY, 1478 leftEnds, currEnds, blitter, maskRow, isUsingMask, noRealBlitter, 1479 leftClip, rightClip, yShift); 1480 } else { 1481 SkFixed rite = currE->fX; 1482 currE->goY(nextY, yShift); 1483 #ifdef SK_SUPPORT_LEGACY_DELTA_AA 1484 leftE->fX = SkTMax(leftClip, leftE->fX); 1485 rite = SkTMin(rightClip, rite); 1486 currE->fX = SkTMin(rightClip, currE->fX); 1487 blit_trapezoid_row(blitter, y >> 16, left, rite, leftE->fX, currE->fX, 1488 #else 1489 SkFixed nextLeft = SkTMax(leftClip, leftE->fX); 1490 rite = SkTMin(rightClip, rite); 1491 SkFixed nextRite = SkTMin(rightClip, currE->fX); 1492 blit_trapezoid_row(blitter, y >> 16, left, rite, nextLeft, nextRite, 1493 #endif 1494 leftDY, currE->fDY, fullAlpha, maskRow, isUsingMask, 1495 noRealBlitter || (fullAlpha == 0xFF && ( 1496 edges_too_close(prevRite, left, leftE->fX) || 1497 edges_too_close(currE, currE->fNext, nextY) 1498 )), 1499 true); 1500 prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX)); 1501 } 1502 } else { 1503 if (isLeft) { 1504 left = SkTMax(currE->fX, leftClip); 1505 leftDY = currE->fDY; 1506 leftE = currE; 1507 leftEnds = leftE->fLowerY == nextY; 1508 } 1509 currE->goY(nextY, yShift); 1510 } 1511 1512 1513 SkAnalyticEdge* next = currE->fNext; 1514 SkFixed newX; 1515 1516 while (currE->fLowerY <= nextY) { 1517 if (currE->fCurveCount < 0) { 1518 SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE; 1519 cubicEdge->keepContinuous(); 1520 if (!cubicEdge->updateCubic()) { 1521 break; 1522 } 1523 } else if (currE->fCurveCount > 0) { 1524 SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE; 1525 quadEdge->keepContinuous(); 1526 if (!quadEdge->updateQuadratic()) { 1527 break; 1528 } 1529 } else { 1530 break; 1531 } 1532 } 1533 SkASSERT(currE->fY == nextY); 1534 1535 if (currE->fLowerY <= nextY) { 1536 remove_edge(currE); 1537 } else { 1538 updateNextNextY(currE->fLowerY, nextY, &nextNextY); 1539 newX = currE->fX; 1540 SkASSERT(currE->fLowerY > nextY); 1541 if (newX < prevX) { // ripple currE backwards until it is x-sorted 1542 // If the crossing edge is a right edge, blit the saved trapezoid. 1543 if (leftE->fRiteE == currE && useDeferred) { 1544 SkASSERT(leftE->fY == nextY && currE->fY == nextY); 1545 blit_saved_trapezoid(leftE, nextY, leftE->fX, currE->fX, 1546 blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip); 1547 } 1548 backward_insert_edge_based_on_x(currE); 1549 } else { 1550 prevX = newX; 1551 } 1552 if (!skipIntersect) { 1553 checkIntersection(currE, nextY, &nextNextY); 1554 } 1555 } 1556 1557 currE = next; 1558 SkASSERT(currE); 1559 } 1560 1561 // was our right-edge culled away? 1562 if (in_interval) { 1563 if (useDeferred) { 1564 deferred_blit(leftE, nextTail, left, leftDY, y, nextY, isIntegralNextY, 1565 leftEnds, false, blitter, maskRow, isUsingMask, noRealBlitter, 1566 leftClip, rightClip, yShift); 1567 } else { 1568 blit_trapezoid_row(blitter, y >> 16, 1569 left, rightClip, 1570 SkTMax(leftClip, leftE->fX), rightClip, 1571 leftDY, 0, fullAlpha, maskRow, isUsingMask, 1572 noRealBlitter || 1573 (fullAlpha == 0xFF && edges_too_close(leftE->fPrev, leftE, nextY)), 1574 true); 1575 } 1576 } 1577 1578 if (forceRLE) { 1579 ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY); 1580 } 1581 1582 y = nextY; 1583 if (y >= SkIntToFixed(stop_y)) { 1584 break; 1585 } 1586 1587 // now currE points to the first edge with a fUpperY larger than the previous y 1588 insert_new_edges(currE, y, &nextNextY); 1589 } 1590 } 1591 1592 static SK_ALWAYS_INLINE void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, 1593 AdditiveBlitter* blitter, int start_y, int stop_y, bool pathContainedInClip, 1594 bool isUsingMask, bool forceRLE) { // forceRLE implies that SkAAClip is calling us 1595 SkASSERT(blitter); 1596 1597 SkEdgeBuilder builder; 1598 int count = builder.build_edges(path, &clipRect, 0, pathContainedInClip, 1599 SkEdgeBuilder::kAnalyticEdge); 1600 SkAnalyticEdge** list = builder.analyticEdgeList(); 1601 1602 SkIRect rect = clipRect; 1603 if (0 == count) { 1604 if (path.isInverseFillType()) { 1605 /* 1606 * Since we are in inverse-fill, our caller has already drawn above 1607 * our top (start_y) and will draw below our bottom (stop_y). Thus 1608 * we need to restrict our drawing to the intersection of the clip 1609 * and those two limits. 1610 */ 1611 if (rect.fTop < start_y) { 1612 rect.fTop = start_y; 1613 } 1614 if (rect.fBottom > stop_y) { 1615 rect.fBottom = stop_y; 1616 } 1617 if (!rect.isEmpty()) { 1618 blitter->getRealBlitter()->blitRect(rect.fLeft, rect.fTop, 1619 rect.width(), rect.height()); 1620 } 1621 } 1622 return; 1623 } 1624 1625 SkAnalyticEdge headEdge, tailEdge, *last; 1626 // this returns the first and last edge after they're sorted into a dlink list 1627 SkAnalyticEdge* edge = sort_edges(list, count, &last); 1628 1629 headEdge.fRiteE = nullptr; 1630 headEdge.fPrev = nullptr; 1631 headEdge.fNext = edge; 1632 headEdge.fUpperY = headEdge.fLowerY = SK_MinS32; 1633 headEdge.fX = SK_MinS32; 1634 headEdge.fDX = 0; 1635 headEdge.fDY = SK_MaxS32; 1636 headEdge.fUpperX = SK_MinS32; 1637 edge->fPrev = &headEdge; 1638 1639 tailEdge.fRiteE = nullptr; 1640 tailEdge.fPrev = last; 1641 tailEdge.fNext = nullptr; 1642 tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32; 1643 tailEdge.fX = SK_MaxS32; 1644 tailEdge.fDX = 0; 1645 tailEdge.fDY = SK_MaxS32; 1646 tailEdge.fUpperX = SK_MaxS32; 1647 last->fNext = &tailEdge; 1648 1649 // now edge is the head of the sorted linklist 1650 1651 if (!pathContainedInClip && start_y < clipRect.fTop) { 1652 start_y = clipRect.fTop; 1653 } 1654 if (!pathContainedInClip && stop_y > clipRect.fBottom) { 1655 stop_y = clipRect.fBottom; 1656 } 1657 1658 SkFixed leftBound = SkIntToFixed(rect.fLeft); 1659 SkFixed rightBound = SkIntToFixed(rect.fRight); 1660 if (isUsingMask) { 1661 // If we're using mask, then we have to limit the bound within the path bounds. 1662 // Otherwise, the edge drift may access an invalid address inside the mask. 1663 SkIRect ir; 1664 path.getBounds().roundOut(&ir); 1665 leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft)); 1666 rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight)); 1667 } 1668 1669 if (!path.isInverseFillType() && path.isConvex() && count >= 2) { 1670 aaa_walk_convex_edges(&headEdge, blitter, start_y, stop_y, 1671 leftBound, rightBound, isUsingMask); 1672 } else { 1673 // Only use deferred blitting if there are many edges. 1674 bool useDeferred = count > 1675 (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4; 1676 1677 // We skip intersection computation if there are many points which probably already 1678 // give us enough fractional scan lines. 1679 bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2; 1680 1681 aaa_walk_edges(&headEdge, &tailEdge, path.getFillType(), blitter, start_y, stop_y, 1682 leftBound, rightBound, isUsingMask, forceRLE, useDeferred, skipIntersect); 1683 } 1684 } 1685 1686 /////////////////////////////////////////////////////////////////////////////// 1687 1688 void SkScan::AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir, 1689 const SkIRect& clipBounds, bool forceRLE) { 1690 bool containedInClip = clipBounds.contains(ir); 1691 bool isInverse = path.isInverseFillType(); 1692 1693 // The mask blitter (where we store intermediate alpha values directly in a mask, and then call 1694 // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when 1695 // the blit region is small enough (i.e., canHandleRect(ir)). When isInverse is true, the blit 1696 // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also 1697 // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple 1698 // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use 1699 // blitFatAntiRect to avoid the mask and its overhead. 1700 if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) { 1701 // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter. 1702 // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used. 1703 if (!TryBlitFatAntiRect(blitter, path, clipBounds)) { 1704 MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse); 1705 aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom, 1706 containedInClip, true, forceRLE); 1707 } 1708 } else if (!isInverse && path.isConvex()) { 1709 // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler 1710 // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need 1711 // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter 1712 // RunBasedAdditiveBlitter would suffice. 1713 RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse); 1714 aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom, 1715 containedInClip, false, forceRLE); 1716 } else { 1717 // If the filling area might not be convex, the more involved aaa_walk_edges would 1718 // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter 1719 // does that at a cost of performance. 1720 SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse); 1721 aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom, 1722 containedInClip, false, forceRLE); 1723 } 1724 } 1725