1 /* 2 * Copyright 2016 The Android Open Source Project 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8 #include "SkAnalyticEdge.h" 9 #include "SkAntiRun.h" 10 #include "SkAutoMalloc.h" 11 #include "SkBlitter.h" 12 #include "SkEdge.h" 13 #include "SkEdgeBuilder.h" 14 #include "SkGeometry.h" 15 #include "SkPath.h" 16 #include "SkQuadClipper.h" 17 #include "SkRasterClip.h" 18 #include "SkRegion.h" 19 #include "SkScan.h" 20 #include "SkScanPriv.h" 21 #include "SkTSort.h" 22 #include "SkTemplates.h" 23 #include "SkUtils.h" 24 25 /////////////////////////////////////////////////////////////////////////////// 26 27 /* 28 29 The following is a high-level overview of our analytic anti-aliasing 30 algorithm. We consider a path as a collection of line segments, as 31 quadratic/cubic curves are converted to small line segments. Without loss of 32 generality, let's assume that the draw region is [0, W] x [0, H]. 33 34 Our algorithm is based on horizontal scan lines (y = c_i) as the previous 35 sampling-based algorithm did. However, our algorithm uses non-equal-spaced 36 scan lines, while the previous method always uses equal-spaced scan lines, 37 such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm, 38 and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous 39 16-supersampling AA algorithm. 40 41 Our algorithm contains scan lines y = c_i for c_i that is either: 42 43 1. an integer between [0, H] 44 45 2. the y value of a line segment endpoint 46 47 3. the y value of an intersection of two line segments 48 49 For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically computes 50 the coverage of this horizontal strip of our path on each pixel. This can be 51 done very efficiently because the strip of our path now only consists of 52 trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes 53 rectangles and triangles as special cases). 
We now describe how the coverage of a single pixel is computed against such a
trapezoid. That coverage is essentially the intersection area of a rectangle
(e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
could be complicated, as shown in the example region A below:

+-----------\----+
|            \  C|
|             \  |
\              \ |
|\      A       \|
| \              \
|  \             |
| B  \           |
+----\-----------+

However, we don't have to compute the area of A directly. Instead, we can
compute the excluded areas, B and C, quite easily, because they're just
triangles. In fact, we can prove that an excluded region (take B as an
example) is either itself a simple trapezoid (including rectangles, triangles,
and empty regions), or its opposite (the opposite of B is A + C) is a simple
trapezoid. In either case, we can compute its area efficiently.

In summary, our algorithm has a higher quality because it generates ground-
truth coverages analytically. It is also faster because it has far fewer
unnecessary horizontal scan lines. For example, given a triangle path, the
number of scan lines in our algorithm is only about 3 + H while the
16-supersampling algorithm has about 4H scan lines.
*/

///////////////////////////////////////////////////////////////////////////////

// Add delta to *alpha, asserting (in debug builds) that the sum stays <= 256
// and clamping via SkAlphaRuns::CatchOverflow. Used when the caller has
// already guaranteed that accumulated coverage cannot meaningfully overflow.
static inline void addAlpha(SkAlpha* alpha, SkAlpha delta) {
    SkASSERT(*alpha + (int)delta <= 256);
    *alpha = SkAlphaRuns::CatchOverflow(*alpha + (int)delta);
}

// Saturating add: clamps the sum to 0xFF. Used for concave paths, where
// overlapping trapezoids can legitimately push accumulated alpha past 0xFF.
static inline void safelyAddAlpha(SkAlpha* alpha, SkAlpha delta) {
    *alpha = SkTMin(0xFF, *alpha + (int)delta);
}

// Interface for blitters that ADD coverage (rather than overwrite it) so the
// analytic AA walker can accumulate partial coverage from multiple trapezoids
// into the same pixel row before flushing it to the real blitter.
class AdditiveBlitter : public SkBlitter {
public:
    ~AdditiveBlitter() override {}

    // Returns the underlying (non-additive) blitter; implementations may
    // return themselves unless forceRealBlitter is set.
    virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;

    // Additive variants: add the given alpha(s) into the row's coverage.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
    virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
    virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;

    // The run-based SkBlitter entry points are not meaningful for an additive
    // blitter; they fail loudly in debug builds to catch misuse.
    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
        SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
    }

    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("Please call real blitter's blitV instead.");
    }

    void blitH(int x, int y, int width) override {
        SkDEBUGFAIL("Please call real blitter's blitH instead.");
    }

    void blitRect(int x, int y, int width, int height) override {
        SkDEBUGFAIL("Please call real blitter's blitRect instead.");
    }

    void blitAntiRect(int x, int y, int width, int height,
                      SkAlpha leftAlpha, SkAlpha rightAlpha) override {
        SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
    }

    virtual int getWidth() = 0;

    // Flush the additive alpha cache if floor(y) and floor(nextY) is different
    // (i.e., we'll start working on a new pixel row).
    virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
};

// We need this mask blitter because it significantly accelerates small path filling.
// Accumulates coverage for a small path into an on-stack A8 mask (fStorage)
// and hands the whole mask to the real blitter once, in the destructor.
// Only usable when the bounds fit in kMAX_WIDTH x kMAX_STORAGE (see
// canHandleRect); significantly accelerates small path filling.
class MaskAdditiveBlitter : public AdditiveBlitter {
public:
    MaskAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                        bool isInverse);
    ~MaskAdditiveBlitter() override {
        // Deferred blit: the whole accumulated mask goes out in one call.
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    // Most of the time, we still consider this mask blitter as the real blitter
    // so we can accelerate blitRect and others. But sometimes we want to return
    // the absolute real blitter (e.g., when we fall back to the old code path).
    SkBlitter* getRealBlitter(bool forceRealBlitter) override {
        return forceRealBlitter ? fRealBlitter : this;
    }

    // Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;

    // The following methods are used to blit rectangles during aaa_walk_convex_edges.
    // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
    void blitV(int x, int y, int height, SkAlpha alpha) override;
    void blitRect(int x, int y, int width, int height) override;
    void blitAntiRect(int x, int y, int width, int height,
                      SkAlpha leftAlpha, SkAlpha rightAlpha) override;

    // The flush is only needed for RLE (RunBasedAdditiveBlitter)
    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}

    int getWidth() override { return fClipRect.width(); }

    // True if the bounds are small enough for the fixed-size mask storage.
    static bool canHandleRect(const SkIRect& bounds) {
        int width = bounds.width();
        if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
            return false;
        }
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        // NOTE(review): the width <= kMAX_WIDTH term is always true here
        // because of the early return above; it is kept for clarity/safety.
        return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
               (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
    }

    // Return a pointer where pointer[x] corresponds to the alpha of (x, y).
    // Caches the last row so repeated accesses to the same y are cheap.
    inline uint8_t* getRow(int y) {
        if (y != fY) {
            fY = y;
            fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
        }
        return fRow;
    }

private:
    // so we don't try to do very wide things, where the RLE blitter would be faster
    static const int kMAX_WIDTH = 32;
    static const int kMAX_STORAGE = 1024;

    SkBlitter* fRealBlitter;
    SkMask fMask;
    SkIRect fClipRect;
    // we add 2 because we can write 1 extra byte at either end due to precision error
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];

    uint8_t* fRow;  // cached pointer for the row fY (see getRow)
    int fY;         // y coordinate the cached fRow corresponds to
};

MaskAdditiveBlitter::MaskAdditiveBlitter(
        SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) {
    SkASSERT(canHandleRect(ir));
    SkASSERT(!isInverse);

    fRealBlitter = realBlitter;

    fMask.fImage = (uint8_t*)fStorage + 1; // There's 1 extra byte at either end of fStorage
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    fY = ir.fTop - 1;   // force getRow() to recompute on first use
    fRow = nullptr;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);    // assumed to never happen; empty clip is a safe fallback
        fClipRect.setEmpty();
    }

    // Zero the mask plus the 2 slop bytes (1 before fImage, 1 after the end).
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
}

void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    SK_ABORT("Don't use this; directly add alphas to the mask.");
}

void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    // x may be 1 pixel left of the bounds due to precision error (slop byte).
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    addAlpha(&this->getRow(y)[x], alpha);
}

void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    uint8_t* row = this->getRow(y);
    for (int i = 0; i < width; ++i) {
        addAlpha(&row[x + i], alpha);
    }
}

void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
    if (alpha == 0) {
        return;
    }
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    // This must be called as if this is a real blitter.
    // So we directly set alpha rather than adding it.
    uint8_t* row = this->getRow(y);
    for (int i = 0; i < height; ++i) {
        row[x] = alpha;
        row += fMask.fRowBytes;
    }
}

void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    // This must be called as if this is a real blitter.
    // So we directly set alpha rather than adding it.
    uint8_t* row = this->getRow(y);
    for (int i = 0; i < height; ++i) {
        memset(row + x, 0xFF, width);
        row += fMask.fRowBytes;
    }
}

void MaskAdditiveBlitter::blitAntiRect(int x, int y, int width, int height,
                                       SkAlpha leftAlpha, SkAlpha rightAlpha) {
    // Antialiased columns on either side of a fully opaque interior rect.
    blitV(x, y, height, leftAlpha);
    blitV(x + 1 + width, y, height, rightAlpha);
    blitRect(x + 1, y, width, height);
}

// Accumulates coverage for one pixel row at a time into an SkAlphaRuns RLE
// buffer and flushes it to the real blitter whenever the current y changes.
// This is the general-purpose additive blitter (paths too big for the mask).
class RunBasedAdditiveBlitter : public AdditiveBlitter {
public:
    RunBasedAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                            bool isInverse);
    ~RunBasedAdditiveBlitter() override;

    SkBlitter* getRealBlitter(bool forceRealBlitter) override;

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;

    int getWidth() override;

    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
        if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
            this->flush();
        }
    }

protected:
    SkBlitter* fRealBlitter;

    /// Current y coordinate
    int fCurrY;
    /// Widest row of region to be blitted
    int fWidth;
    /// Leftmost x coordinate in any row
    int fLeft;
    /// Initial y coordinate (top of bounds).
    int fTop;

    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int fRunsToBuffer;
    void* fRunsBuffer;
    int fCurrentRun;
    SkAlphaRuns fRuns;

    // Last x position written via fRuns.add; used as a hint to speed up
    // subsequent (left-to-right) adds. Reset to 0 whenever we go backwards.
    int fOffsetX;

    // True iff [x, x + width) lies within [0, fWidth). Debug builds have a
    // hook (commented out) for logging out-of-range requests.
    inline bool check(int x, int width) const {
#ifdef SK_DEBUG
        if (x < 0 || x + width > fWidth) {
            // SkDebugf("Ignore x = %d, width = %d\n", x, width);
        }
#endif
        return (x >= 0 && x + width <= fWidth);
    }

    // Size in bytes of one SkAlphaRuns scanline: fWidth+1 int16 run lengths
    // (extra one to store the zero at the end) plus fWidth+1 alpha bytes,
    // rounded up to whole int16s.
    inline int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    inline void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
                reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // Blitting 0xFF and 0 is much faster so we snap alphas close to them
    inline SkAlpha snapAlpha(SkAlpha alpha) {
        return alpha > 247 ? 0xFF : alpha < 8 ? 0 : alpha;
    }

    // Send the accumulated row (if any) to the real blitter and reset state
    // so the next row starts fresh.
    inline void flush() {
        if (fCurrY >= fTop) {
            SkASSERT(fCurrentRun < fRunsToBuffer);
            for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
                // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
                fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
            }
            if (!fRuns.empty()) {
                // SkDEBUGCODE(fRuns.dump();)
                fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
                this->advanceRuns();
                fOffsetX = 0;
            }
            fCurrY = fTop - 1;  // mark "no pending row"
        }
    }

    // Flush the pending row if we've moved to a different y.
    inline void checkY(int y) {
        if (y != fCurrY) {
            this->flush();
            fCurrY = y;
        }
    }
};

RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(
        SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) {
    fRealBlitter = realBlitter;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're a inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrY = fTop - 1;  // "no pending row" sentinel

    // Row storage is owned by the real blitter's blit memory.
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;   // advanceRuns() below moves this to 0

    this->advanceRuns();

    fOffsetX = 0;
}

RunBasedAdditiveBlitter::~RunBasedAdditiveBlitter() {
    this->flush();  // don't lose the last pending row
}

SkBlitter* RunBasedAdditiveBlitter::getRealBlitter(bool forceRealBlitter) {
    return fRealBlitter;
}

void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    // Clip the span to [0, fWidth).
    if (x < 0) {
        len += x;
        antialias -= x;
        x = 0;
    }
    len = SkTMin(len, fWidth - x);
    SkASSERT(check(x, len));

    if (x < fOffsetX) {
        fOffsetX = 0;   // going backwards invalidates the add() hint
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
    // Split every run covering [x, x+len) into unit-length runs so each
    // pixel's alpha can be updated individually.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        addAlpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}

void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    if (this->check(x, 1)) {
        fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
    }
}

void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    if (this->check(x, width)) {
        fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
    }
}

int RunBasedAdditiveBlitter::getWidth() { return fWidth; }

// This exists specifically for concave path filling.
// In those cases, we can easily accumulate alpha greater than 0xFF.
// Same as RunBasedAdditiveBlitter, but every alpha accumulation saturates at
// 0xFF (safelyAddAlpha) instead of asserting. Used for concave paths, where
// overlapping coverage can legitimately exceed 0xFF.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
    SafeRLEAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
            bool isInverse) : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
};

void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    // Clip the span to [0, fWidth).
    if (x < 0) {
        len += x;
        antialias -= x;
        x = 0;
    }
    len = SkTMin(len, fWidth - x);
    SkASSERT(check(x, len));

    if (x < fOffsetX) {
        fOffsetX = 0;   // going backwards invalidates the add() hint
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
    // Split every run covering [x, x+len) into unit-length runs so each
    // pixel's alpha can be updated individually.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        safelyAddAlpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}

void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    if (check(x, 1)) {
        // Break the run
        fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
        safelyAddAlpha(&fRuns.fAlpha[x], alpha);
    }
}

void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
    checkY(y);
    x -= fLeft;

    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    if (check(x, width)) {
        // Break the run
        fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
        // add() may leave multiple runs in [x, x+width); saturate each one.
        for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
            safelyAddAlpha(&fRuns.fAlpha[i], alpha);
        }
    }
}
/////////////////////////////////////////////////////////////////////////////// 544 545 // Return the alpha of a trapezoid whose height is 1 546 static inline SkAlpha trapezoidToAlpha(SkFixed l1, SkFixed l2) { 547 SkASSERT(l1 >= 0 && l2 >= 0); 548 return (l1 + l2) >> 9; 549 } 550 551 // The alpha of right-triangle (a, a*b), in 16 bits 552 static inline SkFixed partialTriangleToAlpha16(SkFixed a, SkFixed b) { 553 SkASSERT(a <= SK_Fixed1); 554 // SkFixedMul(SkFixedMul(a, a), b) >> 1 555 // return ((((a >> 8) * (a >> 8)) >> 8) * (b >> 8)) >> 1; 556 return (a >> 11) * (a >> 11) * (b >> 11); 557 } 558 559 // The alpha of right-triangle (a, a*b) 560 static inline SkAlpha partialTriangleToAlpha(SkFixed a, SkFixed b) { 561 return partialTriangleToAlpha16(a, b) >> 8; 562 } 563 564 static inline SkAlpha getPartialAlpha(SkAlpha alpha, SkFixed partialHeight) { 565 return SkToU8(SkFixedRoundToInt(alpha * partialHeight)); 566 } 567 568 static inline SkAlpha getPartialAlpha(SkAlpha alpha, SkAlpha fullAlpha) { 569 return ((uint16_t)alpha * fullAlpha) >> 8; 570 } 571 572 // For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right. 573 // For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255. 574 // This is rarely the problem so we'll only use this for blitting rectangles. 575 static inline SkAlpha f2a(SkFixed f) { 576 SkASSERT(f <= SK_Fixed1); 577 return getPartialAlpha(0xFF, f); 578 } 579 580 // Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1), 581 // approximate (very coarsely) the x coordinate of the intersection. 
static inline SkFixed approximateIntersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
    // Normalize each interval, then return the midpoint of the overlap.
    if (l1 > r1) { SkTSwap(l1, r1); }
    if (l2 > r2) { SkTSwap(l2, r2); }
    return (SkTMax(l1, l2) + SkTMin(r1, r2)) >> 1;
}

// Compute, per pixel, the coverage ABOVE the line segment (l, 0)-(r, 1)
// (i.e. above-left of a right edge with slope dY per unit x), scaled by
// fullAlpha. Here we always send in l < SK_Fixed1, and the first alpha we
// want to compute is alphas[0].
static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r,
                                         SkFixed dY, SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);   // l is within the first pixel
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Single pixel: trapezoid area above the segment.
        alphas[0] = getPartialAlpha(((R << 17) - l - r) >> 9, fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
        SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
        alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
        SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
        for (int i = 1; i < R - 1; ++i) {
            // Interior pixels: coverage grows linearly by dY per pixel.
            alphas[i] = alpha16 >> 8;
            alpha16 += dY;
        }
        alphas[R - 1] = fullAlpha - partialTriangleToAlpha(last, dY);
    }
}

// Mirror of computeAlphaAboveLine: coverage BELOW the segment (left edge).
// Here we always send in l < SK_Fixed1, and the first alpha we want to
// compute is alphas[0].
static inline void computeAlphaBelowLine(
        SkAlpha* alphas, SkFixed l, SkFixed r, SkFixed dY, SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);   // l is within the first pixel
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Single pixel: trapezoid area below the segment.
        alphas[0] = getPartialAlpha(trapezoidToAlpha(l, r), fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
        SkFixed lastH = SkFixedMul(last, dY); // vertical edge of the right-most triangle
        alphas[R - 1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
        SkFixed alpha16 = lastH + (dY >> 1); // rectangle plus triangle
        for (int i = R - 2; i > 0; i--) {
            // Interior pixels, filled right to left.
            alphas[i] = alpha16 >> 8;
            alpha16 += dY;
        }
        alphas[0] = fullAlpha - partialTriangleToAlpha(first, dY);
    }
}

// Blit a single pixel's coverage, either into the mask row or through the
// blitter. Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha.
static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
        SkAlpha alpha, SkAlpha fullAlpha, SkAlpha* maskRow,
        bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
    if (isUsingMask) {
        if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
            maskRow[x] = alpha;
        } else if (needSafeCheck) {
            safelyAddAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
        } else {
            addAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            blitter->getRealBlitter()->blitV(x, y, 1, alpha);
        } else {
            blitter->blitAntiH(x, y, getPartialAlpha(alpha, fullAlpha));
        }
    }
}

// Blit two adjacent pixels' coverage (a1 at x, a2 at x+1).
static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
        SkAlpha a1, SkAlpha a2, SkAlpha fullAlpha, SkAlpha* maskRow,
        bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
    if (isUsingMask) {
        if (needSafeCheck) {
            safelyAddAlpha(&maskRow[x], a1);
            safelyAddAlpha(&maskRow[x + 1], a2);
        } else {
            addAlpha(&maskRow[x], a1);
            addAlpha(&maskRow[x + 1], a2);
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
        } else {
            blitter->blitAntiH(x, y, a1);
            blitter->blitAntiH(x + 1, y, a2);
        }
    }
}

// It's important that this is inline. Otherwise it'll be much slower.
// Blit a run of len fully covered pixels starting at x.
static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter, int y, int x, int len,
        SkAlpha fullAlpha, SkAlpha* maskRow, bool isUsingMask,
        bool noRealBlitter, bool needSafeCheck) {
    if (isUsingMask) {
        for (int i = 0; i < len; ++i) {
            if (needSafeCheck) {
                safelyAddAlpha(&maskRow[x + i], fullAlpha);
            } else {
                addAlpha(&maskRow[x + i], fullAlpha);
            }
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            blitter->getRealBlitter()->blitH(x, y, len);
        } else {
            blitter->blitAntiH(x, y, len, fullAlpha);
        }
    }
}

// General case for one row of a trapezoid with top edge (ul, ur) and bottom
// edge (ll, lr) (all x coordinates in 16.16 fixed point, row height <= 1):
// start from full coverage and subtract the areas excluded by the left and
// right slanted edges (see the overview comment at the top of this file).
// lDY/rDY are the absolute dx/dy-derived slopes of the left/right edges.
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
        SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
        SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha, SkAlpha* maskRow,
        bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
    int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
    int len = R - L;

    if (len == 1) {
        // Whole trapezoid fits in one pixel.
        SkAlpha alpha = trapezoidToAlpha(ur - ul, lr - ll);
        blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, isUsingMask, noRealBlitter,
                          needSafeCheck);
        return;
    }

    // SkDebugf("y = %d, len = %d, ul = %f, ur = %f, ll = %f, lr = %f\n", y, len,
    //          SkFixedToFloat(ul), SkFixedToFloat(ur), SkFixedToFloat(ll), SkFixedToFloat(lr));

    const int kQuickLen = 31;
    // This is faster than SkAutoSMalloc<1024>
    char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
    SkAlpha* alphas;

    // Need (len+1) alphas, (len+1) temp alphas, and (len+1) int16 runs.
    if (len <= kQuickLen) {
        alphas = (SkAlpha*)quickMemory;
    } else {
        alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
    }

    SkAlpha* tempAlphas = alphas + len + 1;
    int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);

    // Start from full coverage; the edge passes below carve out the excluded area.
    for (int i = 0; i < len; ++i) {
        runs[i] = 1;
        alphas[i] = fullAlpha;
    }
    runs[len] = 0;

    // Left edge: subtract the area left of (ul)-(ll).
    int uL = SkFixedFloorToInt(ul);
    int lL = SkFixedCeilToInt(ll);
    if (uL + 2 == lL) { // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
        SkFixed second = ll - ul - first;
        SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, lDY);
        SkAlpha a2 = partialTriangleToAlpha(second, lDY);
        alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
        alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
    } else {
        computeAlphaBelowLine(tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL),
                              lDY, fullAlpha);
        for (int i = uL; i < lL; ++i) {
            // Clamped subtraction guards against precision-induced underflow.
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    // Right edge: subtract the area right of (ur)-(lr).
    int uR = SkFixedFloorToInt(ur);
    int lR = SkFixedCeilToInt(lr);
    if (uR + 2 == lR) { // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
        SkFixed second = lr - ur - first;
        SkAlpha a1 = partialTriangleToAlpha(first, rDY);
        SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, rDY);
        alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
        alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
    } else {
        computeAlphaAboveLine(tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR),
                              rDY, fullAlpha);
        for (int i = uR; i < lR; ++i) {
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    if (isUsingMask) {
        for (int i = 0; i < len; ++i) {
            if (needSafeCheck) {
                safelyAddAlpha(&maskRow[L + i], alphas[i]);
            } else {
                addAlpha(&maskRow[L + i], alphas[i]);
            }
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Real blitter is faster than RunBasedAdditiveBlitter
            blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
        } else {
            blitter->blitAntiH(L, y, alphas, len);
        }
    }

    if (len > kQuickLen) {
        delete [] alphas;
    }
}

// Blit one row of a trapezoid: full-alpha rectangle in the middle (between
// joinLeft and joinRite, if any), with the partially covered fringes handled
// by the one/two-pixel fast paths or blit_aaa_trapezoid_row.
static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
        SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
        SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha,
        SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter = false,
        bool needSafeCheck = false) {
    SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolute value

    if (ul > ur) {
#ifdef SK_DEBUG
        // SkDebugf("ul = %f > ur = %f!\n", SkFixedToFloat(ul), SkFixedToFloat(ur));
#endif
        return;
    }

    // Edge crosses. Approximate it. This should only happen due to precision limit,
    // so the approximation could be very coarse.
    if (ll > lr) {
#ifdef SK_DEBUG
        // SkDebugf("approximate intersection: %d %f %f\n", y,
        //          SkFixedToFloat(ll), SkFixedToFloat(lr));
#endif
        ll = lr = approximateIntersection(ul, ll, ur, lr);
    }

    if (ul == ur && ll == lr) {
        return; // empty trapezoid
    }

    // We're going to use the left line ul-ll and the rite line ur-lr
    // to exclude the area that's not covered by the path.
    // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
    // so we'll do that for simplicity.
    if (ul > ll) { SkTSwap(ul, ll); }
    if (ur > lr) { SkTSwap(ur, lr); }

    SkFixed joinLeft = SkFixedCeilToFixed(ll);
    SkFixed joinRite = SkFixedFloorToFixed(ur);
    if (joinLeft <= joinRite) { // There's a rect from joinLeft to joinRite that we can blit
        if (ul < joinLeft) {
            int len = SkFixedCeilToInt(joinLeft - ul);
            if (len == 1) {
                SkAlpha alpha = trapezoidToAlpha(joinLeft - ul, joinLeft - ll);
                blit_single_alpha(blitter, y, ul >> 16, alpha, fullAlpha, maskRow, isUsingMask,
                                  noRealBlitter, needSafeCheck);
            } else if (len == 2) {
                SkFixed first = joinLeft - SK_Fixed1 - ul;
                SkFixed second = ll - ul - first;
                SkAlpha a1 = partialTriangleToAlpha(first, lDY);
                SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, lDY);
                blit_two_alphas(blitter, y, ul >> 16, a1, a2, fullAlpha, maskRow, isUsingMask,
                                noRealBlitter, needSafeCheck);
            } else {
                blit_aaa_trapezoid_row(blitter, y, ul, joinLeft, ll, joinLeft, lDY, SK_MaxS32,
                                       fullAlpha, maskRow, isUsingMask, noRealBlitter,
                                       needSafeCheck);
            }
        }
        // SkAAClip requires that we blit from left to right.
        // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
        if (joinLeft < joinRite) {
            blit_full_alpha(blitter, y, SkFixedFloorToInt(joinLeft),
                            SkFixedFloorToInt(joinRite - joinLeft),
                            fullAlpha, maskRow, isUsingMask, noRealBlitter, needSafeCheck);
        }
        if (lr > joinRite) {
            int len = SkFixedCeilToInt(lr - joinRite);
            if (len == 1) {
                SkAlpha alpha = trapezoidToAlpha(ur - joinRite, lr - joinRite);
                blit_single_alpha(blitter, y, joinRite >> 16, alpha, fullAlpha, maskRow,
                                  isUsingMask, noRealBlitter, needSafeCheck);
            } else if (len == 2) {
                SkFixed first = joinRite + SK_Fixed1 - ur;
                SkFixed second = lr - ur - first;
                SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, rDY);
                SkAlpha a2 = partialTriangleToAlpha(second, rDY);
                blit_two_alphas(blitter, y, joinRite >> 16, a1, a2, fullAlpha, maskRow,
                                isUsingMask, noRealBlitter, needSafeCheck);
            } else {
                blit_aaa_trapezoid_row(blitter, y, joinRite, ur, joinRite, lr, SK_MaxS32, rDY,
                                       fullAlpha, maskRow, isUsingMask, noRealBlitter,
                                       needSafeCheck);
            }
        }
    } else {
        // No full-alpha middle rect; handle the whole row as one trapezoid.
        blit_aaa_trapezoid_row(blitter, y, ul, ur, ll, lr, lDY, rDY, fullAlpha, maskRow,
                               isUsingMask, noRealBlitter, needSafeCheck);
    }
}

///////////////////////////////////////////////////////////////////////////////

// Ordering for edge sorting: by top y, then by x, then by dx/dy.
static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
    int valuea = a.fUpperY;
    int valueb = b.fUpperY;

    if (valuea == valueb) {
        valuea = a.fX;
        valueb = b.fX;
    }

    if (valuea == valueb) {
        valuea = a.fDX;
        valueb = b.fDX;
    }

    return valuea < valueb;
}

// Sort the count edges in list and link them into a doubly linked list in
// sorted order; returns the head and writes the tail through *last.
static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
    SkTQSort(list, list + count - 1);

    // now make the edges linked in sorted order
    for (int i = 1; i < count; ++i) {
        list[i - 1]->fNext = list[i];
        list[i]->fPrev = list[i - 1];
    }

    *last = list[count - 1];
    return list[0];
}

#ifdef SK_DEBUG
// Debug-only: walk the linked list and assert it is sorted by fUpperY.
static void validate_sort(const SkAnalyticEdge* edge) {
    SkFixed y = SkIntToFixed(-32768);

    while (edge->fUpperY != SK_MaxS32) {
        edge->validate();
        SkASSERT(y <= edge->fUpperY);

        y = edge->fUpperY;
        edge = (SkAnalyticEdge*)edge->fNext;
    }
}
#else
#define validate_sort(edge)
#endif

// For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
// For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
// relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy
static inline bool isSmoothEnough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
    if (thisEdge->fCurveCount < 0) {
        // Cubic edge: compare first derivatives against second derivatives.
        const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
        int ddshift = cEdge.fCurveShift;
        return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
                SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
                // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
                (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
    } else if (thisEdge->fCurveCount > 0) {
        // Quadratic edge: same idea with the quad's derivative fields.
        const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
        return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
                SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
                // current Dy is (fQDy - fQDDy) >> shift
                (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift
                >= SK_Fixed1;
    }
    return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 && // DDx should be small
            nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1;   // Dy should be large

}

// Check if the leftE and riteE are changing smoothly in terms of fDX.
// If yes, we can later skip the fractional y and directly jump to integer y.
static inline bool isSmoothEnough(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
        SkAnalyticEdge* currE, int stop_y) {
    if (currE->fUpperY >= stop_y << 16) {
        return false; // We're at the end so we won't skip anything
    }
    if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
        return isSmoothEnough(leftE, currE, stop_y); // Only leftE is changing
    } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
        return isSmoothEnough(riteE, currE, stop_y); // Only riteE is changing
    }

    // Now both edges are changing, find the second next edge
    SkAnalyticEdge* nextCurrE = currE->fNext;
    if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
        return false;
    }
    // Ensure currE is the one that replaces leftE (sorted before nextCurrE).
    if (*nextCurrE < *currE) {
        SkTSwap(currE, nextCurrE);
    }
    return isSmoothEnough(leftE, currE, stop_y) && isSmoothEnough(riteE, nextCurrE, stop_y);
}

// Scan converter specialized for convex (non-inverse) paths: at any y there is
// exactly one left and one right active edge, so we can walk them in lockstep
// and blit one trapezoid per scan-line strip. leftBound/riteBound clip x in
// 16.16 fixed point; stop_y clips the bottom.
static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
        AdditiveBlitter* blitter, int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound,
        bool isUsingMask) {
    validate_sort((SkAnalyticEdge*)prevHead->fNext);

    SkAnalyticEdge* leftE = (SkAnalyticEdge*) prevHead->fNext;
    SkAnalyticEdge* riteE = (SkAnalyticEdge*) leftE->fNext;
    SkAnalyticEdge* currE = (SkAnalyticEdge*) riteE->fNext;

    SkFixed y = SkTMax(leftE->fUpperY, riteE->fUpperY);

#ifdef SK_DEBUG
    // Counters for how many scan lines start at fractional y vs total.
    int frac_y_cnt = 0;
    int total_y_cnt = 0;
#endif

    for (;;) {
        // We have to check fLowerY first because some edges might be alone (e.g., there's only
        // a left edge but no right edge in a given y scan line) due to precision limit.
        while (leftE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
            if (!leftE->update(y)) {
                // leftE is exhausted: advance to the next edge in the list.
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                leftE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }
        while (riteE->fLowerY <= y) { // Due to smooth jump, we may pass multiple short edges
            if (!riteE->update(y)) {
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                riteE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }

        SkASSERT(leftE);
        SkASSERT(riteE);

        // check our bottom clip
        if (SkFixedFloorToInt(y) >= stop_y) {
            break;
        }

        SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
        SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);

        leftE->goY(y);
        riteE->goY(y);

        // Keep the left/right roles consistent with actual x order (tie-break
        // on slope when they start at the same x).
        if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX &&
                leftE->fDX > riteE->fDX)) {
            SkTSwap(leftE, riteE);
        }

        SkFixed local_bot_fixed = SkMin32(leftE->fLowerY, riteE->fLowerY);
        if (isSmoothEnough(leftE, riteE, currE, stop_y)) {
            // Edges are smooth: jump straight to the next integer scan line.
            local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
        }
        local_bot_fixed = SkMin32(local_bot_fixed, SkIntToFixed(stop_y));

        SkFixed left = SkTMax(leftBound, leftE->fX);
        SkFixed dLeft = leftE->fDX;
        SkFixed rite = SkTMin(riteBound, riteE->fX);
        SkFixed dRite = riteE->fDX;
        if (0 == (dLeft | dRite)) {
            // Both edges are vertical: the strip is an axis-aligned rectangle,
            // handled with direct rect/run blits instead of trapezoid rows.
            int fullLeft = SkFixedCeilToInt(left);
            int fullRite = SkFixedFloorToInt(rite);
            SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
            SkFixed partialRite = rite - SkIntToFixed(fullRite);
            int fullTop = SkFixedCeilToInt(y);
            int fullBot = SkFixedFloorToInt(local_bot_fixed);
            SkFixed partialTop = SkIntToFixed(fullTop) - y;
            SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
            if (fullTop > fullBot) { // The rectangle is within one pixel height...
                partialTop -= (SK_Fixed1 - partialBot);
                partialBot = 0;
            }

            if (fullRite >= fullLeft) {
                if (partialTop > 0) { // blit first partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1, fullTop - 1,
                                f2a(SkFixedMul(partialTop, partialLeft)));
                    }
                    blitter->blitAntiH(fullLeft, fullTop - 1, fullRite - fullLeft,
                            f2a(partialTop));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite, fullTop - 1,
                                f2a(SkFixedMul(partialTop, partialRite)));
                    }
                    blitter->flush_if_y_changed(y, y + partialTop);
                }

                // Blit all full-height rows from fullTop to fullBot
                if (fullBot > fullTop &&
                        // SkAAClip cannot handle the empty rect so check the non-emptiness here
                        // (bug chromium:662800)
                        (fullRite > fullLeft || f2a(partialLeft) > 0 || f2a(partialRite) > 0)) {
                    blitter->getRealBlitter()->blitAntiRect(fullLeft - 1, fullTop,
                            fullRite - fullLeft, fullBot - fullTop,
                            f2a(partialLeft), f2a(partialRite));
                }

                if (partialBot > 0) { // blit last partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1, fullBot,
                                f2a(SkFixedMul(partialBot, partialLeft)));
                    }
                    blitter->blitAntiH(fullLeft, fullBot, fullRite - fullLeft, f2a(partialBot));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite, fullBot,
                                f2a(SkFixedMul(partialBot, partialRite)));
                    }
                }
            } else { // left and rite are within the same pixel
                if (partialTop > 0) {
                    blitter->blitAntiH(fullLeft - 1, fullTop - 1, 1,
                            f2a(SkFixedMul(partialTop, rite - left)));
                    blitter->flush_if_y_changed(y, y + partialTop);
                }
                if (fullBot > fullTop) {
                    blitter->getRealBlitter()->blitV(fullLeft - 1, fullTop, fullBot - fullTop,
                            f2a(rite - left));
                }
                if (partialBot > 0) {
                    blitter->blitAntiH(fullLeft - 1, fullBot, 1,
                            f2a(SkFixedMul(partialBot, rite - left)));
                }
            }

            y = local_bot_fixed;
        } else {
            // The following constant are used to snap X
            // We snap X mainly for speedup (no tiny triangle) and
            // avoiding edge cases caused by precision errors
            const SkFixed kSnapDigit = SK_Fixed1 >> 4;
            const SkFixed kSnapHalf = kSnapDigit >> 1;
            const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
            left += kSnapHalf; rite += kSnapHalf; // For fast rounding

            // Number of blit_trapezoid_row calls we'll have
            int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);
#ifdef SK_DEBUG
            total_y_cnt += count;
            frac_y_cnt += ((int)(y & 0xFFFF0000) != y);
            if ((int)(y & 0xFFFF0000) != y) {
                // SkDebugf("frac_y = %f\n", SkFixedToFloat(y));
            }
#endif

            // If we're using mask blitter, we advance the mask row in this function
            // to save some "if" condition checks.
            SkAlpha* maskRow = nullptr;
            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            // Instead of writing one loop that handles both partial-row blit_trapezoid_row
            // and full-row trapezoid_row together, we use the following 3-stage flow to
            // handle partial-row blit and full-row blit separately. It will save us much time
            // on changing y, left, and rite.
            if (count > 1) {
                if ((int)(y & 0xFFFF0000) != y) { // There's a partial-row on the top
                    count--;
                    SkFixed nextY = SkFixedCeilToFixed(y + 1);
                    SkFixed dY = nextY - y;
                    SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
                    SkFixed nextRite = rite + SkFixedMul(dRite, dY);
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                            (nextLeft & kSnapMask) >= leftBound &&
                            (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
                            nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY,
                            getPartialAlpha(0xFF, dY), maskRow, isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft; rite = nextRite; y = nextY;
                }

                while (count > 1) { // Full rows in the middle
                    count--;
                    if (isUsingMask) {
                        maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
                    }
                    SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                            (nextLeft & kSnapMask) >= leftBound &&
                            (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
                            nextLeft & kSnapMask, nextRite & kSnapMask,
                            leftE->fDY, riteE->fDY, 0xFF, maskRow, isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft; rite = nextRite; y = nextY;
                }
            }

            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            SkFixed dY = local_bot_fixed - y; // partial-row on the bottom
            SkASSERT(dY <= SK_Fixed1);
            // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound.
            // Take them back into the bound here.
// NOTE(review): continues the bottom partial-row stage of aaa_walk_convex_edges
// from the excerpt above.
            // Note that we subtract kSnapHalf later so we have to add them to leftBound/riteBound
            SkFixed nextLeft = SkTMax(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
            SkFixed nextRite = SkTMin(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
            SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                    (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
            blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
                    nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY,
                    getPartialAlpha(0xFF, dY), maskRow, isUsingMask);
            blitter->flush_if_y_changed(y, local_bot_fixed);
            left = nextLeft; rite = nextRite; y = local_bot_fixed;
            left -= kSnapHalf; rite -= kSnapHalf; // Undo the rounding offset added earlier
        }

        // Remember the position of the scan line just processed so the next
        // iteration (and edge updates) resume from here.
        leftE->fX = left;
        riteE->fX = rite;
        leftE->fY = riteE->fY = y;
    }

END_WALK:
    ;
#ifdef SK_DEBUG
    // SkDebugf("frac_y_cnt = %d, total_y_cnt = %d\n", frac_y_cnt, total_y_cnt);
#endif
}

///////////////////////////////////////////////////////////////////////////////

// Lower *nextNextY to y when y is a new scan-line candidate strictly below
// nextY (i.e., y is the closest future event seen so far).
static inline void updateNextNextY(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
    *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
}

// If edge and its predecessor will cross (predecessor is moving right past
// edge), force the next-next scan line to be just one accuracy step below
// nextY so the crossing is sampled.
static inline void checkIntersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY)
{
    if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
        *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
    }
}

// Merge edges that become active at scan line y into the x-sorted active list,
// updating *nextNextY with every lower/upper y encountered. newEdge points at
// the first not-yet-active edge; edges already x-sorted are left in place,
// otherwise they are unlinked and re-inserted after the proper predecessor.
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
    if (newEdge->fUpperY > y) {
        // Nothing becomes active yet; just record when the next edge starts.
        updateNextNextY(newEdge->fUpperY, y, nextNextY);
        return;
    }
    SkAnalyticEdge* prev = newEdge->fPrev;
    if (prev->fX <= newEdge->fX) {
        // Fast path: edges are already in x order; no relinking needed.
        while (newEdge->fUpperY <= y) {
            checkIntersection(newEdge, y, nextNextY);
            updateNextNextY(newEdge->fLowerY, y, nextNextY);
            newEdge = newEdge->fNext;
        }
        updateNextNextY(newEdge->fUpperY, y, nextNextY);
        return;
    }
    // find first x pos to insert
    SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
    //insert the lot, fixing up the links as we go
    do {
        SkAnalyticEdge* next = newEdge->fNext;
        do {
            if (start->fNext == newEdge) {
                goto nextEdge; // already in position; skip the relink
            }
            SkAnalyticEdge* after = start->fNext;
            if (after->fX >= newEdge->fX) {
                break;
            }
            SkASSERT(start != after);
            start = after;
        } while (true);
        remove_edge(newEdge);
        insert_edge_after(newEdge, start);
nextEdge:
        checkIntersection(newEdge, y, nextNextY);
        updateNextNextY(newEdge->fLowerY, y, nextNextY);
        start = newEdge;
        newEdge = next;
    } while (newEdge->fUpperY <= y);
    updateNextNextY(newEdge->fUpperY, y, nextNextY);
}

// Debug-only: assert the active list is well-linked, each edge spans downward,
// and edges are x-sorted for all edges active at scan line y.
static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
#ifdef SK_DEBUG
    while (edge->fUpperY <= y) {
        SkASSERT(edge->fPrev && edge->fNext);
        SkASSERT(edge->fPrev->fNext == edge);
        SkASSERT(edge->fNext->fPrev == edge);
        SkASSERT(edge->fUpperY <= edge->fLowerY);
        SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
        edge = edge->fNext;
    }
#endif
}

// Return true if prev->fX, next->fX are too close in the current pixel row.
static inline bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
    // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
    // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
    // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
    // edges prev and next are within one pixel.
    constexpr SkFixed SLACK =
#ifdef SK_SUPPORT_LEGACY_AA_BEHAVIOR
            0;
#else
            SK_Fixed1;
#endif

    // Note that even if the following test failed, the edges might still be very close to each
    // other at some point within the current pixel row because of prev->fDX and next->fDX.
    // However, to handle that case, we have to sacrifice more performance.
    // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
    // so I'll ignore fDX for performance tradeoff.
    return next && prev && next->fUpperY < lowerY && prev->fX + SLACK >=
            next->fX - SkAbs32(next->fDX);
    // The following is more accurate but also slower.
    // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
    //        prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
}

// This function exists for the case where the previous rite edge is removed because
// its fLowerY <= nextY
static inline bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
    return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
}

// Blit the trapezoid that was saved on leftE (via saveXY in deferred_blit),
// spanning from leftE->fSavedY down to lowerY, with bottom corners
// lowerLeft/lowerRite. Clears leftE->fRiteE to mark the saved state consumed.
static inline void blit_saved_trapezoid(SkAnalyticEdge* leftE, SkFixed lowerY,
        SkFixed lowerLeft, SkFixed lowerRite,
        AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
        SkFixed leftClip, SkFixed rightClip) {
    SkAnalyticEdge* riteE = leftE->fRiteE;
    SkASSERT(riteE);
    SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
    SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
    int y = SkFixedFloorToInt(leftE->fSavedY);
    // Instead of using f2a(lowerY - leftE->fSavedY), we use the following fullAlpha
    // to eliminate cumulative error: if there are many fractional y scan lines within the
    // same row, the former may accumulate the rounding error while the later won't.
    SkAlpha fullAlpha = f2a(lowerY - SkIntToFixed(y)) - f2a(leftE->fSavedY - SkIntToFixed(y));
    // We need fSavedDY because the (quad or cubic) edge might be updated
    blit_trapezoid_row(blitter, y,
            SkTMax(leftE->fSavedX, leftClip), SkTMin(riteE->fSavedX, rightClip),
            SkTMax(lowerLeft, leftClip), SkTMin(lowerRite, rightClip),
            leftE->fSavedDY, riteE->fSavedDY, fullAlpha, maskRow, isUsingMask,
            noRealBlitter ||
                    (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY)
                            || edges_too_close(riteE, riteE->fNext, lowerY))),
            true);
    leftE->fRiteE = nullptr;
}

// Defer (or flush) the blit of the strip between y and nextY for the interval
// [leftE, riteE]. Saves the strip's top on leftE so several fractional strips
// in one pixel row can be emitted as a single trapezoid; flushes immediately
// when an edge ends or nextY lands on an integer scan line.
static inline void deferred_blit(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
        SkFixed left, SkFixed leftDY, // don't save leftE->fX/fDY as they may have been updated
        SkFixed y, SkFixed nextY, bool isIntegralNextY, bool leftEnds, bool riteEnds,
        AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
        SkFixed leftClip, SkFixed rightClip, int yShift) {
    if (leftE->fRiteE && leftE->fRiteE != riteE) {
        // leftE's right edge changed. Blit the saved trapezoid.
// NOTE(review): continues deferred_blit from the excerpt above.
        SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
        blit_saved_trapezoid(leftE, y, left, leftE->fRiteE->fX,
                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
    }
    if (!leftE->fRiteE) {
        // Save and defer blitting the trapezoid
        SkASSERT(riteE->fRiteE == nullptr);
        SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
        SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
        leftE->saveXY(left, y, leftDY);
        riteE->saveXY(riteE->fX, y, riteE->fDY);
        leftE->fRiteE = riteE;
    }
    SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
    riteE->goY(nextY, yShift);
    // Always blit when edges end or nextY is integral
    if (isIntegralNextY || leftEnds || riteEnds) {
        blit_saved_trapezoid(leftE, nextY, leftE->fX, riteE->fX,
                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
    }
}

// General scan converter (concave and/or inverse fills): walks all active edges
// per scan-line strip, maintains winding counts against windingMask to find
// covered intervals, and blits one trapezoid per interval per strip. Handles
// edge removal, curve chunk updates, x re-sorting, and intersection-driven
// extra scan lines.
static void aaa_walk_edges(SkAnalyticEdge* prevHead, SkAnalyticEdge* nextTail,
        SkPath::FillType fillType, AdditiveBlitter* blitter, int start_y, int stop_y,
        SkFixed leftClip, SkFixed rightClip, bool isUsingMask, bool forceRLE, bool useDeferred,
        bool skipIntersect) {
    // Sentinel head/tail edges pin the horizontal clip bounds.
    prevHead->fX = prevHead->fUpperX = leftClip;
    nextTail->fX = nextTail->fUpperX = rightClip;
    SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
    SkFixed nextNextY = SK_MaxS32;

    {
        // Advance all edges that are already active at the starting y, and seed
        // nextNextY with the earliest upcoming event.
        SkAnalyticEdge* edge;
        for(edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
            edge->goY(y);
            updateNextNextY(edge->fLowerY, y, &nextNextY);
        }
        updateNextNextY(edge->fUpperY, y, &nextNextY);
    }

    // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
    int windingMask = (fillType & 1) ? 1 : -1;

    bool isInverse = SkPath::IsInverseFillType(fillType);

    if (isInverse && SkIntToFixed(start_y) != y) {
        // Inverse fill: the area above the first edge is fully covered, so
        // blit full rows (and a partial row) down to y before walking edges.
        int width = SkFixedFloorToInt(rightClip - leftClip);
        if (SkFixedFloorToInt(y) != start_y) {
            blitter->getRealBlitter()->blitRect(SkFixedFloorToInt(leftClip), start_y,
                    width, SkFixedFloorToInt(y) - start_y);
            start_y = SkFixedFloorToInt(y);
        }
        SkAlpha* maskRow = isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y)
                                       : nullptr;
        blit_full_alpha(blitter, start_y, SkFixedFloorToInt(leftClip), width,
                f2a(y - SkIntToFixed(start_y)), maskRow, isUsingMask, false, false);
    }

    while (true) {
        int w = 0;                          // running winding count
        bool in_interval = isInverse;       // inverse fill starts "inside"
        SkFixed prevX = prevHead->fX;
        SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1));
        bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
        SkAnalyticEdge* currE = prevHead->fNext;
        SkAnalyticEdge* leftE = prevHead;
        SkFixed left = leftClip;
        SkFixed leftDY = 0;
        bool leftEnds = false;
        int prevRite = SkFixedFloorToInt(leftClip);

        nextNextY = SK_MaxS32;

        // Quantize the strip height to 1, 1/2, or 1/4 of a pixel; yShift tells
        // goY how many halvings were applied.
        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
        int yShift = 0;
        if ((nextY - y) & (SK_Fixed1 >> 2)) {
            yShift = 2;
            nextY = y + (SK_Fixed1 >> 2);
        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
            yShift = 1;
            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
        }

        SkAlpha fullAlpha = f2a(nextY - y);

        // If we're using mask blitter, we advance the mask row in this function
        // to save some "if" condition checks.
        SkAlpha* maskRow = nullptr;
        if (isUsingMask) {
            maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
        }

        SkASSERT(currE->fPrev == prevHead);
        validate_edges_for_y(currE, y);

        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
        // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
        bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1);

        while (currE->fUpperY <= y) {
            SkASSERT(currE->fLowerY >= nextY);
            SkASSERT(currE->fY == y);

            w += currE->fWinding;
            bool prev_in_interval = in_interval;
            // Inside iff the winding test flips relative to the inverse flag.
            in_interval = !(w & windingMask) == isInverse;

            bool isLeft = in_interval && !prev_in_interval;
            bool isRite = !in_interval && prev_in_interval;
            bool currEnds = currE->fLowerY == nextY;

            if (useDeferred) {
                if (currE->fRiteE && !isLeft) {
                    // currE is a left edge previously, but now it's not.
                    // Blit the trapezoid between fSavedY and y.
                    SkASSERT(currE->fRiteE->fY == y);
                    blit_saved_trapezoid(currE, y, currE->fX, currE->fRiteE->fX,
                            blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                }
                if (leftE->fRiteE == currE && !isRite) {
                    // currE is a right edge previously, but now it's not.
                    // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
                    // in the previous if clause). Hence we blit the trapezoid.
                    blit_saved_trapezoid(leftE, y, left, currE->fX,
                            blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                }
            }

            if (isRite) {
                if (useDeferred) {
                    deferred_blit(leftE, currE, left, leftDY, y, nextY, isIntegralNextY,
                            leftEnds, currEnds, blitter, maskRow, isUsingMask, noRealBlitter,
                            leftClip, rightClip, yShift);
                } else {
                    // Immediate blit of the [leftE, currE] interval's trapezoid.
                    SkFixed rite = currE->fX;
                    currE->goY(nextY, yShift);
#ifdef SK_SUPPORT_LEGACY_DELTA_AA
                    leftE->fX = SkTMax(leftClip, leftE->fX);
                    rite = SkTMin(rightClip, rite);
                    currE->fX = SkTMin(rightClip, currE->fX);
                    blit_trapezoid_row(blitter, y >> 16, left, rite, leftE->fX, currE->fX,
#else
                    SkFixed nextLeft = SkTMax(leftClip, leftE->fX);
                    rite = SkTMin(rightClip, rite);
                    SkFixed nextRite = SkTMin(rightClip, currE->fX);
                    blit_trapezoid_row(blitter, y >> 16, left, rite, nextLeft, nextRite,
#endif
                            leftDY, currE->fDY, fullAlpha, maskRow, isUsingMask,
                            noRealBlitter || (fullAlpha == 0xFF && (
                                    edges_too_close(prevRite, left, leftE->fX) ||
                                    edges_too_close(currE, currE->fNext, nextY)
                                    )),
                            true);
                    prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX));
                }
            } else {
                if (isLeft) {
                    // This edge opens a covered interval; remember its state.
                    left = SkTMax(currE->fX, leftClip);
                    leftDY = currE->fDY;
                    leftE = currE;
                    leftEnds = leftE->fLowerY == nextY;
                }
                currE->goY(nextY, yShift);
            }


            SkAnalyticEdge* next = currE->fNext;
            SkFixed newX;

            // If the edge's current chunk ends at/before nextY, try to advance
            // to the curve's next chunk (quads/cubics are piecewise lines).
            while (currE->fLowerY <= nextY) {
                if (currE->fCurveCount < 0) {
                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
                    cubicEdge->keepContinuous();
                    if (!cubicEdge->updateCubic()) {
                        break;
                    }
                } else if (currE->fCurveCount > 0) {
                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
                    quadEdge->keepContinuous();
                    if (!quadEdge->updateQuadratic()) {
                        break;
                    }
                } else {
                    break;
                }
            }

// NOTE(review): continues the inner edge loop of aaa_walk_edges from the
// excerpt above.
            SkASSERT(currE->fY == nextY);

            if (currE->fLowerY <= nextY) {
                // Edge is exhausted: drop it from the active list.
                remove_edge(currE);
            } else {
                updateNextNextY(currE->fLowerY, nextY, &nextNextY);
                newX = currE->fX;
                SkASSERT(currE->fLowerY > nextY);
                if (newX < prevX) { // ripple currE backwards until it is x-sorted
                    // If the crossing edge is a right edge, blit the saved trapezoid.
                    if (leftE->fRiteE == currE && useDeferred) {
                        SkASSERT(leftE->fY == nextY && currE->fY == nextY);
                        blit_saved_trapezoid(leftE, nextY, leftE->fX, currE->fX,
                                blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
                    }
                    backward_insert_edge_based_on_x(currE);
                } else {
                    prevX = newX;
                }
                if (!skipIntersect) {
                    checkIntersection(currE, nextY, &nextNextY);
                }
            }

            currE = next;
            SkASSERT(currE);
        }

        // was our right-edge culled away?
        if (in_interval) {
            if (useDeferred) {
                deferred_blit(leftE, nextTail, left, leftDY, y, nextY, isIntegralNextY,
                        leftEnds, false, blitter, maskRow, isUsingMask, noRealBlitter,
                        leftClip, rightClip, yShift);
            } else {
                blit_trapezoid_row(blitter, y >> 16,
                        left, rightClip,
                        SkTMax(leftClip, leftE->fX), rightClip,
                        leftDY, 0, fullAlpha, maskRow, isUsingMask,
                        noRealBlitter ||
                                (fullAlpha == 0xFF && edges_too_close(leftE->fPrev, leftE, nextY)),
                        true);
            }
        }

        if (forceRLE) {
            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
        }

        y = nextY;
        if (y >= SkIntToFixed(stop_y)) {
            break;
        }

        // now currE points to the first edge with a fUpperY larger than the previous y
        insert_new_edges(currE, y, &nextNextY);
    }
}

// Build analytic edges from the path, link them into a doubly-linked list
// between sentinel head/tail edges, clamp the y/x bounds to the clip (and, for
// masks, to the path bounds), and dispatch to the convex or general walker.
static SK_ALWAYS_INLINE void aaa_fill_path(const SkPath& path, const SkIRect& clipRect,
        AdditiveBlitter* blitter, int start_y, int stop_y, bool pathContainedInClip,
        bool isUsingMask, bool forceRLE) { // forceRLE implies that SkAAClip is calling us
    SkASSERT(blitter);

    SkEdgeBuilder builder;
    int count = builder.build_edges(path, &clipRect, 0, pathContainedInClip,
            SkEdgeBuilder::kAnalyticEdge);
    SkAnalyticEdge** list = builder.analyticEdgeList();

    SkIRect rect = clipRect;
    if (0 == count) {
        if (path.isInverseFillType()) {
            /*
             *  Since we are in inverse-fill, our caller has already drawn above
             *  our top (start_y) and will draw below our bottom (stop_y). Thus
             *  we need to restrict our drawing to the intersection of the clip
             *  and those two limits.
             */
            if (rect.fTop < start_y) {
                rect.fTop = start_y;
            }
            if (rect.fBottom > stop_y) {
                rect.fBottom = stop_y;
            }
            if (!rect.isEmpty()) {
                blitter->getRealBlitter()->blitRect(rect.fLeft, rect.fTop,
                        rect.width(), rect.height());
            }
        }
        return;
    }

    SkAnalyticEdge headEdge, tailEdge, *last;
    // this returns the first and last edge after they're sorted into a dlink list
    SkAnalyticEdge* edge = sort_edges(list, count, &last);

    // Sentinel head: sits before every real edge (min y/x, zero slope).
    headEdge.fRiteE = nullptr;
    headEdge.fPrev = nullptr;
    headEdge.fNext = edge;
    headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
    headEdge.fX = SK_MinS32;
    headEdge.fDX = 0;
    headEdge.fDY = SK_MaxS32;
    headEdge.fUpperX = SK_MinS32;
    edge->fPrev = &headEdge;

    // Sentinel tail: sits after every real edge (max y/x).
    tailEdge.fRiteE = nullptr;
    tailEdge.fPrev = last;
    tailEdge.fNext = nullptr;
    tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
    tailEdge.fX = SK_MaxS32;
    tailEdge.fDX = 0;
    tailEdge.fDY = SK_MaxS32;
    tailEdge.fUpperX = SK_MaxS32;
    last->fNext = &tailEdge;

    // now edge is the head of the sorted linklist

    if (!pathContainedInClip && start_y < clipRect.fTop) {
        start_y = clipRect.fTop;
    }
    if (!pathContainedInClip && stop_y > clipRect.fBottom) {
        stop_y = clipRect.fBottom;
    }

    SkFixed leftBound = SkIntToFixed(rect.fLeft);
    SkFixed rightBound = SkIntToFixed(rect.fRight);
    if (isUsingMask) {
        // If we're using mask, then we have to limit the bound within the path bounds.
        // Otherwise, the edge drift may access an invalid address inside the mask.
        SkIRect ir;
        path.getBounds().roundOut(&ir);
        leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
        rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
    }

    if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
        aaa_walk_convex_edges(&headEdge, blitter, start_y, stop_y,
                leftBound, rightBound, isUsingMask);
    } else {
        // Only use deferred blitting if there are many edges.
        bool useDeferred = count >
                (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;

        // We skip intersection computation if there are many points which probably already
        // give us enough fractional scan lines.
        bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;

        aaa_walk_edges(&headEdge, &tailEdge, path.getFillType(), blitter, start_y, stop_y,
                leftBound, rightBound, isUsingMask, forceRLE, useDeferred, skipIntersect);
    }
}

///////////////////////////////////////////////////////////////////////////////

// Public entry point for analytic anti-aliased path filling. Chooses the
// additive blitter variant (mask / run-based / safe run-based) that best fits
// the blit region and fill type, then delegates to aaa_fill_path.
void SkScan::AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
        const SkIRect& clipBounds, bool forceRLE) {
    bool containedInClip = clipBounds.contains(ir);
    bool isInverse = path.isInverseFillType();

    // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
    // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
    // the blit region is small enough (i.e., canHandleRect(ir)). When isInverse is true, the blit
    // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
    // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
    // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
    // blitFatAntiRect to avoid the mask and its overhead.
    if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
#ifdef SK_SUPPORT_LEGACY_SMALLRECT_AA
        MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                containedInClip, true, forceRLE);
#else
        // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
        // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
        if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
            MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
            aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                    containedInClip, true, forceRLE);
        }
#endif
    } else if (!isInverse && path.isConvex()) {
        // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
        // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
        // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
        // RunBasedAdditiveBlitter would suffice.
        RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                containedInClip, false, forceRLE);
    } else {
        // If the filling area might not be convex, the more involved aaa_walk_edges would
        // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
        // does that at a cost of performance.
        SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                containedInClip, false, forceRLE);
    }
}