/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrAAConvexPathRenderer.h"

#include "GrAAConvexTessellator.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrDrawOpTest.h"
#include "GrGeometryProcessor.h"
#include "GrOpFlushState.h"
#include "GrPathUtils.h"
#include "GrProcessor.h"
#include "GrSimpleMeshDrawOpHelper.h"
#include "SkGeometry.h"
#include "SkPathPriv.h"
#include "SkPointPriv.h"
#include "SkString.h"
#include "SkTraceEvent.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"
#include "glsl/GrGLSLGeometryProcessor.h"
#include "glsl/GrGLSLProgramDataManager.h"
#include "glsl/GrGLSLUniformHandler.h"
#include "glsl/GrGLSLVarying.h"
#include "glsl/GrGLSLVertexGeoBuilder.h"
#include "ops/GrMeshDrawOp.h"

GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
}

// One edge of the convex path: either a line (one control point) or a
// quadratic (two control points). The starting point of the segment is the
// ending point of the previous segment in the SegmentArray.
struct Segment {
    enum {
        // These enum values are assumed in member functions below.
        kLine = 0,
        kQuad = 1,
    } fType;

    // line uses one pt, quad uses 2 pts
    SkPoint fPts[2];
    // normal to edge ending at each pt
    SkVector fNorms[2];
    // is the corner where the previous segment meets this segment
    // sharp. If so, fMid is a normalized bisector facing outward.
    SkVector fMid;

    // Number of points used by this segment (1 for kLine, 2 for kQuad);
    // relies on the enum values above.
    int countPoints() {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fType + 1;
    }
    // Last point of the segment (where the next segment starts).
    const SkPoint& endPt() const {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fPts[fType];
    }
    // Outward normal at the segment's end point.
    const SkPoint& endNorm() const {
        GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
        return fNorms[fType];
    }
};

typedef SkTArray<Segment, true> SegmentArray;

// Computes the centroid of the polygon formed by the segment end points and
// stores it in *c. Degenerate (near-zero-area) polygons fall back to the
// simple average of the points.
static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
    SkScalar area = 0;
    SkPoint center = {0, 0};
    int count = segments.count();
    SkPoint p0 = {0, 0};
    if (count > 2) {
        // We translate the polygon so that the first point is at the origin.
        // This avoids some precision issues with small area polygons far away
        // from the origin.
        p0 = segments[0].endPt();
        SkPoint pi;
        SkPoint pj;
        // the first and last iteration of the below loop would compute
        // zeros since the starting / ending point is (0,0). So instead we start
        // at i=1 and make the last iteration i=count-2.
        pj = segments[1].endPt() - p0;
        for (int i = 1; i < count - 1; ++i) {
            pi = pj;
            pj = segments[i + 1].endPt() - p0;

            // Shoelace-style accumulation: t is twice the signed area of the
            // triangle (origin, pi, pj).
            SkScalar t = SkPoint::CrossProduct(pi, pj);
            area += t;
            center.fX += (pi.fX + pj.fX) * t;
            center.fY += (pi.fY + pj.fY) * t;
        }
    }

    // If the poly has no area then we instead return the average of
    // its points.
    if (SkScalarNearlyZero(area)) {
        SkPoint avg;
        avg.set(0, 0);
        for (int i = 0; i < count; ++i) {
            const SkPoint& pt = segments[i].endPt();
            avg.fX += pt.fX;
            avg.fY += pt.fY;
        }
        SkScalar denom = SK_Scalar1 / count;
        avg.scale(denom);
        *c = avg;
    } else {
        area *= 3;
        area = SkScalarInvert(area);
        center.scale(area);
        // undo the translate of p0 to the origin.
        *c = center + p0;
    }
    SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
}

// Computes the interior fan point, the outward normals at every segment
// point, and the per-corner outward bisectors (fMid). Also accumulates the
// vertex/index counts that create_vertices() will later emit; the per-segment
// totals here (5/9 for lines, 6/12 for quads, 4/6 per corner wedge) must stay
// in sync with that function.
static void compute_vectors(SegmentArray* segments,
                            SkPoint* fanPt,
                            SkPathPriv::FirstDirection dir,
                            int* vCount,
                            int* iCount) {
    center_of_mass(*segments, fanPt);
    int count = segments->count();

    // Make the normals point towards the outside
    SkPointPriv::Side normSide;
    if (dir == SkPathPriv::kCCW_FirstDirection) {
        normSide = SkPointPriv::kRight_Side;
    } else {
        normSide = SkPointPriv::kLeft_Side;
    }

    *vCount = 0;
    *iCount = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            SkPointPriv::SetOrthog(&segb.fNorms[p], segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
        if (Segment::kLine == segb.fType) {
            *vCount += 5;
            *iCount += 9;
        } else {
            *vCount += 6;
            *iCount += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        *vCount += 4;
        *iCount += 6;
    }
}

// State machine that detects paths with (near) zero area: it advances
// kInitial -> kPoint -> kLine -> kNonDegenerate as points are fed in, and the
// path is considered degenerate unless it reaches kNonDegenerate.
struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,
        kPoint,
        kLine,
        kNonDegenerate
    } fStage;
    SkPoint fFirstPoint;
    SkVector fLineNormal;
    SkScalar fLineC;
};

// Distance tolerance (1/16 px in device space) used by the degenerate test
// and for collapsing tiny quad segments.
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;

// Feeds one (device-space) point into the degenerate-path state machine.
static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    switch (data->fStage) {
        case DegenerateTestData::kInitial:
            data->fFirstPoint = pt;
            data->fStage = DegenerateTestData::kPoint;
            break;
        case DegenerateTestData::kPoint:
            if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
                // Two distinct points: record the implicit line through them
                // (normal n and constant c such that n.dot(p) + c == 0).
                data->fLineNormal = pt - data->fFirstPoint;
                data->fLineNormal.normalize();
                SkPointPriv::SetOrthog(&data->fLineNormal, data->fLineNormal);
                data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
                data->fStage = DegenerateTestData::kLine;
            }
            break;
        case DegenerateTestData::kLine:
            // A point off the recorded line means the path has real area.
            // Note: intentional fall-through into kNonDegenerate's break.
            if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
                data->fStage = DegenerateTestData::kNonDegenerate;
            }
        case DegenerateTestData::kNonDegenerate:
            break;
        default:
            SK_ABORT("Unexpected degenerate test stage.");
    }
}

// Computes the path's winding direction in device space; returns false for
// paths whose direction cannot be determined (e.g. zero area).
static inline bool get_direction(const SkPath& path, const SkMatrix& m,
                                 SkPathPriv::FirstDirection* dir) {
    if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
        return false;
    }
    // check whether m reverses the orientation
    SkASSERT(!m.hasPerspective());
    SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
                      m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
    if (det2x2 < 0) {
        *dir = SkPathPriv::OppositeFirstDirection(*dir);
    }
    return true;
}

// Appends a line segment ending at pt.
static inline void add_line_to_segment(const SkPoint& pt,
                                       SegmentArray* segments) {
    segments->push_back();
    segments->back().fType = Segment::kLine;
    segments->back().fPts[0] = pt;
}

// Appends a quad segment; quads with nearly coincident control points are
// collapsed to a line (or dropped entirely if start == end).
static inline void add_quad_segment(const SkPoint pts[3],
                                    SegmentArray* segments) {
    if (SkPointPriv::DistanceToSqd(pts[0], pts[1]) < kCloseSqd ||
        SkPointPriv::DistanceToSqd(pts[1], pts[2]) < kCloseSqd) {
        if (pts[0] != pts[2]) {
            add_line_to_segment(pts[2], segments);
        }
    } else {
        segments->push_back();
        segments->back().fType = Segment::kQuad;
        segments->back().fPts[0] = pts[1];
        segments->back().fPts[1] = pts[2];
    }
}

// Converts a cubic to a run of quads (constrained to the cubic's tangents so
// the approximation stays on the correct side for dir) and appends them.
static inline void add_cubic_segments(const SkPoint pts[4],
                                      SkPathPriv::FirstDirection dir,
                                      SegmentArray* segments) {
    SkSTArray<15, SkPoint, true> quads;
    GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
    int count = quads.count();
    for (int q = 0; q < count; q += 3) {
        add_quad_segment(&quads[q], segments);
    }
}

// Walks the path, mapping it to device space with m, and builds the segment
// representation plus fan point and vertex/index counts. Returns false for
// degenerate (near-line) paths, which are drawn as nothing.
static bool get_segments(const SkPath& path,
                         const SkMatrix& m,
                         SegmentArray* segments,
                         SkPoint* fanPt,
                         int* vCount,
                         int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathPriv::FirstDirection dir;
    // get_direction can fail for some degenerate paths.
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts, true, true);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                m.mapPoints(&pts[1], 1);
                update_degenerate_test(&degenerateData, pts[1]);
                add_line_to_segment(pts[1], segments);
                break;
            }
            case SkPath::kQuad_Verb:
                m.mapPoints(pts, 3);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                add_quad_segment(pts, segments);
                break;
            case SkPath::kConic_Verb: {
                m.mapPoints(pts, 3);
                SkScalar weight = iter.conicWeight();
                SkAutoConicToQuads converter;
                const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
                for (int i = 0; i < converter.countQuads(); ++i) {
                    update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                    update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                    add_quad_segment(quadPts + 2*i, segments);
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                m.mapPoints(pts, 4);
                update_degenerate_test(&degenerateData, pts[1]);
                update_degenerate_test(&degenerateData, pts[2]);
                update_degenerate_test(&degenerateData, pts[3]);
                add_cubic_segments(pts, dir, segments);
                break;
            };
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    compute_vectors(segments, fanPt, dir, vCount, iCount);
                    return true;
                }
            default:
                break;
        }
    }
}

// Vertex layout consumed by QuadEdgeEffect: device-space position, premul
// color, the (u,v) quad coords, and two signed edge distances (fD0/fD1) used
// to trim the infinite quad.
struct QuadVertex {
    SkPoint fPos;
    GrColor fColor;
    SkPoint fUV;
    SkScalar fD0;
    SkScalar fD1;
};

// One draw call's worth of geometry; create_vertices splits into multiple
// Draws when 16-bit index values would otherwise overflow.
struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};

typedef SkTArray<Draw, true> DrawArray;

// Fills verts/idxs from the segment list. For each segment it emits a corner
// wedge (4 verts / 6 indices) plus either a line quad (5 verts / 9 indices
// incl. interior fan) or a quad patch (6 verts / 12 indices incl. fan) --
// exactly the counts accumulated by compute_vectors. Geometry is partitioned
// into Draws so each draw's indices fit in 16 bits.
static void create_vertices(const SegmentArray& segments,
                            const SkPoint& fanPt,
                            GrColor color,
                            DrawArray* draws,
                            QuadVertex* verts,
                            uint16_t* idxs) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a new draw; advance the output pointers past what the
            // current draw consumed.
            verts += *v;
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        // FIXME: These tris are inset in the 1 unit arc around the corner
        verts[*v + 0].fPos = sega.endPt();
        verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
        verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
        verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
        verts[*v + 0].fColor = color;
        verts[*v + 1].fColor = color;
        verts[*v + 2].fColor = color;
        verts[*v + 3].fColor = color;
        verts[*v + 0].fUV.set(0,0);
        verts[*v + 1].fUV.set(0,-SK_Scalar1);
        verts[*v + 2].fUV.set(0,-SK_Scalar1);
        verts[*v + 3].fUV.set(0,-SK_Scalar1);
        verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
        verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
        verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
        verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = sega.endPt();
            verts[*v + 2].fPos = segb.fPts[0];

            verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
            verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];

            verts[*v + 0].fColor = color;
            verts[*v + 1].fColor = color;
            verts[*v + 2].fColor = color;
            verts[*v + 3].fColor = color;
            verts[*v + 4].fColor = color;

            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, verts[*v + 1].fPos,
                                                               verts[*v + 2].fPos);
            verts[*v + 0].fUV.set(0, dist);
            verts[*v + 1].fUV.set(0, 0);
            verts[*v + 2].fUV.set(0, 0);
            verts[*v + 3].fUV.set(0, -SK_Scalar1);
            verts[*v + 4].fUV.set(0, -SK_Scalar1);

            verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
            verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
            verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
            verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
            verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();

            verts[*v + 0].fPos = fanPt;
            verts[*v + 1].fPos = qpts[0];
            verts[*v + 2].fPos = qpts[2];
            verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
            verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
            verts[*v + 5].fPos = qpts[1] + midVec;

            verts[*v + 0].fColor = color;
            verts[*v + 1].fColor = color;
            verts[*v + 2].fColor = color;
            verts[*v + 3].fColor = color;
            verts[*v + 4].fColor = color;
            verts[*v + 5].fColor = color;

            // fD0/fD1 are signed distances to the two trim edges; interior
            // verts get large negative values so they are never trimmed.
            SkScalar c = segb.fNorms[0].dot(qpts[0]);
            verts[*v + 0].fD0 = -segb.fNorms[0].dot(fanPt) + c;
            verts[*v + 1].fD0 = 0.f;
            verts[*v + 2].fD0 = -segb.fNorms[0].dot(qpts[2]) + c;
            verts[*v + 3].fD0 = -SK_ScalarMax/100;
            verts[*v + 4].fD0 = -SK_ScalarMax/100;
            verts[*v + 5].fD0 = -SK_ScalarMax/100;

            c = segb.fNorms[1].dot(qpts[2]);
            verts[*v + 0].fD1 = -segb.fNorms[1].dot(fanPt) + c;
            verts[*v + 1].fD1 = -segb.fNorms[1].dot(qpts[0]) + c;
            verts[*v + 2].fD1 = 0.f;
            verts[*v + 3].fD1 = -SK_ScalarMax/100;
            verts[*v + 4].fD1 = -SK_ScalarMax/100;
            verts[*v + 5].fD1 = -SK_ScalarMax/100;

            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply<6, sizeof(QuadVertex), offsetof(QuadVertex, fUV)>(verts + *v);

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}

///////////////////////////////////////////////////////////////////////////////

/*
 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
 * two components of the vertex attribute. Coverage is based on signed
 * distance with negative being inside, positive outside. The edge is specified in
 * window space (y-down). If either the third or fourth component of the interpolated
 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
 * attempt to trim to a portion of the infinite quad.
 * Requires shader derivative instruction support.
 */

class QuadEdgeEffect : public GrGeometryProcessor {
public:
    static sk_sp<GrGeometryProcessor> Make(const SkMatrix& localMatrix, bool usesLocalCoords) {
        return sk_sp<GrGeometryProcessor>(new QuadEdgeEffect(localMatrix, usesLocalCoords));
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    class GLSLProcessor : public GrGLSLGeometryProcessor {
    public:
        GLSLProcessor() {}

        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            // Pass the 4-component quad-edge data (u, v, d0, d1) through to
            // the fragment shader.
            GrGLSLVarying v(kHalf4_GrSLType);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge->fName);

            // Setup pass through color
            varyingHandler->addPassThroughAttribute(qe.fInColor, args.fOutputColor);

            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;

            // Setup position
            this->writeOutputPosition(vertBuilder, gpArgs, qe.fInPosition->fName);

            // emit transforms
            this->emitTransforms(vertBuilder,
                                 varyingHandler,
                                 uniformHandler,
                                 qe.fInPosition->asShaderVar(),
                                 qe.fLocalMatrix,
                                 args.fFPCoordTransformHandler);

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("half2 duvdx = dFdx(%s.xy);", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = dFdy(%s.xy);", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf ("} else {");
            // Gradient of the implicit curve f(u,v) = u^2 - v via the chain
            // rule, used to convert f's value to a screen-space distance.
            fragBuilder->codeAppendf("half2 gF = half2(2.0*%s.x*duvdx.x - duvdx.y,"
                                     " 2.0*%s.x*duvdy.x - duvdy.y);",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");

            fragBuilder->codeAppendf("%s = half4(edgeAlpha);", args.fOutputCoverage);
        }

        static inline void GenKey(const GrGeometryProcessor& gp,
                                  const GrShaderCaps&,
                                  GrProcessorKeyBuilder* b) {
            const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
            b->add32(SkToBool(qee.fUsesLocalCoords && qee.fLocalMatrix.hasPerspective()));
        }

        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrPrimitiveProcessor& gp,
                     FPCoordTransformIter&& transformIter) override {
            const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
            this->setTransformDataHelper(qe.fLocalMatrix, pdman, &transformIter);
        }

    private:
        typedef GrGLSLGeometryProcessor INHERITED;
    };

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
        GLSLProcessor::GenKey(*this, caps, b);
    }

    GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
        return new GLSLProcessor();
    }

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = &this->addVertexAttrib("inPosition", kFloat2_GrVertexAttribType);
        fInColor = &this->addVertexAttrib("inColor", kUByte4_norm_GrVertexAttribType);
        fInQuadEdge = &this->addVertexAttrib("inQuadEdge", kHalf4_GrVertexAttribType);
    }

    const Attribute* fInPosition;
    const Attribute* fInQuadEdge;
    const Attribute* fInColor;
    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    typedef GrGeometryProcessor INHERITED;
};

GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);

#if GR_TEST_UTILS
sk_sp<GrGeometryProcessor> QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->shaderDerivativeSupport()
                   ? QuadEdgeEffect::Make(GrTest::TestMatrix(d->fRandom), d->fRandom->nextBool())
                   : nullptr;
}
#endif

///////////////////////////////////////////////////////////////////////////////

GrPathRenderer::CanDrawPath
GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Only handles simple filled, non-inverse, convex paths with coverage AA,
    // and requires shader derivative support (see QuadEdgeEffect).
    if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
        (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
        !args.fShape->inverseFilled() && args.fShape->knownToBeConvex()) {
        return CanDrawPath::kYes;
    }
    return CanDrawPath::kNo;
}

// extract the result vertices and indices from the GrAAConvexTessellator
static void extract_lines_only_verts(const GrAAConvexTessellator& tess,
                                     void* vertices,
                                     size_t vertexStride,
                                     GrColor color,
                                     uint16_t* idxs,
                                     bool tweakAlphaForCoverage) {
    intptr_t verts = reinterpret_cast<intptr_t>(vertices);

    // Positions are the first field of each vertex.
    for (int i = 0; i < tess.numPts(); ++i) {
        *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
    }

    // Make 'verts' point to the colors
    verts += sizeof(SkPoint);
    for (int i = 0; i < tess.numPts(); ++i) {
        if (tweakAlphaForCoverage) {
            // Coverage is folded into the color's alpha instead of being a
            // separate attribute.
            SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
            unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
            GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
            *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
        } else {
            *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
            *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
                    tess.coverage(i);
        }
    }

    for (int i = 0; i < tess.numIndices(); ++i) {
        idxs[i] = tess.index(i);
    }
}

// Builds the default geometry processor used for the lines-only fast path;
// coverage is either baked into alpha or supplied as an attribute.
static sk_sp<GrGeometryProcessor> make_lines_only_gp(bool tweakAlphaForCoverage,
                                                     const SkMatrix& viewMatrix,
                                                     bool usesLocalCoords) {
    using namespace GrDefaultGeoProcFactory;

    Coverage::Type coverageType;
    if (tweakAlphaForCoverage) {
        coverageType = Coverage::kSolid_Type;
    } else {
        coverageType = Coverage::kAttribute_Type;
    }
    LocalCoords::Type localCoordsType =
            usesLocalCoords ? LocalCoords::kUsePosition_Type : LocalCoords::kUnused_Type;
    return MakeForDeviceSpace(Color::kPremulGrColorAttribute_Type, coverageType, localCoordsType,
                              viewMatrix);
}

namespace {

// Draw op that renders one or more convex paths with analytic coverage AA.
// Line-only paths take a tessellator fast path; paths with curves go through
// the Segment/QuadEdgeEffect pipeline above.
class AAConvexPathOp final : public GrMeshDrawOp {
private:
    using Helper = GrSimpleMeshDrawOpHelperWithStencil;

public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(GrPaint&& paint, const SkMatrix& viewMatrix,
                                          const SkPath& path,
                                          const GrUserStencilSettings* stencilSettings) {
        return Helper::FactoryHelper<AAConvexPathOp>(std::move(paint), viewMatrix, path,
                                                     stencilSettings);
    }

    AAConvexPathOp(const Helper::MakeArgs& helperArgs, GrColor color, const SkMatrix& viewMatrix,
                   const SkPath& path, const GrUserStencilSettings* stencilSettings)
            : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) {
        fPaths.emplace_back(PathData{viewMatrix, path, color});
        this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
        fLinesOnly = SkPath::kLine_SegmentMask == path.getSegmentMasks();
    }

    const char* name() const override { return "AAConvexPathOp"; }

    void visitProxies(const VisitProxyFunc& func) const override {
        fHelper.visitProxies(func);
    }

    SkString dumpInfo() const override {
        SkString string;
        string.appendf("Count: %d\n", fPaths.count());
        string += fHelper.dumpInfo();
        string += INHERITED::dumpInfo();
        return string;
    }

    FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }

    RequiresDstTexture finalize(const GrCaps& caps, const GrAppliedClip* clip,
                                GrPixelConfigIsClamped dstIsClamped) override {
        return fHelper.xpRequiresDstTexture(caps, clip, dstIsClamped,
                                            GrProcessorAnalysisCoverage::kSingleChannel,
                                            &fPaths.back().fColor);
    }

private:
    // Fast path for paths made only of line segments: the CPU-side
    // GrAAConvexTessellator produces the fringed geometry directly.
    void prepareLinesOnlyDraws(Target* target) {
        // Setup GrGeometryProcessor
        sk_sp<GrGeometryProcessor> gp(make_lines_only_gp(fHelper.compatibleWithAlphaAsCoverage(),
                                                         fPaths.back().fViewMatrix,
                                                         fHelper.usesLocalCoords()));
        if (!gp) {
            SkDebugf("Could not create GrGeometryProcessor\n");
            return;
        }

        size_t vertexStride = gp->getVertexStride();

        SkASSERT(fHelper.compatibleWithAlphaAsCoverage()
                         ? vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr)
                         : vertexStride ==
                                   sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));

        GrAAConvexTessellator tess;

        int instanceCount = fPaths.count();
        const GrPipeline* pipeline = fHelper.makePipeline(target);
        for (int i = 0; i < instanceCount; i++) {
            tess.rewind();

            const PathData& args = fPaths[i];

            if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
                continue;
            }

            const GrBuffer* vertexBuffer;
            int firstVertex;

            void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
                                                  &firstVertex);
            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            const GrBuffer* indexBuffer;
            int firstIndex;

            uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            extract_lines_only_verts(tess, verts, vertexStride, args.fColor, idxs,
                                     fHelper.compatibleWithAlphaAsCoverage());

            GrMesh mesh(GrPrimitiveType::kTriangles);
            mesh.setIndexed(indexBuffer, tess.numIndices(), firstIndex, 0, tess.numPts() - 1);
            mesh.setVertexData(vertexBuffer, firstVertex);
            target->draw(gp.get(), pipeline, mesh);
        }
    }

    void onPrepareDraws(Target* target) override {
#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
        if (fLinesOnly) {
            this->prepareLinesOnlyDraws(target);
            return;
        }
#endif
        const GrPipeline* pipeline = fHelper.makePipeline(target);
        int instanceCount = fPaths.count();

        SkMatrix invert;
        if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
            SkDebugf("Could not invert viewmatrix\n");
            return;
        }

        // Setup GrGeometryProcessor
        sk_sp<GrGeometryProcessor> quadProcessor(
                QuadEdgeEffect::Make(invert, fHelper.usesLocalCoords()));

        // TODO generate all segments for all paths and use one vertex buffer
        for (int i = 0; i < instanceCount; i++) {
            const PathData& args = fPaths[i];

            // We use the fact that SkPath::transform path does subdivision based on
            // perspective. Otherwise, we apply the view matrix when copying to the
            // segment representation.
            const SkMatrix* viewMatrix = &args.fViewMatrix;

            // We avoid initializing the path unless we have to
            const SkPath* pathPtr = &args.fPath;
            SkTLazy<SkPath> tmpPath;
            if (viewMatrix->hasPerspective()) {
                SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
                tmpPathPtr->setIsVolatile(true);
                tmpPathPtr->transform(*viewMatrix);
                viewMatrix = &SkMatrix::I();
                pathPtr = tmpPathPtr;
            }

            int vertexCount;
            int indexCount;
            enum {
                kPreallocSegmentCnt = 512 / sizeof(Segment),
                kPreallocDrawCnt = 4,
            };
            SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
            SkPoint fanPt;

            if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
                              &indexCount)) {
                continue;
            }

            const GrBuffer* vertexBuffer;
            int firstVertex;

            size_t vertexStride = quadProcessor->getVertexStride();
            QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
                    vertexStride, vertexCount, &vertexBuffer, &firstVertex));

            if (!verts) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            const GrBuffer* indexBuffer;
            int firstIndex;

            uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            SkSTArray<kPreallocDrawCnt, Draw, true> draws;
            create_vertices(segments, fanPt, args.fColor, &draws, verts, idxs);

            GrMesh mesh(GrPrimitiveType::kTriangles);

            // Issue one mesh per Draw partition (see create_vertices'
            // 16-bit index split).
            for (int j = 0; j < draws.count(); ++j) {
                const Draw& draw = draws[j];
                mesh.setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0, draw.fVertexCnt - 1);
                mesh.setVertexData(vertexBuffer, firstVertex);
                target->draw(quadProcessor.get(), pipeline, mesh);
                firstIndex += draw.fIndexCnt;
                firstVertex += draw.fVertexCnt;
            }
        }
    }

    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
        AAConvexPathOp* that = t->cast<AAConvexPathOp>();
        if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
            return false;
        }
        if (fHelper.usesLocalCoords() &&
            !fPaths[0].fViewMatrix.cheapEqualTo(that->fPaths[0].fViewMatrix)) {
            return false;
        }

        if (fLinesOnly != that->fLinesOnly) {
            return false;
        }

        fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
        this->joinBounds(*that);
        return true;
    }

    struct PathData {
        SkMatrix fViewMatrix;
        SkPath fPath;
        GrColor fColor;
    };

    Helper fHelper;
    SkSTArray<1, PathData, true> fPaths;
    bool fLinesOnly;

    typedef GrMeshDrawOp INHERITED;
};

}  // anonymous namespace

bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrAAConvexPathRenderer::onDrawPath");
    SkASSERT(GrFSAAType::kUnifiedMSAA != args.fRenderTargetContext->fsaaType());
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    std::unique_ptr<GrDrawOp> op = AAConvexPathOp::Make(std::move(args.fPaint), *args.fViewMatrix,
                                                        path, args.fUserStencilSettings);
    args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
    return true;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#if GR_TEST_UTILS

GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    SkPath path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return AAConvexPathOp::Make(std::move(paint), viewMatrix, path, stencilSettings);
}

#endif