Home | History | Annotate | Download | only in batches
      1 
      2 /*
      3  * Copyright 2012 Google Inc.
      4  *
      5  * Use of this source code is governed by a BSD-style license that can be
      6  * found in the LICENSE file.
      7  */
      8 
      9 #include "GrAAConvexPathRenderer.h"
     10 
     11 #include "GrAAConvexTessellator.h"
     12 #include "GrBatchFlushState.h"
     13 #include "GrBatchTest.h"
     14 #include "GrCaps.h"
     15 #include "GrContext.h"
     16 #include "GrDefaultGeoProcFactory.h"
     17 #include "GrGeometryProcessor.h"
     18 #include "GrInvariantOutput.h"
     19 #include "GrPathUtils.h"
     20 #include "GrProcessor.h"
     21 #include "GrPipelineBuilder.h"
     22 #include "GrStrokeInfo.h"
     23 #include "SkGeometry.h"
     24 #include "SkPathPriv.h"
     25 #include "SkString.h"
     26 #include "SkTraceEvent.h"
     27 #include "batches/GrVertexBatch.h"
     28 #include "glsl/GrGLSLFragmentShaderBuilder.h"
     29 #include "glsl/GrGLSLGeometryProcessor.h"
     30 #include "glsl/GrGLSLProgramDataManager.h"
     31 #include "glsl/GrGLSLUniformHandler.h"
     32 #include "glsl/GrGLSLVarying.h"
     33 #include "glsl/GrGLSLVertexShaderBuilder.h"
     34 
     35 GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
     36 }
     37 
     38 struct Segment {
     39     enum {
     40         // These enum values are assumed in member functions below.
     41         kLine = 0,
     42         kQuad = 1,
     43     } fType;
     44 
     45     // line uses one pt, quad uses 2 pts
     46     SkPoint fPts[2];
     47     // normal to edge ending at each pt
     48     SkVector fNorms[2];
     49     // is the corner where the previous segment meets this segment
     50     // sharp. If so, fMid is a normalized bisector facing outward.
     51     SkVector fMid;
     52 
     53     int countPoints() {
     54         GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
     55         return fType + 1;
     56     }
     57     const SkPoint& endPt() const {
     58         GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
     59         return fPts[fType];
     60     };
     61     const SkPoint& endNorm() const {
     62         GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
     63         return fNorms[fType];
     64     };
     65 };
     66 
     67 typedef SkTArray<Segment, true> SegmentArray;
     68 
     69 static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
     70     SkScalar area = 0;
     71     SkPoint center = {0, 0};
     72     int count = segments.count();
     73     SkPoint p0 = {0, 0};
     74     if (count > 2) {
     75         // We translate the polygon so that the first point is at the origin.
     76         // This avoids some precision issues with small area polygons far away
     77         // from the origin.
     78         p0 = segments[0].endPt();
     79         SkPoint pi;
     80         SkPoint pj;
     81         // the first and last iteration of the below loop would compute
     82         // zeros since the starting / ending point is (0,0). So instead we start
     83         // at i=1 and make the last iteration i=count-2.
     84         pj = segments[1].endPt() - p0;
     85         for (int i = 1; i < count - 1; ++i) {
     86             pi = pj;
     87             pj = segments[i + 1].endPt() - p0;
     88 
     89             SkScalar t = SkPoint::CrossProduct(pi, pj);
     90             area += t;
     91             center.fX += (pi.fX + pj.fX) * t;
     92             center.fY += (pi.fY + pj.fY) * t;
     93         }
     94     }
     95 
     96     // If the poly has no area then we instead return the average of
     97     // its points.
     98     if (SkScalarNearlyZero(area)) {
     99         SkPoint avg;
    100         avg.set(0, 0);
    101         for (int i = 0; i < count; ++i) {
    102             const SkPoint& pt = segments[i].endPt();
    103             avg.fX += pt.fX;
    104             avg.fY += pt.fY;
    105         }
    106         SkScalar denom = SK_Scalar1 / count;
    107         avg.scale(denom);
    108         *c = avg;
    109     } else {
    110         area *= 3;
    111         area = SkScalarInvert(area);
    112         center.scale(area);
    113         // undo the translate of p0 to the origin.
    114         *c = center + p0;
    115     }
    116     SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
    117 }
    118 
    119 static void compute_vectors(SegmentArray* segments,
    120                             SkPoint* fanPt,
    121                             SkPathPriv::FirstDirection dir,
    122                             int* vCount,
    123                             int* iCount) {
    124     center_of_mass(*segments, fanPt);
    125     int count = segments->count();
    126 
    127     // Make the normals point towards the outside
    128     SkPoint::Side normSide;
    129     if (dir == SkPathPriv::kCCW_FirstDirection) {
    130         normSide = SkPoint::kRight_Side;
    131     } else {
    132         normSide = SkPoint::kLeft_Side;
    133     }
    134 
    135     *vCount = 0;
    136     *iCount = 0;
    137     // compute normals at all points
    138     for (int a = 0; a < count; ++a) {
    139         Segment& sega = (*segments)[a];
    140         int b = (a + 1) % count;
    141         Segment& segb = (*segments)[b];
    142 
    143         const SkPoint* prevPt = &sega.endPt();
    144         int n = segb.countPoints();
    145         for (int p = 0; p < n; ++p) {
    146             segb.fNorms[p] = segb.fPts[p] - *prevPt;
    147             segb.fNorms[p].normalize();
    148             segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
    149             prevPt = &segb.fPts[p];
    150         }
    151         if (Segment::kLine == segb.fType) {
    152             *vCount += 5;
    153             *iCount += 9;
    154         } else {
    155             *vCount += 6;
    156             *iCount += 12;
    157         }
    158     }
    159 
    160     // compute mid-vectors where segments meet. TODO: Detect shallow corners
    161     // and leave out the wedges and close gaps by stitching segments together.
    162     for (int a = 0; a < count; ++a) {
    163         const Segment& sega = (*segments)[a];
    164         int b = (a + 1) % count;
    165         Segment& segb = (*segments)[b];
    166         segb.fMid = segb.fNorms[0] + sega.endNorm();
    167         segb.fMid.normalize();
    168         // corner wedges
    169         *vCount += 4;
    170         *iCount += 6;
    171     }
    172 }
    173 
    174 struct DegenerateTestData {
    175     DegenerateTestData() { fStage = kInitial; }
    176     bool isDegenerate() const { return kNonDegenerate != fStage; }
    177     enum {
    178         kInitial,
    179         kPoint,
    180         kLine,
    181         kNonDegenerate
    182     }           fStage;
    183     SkPoint     fFirstPoint;
    184     SkVector    fLineNormal;
    185     SkScalar    fLineC;
    186 };
    187 
    188 static const SkScalar kClose = (SK_Scalar1 / 16);
    189 static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
    190 
    191 static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    192     switch (data->fStage) {
    193         case DegenerateTestData::kInitial:
    194             data->fFirstPoint = pt;
    195             data->fStage = DegenerateTestData::kPoint;
    196             break;
    197         case DegenerateTestData::kPoint:
    198             if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
    199                 data->fLineNormal = pt - data->fFirstPoint;
    200                 data->fLineNormal.normalize();
    201                 data->fLineNormal.setOrthog(data->fLineNormal);
    202                 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
    203                 data->fStage = DegenerateTestData::kLine;
    204             }
    205             break;
    206         case DegenerateTestData::kLine:
    207             if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
    208                 data->fStage = DegenerateTestData::kNonDegenerate;
    209             }
    210         case DegenerateTestData::kNonDegenerate:
    211             break;
    212         default:
    213             SkFAIL("Unexpected degenerate test stage.");
    214     }
    215 }
    216 
    217 static inline bool get_direction(const SkPath& path, const SkMatrix& m,
    218                                  SkPathPriv::FirstDirection* dir) {
    219     if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
    220         return false;
    221     }
    222     // check whether m reverses the orientation
    223     SkASSERT(!m.hasPerspective());
    224     SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) -
    225                       SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY));
    226     if (det2x2 < 0) {
    227         *dir = SkPathPriv::OppositeFirstDirection(*dir);
    228     }
    229     return true;
    230 }
    231 
    232 static inline void add_line_to_segment(const SkPoint& pt,
    233                                        SegmentArray* segments) {
    234     segments->push_back();
    235     segments->back().fType = Segment::kLine;
    236     segments->back().fPts[0] = pt;
    237 }
    238 
    239 static inline void add_quad_segment(const SkPoint pts[3],
    240                                     SegmentArray* segments) {
    241     if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
    242         if (pts[0] != pts[2]) {
    243             add_line_to_segment(pts[2], segments);
    244         }
    245     } else {
    246         segments->push_back();
    247         segments->back().fType = Segment::kQuad;
    248         segments->back().fPts[0] = pts[1];
    249         segments->back().fPts[1] = pts[2];
    250     }
    251 }
    252 
    253 static inline void add_cubic_segments(const SkPoint pts[4],
    254                                       SkPathPriv::FirstDirection dir,
    255                                       SegmentArray* segments) {
    256     SkSTArray<15, SkPoint, true> quads;
    257     GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
    258     int count = quads.count();
    259     for (int q = 0; q < count; q += 3) {
    260         add_quad_segment(&quads[q], segments);
    261     }
    262 }
    263 
    264 static bool get_segments(const SkPath& path,
    265                          const SkMatrix& m,
    266                          SegmentArray* segments,
    267                          SkPoint* fanPt,
    268                          int* vCount,
    269                          int* iCount) {
    270     SkPath::Iter iter(path, true);
    271     // This renderer over-emphasizes very thin path regions. We use the distance
    272     // to the path from the sample to compute coverage. Every pixel intersected
    273     // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    274     // notice that the sample may be close to a very thin area of the path and
    275     // thus should be very light. This is particularly egregious for degenerate
    276     // line paths. We detect paths that are very close to a line (zero area) and
    277     // draw nothing.
    278     DegenerateTestData degenerateData;
    279     SkPathPriv::FirstDirection dir;
    280     // get_direction can fail for some degenerate paths.
    281     if (!get_direction(path, m, &dir)) {
    282         return false;
    283     }
    284 
    285     for (;;) {
    286         SkPoint pts[4];
    287         SkPath::Verb verb = iter.next(pts);
    288         switch (verb) {
    289             case SkPath::kMove_Verb:
    290                 m.mapPoints(pts, 1);
    291                 update_degenerate_test(&degenerateData, pts[0]);
    292                 break;
    293             case SkPath::kLine_Verb: {
    294                 m.mapPoints(&pts[1], 1);
    295                 update_degenerate_test(&degenerateData, pts[1]);
    296                 add_line_to_segment(pts[1], segments);
    297                 break;
    298             }
    299             case SkPath::kQuad_Verb:
    300                 m.mapPoints(pts, 3);
    301                 update_degenerate_test(&degenerateData, pts[1]);
    302                 update_degenerate_test(&degenerateData, pts[2]);
    303                 add_quad_segment(pts, segments);
    304                 break;
    305             case SkPath::kConic_Verb: {
    306                 m.mapPoints(pts, 3);
    307                 SkScalar weight = iter.conicWeight();
    308                 SkAutoConicToQuads converter;
    309                 const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
    310                 for (int i = 0; i < converter.countQuads(); ++i) {
    311                     update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
    312                     update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
    313                     add_quad_segment(quadPts + 2*i, segments);
    314                 }
    315                 break;
    316             }
    317             case SkPath::kCubic_Verb: {
    318                 m.mapPoints(pts, 4);
    319                 update_degenerate_test(&degenerateData, pts[1]);
    320                 update_degenerate_test(&degenerateData, pts[2]);
    321                 update_degenerate_test(&degenerateData, pts[3]);
    322                 add_cubic_segments(pts, dir, segments);
    323                 break;
    324             };
    325             case SkPath::kDone_Verb:
    326                 if (degenerateData.isDegenerate()) {
    327                     return false;
    328                 } else {
    329                     compute_vectors(segments, fanPt, dir, vCount, iCount);
    330                     return true;
    331                 }
    332             default:
    333                 break;
    334         }
    335     }
    336 }
    337 
    338 struct QuadVertex {
    339     SkPoint  fPos;
    340     SkPoint  fUV;
    341     SkScalar fD0;
    342     SkScalar fD1;
    343 };
    344 
    345 struct Draw {
    346     Draw() : fVertexCnt(0), fIndexCnt(0) {}
    347     int fVertexCnt;
    348     int fIndexCnt;
    349 };
    350 
    351 typedef SkTArray<Draw, true> DrawArray;
    352 
    353 static void create_vertices(const SegmentArray&  segments,
    354                             const SkPoint& fanPt,
    355                             DrawArray*     draws,
    356                             QuadVertex*    verts,
    357                             uint16_t*      idxs) {
    358     Draw* draw = &draws->push_back();
    359     // alias just to make vert/index assignments easier to read.
    360     int* v = &draw->fVertexCnt;
    361     int* i = &draw->fIndexCnt;
    362 
    363     int count = segments.count();
    364     for (int a = 0; a < count; ++a) {
    365         const Segment& sega = segments[a];
    366         int b = (a + 1) % count;
    367         const Segment& segb = segments[b];
    368 
    369         // Check whether adding the verts for this segment to the current draw would cause index
    370         // values to overflow.
    371         int vCount = 4;
    372         if (Segment::kLine == segb.fType) {
    373             vCount += 5;
    374         } else {
    375             vCount += 6;
    376         }
    377         if (draw->fVertexCnt + vCount > (1 << 16)) {
    378             verts += *v;
    379             idxs += *i;
    380             draw = &draws->push_back();
    381             v = &draw->fVertexCnt;
    382             i = &draw->fIndexCnt;
    383         }
    384 
    385         // FIXME: These tris are inset in the 1 unit arc around the corner
    386         verts[*v + 0].fPos = sega.endPt();
    387         verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
    388         verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
    389         verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
    390         verts[*v + 0].fUV.set(0,0);
    391         verts[*v + 1].fUV.set(0,-SK_Scalar1);
    392         verts[*v + 2].fUV.set(0,-SK_Scalar1);
    393         verts[*v + 3].fUV.set(0,-SK_Scalar1);
    394         verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
    395         verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
    396         verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
    397         verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
    398 
    399         idxs[*i + 0] = *v + 0;
    400         idxs[*i + 1] = *v + 2;
    401         idxs[*i + 2] = *v + 1;
    402         idxs[*i + 3] = *v + 0;
    403         idxs[*i + 4] = *v + 3;
    404         idxs[*i + 5] = *v + 2;
    405 
    406         *v += 4;
    407         *i += 6;
    408 
    409         if (Segment::kLine == segb.fType) {
    410             verts[*v + 0].fPos = fanPt;
    411             verts[*v + 1].fPos = sega.endPt();
    412             verts[*v + 2].fPos = segb.fPts[0];
    413 
    414             verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
    415             verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];
    416 
    417             // we draw the line edge as a degenerate quad (u is 0, v is the
    418             // signed distance to the edge)
    419             SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
    420                                                         verts[*v + 2].fPos);
    421             verts[*v + 0].fUV.set(0, dist);
    422             verts[*v + 1].fUV.set(0, 0);
    423             verts[*v + 2].fUV.set(0, 0);
    424             verts[*v + 3].fUV.set(0, -SK_Scalar1);
    425             verts[*v + 4].fUV.set(0, -SK_Scalar1);
    426 
    427             verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
    428             verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
    429             verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
    430             verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
    431             verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;
    432 
    433             idxs[*i + 0] = *v + 3;
    434             idxs[*i + 1] = *v + 1;
    435             idxs[*i + 2] = *v + 2;
    436 
    437             idxs[*i + 3] = *v + 4;
    438             idxs[*i + 4] = *v + 3;
    439             idxs[*i + 5] = *v + 2;
    440 
    441             *i += 6;
    442 
    443             // Draw the interior fan if it exists.
    444             // TODO: Detect and combine colinear segments. This will ensure we catch every case
    445             // with no interior, and that the resulting shared edge uses the same endpoints.
    446             if (count >= 3) {
    447                 idxs[*i + 0] = *v + 0;
    448                 idxs[*i + 1] = *v + 2;
    449                 idxs[*i + 2] = *v + 1;
    450 
    451                 *i += 3;
    452             }
    453 
    454             *v += 5;
    455         } else {
    456             SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
    457 
    458             SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
    459             midVec.normalize();
    460 
    461             verts[*v + 0].fPos = fanPt;
    462             verts[*v + 1].fPos = qpts[0];
    463             verts[*v + 2].fPos = qpts[2];
    464             verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
    465             verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
    466             verts[*v + 5].fPos = qpts[1] + midVec;
    467 
    468             SkScalar c = segb.fNorms[0].dot(qpts[0]);
    469             verts[*v + 0].fD0 =  -segb.fNorms[0].dot(fanPt) + c;
    470             verts[*v + 1].fD0 =  0.f;
    471             verts[*v + 2].fD0 =  -segb.fNorms[0].dot(qpts[2]) + c;
    472             verts[*v + 3].fD0 = -SK_ScalarMax/100;
    473             verts[*v + 4].fD0 = -SK_ScalarMax/100;
    474             verts[*v + 5].fD0 = -SK_ScalarMax/100;
    475 
    476             c = segb.fNorms[1].dot(qpts[2]);
    477             verts[*v + 0].fD1 =  -segb.fNorms[1].dot(fanPt) + c;
    478             verts[*v + 1].fD1 =  -segb.fNorms[1].dot(qpts[0]) + c;
    479             verts[*v + 2].fD1 =  0.f;
    480             verts[*v + 3].fD1 = -SK_ScalarMax/100;
    481             verts[*v + 4].fD1 = -SK_ScalarMax/100;
    482             verts[*v + 5].fD1 = -SK_ScalarMax/100;
    483 
    484             GrPathUtils::QuadUVMatrix toUV(qpts);
    485             toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);
    486 
    487             idxs[*i + 0] = *v + 3;
    488             idxs[*i + 1] = *v + 1;
    489             idxs[*i + 2] = *v + 2;
    490             idxs[*i + 3] = *v + 4;
    491             idxs[*i + 4] = *v + 3;
    492             idxs[*i + 5] = *v + 2;
    493 
    494             idxs[*i + 6] = *v + 5;
    495             idxs[*i + 7] = *v + 3;
    496             idxs[*i + 8] = *v + 4;
    497 
    498             *i += 9;
    499 
    500             // Draw the interior fan if it exists.
    501             // TODO: Detect and combine colinear segments. This will ensure we catch every case
    502             // with no interior, and that the resulting shared edge uses the same endpoints.
    503             if (count >= 3) {
    504                 idxs[*i + 0] = *v + 0;
    505                 idxs[*i + 1] = *v + 2;
    506                 idxs[*i + 2] = *v + 1;
    507 
    508                 *i += 3;
    509             }
    510 
    511             *v += 6;
    512         }
    513     }
    514 }
    515 
    516 ///////////////////////////////////////////////////////////////////////////////
    517 
    518 /*
    519  * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
    520  * two components of the vertex attribute. Coverage is based on signed
    521  * distance with negative being inside, positive outside. The edge is specified in
    522  * window space (y-down). If either the third or fourth component of the interpolated
    523  * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
    524  * attempt to trim to a portion of the infinite quad.
    525  * Requires shader derivative instruction support.
    526  */
    527 
    528 class QuadEdgeEffect : public GrGeometryProcessor {
    529 public:
    530 
    531     static GrGeometryProcessor* Create(GrColor color, const SkMatrix& localMatrix,
    532                                        bool usesLocalCoords) {
    533         return new QuadEdgeEffect(color, localMatrix, usesLocalCoords);
    534     }
    535 
    536     virtual ~QuadEdgeEffect() {}
    537 
    538     const char* name() const override { return "QuadEdge"; }
    539 
    540     const Attribute* inPosition() const { return fInPosition; }
    541     const Attribute* inQuadEdge() const { return fInQuadEdge; }
    542     GrColor color() const { return fColor; }
    543     bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
    544     const SkMatrix& localMatrix() const { return fLocalMatrix; }
    545     bool usesLocalCoords() const { return fUsesLocalCoords; }
    546 
    547     class GLSLProcessor : public GrGLSLGeometryProcessor {
    548     public:
    549         GLSLProcessor()
    550             : fColor(GrColor_ILLEGAL) {}
    551 
    552         void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
    553             const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
    554             GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
    555             GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
    556             GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
    557 
    558             // emit attributes
    559             varyingHandler->emitAttributes(qe);
    560 
    561             GrGLSLVertToFrag v(kVec4f_GrSLType);
    562             varyingHandler->addVarying("QuadEdge", &v);
    563             vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);
    564 
    565             GrGLSLPPFragmentBuilder* fragBuilder = args.fFragBuilder;
    566             // Setup pass through color
    567             if (!qe.colorIgnored()) {
    568                 this->setupUniformColor(fragBuilder, uniformHandler, args.fOutputColor,
    569                                         &fColorUniform);
    570             }
    571 
    572             // Setup position
    573             this->setupPosition(vertBuilder, gpArgs, qe.inPosition()->fName);
    574 
    575             // emit transforms
    576             this->emitTransforms(vertBuilder,
    577                                  varyingHandler,
    578                                  uniformHandler,
    579                                  gpArgs->fPositionVar,
    580                                  qe.inPosition()->fName,
    581                                  qe.localMatrix(),
    582                                  args.fTransformsIn,
    583                                  args.fTransformsOut);
    584 
    585             SkAssertResult(fragBuilder->enableFeature(
    586                     GrGLSLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
    587             fragBuilder->codeAppendf("float edgeAlpha;");
    588 
    589             // keep the derivative instructions outside the conditional
    590             fragBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
    591             fragBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
    592             fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
    593             // today we know z and w are in device space. We could use derivatives
    594             fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
    595                                      v.fsIn());
    596             fragBuilder->codeAppendf ("} else {");
    597             fragBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
    598                                      "               2.0*%s.x*duvdy.x - duvdy.y);",
    599                                      v.fsIn(), v.fsIn());
    600             fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
    601                                      v.fsIn());
    602             fragBuilder->codeAppendf("edgeAlpha = "
    603                                      "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");
    604 
    605             fragBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
    606         }
    607 
    608         static inline void GenKey(const GrGeometryProcessor& gp,
    609                                   const GrGLSLCaps&,
    610                                   GrProcessorKeyBuilder* b) {
    611             const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
    612             uint32_t key = 0;
    613             key |= qee.usesLocalCoords() && qee.localMatrix().hasPerspective() ? 0x1 : 0x0;
    614             key |= qee.colorIgnored() ? 0x2 : 0x0;
    615             b->add32(key);
    616         }
    617 
    618         void setData(const GrGLSLProgramDataManager& pdman,
    619                      const GrPrimitiveProcessor& gp) override {
    620             const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
    621             if (qe.color() != fColor) {
    622                 float c[4];
    623                 GrColorToRGBAFloat(qe.color(), c);
    624                 pdman.set4fv(fColorUniform, 1, c);
    625                 fColor = qe.color();
    626             }
    627         }
    628 
    629         void setTransformData(const GrPrimitiveProcessor& primProc,
    630                               const GrGLSLProgramDataManager& pdman,
    631                               int index,
    632                               const SkTArray<const GrCoordTransform*, true>& transforms) override {
    633             this->setTransformDataHelper<QuadEdgeEffect>(primProc, pdman, index, transforms);
    634         }
    635 
    636     private:
    637         GrColor fColor;
    638         UniformHandle fColorUniform;
    639 
    640         typedef GrGLSLGeometryProcessor INHERITED;
    641     };
    642 
    643     void getGLSLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override {
    644         GLSLProcessor::GenKey(*this, caps, b);
    645     }
    646 
    647     GrGLSLPrimitiveProcessor* createGLSLInstance(const GrGLSLCaps&) const override {
    648         return new GLSLProcessor();
    649     }
    650 
    651 private:
    652     QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix, bool usesLocalCoords)
    653         : fColor(color)
    654         , fLocalMatrix(localMatrix)
    655         , fUsesLocalCoords(usesLocalCoords) {
    656         this->initClassID<QuadEdgeEffect>();
    657         fInPosition = &this->addVertexAttrib(Attribute("inPosition", kVec2f_GrVertexAttribType));
    658         fInQuadEdge = &this->addVertexAttrib(Attribute("inQuadEdge", kVec4f_GrVertexAttribType));
    659     }
    660 
    661     const Attribute* fInPosition;
    662     const Attribute* fInQuadEdge;
    663     GrColor          fColor;
    664     SkMatrix         fLocalMatrix;
    665     bool             fUsesLocalCoords;
    666 
    667     GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
    668 
    669     typedef GrGeometryProcessor INHERITED;
    670 };
    671 
    672 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
    673 
    674 const GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    675     // Doesn't work without derivative instructions.
    676     return d->fCaps->shaderCaps()->shaderDerivativeSupport() ?
    677            QuadEdgeEffect::Create(GrRandomColor(d->fRandom),
    678                                   GrTest::TestMatrix(d->fRandom),
    679                                   d->fRandom->nextBool()) : nullptr;
    680 }
    681 
    682 ///////////////////////////////////////////////////////////////////////////////
    683 
    684 bool GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    685     return (args.fShaderCaps->shaderDerivativeSupport() && args.fAntiAlias &&
    686             args.fStroke->isFillStyle() && !args.fPath->isInverseFillType() &&
    687             args.fPath->isConvex());
    688 }
    689 
    690 // extract the result vertices and indices from the GrAAConvexTessellator
    691 static void extract_verts(const GrAAConvexTessellator& tess,
    692                           void* vertices,
    693                           size_t vertexStride,
    694                           GrColor color,
    695                           uint16_t* idxs,
    696                           bool tweakAlphaForCoverage) {
    697     intptr_t verts = reinterpret_cast<intptr_t>(vertices);
    698 
    699     for (int i = 0; i < tess.numPts(); ++i) {
    700         *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
    701     }
    702 
    703     // Make 'verts' point to the colors
    704     verts += sizeof(SkPoint);
    705     for (int i = 0; i < tess.numPts(); ++i) {
    706         if (tweakAlphaForCoverage) {
    707             SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
    708             unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
    709             GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
    710             *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
    711         } else {
    712             *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
    713             *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
    714                     tess.coverage(i);
    715         }
    716     }
    717 
    718     for (int i = 0; i < tess.numIndices(); ++i) {
    719         idxs[i] = tess.index(i);
    720     }
    721 }
    722 
    723 static const GrGeometryProcessor* create_fill_gp(bool tweakAlphaForCoverage,
    724                                                  const SkMatrix& viewMatrix,
    725                                                  bool usesLocalCoords,
    726                                                  bool coverageIgnored) {
    727     using namespace GrDefaultGeoProcFactory;
    728 
    729     Color color(Color::kAttribute_Type);
    730     Coverage::Type coverageType;
    731     // TODO remove coverage if coverage is ignored
    732     /*if (coverageIgnored) {
    733         coverageType = Coverage::kNone_Type;
    734     } else*/ if (tweakAlphaForCoverage) {
    735         coverageType = Coverage::kSolid_Type;
    736     } else {
    737         coverageType = Coverage::kAttribute_Type;
    738     }
    739     Coverage coverage(coverageType);
    740     LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
    741                                               LocalCoords::kUnused_Type);
    742     return CreateForDeviceSpace(color, coverage, localCoords, viewMatrix);
    743 }
    744 
    745 class AAConvexPathBatch : public GrVertexBatch {
    746 public:
    747     DEFINE_BATCH_CLASS_ID
    748     struct Geometry {
    749         GrColor fColor;
    750         SkMatrix fViewMatrix;
    751         SkPath fPath;
    752     };
    753 
    754     static GrDrawBatch* Create(const Geometry& geometry) { return new AAConvexPathBatch(geometry); }
    755 
    756     const char* name() const override { return "AAConvexBatch"; }
    757 
    758     void computePipelineOptimizations(GrInitInvariantOutput* color,
    759                                       GrInitInvariantOutput* coverage,
    760                                       GrBatchToXPOverrides* overrides) const override {
    761         // When this is called on a batch, there is only one geometry bundle
    762         color->setKnownFourComponents(fGeoData[0].fColor);
    763         coverage->setUnknownSingleComponent();
    764     }
    765 
    766 private:
    767     void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
    768         // Handle any color overrides
    769         if (!overrides.readsColor()) {
    770             fGeoData[0].fColor = GrColor_ILLEGAL;
    771         }
    772         overrides.getOverrideColorIfSet(&fGeoData[0].fColor);
    773 
    774         // setup batch properties
    775         fBatch.fColorIgnored = !overrides.readsColor();
    776         fBatch.fColor = fGeoData[0].fColor;
    777         fBatch.fUsesLocalCoords = overrides.readsLocalCoords();
    778         fBatch.fCoverageIgnored = !overrides.readsCoverage();
    779         fBatch.fLinesOnly = SkPath::kLine_SegmentMask == fGeoData[0].fPath.getSegmentMasks();
    780         fBatch.fCanTweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
    781     }
    782 
    783     void prepareLinesOnlyDraws(Target* target) const {
    784         bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
    785 
    786         // Setup GrGeometryProcessor
    787         SkAutoTUnref<const GrGeometryProcessor> gp(create_fill_gp(canTweakAlphaForCoverage,
    788                                                                   this->viewMatrix(),
    789                                                                   this->usesLocalCoords(),
    790                                                                   this->coverageIgnored()));
    791         if (!gp) {
    792             SkDebugf("Could not create GrGeometryProcessor\n");
    793             return;
    794         }
    795 
    796         target->initDraw(gp, this->pipeline());
    797 
    798         size_t vertexStride = gp->getVertexStride();
    799 
    800         SkASSERT(canTweakAlphaForCoverage ?
    801                  vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
    802                  vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
    803 
    804         GrAAConvexTessellator tess;
    805 
    806         int instanceCount = fGeoData.count();
    807 
    808         for (int i = 0; i < instanceCount; i++) {
    809             tess.rewind();
    810 
    811             const Geometry& args = fGeoData[i];
    812 
    813             if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
    814                 continue;
    815             }
    816 
    817             const GrVertexBuffer* vertexBuffer;
    818             int firstVertex;
    819 
    820             void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
    821                                                   &firstVertex);
    822             if (!verts) {
    823                 SkDebugf("Could not allocate vertices\n");
    824                 return;
    825             }
    826 
    827             const GrIndexBuffer* indexBuffer;
    828             int firstIndex;
    829 
    830             uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
    831             if (!idxs) {
    832                 SkDebugf("Could not allocate indices\n");
    833                 return;
    834             }
    835 
    836             extract_verts(tess, verts, vertexStride, args.fColor, idxs, canTweakAlphaForCoverage);
    837 
    838             GrVertices info;
    839             info.initIndexed(kTriangles_GrPrimitiveType,
    840                              vertexBuffer, indexBuffer,
    841                              firstVertex, firstIndex,
    842                              tess.numPts(), tess.numIndices());
    843             target->draw(info);
    844         }
    845     }
    846 
    847     void onPrepareDraws(Target* target) const override {
    848 #ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
    849         if (this->linesOnly()) {
    850             this->prepareLinesOnlyDraws(target);
    851             return;
    852         }
    853 #endif
    854 
    855         int instanceCount = fGeoData.count();
    856 
    857         SkMatrix invert;
    858         if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
    859             SkDebugf("Could not invert viewmatrix\n");
    860             return;
    861         }
    862 
    863         // Setup GrGeometryProcessor
    864         SkAutoTUnref<GrGeometryProcessor> quadProcessor(
    865                 QuadEdgeEffect::Create(this->color(), invert, this->usesLocalCoords()));
    866 
    867         target->initDraw(quadProcessor, this->pipeline());
    868 
    869         // TODO generate all segments for all paths and use one vertex buffer
    870         for (int i = 0; i < instanceCount; i++) {
    871             const Geometry& args = fGeoData[i];
    872 
    873             // We use the fact that SkPath::transform path does subdivision based on
    874             // perspective. Otherwise, we apply the view matrix when copying to the
    875             // segment representation.
    876             const SkMatrix* viewMatrix = &args.fViewMatrix;
    877 
    878             // We avoid initializing the path unless we have to
    879             const SkPath* pathPtr = &args.fPath;
    880             SkTLazy<SkPath> tmpPath;
    881             if (viewMatrix->hasPerspective()) {
    882                 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
    883                 tmpPathPtr->setIsVolatile(true);
    884                 tmpPathPtr->transform(*viewMatrix);
    885                 viewMatrix = &SkMatrix::I();
    886                 pathPtr = tmpPathPtr;
    887             }
    888 
    889             int vertexCount;
    890             int indexCount;
    891             enum {
    892                 kPreallocSegmentCnt = 512 / sizeof(Segment),
    893                 kPreallocDrawCnt = 4,
    894             };
    895             SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
    896             SkPoint fanPt;
    897 
    898             if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
    899                               &indexCount)) {
    900                 continue;
    901             }
    902 
    903             const GrVertexBuffer* vertexBuffer;
    904             int firstVertex;
    905 
    906             size_t vertexStride = quadProcessor->getVertexStride();
    907             QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
    908                 vertexStride, vertexCount, &vertexBuffer, &firstVertex));
    909 
    910             if (!verts) {
    911                 SkDebugf("Could not allocate vertices\n");
    912                 return;
    913             }
    914 
    915             const GrIndexBuffer* indexBuffer;
    916             int firstIndex;
    917 
    918             uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
    919             if (!idxs) {
    920                 SkDebugf("Could not allocate indices\n");
    921                 return;
    922             }
    923 
    924             SkSTArray<kPreallocDrawCnt, Draw, true> draws;
    925             create_vertices(segments, fanPt, &draws, verts, idxs);
    926 
    927             GrVertices vertices;
    928 
    929             for (int j = 0; j < draws.count(); ++j) {
    930                 const Draw& draw = draws[j];
    931                 vertices.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer,
    932                                      firstVertex, firstIndex, draw.fVertexCnt, draw.fIndexCnt);
    933                 target->draw(vertices);
    934                 firstVertex += draw.fVertexCnt;
    935                 firstIndex += draw.fIndexCnt;
    936             }
    937         }
    938     }
    939 
    940     SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
    941 
    942     AAConvexPathBatch(const Geometry& geometry) : INHERITED(ClassID()) {
    943         fGeoData.push_back(geometry);
    944 
    945         // compute bounds
    946         fBounds = geometry.fPath.getBounds();
    947         geometry.fViewMatrix.mapRect(&fBounds);
    948     }
    949 
    950     bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
    951         AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
    952         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
    953                                     that->bounds(), caps)) {
    954             return false;
    955         }
    956 
    957         if (this->color() != that->color()) {
    958             return false;
    959         }
    960 
    961         SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
    962         if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
    963             return false;
    964         }
    965 
    966         if (this->linesOnly() != that->linesOnly()) {
    967             return false;
    968         }
    969 
    970         // In the event of two batches, one who can tweak, one who cannot, we just fall back to
    971         // not tweaking
    972         if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
    973             fBatch.fCanTweakAlphaForCoverage = false;
    974         }
    975 
    976         fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
    977         this->joinBounds(that->bounds());
    978         return true;
    979     }
    980 
    981     GrColor color() const { return fBatch.fColor; }
    982     bool linesOnly() const { return fBatch.fLinesOnly; }
    983     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
    984     bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
    985     const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
    986     bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
    987 
    988     struct BatchTracker {
    989         GrColor fColor;
    990         bool fUsesLocalCoords;
    991         bool fColorIgnored;
    992         bool fCoverageIgnored;
    993         bool fLinesOnly;
    994         bool fCanTweakAlphaForCoverage;
    995     };
    996 
    997     BatchTracker fBatch;
    998     SkSTArray<1, Geometry, true> fGeoData;
    999 
   1000     typedef GrVertexBatch INHERITED;
   1001 };
   1002 
   1003 bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
   1004     GR_AUDIT_TRAIL_AUTO_FRAME(args.fTarget->getAuditTrail(), "GrAAConvexPathRenderer::onDrawPath");
   1005     if (args.fPath->isEmpty()) {
   1006         return true;
   1007     }
   1008 
   1009     AAConvexPathBatch::Geometry geometry;
   1010     geometry.fColor = args.fColor;
   1011     geometry.fViewMatrix = *args.fViewMatrix;
   1012     geometry.fPath = *args.fPath;
   1013 
   1014     SkAutoTUnref<GrDrawBatch> batch(AAConvexPathBatch::Create(geometry));
   1015     args.fTarget->drawBatch(*args.fPipelineBuilder, batch);
   1016 
   1017     return true;
   1018 
   1019 }
   1020 
   1021 ///////////////////////////////////////////////////////////////////////////////////////////////////
   1022 
   1023 #ifdef GR_TEST_UTILS
   1024 
   1025 DRAW_BATCH_TEST_DEFINE(AAConvexPathBatch) {
   1026     AAConvexPathBatch::Geometry geometry;
   1027     geometry.fColor = GrRandomColor(random);
   1028     geometry.fViewMatrix = GrTest::TestMatrixInvertible(random);
   1029     geometry.fPath = GrTest::TestPathConvex(random);
   1030 
   1031     return AAConvexPathBatch::Create(geometry);
   1032 }
   1033 
   1034 #endif
   1035