      1 /*
      2  * Copyright 2011 Google Inc.
      3  *
      4  * Use of this source code is governed by a BSD-style license that can be
      5  * found in the LICENSE file.
      6  */
      7 
      8 
      9 #include "GrGpuGL.h"
     10 #include "GrGLNameAllocator.h"
     11 #include "GrGLStencilBuffer.h"
     12 #include "GrGLPath.h"
     13 #include "GrGLShaderBuilder.h"
     14 #include "GrTemplates.h"
     15 #include "GrTypes.h"
     16 #include "SkStrokeRec.h"
     17 #include "SkTemplates.h"
     18 
     19 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
     20 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
     21 
     22 #define SKIP_CACHE_CHECK    true
     23 
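         // When GR_GL_CHECK_ALLOC_WITH_GET_ERROR is set, allocation calls clear any pending GL
         // error beforehand and query glGetError afterward so out-of-memory failures can be
         // detected; otherwise they go through the normal GR_GL_CALL checking and the post-alloc
         // check is a no-op.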
     24 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
     25     #define CLEAR_ERROR_BEFORE_ALLOC(iface)   GrGLClearErr(iface)
     26     #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL_NOERRCHECK(iface, call)
     27     #define CHECK_ALLOC_ERROR(iface)          GR_GL_GET_ERROR(iface)
     28 #else
     29     #define CLEAR_ERROR_BEFORE_ALLOC(iface)
     30     #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL(iface, call)
     31     #define CHECK_ALLOC_ERROR(iface)          GR_GL_NO_ERROR
     32 #endif
     33 
     34 
     35 ///////////////////////////////////////////////////////////////////////////////
     36 
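         // Maps GrBlendCoeff values to the corresponding GL blend factors. The array is indexed
         // by GrBlendCoeff, so its order must match the enum; the static asserts in
         // BlendCoeffReferencesConstant() below verify the ordering and the entry count.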
     37 static const GrGLenum gXfermodeCoeff2Blend[] = {
     38     GR_GL_ZERO,
     39     GR_GL_ONE,
     40     GR_GL_SRC_COLOR,
     41     GR_GL_ONE_MINUS_SRC_COLOR,
     42     GR_GL_DST_COLOR,
     43     GR_GL_ONE_MINUS_DST_COLOR,
     44     GR_GL_SRC_ALPHA,
     45     GR_GL_ONE_MINUS_SRC_ALPHA,
     46     GR_GL_DST_ALPHA,
     47     GR_GL_ONE_MINUS_DST_ALPHA,
     48     GR_GL_CONSTANT_COLOR,
     49     GR_GL_ONE_MINUS_CONSTANT_COLOR,
     50     GR_GL_CONSTANT_ALPHA,
     51     GR_GL_ONE_MINUS_CONSTANT_ALPHA,
     52 
     53     // extended blend coeffs
     54     GR_GL_SRC1_COLOR,
     55     GR_GL_ONE_MINUS_SRC1_COLOR,
     56     GR_GL_SRC1_ALPHA,
     57     GR_GL_ONE_MINUS_SRC1_ALPHA,
     58 };
     59 
     60 bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
     61     static const bool gCoeffReferencesBlendConst[] = {
     62         false,
     63         false,
     64         false,
     65         false,
     66         false,
     67         false,
     68         false,
     69         false,
     70         false,
     71         false,
     72         true,
     73         true,
     74         true,
     75         true,
     76 
     77         // extended blend coeffs
     78         false,
     79         false,
     80         false,
     81         false,
     82     };
     83     return gCoeffReferencesBlendConst[coeff];
     84     GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
     85                      SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
     86 
     87     GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
     88     GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
     89     GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
     90     GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
     91     GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
     92     GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
     93     GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
     94     GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
     95     GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
     96     GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
     97     GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
     98     GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
     99     GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
    100     GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
    101 
    102     GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
    103     GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
    104     GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
    105     GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
    106 
     107     // the assertion for gXfermodeCoeff2Blend has to be in GrGpu scope
    108     GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
    109                      SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
    110 }
    111 
    112 ///////////////////////////////////////////////////////////////////////////////
    113 
    114 static bool gPrintStartupSpew;
    115 
    116 GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
    117     : GrGpu(context)
    118     , fGLContext(ctx) {
    119 
    120     SkASSERT(ctx.isInitialized());
    121     fCaps.reset(SkRef(ctx.caps()));
    122 
    123     fHWBoundTextures.reset(this->glCaps().maxFragmentTextureUnits());
    124     fHWPathTexGenSettings.reset(this->glCaps().maxFixedFunctionTextureCoords());
    125 
    126     GrGLClearErr(fGLContext.interface());
    127     if (gPrintStartupSpew) {
    128         const GrGLubyte* vendor;
    129         const GrGLubyte* renderer;
    130         const GrGLubyte* version;
    131         GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
    132         GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
    133         GL_CALL_RET(version, GetString(GR_GL_VERSION));
    134         GrPrintf("------------------------- create GrGpuGL %p --------------\n",
    135                  this);
    136         GrPrintf("------ VENDOR %s\n", vendor);
    137         GrPrintf("------ RENDERER %s\n", renderer);
    138         GrPrintf("------ VERSION %s\n",  version);
    139         GrPrintf("------ EXTENSIONS\n");
    140 #if 0  // TODO: Reenable this after GrGLInterface's extensions can be accessed safely.
    141        ctx.extensions().print();
    142 #endif
    143         GrPrintf("\n");
    144         GrPrintf(this->glCaps().dump().c_str());
    145     }
    146 
    147     fProgramCache = SkNEW_ARGS(ProgramCache, (this));
    148 
    149     SkASSERT(this->glCaps().maxVertexAttributes() >= GrDrawState::kMaxVertexAttribCnt);
    150 
    151     fLastSuccessfulStencilFmtIdx = 0;
    152     fHWProgramID = 0;
    153 }
    154 
    155 GrGpuGL::~GrGpuGL() {
    156     if (0 != fHWProgramID) {
    157         // detach the current program so there is no confusion on OpenGL's part
    158         // that we want it to be deleted
    159         SkASSERT(fHWProgramID == fCurrentProgram->programID());
    160         GL_CALL(UseProgram(0));
    161     }
    162 
    163     delete fProgramCache;
    164 
     165     // This must be called before the GrDrawTarget destructor
    166     this->releaseGeometry();
    167     // This subclass must do this before the base class destructor runs
    168     // since we will unref the GrGLInterface.
    169     this->releaseResources();
    170 }
    171 
    172 ///////////////////////////////////////////////////////////////////////////////
    173 
    174 
    175 GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig,
    176                                                  GrPixelConfig surfaceConfig) const {
    177     if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
    178         return kBGRA_8888_GrPixelConfig;
    179     } else if (this->glContext().isMesa() &&
    180                GrBytesPerPixel(readConfig) == 4 &&
    181                GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
     182         // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
     183         // Perhaps this should be guarded by some compile-time or runtime check.
    184         return surfaceConfig;
    185     } else if (readConfig == kBGRA_8888_GrPixelConfig &&
    186                !this->glCaps().readPixelsSupported(this->glInterface(),
    187                                                    GR_GL_BGRA, GR_GL_UNSIGNED_BYTE)) {
    188         return kRGBA_8888_GrPixelConfig;
    189     } else {
    190         return readConfig;
    191     }
    192 }
    193 
    194 GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig,
    195                                                   GrPixelConfig surfaceConfig) const {
    196     if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
    197         return kBGRA_8888_GrPixelConfig;
    198     } else {
    199         return writeConfig;
    200     }
    201 }
    202 
    203 bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
    204     if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
    205         return false;
    206     }
    207     if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) {
    208         // In general ES2 requires the internal format of the texture and the format of the src
     209         // pixels to match. However, it may or may not be possible to upload BGRA data to an RGBA
    210         // texture. It depends upon which extension added BGRA. The Apple extension allows it
    211         // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
    212         // internal format).
    213         if (this->glCaps().isConfigTexturable(kBGRA_8888_GrPixelConfig) &&
    214             !this->glCaps().bgraIsInternalFormat() &&
    215             kBGRA_8888_GrPixelConfig == srcConfig &&
    216             kRGBA_8888_GrPixelConfig == texture->config()) {
    217             return true;
    218         } else {
    219             return false;
    220         }
    221     } else {
    222         return true;
    223     }
    224 }
    225 
    226 bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const {
    227     return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
    228 }
    229 
    230 void GrGpuGL::onResetContext(uint32_t resetBits) {
     231     // we don't use the depth buffer at all
    232     if (resetBits & kMisc_GrGLBackendState) {
    233         GL_CALL(Disable(GR_GL_DEPTH_TEST));
    234         GL_CALL(DepthMask(GR_GL_FALSE));
    235 
    236         fHWDrawFace = GrDrawState::kInvalid_DrawFace;
    237         fHWDitherEnabled = kUnknown_TriState;
    238 
    239         if (kGL_GrGLStandard == this->glStandard()) {
    240             // Desktop-only state that we never change
    241             if (!this->glCaps().isCoreProfile()) {
    242                 GL_CALL(Disable(GR_GL_POINT_SMOOTH));
    243                 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
    244                 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
    245                 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
    246                 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
    247                 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
    248             }
     249             // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
    250             // core profile. This seems like a bug since the core spec removes any mention of
    251             // GL_ARB_imaging.
    252             if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
    253                 GL_CALL(Disable(GR_GL_COLOR_TABLE));
    254             }
    255             GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
    256             // Since ES doesn't support glPointSize at all we always use the VS to
    257             // set the point size
    258             GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
    259 
    260             // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
    261             // currently part of our gl interface. There are probably others as
    262             // well.
    263         }
    264         fHWWriteToColor = kUnknown_TriState;
    265         // we only ever use lines in hairline mode
    266         GL_CALL(LineWidth(1));
    267     }
    268 
    269     if (resetBits & kAA_GrGLBackendState) {
    270         fHWAAState.invalidate();
    271     }
    272 
    273     fHWActiveTextureUnitIdx = -1; // invalid
    274 
    275     if (resetBits & kTextureBinding_GrGLBackendState) {
    276         for (int s = 0; s < fHWBoundTextures.count(); ++s) {
    277             fHWBoundTextures[s] = NULL;
    278         }
    279     }
    280 
    281     if (resetBits & kBlend_GrGLBackendState) {
    282         fHWBlendState.invalidate();
    283     }
    284 
    285     if (resetBits & kView_GrGLBackendState) {
    286         fHWScissorSettings.invalidate();
    287         fHWViewport.invalidate();
    288     }
    289 
    290     if (resetBits & kStencil_GrGLBackendState) {
    291         fHWStencilSettings.invalidate();
    292         fHWStencilTestEnabled = kUnknown_TriState;
    293     }
    294 
    295     // Vertex
    296     if (resetBits & kVertex_GrGLBackendState) {
    297         fHWGeometryState.invalidate();
    298     }
    299 
    300     if (resetBits & kRenderTarget_GrGLBackendState) {
    301         fHWBoundRenderTarget = NULL;
    302     }
    303 
    304     if (resetBits & kPathRendering_GrGLBackendState) {
    305         if (this->caps()->pathRenderingSupport()) {
    306             fHWProjectionMatrixState.invalidate();
    307             // we don't use the model view matrix.
    308             GL_CALL(MatrixLoadIdentity(GR_GL_MODELVIEW));
    309 
    310             for (int i = 0; i < this->glCaps().maxFixedFunctionTextureCoords(); ++i) {
    311                 GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL));
    312                 fHWPathTexGenSettings[i].fMode = GR_GL_NONE;
    313                 fHWPathTexGenSettings[i].fNumComponents = 0;
    314             }
    315             fHWActivePathTexGenSets = 0;
    316         }
    317         fHWPathStencilSettings.invalidate();
    318     }
    319 
     320     // we assume the following pixel store values
    321     if (resetBits & kPixelStore_GrGLBackendState) {
    322         if (this->glCaps().unpackRowLengthSupport()) {
    323             GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    324         }
    325         if (this->glCaps().packRowLengthSupport()) {
    326             GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
    327         }
    328         if (this->glCaps().unpackFlipYSupport()) {
    329             GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    330         }
    331         if (this->glCaps().packFlipYSupport()) {
    332             GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
    333         }
    334     }
    335 
    336     if (resetBits & kProgram_GrGLBackendState) {
    337         fHWProgramID = 0;
    338         fSharedGLProgramState.invalidate();
    339     }
    340 }
    341 
    342 namespace {
    343 
    344 GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
    345     // By default, GrRenderTargets are GL's normal orientation so that they
    346     // can be drawn to by the outside world without the client having
    347     // to render upside down.
    348     if (kDefault_GrSurfaceOrigin == origin) {
    349         return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
    350     } else {
    351         return origin;
    352     }
    353 }
    354 
    355 }
    356 
    357 GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
    358     if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) {
    359         return NULL;
    360     }
    361 
    362     if (0 == desc.fTextureHandle) {
    363         return NULL;
    364     }
    365 
    366     int maxSize = this->caps()->maxTextureSize();
    367     if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
    368         return NULL;
    369     }
    370 
    371     GrGLTexture::Desc glTexDesc;
    372     // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    373     glTexDesc.fFlags = (GrTextureFlags) desc.fFlags;
    374     glTexDesc.fWidth = desc.fWidth;
    375     glTexDesc.fHeight = desc.fHeight;
    376     glTexDesc.fConfig = desc.fConfig;
    377     glTexDesc.fSampleCnt = desc.fSampleCnt;
    378     glTexDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle);
    379     glTexDesc.fIsWrapped = true;
    380     bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    381     // FIXME:  this should be calling resolve_origin(), but Chrome code is currently
    382     // assuming the old behaviour, which is that backend textures are always
    383     // BottomLeft, even for non-RT's.  Once Chrome is fixed, change this to:
    384     // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    385     if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
    386         glTexDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    387     } else {
    388         glTexDesc.fOrigin = desc.fOrigin;
    389     }
    390 
    391     GrGLTexture* texture = NULL;
    392     if (renderTarget) {
    393         GrGLRenderTarget::Desc glRTDesc;
    394         glRTDesc.fRTFBOID = 0;
    395         glRTDesc.fTexFBOID = 0;
    396         glRTDesc.fMSColorRenderbufferID = 0;
    397         glRTDesc.fConfig = desc.fConfig;
    398         glRTDesc.fSampleCnt = desc.fSampleCnt;
    399         glRTDesc.fOrigin = glTexDesc.fOrigin;
    400         glRTDesc.fCheckAllocation = false;
    401         if (!this->createRenderTargetObjects(glTexDesc.fWidth,
    402                                              glTexDesc.fHeight,
    403                                              glTexDesc.fTextureID,
    404                                              &glRTDesc)) {
    405             return NULL;
    406         }
    407         texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    408     } else {
    409         texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    410     }
    411     if (NULL == texture) {
    412         return NULL;
    413     }
    414 
    415     return texture;
    416 }
    417 
    418 GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    419     GrGLRenderTarget::Desc glDesc;
    420     glDesc.fConfig = desc.fConfig;
    421     glDesc.fRTFBOID = static_cast<GrGLuint>(desc.fRenderTargetHandle);
    422     glDesc.fMSColorRenderbufferID = 0;
    423     glDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    424     glDesc.fSampleCnt = desc.fSampleCnt;
    425     glDesc.fIsWrapped = true;
    426     glDesc.fCheckAllocation = false;
    427 
    428     glDesc.fOrigin = resolve_origin(desc.fOrigin, true);
    429     GrGLIRect viewport;
    430     viewport.fLeft   = 0;
    431     viewport.fBottom = 0;
    432     viewport.fWidth  = desc.fWidth;
    433     viewport.fHeight = desc.fHeight;
    434 
    435     GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget,
    436                                      (this, glDesc, viewport));
    437     if (desc.fStencilBits) {
    438         GrGLStencilBuffer::Format format;
    439         format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat;
    440         format.fPacked = false;
    441         format.fStencilBits = desc.fStencilBits;
    442         format.fTotalBits = desc.fStencilBits;
    443         static const bool kIsSBWrapped = false;
    444         GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer,
    445                                            (this,
    446                                             kIsSBWrapped,
    447                                             0,
    448                                             desc.fWidth,
    449                                             desc.fHeight,
    450                                             desc.fSampleCnt,
    451                                             format));
    452         tgt->setStencilBuffer(sb);
    453         sb->unref();
    454     }
    455     return tgt;
    456 }
    457 
    458 ////////////////////////////////////////////////////////////////////////////////
    459 
    460 bool GrGpuGL::onWriteTexturePixels(GrTexture* texture,
    461                                    int left, int top, int width, int height,
    462                                    GrPixelConfig config, const void* buffer,
    463                                    size_t rowBytes) {
    464     if (NULL == buffer) {
    465         return false;
    466     }
    467     GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
    468 
    469     this->setScratchTextureUnit();
    470     GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
    471     GrGLTexture::Desc desc;
    472     desc.fFlags = glTex->desc().fFlags;
    473     desc.fWidth = glTex->width();
    474     desc.fHeight = glTex->height();
    475     desc.fConfig = glTex->config();
    476     desc.fSampleCnt = glTex->desc().fSampleCnt;
    477     desc.fTextureID = glTex->textureID();
    478     desc.fOrigin = glTex->origin();
    479 
    480     bool success = false;
    481     if (GrPixelConfigIsCompressed(desc.fConfig)) {
    482         // We check that config == desc.fConfig in GrGpuGL::canWriteTexturePixels()
    483         SkASSERT(config == desc.fConfig);
    484         success = this->uploadCompressedTexData(desc, buffer, false,
    485                                                 left, top, width, height);
    486     } else {
    487         success = this->uploadTexData(desc, false,
    488                                       left, top, width, height,
    489                                       config, buffer, rowBytes);
    490     }
    491 
    492     if (success) {
    493         texture->impl()->dirtyMipMaps(true);
    494         return true;
    495     }
    496 
    497     return false;
    498 }
    499 
    500 namespace {
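         // Clips a pixel-op rectangle against the surface bounds: defaults *rowBytes to tightly
         // packed, intersects the rect with the surface, advances *data to the first pixel of the
         // clipped rect, and returns false if nothing is left to read or write. For example, a
         // 10x10 op at (-2,-2) on an 8x8 surface with 4 bpp becomes an 8x8 op at (0,0) with *data
         // advanced by 2 rows + 2 pixels (88 bytes at the defaulted rowBytes of 40).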
    501 bool adjust_pixel_ops_params(int surfaceWidth,
    502                              int surfaceHeight,
    503                              size_t bpp,
    504                              int* left, int* top, int* width, int* height,
    505                              const void** data,
    506                              size_t* rowBytes) {
    507     if (!*rowBytes) {
    508         *rowBytes = *width * bpp;
    509     }
    510 
    511     SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
    512     SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);
    513 
    514     if (!subRect.intersect(bounds)) {
    515         return false;
    516     }
    517     *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) +
    518           (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);
    519 
    520     *left = subRect.fLeft;
    521     *top = subRect.fTop;
    522     *width = subRect.width();
    523     *height = subRect.height();
    524     return true;
    525 }
    526 
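         // Returns the live GL error when the texture was created with
         // kCheckAllocation_GrTextureFlagBit; otherwise defers to the compile-time
         // CHECK_ALLOC_ERROR policy above.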
    527 GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) {
    528     if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) {
    529         return GR_GL_GET_ERROR(interface);
    530     } else {
    531         return CHECK_ALLOC_ERROR(interface);
    532     }
    533 }
    534 
    535 }
    536 
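         // Uploads uncompressed pixel data to the GR_GL_TEXTURE_2D already bound by the caller.
         // Clips the destination rect to the texture, trims rows (or sets GR_GL_UNPACK_ROW_LENGTH)
         // when the source has padding, flips y in software or via GR_GL_UNPACK_FLIP_Y for
         // bottom-left textures, and allocates with TexStorage2D/TexImage2D for a full-size new
         // texture or updates with TexSubImage2D otherwise.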
    537 bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
    538                             bool isNewTexture,
    539                             int left, int top, int width, int height,
    540                             GrPixelConfig dataConfig,
    541                             const void* data,
    542                             size_t rowBytes) {
    543     SkASSERT(NULL != data || isNewTexture);
    544 
    545     // If we're uploading compressed data then we should be using uploadCompressedTexData
    546     SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
    547 
    548     size_t bpp = GrBytesPerPixel(dataConfig);
    549     if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top,
    550                                  &width, &height, &data, &rowBytes)) {
    551         return false;
    552     }
    553     size_t trimRowBytes = width * bpp;
    554 
    555     // in case we need a temporary, trimmed copy of the src pixels
    556     SkAutoSMalloc<128 * 128> tempStorage;
    557 
    558     // paletted textures cannot be partially updated
     559     // We currently lazily create MIPMAPs when we see a draw with
    560     // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the
    561     // MIP levels are all created when the texture is created. So for now we don't use
    562     // texture storage.
    563     bool useTexStorage = false &&
    564                          isNewTexture &&
    565                          kIndex_8_GrPixelConfig != desc.fConfig &&
    566                          this->glCaps().texStorageSupport();
    567 
    568     if (useTexStorage && kGL_GrGLStandard == this->glStandard()) {
    569         // 565 is not a sized internal format on desktop GL. So on desktop with
    570         // 565 we always use an unsized internal format to let the system pick
    571         // the best sized format to convert the 565 data to. Since TexStorage
    572         // only allows sized internal formats we will instead use TexImage2D.
    573         useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
    574     }
    575 
    576     GrGLenum internalFormat;
    577     GrGLenum externalFormat;
    578     GrGLenum externalType;
    579     // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
    580     // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
    581     // size of the internal format whenever possible and so only use a sized internal format when
    582     // using texture storage.
    583     if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat,
    584                                  &externalFormat, &externalType)) {
    585         return false;
    586     }
    587 
    588     if (!isNewTexture && GR_GL_PALETTE8_RGBA8 == internalFormat) {
    589         // paletted textures cannot be updated
    590         return false;
    591     }
    592 
    593     /*
    594      *  check whether to allocate a temporary buffer for flipping y or
    595      *  because our srcData has extra bytes past each row. If so, we need
    596      *  to trim those off here, since GL ES may not let us specify
    597      *  GL_UNPACK_ROW_LENGTH.
    598      */
    599     bool restoreGLRowLength = false;
    600     bool swFlipY = false;
    601     bool glFlipY = false;
    602     if (NULL != data) {
    603         if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
    604             if (this->glCaps().unpackFlipYSupport()) {
    605                 glFlipY = true;
    606             } else {
    607                 swFlipY = true;
    608             }
    609         }
    610         if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
    611             // can't use this for flipping, only non-neg values allowed. :(
    612             if (rowBytes != trimRowBytes) {
    613                 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
    614                 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
    615                 restoreGLRowLength = true;
    616             }
    617         } else {
    618             if (trimRowBytes != rowBytes || swFlipY) {
    619                 // copy data into our new storage, skipping the trailing bytes
    620                 size_t trimSize = height * trimRowBytes;
    621                 const char* src = (const char*)data;
    622                 if (swFlipY) {
    623                     src += (height - 1) * rowBytes;
    624                 }
    625                 char* dst = (char*)tempStorage.reset(trimSize);
    626                 for (int y = 0; y < height; y++) {
    627                     memcpy(dst, src, trimRowBytes);
    628                     if (swFlipY) {
    629                         src -= rowBytes;
    630                     } else {
    631                         src += rowBytes;
    632                     }
    633                     dst += trimRowBytes;
    634                 }
    635                 // now point data to our copied version
    636                 data = tempStorage.get();
    637             }
    638         }
    639         if (glFlipY) {
    640             GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
    641         }
    642         GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, static_cast<GrGLint>(bpp)));
    643     }
    644     bool succeeded = true;
    645     if (isNewTexture &&
    646         0 == left && 0 == top &&
    647         desc.fWidth == width && desc.fHeight == height) {
    648         CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
    649         if (useTexStorage) {
     650             // We never resize or change formats of textures.
    651             GL_ALLOC_CALL(this->glInterface(),
    652                           TexStorage2D(GR_GL_TEXTURE_2D,
    653                                        1, // levels
    654                                        internalFormat,
    655                                        desc.fWidth, desc.fHeight));
    656         } else {
    657             if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
    658                 GrGLsizei imageSize = desc.fWidth * desc.fHeight +
    659                                       kGrColorTableSize;
    660                 GL_ALLOC_CALL(this->glInterface(),
    661                               CompressedTexImage2D(GR_GL_TEXTURE_2D,
    662                                                    0, // level
    663                                                    internalFormat,
    664                                                    desc.fWidth, desc.fHeight,
    665                                                    0, // border
    666                                                    imageSize,
    667                                                    data));
    668             } else {
    669                 GL_ALLOC_CALL(this->glInterface(),
    670                               TexImage2D(GR_GL_TEXTURE_2D,
    671                                          0, // level
    672                                          internalFormat,
    673                                          desc.fWidth, desc.fHeight,
    674                                          0, // border
    675                                          externalFormat, externalType,
    676                                          data));
    677             }
    678         }
    679         GrGLenum error = check_alloc_error(desc, this->glInterface());
    680         if (error != GR_GL_NO_ERROR) {
    681             succeeded = false;
    682         } else {
    683             // if we have data and we used TexStorage to create the texture, we
    684             // now upload with TexSubImage.
    685             if (NULL != data && useTexStorage) {
    686                 GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
    687                                       0, // level
    688                                       left, top,
    689                                       width, height,
    690                                       externalFormat, externalType,
    691                                       data));
    692             }
    693         }
    694     } else {
    695         if (swFlipY || glFlipY) {
    696             top = desc.fHeight - (top + height);
    697         }
    698         GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
    699                               0, // level
    700                               left, top,
    701                               width, height,
    702                               externalFormat, externalType, data));
    703     }
    704 
    705     if (restoreGLRowLength) {
    706         SkASSERT(this->glCaps().unpackRowLengthSupport());
    707         GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    708     }
    709     if (glFlipY) {
    710         GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    711     }
    712     return succeeded;
    713 }
    714 
     715 // TODO: This function uses a lot of wonky semantics, like: if width == -1
     716 // then set width = desc.fWidth ... blah. A better way to do it might be to
    717 // create a CompressedTexData struct that takes a desc/ptr and figures out
    718 // the proper upload semantics. Then users can construct this function how they
    719 // see fit if they want to go against the "standard" way to do it.
    720 bool GrGpuGL::uploadCompressedTexData(const GrGLTexture::Desc& desc,
    721                                       const void* data,
    722                                       bool isNewTexture,
    723                                       int left, int top, int width, int height) {
    724     SkASSERT(NULL != data || isNewTexture);
    725 
    726     // No support for software flip y, yet...
    727     SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
    728 
    729     if (-1 == width) {
    730         width = desc.fWidth;
    731     }
    732 #ifdef SK_DEBUG
    733     else {
    734         SkASSERT(width <= desc.fWidth);
    735     }
    736 #endif
    737 
    738     if (-1 == height) {
    739         height = desc.fHeight;
    740     }
    741 #ifdef SK_DEBUG
    742     else {
    743         SkASSERT(height <= desc.fHeight);
    744     }
    745 #endif
    746 
    747     // Make sure that the width and height that we pass to OpenGL
     748     // are multiples of the block size.
    749     int dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
    750 
    751     // We only need the internal format for compressed 2D textures.
    752     GrGLenum internalFormat = 0;
    753     if (!this->configToGLFormats(desc.fConfig, false, &internalFormat, NULL, NULL)) {
    754         return false;
    755     }
    756 
    757     bool succeeded = true;
    758     CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
    759 
    760     if (isNewTexture) {
    761         GL_ALLOC_CALL(this->glInterface(),
    762                       CompressedTexImage2D(GR_GL_TEXTURE_2D,
    763                                            0, // level
    764                                            internalFormat,
    765                                            width, height,
    766                                            0, // border
    767                                            dataSize,
    768                                            data));
    769     } else {
    770         GL_ALLOC_CALL(this->glInterface(),
    771                       CompressedTexSubImage2D(GR_GL_TEXTURE_2D,
    772                                               0, // level
    773                                               left, top,
    774                                               width, height,
    775                                               internalFormat,
    776                                               dataSize,
    777                                               data));
    778     }
    779 
    780     GrGLenum error = check_alloc_error(desc, this->glInterface());
    781     if (error != GR_GL_NO_ERROR) {
    782         succeeded = false;
    783     }
    784     return succeeded;
    785 }
    786 
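         // Allocates storage for a multisampled renderbuffer using whichever MSAA-FBO flavor the
         // context supports and reports whether the allocation succeeded (no GL error was raised).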
    787 static bool renderbuffer_storage_msaa(GrGLContext& ctx,
    788                                       int sampleCount,
    789                                       GrGLenum format,
    790                                       int width, int height) {
    791     CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    792     SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    793     switch (ctx.caps()->msFBOType()) {
    794         case GrGLCaps::kDesktop_ARB_MSFBOType:
    795         case GrGLCaps::kDesktop_EXT_MSFBOType:
    796         case GrGLCaps::kES_3_0_MSFBOType:
    797             GL_ALLOC_CALL(ctx.interface(),
    798                             RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
    799                                                             sampleCount,
    800                                                             format,
    801                                                             width, height));
    802             break;
    803         case GrGLCaps::kES_Apple_MSFBOType:
    804             GL_ALLOC_CALL(ctx.interface(),
    805                             RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
    806                                                                     sampleCount,
    807                                                                     format,
    808                                                                     width, height));
    809             break;
    810         case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
    811         case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
    812             GL_ALLOC_CALL(ctx.interface(),
    813                             RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
    814                                                                 sampleCount,
    815                                                                 format,
    816                                                                 width, height));
    817             break;
    818         case GrGLCaps::kNone_MSFBOType:
    819             SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
    820             break;
    821     }
     822     return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
    823 }
    824 
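         // Creates the FBO(s) backing a texture-based render target: a resolve FBO that the
         // texture is attached to and, when multisampling via renderbuffers, a second FBO with an
         // MSAA color renderbuffer. Fills in *desc with the generated IDs; on failure any objects
         // already created are deleted and false is returned.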
    825 bool GrGpuGL::createRenderTargetObjects(int width, int height,
    826                                         GrGLuint texID,
    827                                         GrGLRenderTarget::Desc* desc) {
    828     desc->fMSColorRenderbufferID = 0;
    829     desc->fRTFBOID = 0;
    830     desc->fTexFBOID = 0;
    831     desc->fIsWrapped = false;
    832 
    833     GrGLenum status;
    834 
    835     GrGLenum msColorFormat = 0; // suppress warning
    836 
    837     if (desc->fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
    838         goto FAILED;
    839     }
    840 
    841     GL_CALL(GenFramebuffers(1, &desc->fTexFBOID));
    842     if (!desc->fTexFBOID) {
    843         goto FAILED;
    844     }
    845 
    846 
     847     // If we are using multisampling we will create two FBOs. We render to one and then resolve to
     848     // the texture bound to the other. The exception is the IMG multisample extension. With this
     849     // extension the texture is multisampled when rendered to and then auto-resolves when it is
     850     // rendered from.
    851     if (desc->fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
    852         GL_CALL(GenFramebuffers(1, &desc->fRTFBOID));
    853         GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID));
    854         if (!desc->fRTFBOID ||
    855             !desc->fMSColorRenderbufferID ||
    856             !this->configToGLFormats(desc->fConfig,
    857                                      // ES2 and ES3 require sized internal formats for rb storage.
    858                                      kGLES_GrGLStandard == this->glStandard(),
    859                                      &msColorFormat,
    860                                      NULL,
    861                                      NULL)) {
    862             goto FAILED;
    863         }
    864     } else {
    865         desc->fRTFBOID = desc->fTexFBOID;
    866     }
    867 
    868     // below here we may bind the FBO
    869     fHWBoundRenderTarget = NULL;
    870     if (desc->fRTFBOID != desc->fTexFBOID) {
    871         SkASSERT(desc->fSampleCnt > 0);
    872         GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER,
    873                                desc->fMSColorRenderbufferID));
    874         if (!renderbuffer_storage_msaa(fGLContext,
    875                                        desc->fSampleCnt,
    876                                        msColorFormat,
    877                                        width, height)) {
    878             goto FAILED;
    879         }
    880         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID));
    881         GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
    882                                       GR_GL_COLOR_ATTACHMENT0,
    883                                       GR_GL_RENDERBUFFER,
    884                                       desc->fMSColorRenderbufferID));
    885         if (desc->fCheckAllocation ||
    886             !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
    887             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
    888             if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
    889                 goto FAILED;
    890             }
    891             fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
    892         }
    893     }
    894     GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID));
    895 
    896     if (this->glCaps().usesImplicitMSAAResolve() && desc->fSampleCnt > 0) {
    897         GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
    898                                                 GR_GL_COLOR_ATTACHMENT0,
    899                                                 GR_GL_TEXTURE_2D,
    900                                                 texID, 0, desc->fSampleCnt));
    901     } else {
    902         GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
    903                                      GR_GL_COLOR_ATTACHMENT0,
    904                                      GR_GL_TEXTURE_2D,
    905                                      texID, 0));
    906     }
    907     if (desc->fCheckAllocation ||
    908         !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
    909         GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
    910         if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
    911             goto FAILED;
    912         }
    913         fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
    914     }
    915 
    916     return true;
    917 
    918 FAILED:
    919     if (desc->fMSColorRenderbufferID) {
    920         GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID));
    921     }
    922     if (desc->fRTFBOID != desc->fTexFBOID) {
    923         GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID));
    924     }
    925     if (desc->fTexFBOID) {
    926         GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID));
    927     }
    928     return false;
    929 }
    930 
    931 // good to set a break-point here to know when createTexture fails
    932 static GrTexture* return_null_texture() {
    933 //    SkDEBUGFAIL("null texture");
    934     return NULL;
    935 }
    936 
    937 #if 0 && defined(SK_DEBUG)
    938 static size_t as_size_t(int x) {
    939     return x;
    940 }
    941 #endif
    942 
    943 GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
    944                                     const void* srcData,
    945                                     size_t rowBytes) {
    946 
    947     GrGLTexture::Desc glTexDesc;
    948     GrGLRenderTarget::Desc  glRTDesc;
    949 
     950     // Attempt to catch un- or wrongly initialized sample counts.
    951     SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
    952     // We fail if the MSAA was requested and is not available.
    953     if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
    954         //GrPrintf("MSAA RT requested but not supported on this platform.");
    955         return return_null_texture();
    956     }
    957     // If the sample count exceeds the max then we clamp it.
    958     glTexDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    959 
    960     glTexDesc.fFlags  = desc.fFlags;
    961     glTexDesc.fWidth  = desc.fWidth;
    962     glTexDesc.fHeight = desc.fHeight;
    963     glTexDesc.fConfig = desc.fConfig;
    964     glTexDesc.fIsWrapped = false;
    965 
    966     glRTDesc.fMSColorRenderbufferID = 0;
    967     glRTDesc.fRTFBOID = 0;
    968     glRTDesc.fTexFBOID = 0;
    969     glRTDesc.fIsWrapped = false;
    970     glRTDesc.fConfig = glTexDesc.fConfig;
    971     glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit);
    972 
    973     bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit);
    974 
    975     glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    976     glRTDesc.fOrigin = glTexDesc.fOrigin;
    977 
    978     glRTDesc.fSampleCnt = glTexDesc.fSampleCnt;
    979     if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() &&
    980         desc.fSampleCnt) {
    981         //GrPrintf("MSAA RT requested but not supported on this platform.");
    982         return return_null_texture();
    983     }
    984 
    985     if (renderTarget) {
    986         int maxRTSize = this->caps()->maxRenderTargetSize();
    987         if (glTexDesc.fWidth > maxRTSize || glTexDesc.fHeight > maxRTSize) {
    988             return return_null_texture();
    989         }
    990     } else {
    991         int maxSize = this->caps()->maxTextureSize();
    992         if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
    993             return return_null_texture();
    994         }
    995     }
    996 
    997     GL_CALL(GenTextures(1, &glTexDesc.fTextureID));
    998 
    999     if (!glTexDesc.fTextureID) {
   1000         return return_null_texture();
   1001     }
   1002 
   1003     this->setScratchTextureUnit();
   1004     GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));
   1005 
   1006     if (renderTarget && this->glCaps().textureUsageSupport()) {
   1007         // provides a hint about how this texture will be used
   1008         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1009                               GR_GL_TEXTURE_USAGE,
   1010                               GR_GL_FRAMEBUFFER_ATTACHMENT));
   1011     }
   1012 
   1013     // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
   1014     // drivers have a bug where an FBO won't be complete if it includes a
   1015     // texture that is not mipmap complete (considering the filter in use).
   1016     GrGLTexture::TexParams initialTexParams;
   1017     // we only set a subset here so invalidate first
   1018     initialTexParams.invalidate();
   1019     initialTexParams.fMinFilter = GR_GL_NEAREST;
   1020     initialTexParams.fMagFilter = GR_GL_NEAREST;
   1021     initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
   1022     initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
   1023     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1024                           GR_GL_TEXTURE_MAG_FILTER,
   1025                           initialTexParams.fMagFilter));
   1026     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1027                           GR_GL_TEXTURE_MIN_FILTER,
   1028                           initialTexParams.fMinFilter));
   1029     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1030                           GR_GL_TEXTURE_WRAP_S,
   1031                           initialTexParams.fWrapS));
   1032     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1033                           GR_GL_TEXTURE_WRAP_T,
   1034                           initialTexParams.fWrapT));
   1035     if (!this->uploadTexData(glTexDesc, true, 0, 0,
   1036                              glTexDesc.fWidth, glTexDesc.fHeight,
   1037                              desc.fConfig, srcData, rowBytes)) {
   1038         GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
   1039         return return_null_texture();
   1040     }
   1041 
   1042     GrGLTexture* tex;
   1043     if (renderTarget) {
   1044         // unbind the texture from the texture unit before binding it to the frame buffer
   1045         GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
   1046 
   1047         if (!this->createRenderTargetObjects(glTexDesc.fWidth,
   1048                                              glTexDesc.fHeight,
   1049                                              glTexDesc.fTextureID,
   1050                                              &glRTDesc)) {
   1051             GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
   1052             return return_null_texture();
   1053         }
   1054         tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
   1055     } else {
   1056         tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
   1057     }
   1058     tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
   1059 #ifdef TRACE_TEXTURE_CREATION
   1060     GrPrintf("--- new texture [%d] size=(%d %d) config=%d\n",
   1061              glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
   1062 #endif
   1063     return tex;
   1064 }
   1065 
   1066 GrTexture* GrGpuGL::onCreateCompressedTexture(const GrTextureDesc& desc,
   1067                                               const void* srcData) {
   1068 
    1069     if (SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
   1070         return return_null_texture();
   1071     }
   1072 
   1073     // Make sure that we're not flipping Y.
   1074     GrSurfaceOrigin texOrigin = resolve_origin(desc.fOrigin, false);
   1075     if (kBottomLeft_GrSurfaceOrigin == texOrigin) {
   1076         return return_null_texture();
   1077     }
   1078 
   1079     GrGLTexture::Desc glTexDesc;
   1080 
   1081     glTexDesc.fFlags  = desc.fFlags;
   1082     glTexDesc.fWidth  = desc.fWidth;
   1083     glTexDesc.fHeight = desc.fHeight;
   1084     glTexDesc.fConfig = desc.fConfig;
   1085     glTexDesc.fIsWrapped = false;
   1086     glTexDesc.fOrigin = texOrigin;
   1087 
   1088     int maxSize = this->caps()->maxTextureSize();
   1089     if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
   1090         return return_null_texture();
   1091     }
   1092 
   1093     GL_CALL(GenTextures(1, &glTexDesc.fTextureID));
   1094 
   1095     if (!glTexDesc.fTextureID) {
   1096         return return_null_texture();
   1097     }
   1098 
   1099     this->setScratchTextureUnit();
   1100     GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));
   1101 
   1102     // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
   1103     // drivers have a bug where an FBO won't be complete if it includes a
   1104     // texture that is not mipmap complete (considering the filter in use).
   1105     GrGLTexture::TexParams initialTexParams;
   1106     // we only set a subset here so invalidate first
   1107     initialTexParams.invalidate();
   1108     initialTexParams.fMinFilter = GR_GL_NEAREST;
   1109     initialTexParams.fMagFilter = GR_GL_NEAREST;
   1110     initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
   1111     initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
   1112     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1113                           GR_GL_TEXTURE_MAG_FILTER,
   1114                           initialTexParams.fMagFilter));
   1115     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1116                           GR_GL_TEXTURE_MIN_FILTER,
   1117                           initialTexParams.fMinFilter));
   1118     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1119                           GR_GL_TEXTURE_WRAP_S,
   1120                           initialTexParams.fWrapS));
   1121     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   1122                           GR_GL_TEXTURE_WRAP_T,
   1123                           initialTexParams.fWrapT));
   1124 
   1125     if (!this->uploadCompressedTexData(glTexDesc, srcData)) {
   1126         GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
   1127         return return_null_texture();
   1128     }
   1129 
   1130     GrGLTexture* tex;
   1131     tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
   1132     tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
   1133 #ifdef TRACE_TEXTURE_CREATION
   1134     GrPrintf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
   1135              glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
   1136 #endif
   1137     return tex;
   1138 }
   1139 
   1140 namespace {
   1141 
   1142 const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount;
   1143 
   1144 void inline get_stencil_rb_sizes(const GrGLInterface* gl,
   1145                                  GrGLStencilBuffer::Format* format) {
   1146 
   1147     // we shouldn't ever know one size and not the other
   1148     SkASSERT((kUnknownBitCount == format->fStencilBits) ==
   1149              (kUnknownBitCount == format->fTotalBits));
   1150     if (kUnknownBitCount == format->fStencilBits) {
   1151         GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
   1152                                          GR_GL_RENDERBUFFER_STENCIL_SIZE,
   1153                                          (GrGLint*)&format->fStencilBits);
   1154         if (format->fPacked) {
   1155             GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
   1156                                              GR_GL_RENDERBUFFER_DEPTH_SIZE,
   1157                                              (GrGLint*)&format->fTotalBits);
   1158             format->fTotalBits += format->fStencilBits;
   1159         } else {
   1160             format->fTotalBits = format->fStencilBits;
   1161         }
   1162     }
   1163 }
   1164 }
   1165 
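         // Attempts to create and attach a stencil buffer for rt, cycling through the supported
         // stencil formats starting with the last one that succeeded; returns false if none of
         // them can be both allocated and attached.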
   1166 bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
   1167                                                  int width, int height) {
   1168 
   1169     // All internally created RTs are also textures. We don't create
    1170     // SBs for a client's standalone RT (that is, an RT that isn't also a texture).
   1171     SkASSERT(rt->asTexture());
   1172     SkASSERT(width >= rt->width());
   1173     SkASSERT(height >= rt->height());
   1174 
   1175     int samples = rt->numSamples();
   1176     GrGLuint sbID;
   1177     GL_CALL(GenRenderbuffers(1, &sbID));
   1178     if (!sbID) {
   1179         return false;
   1180     }
   1181 
   1182     int stencilFmtCnt = this->glCaps().stencilFormats().count();
   1183     for (int i = 0; i < stencilFmtCnt; ++i) {
   1184         GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
   1185         // we start with the last stencil format that succeeded in hopes
   1186         // that we won't go through this loop more than once after the
   1187         // first (painful) stencil creation.
   1188         int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
   1189         const GrGLCaps::StencilFormat& sFmt =
   1190                 this->glCaps().stencilFormats()[sIdx];
   1191         CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
   1192         // we do this "if" so that we don't call the multisample
   1193         // version on a GL that doesn't have an MSAA extension.
   1194         bool created;
   1195         if (samples > 0) {
   1196             created = renderbuffer_storage_msaa(fGLContext,
   1197                                                 samples,
   1198                                                 sFmt.fInternalFormat,
   1199                                                 width, height);
   1200         } else {
   1201             GL_ALLOC_CALL(this->glInterface(),
   1202                           RenderbufferStorage(GR_GL_RENDERBUFFER,
   1203                                               sFmt.fInternalFormat,
   1204                                               width, height));
   1205             created =
   1206                 (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
   1207         }
   1208         if (created) {
   1209             // After sized formats we attempt an unsized format and take
   1210             // whatever sizes GL gives us. In that case we query for the size.
   1211             GrGLStencilBuffer::Format format = sFmt;
   1212             get_stencil_rb_sizes(this->glInterface(), &format);
   1213             static const bool kIsWrapped = false;
   1214             SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
   1215                                                   (this, kIsWrapped, sbID, width, height,
   1216                                                   samples, format)));
   1217             if (this->attachStencilBufferToRenderTarget(sb, rt)) {
   1218                 fLastSuccessfulStencilFmtIdx = sIdx;
   1219                 sb->transferToCache();
   1220                 rt->setStencilBuffer(sb);
   1221                 return true;
    1222             }
    1223             sb->abandon(); // otherwise we lose sbID
   1224         }
   1225     }
   1226     GL_CALL(DeleteRenderbuffers(1, &sbID));
   1227     return false;
   1228 }
   1229 
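         // Attaches 'sb' to the render target's FBO, or detaches any existing stencil attachment
         // when 'sb' is NULL. A packed depth-stencil renderbuffer is bound to the depth attachment
         // as well. The first time a color config / stencil format pair is seen, the framebuffer is
         // validated with CheckFramebufferStatus and the combination is cached as verified.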
   1230 bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
   1231     GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;
   1232 
   1233     GrGLuint fbo = glrt->renderFBOID();
   1234 
   1235     if (NULL == sb) {
   1236         if (NULL != rt->getStencilBuffer()) {
   1237             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
   1238                                             GR_GL_STENCIL_ATTACHMENT,
   1239                                             GR_GL_RENDERBUFFER, 0));
   1240             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
   1241                                             GR_GL_DEPTH_ATTACHMENT,
   1242                                             GR_GL_RENDERBUFFER, 0));
   1243 #ifdef SK_DEBUG
   1244             GrGLenum status;
   1245             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
   1246             SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
   1247 #endif
   1248         }
   1249         return true;
   1250     } else {
   1251         GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb);
   1252         GrGLuint rb = glsb->renderbufferID();
   1253 
   1254         fHWBoundRenderTarget = NULL;
   1255         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
   1256         GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
   1257                                         GR_GL_STENCIL_ATTACHMENT,
   1258                                         GR_GL_RENDERBUFFER, rb));
   1259         if (glsb->format().fPacked) {
   1260             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
   1261                                             GR_GL_DEPTH_ATTACHMENT,
   1262                                             GR_GL_RENDERBUFFER, rb));
   1263         } else {
   1264             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
   1265                                             GR_GL_DEPTH_ATTACHMENT,
   1266                                             GR_GL_RENDERBUFFER, 0));
   1267         }
   1268 
   1269         GrGLenum status;
   1270         if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
   1271             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
   1272             if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
   1273                 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
   1274                                               GR_GL_STENCIL_ATTACHMENT,
   1275                                               GR_GL_RENDERBUFFER, 0));
   1276                 if (glsb->format().fPacked) {
   1277                     GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
   1278                                                   GR_GL_DEPTH_ATTACHMENT,
   1279                                                   GR_GL_RENDERBUFFER, 0));
   1280                 }
   1281                 return false;
   1282             } else {
   1283                 fGLContext.caps()->markColorConfigAndStencilFormatAsVerified(
   1284                     rt->config(),
   1285                     glsb->format());
   1286             }
   1287         }
   1288         return true;
   1289     }
   1290 }
   1291 
   1292 ////////////////////////////////////////////////////////////////////////////////
   1293 
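         // For the two buffer factories below: when the caps prefer non-VBO dynamic data, dynamic
         // buffers are created with fID == 0 and the data stays in client memory. Otherwise a
         // buffer object is generated and its storage allocated up front so an out-of-memory
         // condition can be reported by returning NULL rather than failing later at draw time.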
   1294 GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(size_t size, bool dynamic) {
   1295     GrGLVertexBuffer::Desc desc;
   1296     desc.fDynamic = dynamic;
   1297     desc.fSizeInBytes = size;
   1298     desc.fIsWrapped = false;
   1299 
   1300     if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
   1301         desc.fID = 0;
   1302         GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
   1303         return vertexBuffer;
   1304     } else {
   1305         GL_CALL(GenBuffers(1, &desc.fID));
   1306         if (desc.fID) {
   1307             fHWGeometryState.setVertexBufferID(this, desc.fID);
   1308             CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
   1309             // make sure driver can allocate memory for this buffer
   1310             GL_ALLOC_CALL(this->glInterface(),
   1311                           BufferData(GR_GL_ARRAY_BUFFER,
   1312                                      (GrGLsizeiptr) desc.fSizeInBytes,
   1313                                      NULL,   // data ptr
   1314                                      desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
   1315             if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
   1316                 GL_CALL(DeleteBuffers(1, &desc.fID));
   1317                 this->notifyVertexBufferDelete(desc.fID);
   1318                 return NULL;
   1319             }
   1320             GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
   1321             return vertexBuffer;
   1322         }
   1323         return NULL;
   1324     }
   1325 }
   1326 
   1327 GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(size_t size, bool dynamic) {
   1328     GrGLIndexBuffer::Desc desc;
   1329     desc.fDynamic = dynamic;
   1330     desc.fSizeInBytes = size;
   1331     desc.fIsWrapped = false;
   1332 
   1333     if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
   1334         desc.fID = 0;
   1335         GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
   1336         return indexBuffer;
   1337     } else {
   1338         GL_CALL(GenBuffers(1, &desc.fID));
   1339         if (desc.fID) {
   1340             fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
   1341             CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
   1342             // make sure driver can allocate memory for this buffer
   1343             GL_ALLOC_CALL(this->glInterface(),
   1344                           BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
   1345                                      (GrGLsizeiptr) desc.fSizeInBytes,
   1346                                      NULL,  // data ptr
   1347                                      desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
   1348             if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
   1349                 GL_CALL(DeleteBuffers(1, &desc.fID));
   1350                 this->notifyIndexBufferDelete(desc.fID);
   1351                 return NULL;
   1352             }
   1353             GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
   1354             return indexBuffer;
   1355         }
   1356         return NULL;
   1357     }
   1358 }
   1359 
   1360 GrPath* GrGpuGL::onCreatePath(const SkPath& inPath, const SkStrokeRec& stroke) {
   1361     SkASSERT(this->caps()->pathRenderingSupport());
   1362     return SkNEW_ARGS(GrGLPath, (this, inPath, stroke));
   1363 }
   1364 
   1365 void GrGpuGL::flushScissor() {
   1366     if (fScissorState.fEnabled) {
   1367         // Only access the RT if scissoring is being enabled. We can call this before performing
    1368         // a glBlitFramebuffer for a surface->surface copy, which requires no RT to be bound to the
   1369         // GrDrawState.
   1370         const GrDrawState& drawState = this->getDrawState();
   1371         const GrGLRenderTarget* rt =
   1372             static_cast<const GrGLRenderTarget*>(drawState.getRenderTarget());
   1373 
   1374         SkASSERT(NULL != rt);
   1375         const GrGLIRect& vp = rt->getViewport();
   1376         GrGLIRect scissor;
   1377         scissor.setRelativeTo(vp,
   1378                               fScissorState.fRect.fLeft,
   1379                               fScissorState.fRect.fTop,
   1380                               fScissorState.fRect.width(),
   1381                               fScissorState.fRect.height(),
   1382                               rt->origin());
   1383         // if the scissor fully contains the viewport then we fall through and
   1384         // disable the scissor test.
   1385         if (!scissor.contains(vp)) {
   1386             if (fHWScissorSettings.fRect != scissor) {
   1387                 scissor.pushToGLScissor(this->glInterface());
   1388                 fHWScissorSettings.fRect = scissor;
   1389             }
   1390             if (kYes_TriState != fHWScissorSettings.fEnabled) {
   1391                 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
   1392                 fHWScissorSettings.fEnabled = kYes_TriState;
   1393             }
   1394             return;
   1395         }
   1396     }
   1397     if (kNo_TriState != fHWScissorSettings.fEnabled) {
   1398         GL_CALL(Disable(GR_GL_SCISSOR_TEST));
   1399         fHWScissorSettings.fEnabled = kNo_TriState;
   1400         return;
   1401     }
   1402 }
   1403 
   1404 void GrGpuGL::onClear(const SkIRect* rect, GrColor color, bool canIgnoreRect) {
   1405     const GrDrawState& drawState = this->getDrawState();
   1406     const GrRenderTarget* rt = drawState.getRenderTarget();
   1407     // parent class should never let us get here with no RT
   1408     SkASSERT(NULL != rt);
   1409 
   1410     if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
   1411         rect = NULL;
   1412     }
   1413 
   1414     SkIRect clippedRect;
   1415     if (NULL != rect) {
   1416         // flushScissor expects rect to be clipped to the target.
   1417         clippedRect = *rect;
   1418         SkIRect rtRect = SkIRect::MakeWH(rt->width(), rt->height());
   1419         if (clippedRect.intersect(rtRect)) {
   1420             rect = &clippedRect;
   1421         } else {
   1422             return;
   1423         }
   1424     }
   1425 
   1426     this->flushRenderTarget(rect);
   1427     GrAutoTRestore<ScissorState> asr(&fScissorState);
   1428     fScissorState.fEnabled = (NULL != rect);
   1429     if (fScissorState.fEnabled) {
   1430         fScissorState.fRect = *rect;
   1431     }
   1432     this->flushScissor();
   1433 
   1434     GrGLfloat r, g, b, a;
   1435     static const GrGLfloat scale255 = 1.f / 255.f;
   1436     a = GrColorUnpackA(color) * scale255;
   1437     GrGLfloat scaleRGB = scale255;
   1438     r = GrColorUnpackR(color) * scaleRGB;
   1439     g = GrColorUnpackG(color) * scaleRGB;
   1440     b = GrColorUnpackB(color) * scaleRGB;
   1441 
   1442     GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
   1443     fHWWriteToColor = kYes_TriState;
   1444     GL_CALL(ClearColor(r, g, b, a));
   1445     GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
   1446 }
   1447 
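         // Tells the driver that the render target's current contents are no longer needed, using
         // glInvalidateFramebuffer or glDiscardFramebufferEXT depending on the caps. The default
         // framebuffer names its attachment GL_COLOR rather than GL_COLOR_ATTACHMENT0, which is
         // why each case checks the FBO ID.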
   1448 void GrGpuGL::discard(GrRenderTarget* renderTarget) {
   1449     if (!this->caps()->discardRenderTargetSupport()) {
   1450         return;
   1451     }
   1452     if (NULL == renderTarget) {
   1453         renderTarget = this->drawState()->getRenderTarget();
   1454         if (NULL == renderTarget) {
   1455             return;
   1456         }
   1457     }
   1458 
   1459     GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
   1460     if (renderTarget != fHWBoundRenderTarget) {
   1461         fHWBoundRenderTarget = NULL;
   1462         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, glRT->renderFBOID()));
   1463     }
   1464     switch (this->glCaps().invalidateFBType()) {
    1465         case GrGLCaps::kNone_InvalidateFBType:
   1466             SkFAIL("Should never get here.");
   1467             break;
   1468         case GrGLCaps::kInvalidate_InvalidateFBType:
   1469             if (0 == glRT->renderFBOID()) {
   1470                 //  When rendering to the default framebuffer the legal values for attachments
   1471                 //  are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
   1472                 //  types.
   1473                 static const GrGLenum attachments[] = { GR_GL_COLOR };
   1474                 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
   1475                         attachments));
   1476             } else {
   1477                 static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
   1478                 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
   1479                         attachments));
   1480             }
   1481             break;
   1482         case GrGLCaps::kDiscard_InvalidateFBType: {
   1483             if (0 == glRT->renderFBOID()) {
   1484                 //  When rendering to the default framebuffer the legal values for attachments
   1485                 //  are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
   1486                 //  types. See glDiscardFramebuffer() spec.
   1487                 static const GrGLenum attachments[] = { GR_GL_COLOR };
   1488                 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
   1489                         attachments));
   1490             } else {
   1491                 static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
   1492                 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
   1493                         attachments));
   1494             }
   1495             break;
   1496         }
   1497     }
   1498     renderTarget->flagAsResolved();
   1499 }
   1500 
   1501 
   1502 void GrGpuGL::clearStencil() {
   1503     if (NULL == this->getDrawState().getRenderTarget()) {
   1504         return;
   1505     }
   1506 
   1507     this->flushRenderTarget(&SkIRect::EmptyIRect());
   1508 
   1509     GrAutoTRestore<ScissorState> asr(&fScissorState);
   1510     fScissorState.fEnabled = false;
   1511     this->flushScissor();
   1512 
   1513     GL_CALL(StencilMask(0xffffffff));
   1514     GL_CALL(ClearStencil(0));
   1515     GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
   1516     fHWStencilSettings.invalidate();
   1517 }
   1518 
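         // Clears the stencil values used for clipping, either inside or outside 'rect'. The full
         // stencil mask is written rather than just the clip bit; see the workaround note below
         // about ANGLE turning masked clears into draws.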
   1519 void GrGpuGL::clearStencilClip(const SkIRect& rect, bool insideClip) {
   1520     const GrDrawState& drawState = this->getDrawState();
   1521     const GrRenderTarget* rt = drawState.getRenderTarget();
   1522     SkASSERT(NULL != rt);
   1523 
   1524     // this should only be called internally when we know we have a
   1525     // stencil buffer.
   1526     SkASSERT(NULL != rt->getStencilBuffer());
    1527     GrGLint stencilBitCount = rt->getStencilBuffer()->bits();
   1528 #if 0
   1529     SkASSERT(stencilBitCount > 0);
   1530     GrGLint clipStencilMask  = (1 << (stencilBitCount - 1));
   1531 #else
   1532     // we could just clear the clip bit but when we go through
   1533     // ANGLE a partial stencil mask will cause clears to be
   1534     // turned into draws. Our contract on GrDrawTarget says that
   1535     // changing the clip between stencil passes may or may not
   1536     // zero the client's clip bits. So we just clear the whole thing.
   1537     static const GrGLint clipStencilMask  = ~0;
   1538 #endif
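             // The clip is kept in the uppermost stencil bit, so "inside clip" writes that bit and
             // "outside clip" writes zero.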
   1539     GrGLint value;
   1540     if (insideClip) {
   1541         value = (1 << (stencilBitCount - 1));
   1542     } else {
   1543         value = 0;
   1544     }
   1545     this->flushRenderTarget(&SkIRect::EmptyIRect());
   1546 
   1547     GrAutoTRestore<ScissorState> asr(&fScissorState);
   1548     fScissorState.fEnabled = true;
   1549     fScissorState.fRect = rect;
   1550     this->flushScissor();
   1551 
   1552     GL_CALL(StencilMask((uint32_t) clipStencilMask));
   1553     GL_CALL(ClearStencil(value));
   1554     GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
   1555     fHWStencilSettings.invalidate();
   1556 }
   1557 
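         // Reports whether reading back from 'renderTarget' will cost an extra CPU pass just to
         // flip rows top-to-bottom: bottom-left targets need the flip unless GL can reverse rows
         // while packing, or a row-by-row copy is already unavoidable for the caller's rowBytes.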
   1558 bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
   1559                                         int left, int top,
   1560                                         int width, int height,
   1561                                         GrPixelConfig config,
   1562                                         size_t rowBytes) const {
    1563     // If this render target is already TopLeft, we don't need to flip.
   1564     if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
   1565         return false;
   1566     }
   1567 
   1568     // if GL can do the flip then we'll never pay for it.
   1569     if (this->glCaps().packFlipYSupport()) {
   1570         return false;
   1571     }
   1572 
   1573     // If we have to do memcpy to handle non-trim rowBytes then we
   1574     // get the flip for free. Otherwise it costs.
   1575     if (this->glCaps().packRowLengthSupport()) {
   1576         return true;
   1577     }
   1578     // If we have to do memcpys to handle rowBytes then y-flip is free
   1579     // Note the rowBytes might be tight to the passed in data, but if data
   1580     // gets clipped in x to the target the rowBytes will no longer be tight.
   1581     if (left >= 0 && (left + width) < renderTarget->width()) {
    1582         return 0 == rowBytes ||
    1583                GrBytesPerPixel(config) * width == rowBytes;
   1584     } else {
   1585         return false;
   1586     }
   1587 }
   1588 
   1589 bool GrGpuGL::onReadPixels(GrRenderTarget* target,
   1590                            int left, int top,
   1591                            int width, int height,
   1592                            GrPixelConfig config,
   1593                            void* buffer,
   1594                            size_t rowBytes) {
   1595     // We cannot read pixels into a compressed buffer
   1596     if (GrPixelConfigIsCompressed(config)) {
   1597         return false;
   1598     }
   1599 
   1600     GrGLenum format = 0;
   1601     GrGLenum type = 0;
   1602     bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
   1603     if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
   1604         return false;
   1605     }
   1606     size_t bpp = GrBytesPerPixel(config);
   1607     if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
   1608                                  &left, &top, &width, &height,
   1609                                  const_cast<const void**>(&buffer),
   1610                                  &rowBytes)) {
   1611         return false;
   1612     }
   1613 
   1614     // resolve the render target if necessary
   1615     GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
   1616     GrDrawState::AutoRenderTargetRestore artr;
   1617     switch (tgt->getResolveType()) {
   1618         case GrGLRenderTarget::kCantResolve_ResolveType:
   1619             return false;
   1620         case GrGLRenderTarget::kAutoResolves_ResolveType:
   1621             artr.set(this->drawState(), target);
   1622             this->flushRenderTarget(&SkIRect::EmptyIRect());
   1623             break;
   1624         case GrGLRenderTarget::kCanResolve_ResolveType:
   1625             this->onResolveRenderTarget(tgt);
   1626             // we don't track the state of the READ FBO ID.
   1627             GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
   1628                                     tgt->textureFBOID()));
   1629             break;
   1630         default:
   1631             SkFAIL("Unknown resolve type");
   1632     }
   1633 
   1634     const GrGLIRect& glvp = tgt->getViewport();
   1635 
   1636     // the read rect is viewport-relative
   1637     GrGLIRect readRect;
   1638     readRect.setRelativeTo(glvp, left, top, width, height, target->origin());
   1639 
   1640     size_t tightRowBytes = bpp * width;
   1641     if (0 == rowBytes) {
   1642         rowBytes = tightRowBytes;
   1643     }
   1644     size_t readDstRowBytes = tightRowBytes;
   1645     void* readDst = buffer;
   1646 
   1647     // determine if GL can read using the passed rowBytes or if we need
   1648     // a scratch buffer.
   1649     SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
   1650     if (rowBytes != tightRowBytes) {
   1651         if (this->glCaps().packRowLengthSupport()) {
   1652             SkASSERT(!(rowBytes % sizeof(GrColor)));
   1653             GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
   1654                                 static_cast<GrGLint>(rowBytes / sizeof(GrColor))));
   1655             readDstRowBytes = rowBytes;
   1656         } else {
   1657             scratch.reset(tightRowBytes * height);
   1658             readDst = scratch.get();
   1659         }
   1660     }
   1661     if (flipY && this->glCaps().packFlipYSupport()) {
   1662         GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
   1663     }
   1664     GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
   1665                        readRect.fWidth, readRect.fHeight,
   1666                        format, type, readDst));
   1667     if (readDstRowBytes != tightRowBytes) {
   1668         SkASSERT(this->glCaps().packRowLengthSupport());
   1669         GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
   1670     }
   1671     if (flipY && this->glCaps().packFlipYSupport()) {
   1672         GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
   1673         flipY = false;
   1674     }
   1675 
    1676     // now reverse the order of the rows, since GL's rows are bottom-to-top, but our
   1677     // API presents top-to-bottom. We must preserve the padding contents. Note
   1678     // that the above readPixels did not overwrite the padding.
   1679     if (readDst == buffer) {
   1680         SkASSERT(rowBytes == readDstRowBytes);
   1681         if (flipY) {
   1682             scratch.reset(tightRowBytes);
   1683             void* tmpRow = scratch.get();
   1684             // flip y in-place by rows
   1685             const int halfY = height >> 1;
   1686             char* top = reinterpret_cast<char*>(buffer);
   1687             char* bottom = top + (height - 1) * rowBytes;
   1688             for (int y = 0; y < halfY; y++) {
   1689                 memcpy(tmpRow, top, tightRowBytes);
   1690                 memcpy(top, bottom, tightRowBytes);
   1691                 memcpy(bottom, tmpRow, tightRowBytes);
   1692                 top += rowBytes;
   1693                 bottom -= rowBytes;
   1694             }
   1695         }
   1696     } else {
    1697         SkASSERT(readDst != buffer); SkASSERT(rowBytes != tightRowBytes);
   1698         // copy from readDst to buffer while flipping y
   1699         // const int halfY = height >> 1;
   1700         const char* src = reinterpret_cast<const char*>(readDst);
   1701         char* dst = reinterpret_cast<char*>(buffer);
   1702         if (flipY) {
   1703             dst += (height-1) * rowBytes;
   1704         }
   1705         for (int y = 0; y < height; y++) {
   1706             memcpy(dst, src, tightRowBytes);
   1707             src += readDstRowBytes;
   1708             if (!flipY) {
   1709                 dst += rowBytes;
   1710             } else {
   1711                 dst -= rowBytes;
   1712             }
   1713         }
   1714     }
   1715     return true;
   1716 }
   1717 
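         // Binds the draw state's render target FBO if it isn't already the HW-bound target,
         // pushes the viewport if it changed, and records the region that will need an MSAA
         // resolve before the target is next read as a texture.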
   1718 void GrGpuGL::flushRenderTarget(const SkIRect* bound) {
   1719 
   1720     GrGLRenderTarget* rt =
   1721         static_cast<GrGLRenderTarget*>(this->drawState()->getRenderTarget());
   1722     SkASSERT(NULL != rt);
   1723 
   1724     if (fHWBoundRenderTarget != rt) {
   1725         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID()));
   1726 #ifdef SK_DEBUG
   1727         // don't do this check in Chromium -- this is causing
   1728         // lots of repeated command buffer flushes when the compositor is
   1729         // rendering with Ganesh, which is really slow; even too slow for
   1730         // Debug mode.
   1731         if (!this->glContext().isChromium()) {
   1732             GrGLenum status;
   1733             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
   1734             if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
   1735                 GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status);
   1736             }
   1737         }
   1738 #endif
   1739         fHWBoundRenderTarget = rt;
   1740         const GrGLIRect& vp = rt->getViewport();
   1741         if (fHWViewport != vp) {
   1742             vp.pushToGLViewport(this->glInterface());
   1743             fHWViewport = vp;
   1744         }
   1745     }
   1746     if (NULL == bound || !bound->isEmpty()) {
   1747         rt->flagAsNeedingResolve(bound);
   1748     }
   1749 
    1750     GrTexture* texture = rt->asTexture();
   1751     if (NULL != texture) {
   1752         texture->impl()->dirtyMipMaps(true);
   1753     }
   1754 }
   1755 
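         // Indexed directly by info.primitiveType() in onGpuDraw(); the order here must match the
         // GrPrimitiveType enum.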
   1756 GrGLenum gPrimitiveType2GLMode[] = {
   1757     GR_GL_TRIANGLES,
   1758     GR_GL_TRIANGLE_STRIP,
   1759     GR_GL_TRIANGLE_FAN,
   1760     GR_GL_POINTS,
   1761     GR_GL_LINES,
   1762     GR_GL_LINE_STRIP
   1763 };
   1764 
   1765 #define SWAP_PER_DRAW 0
   1766 
   1767 #if SWAP_PER_DRAW
   1768     #if defined(SK_BUILD_FOR_MAC)
   1769         #include <AGL/agl.h>
   1770     #elif defined(SK_BUILD_FOR_WIN32)
   1771         #include <gl/GL.h>
   1772         void SwapBuf() {
   1773             DWORD procID = GetCurrentProcessId();
   1774             HWND hwnd = GetTopWindow(GetDesktopWindow());
   1775             while(hwnd) {
   1776                 DWORD wndProcID = 0;
   1777                 GetWindowThreadProcessId(hwnd, &wndProcID);
   1778                 if(wndProcID == procID) {
   1779                     SwapBuffers(GetDC(hwnd));
   1780                 }
   1781                 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
   1782             }
   1783          }
   1784     #endif
   1785 #endif
   1786 
   1787 void GrGpuGL::onGpuDraw(const DrawInfo& info) {
   1788     size_t indexOffsetInBytes;
   1789     this->setupGeometry(info, &indexOffsetInBytes);
   1790 
   1791     SkASSERT((size_t)info.primitiveType() < SK_ARRAY_COUNT(gPrimitiveType2GLMode));
   1792 
   1793     if (info.isIndexed()) {
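                 // With an index buffer bound, DrawElements treats its final argument as a byte
                 // offset into that buffer rather than a client-side pointer.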
   1794         GrGLvoid* indices =
   1795             reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex());
   1796         // info.startVertex() was accounted for by setupGeometry.
   1797         GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()],
   1798                              info.indexCount(),
   1799                              GR_GL_UNSIGNED_SHORT,
   1800                              indices));
   1801     } else {
    1802         // Pass 0 for the 'first' parameter. We have to adjust glVertexAttribPointer() to account for
   1803         // startVertex in the DrawElements case. So we always rely on setupGeometry to have
   1804         // accounted for startVertex.
   1805         GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount()));
   1806     }
   1807 #if SWAP_PER_DRAW
   1808     glFlush();
   1809     #if defined(SK_BUILD_FOR_MAC)
   1810         aglSwapBuffers(aglGetCurrentContext());
   1811         int set_a_break_pt_here = 9;
   1812         aglSwapBuffers(aglGetCurrentContext());
   1813     #elif defined(SK_BUILD_FOR_WIN32)
   1814         SwapBuf();
   1815         int set_a_break_pt_here = 9;
   1816         SwapBuf();
   1817     #endif
   1818 #endif
   1819 }
   1820 
   1821 static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) {
   1822     switch (op) {
   1823         default:
   1824             SkFAIL("Unexpected path fill.");
   1825             /* fallthrough */;
   1826         case kIncClamp_StencilOp:
   1827             return GR_GL_COUNT_UP;
   1828         case kInvert_StencilOp:
   1829             return GR_GL_INVERT;
   1830     }
   1831 }
   1832 
   1833 void GrGpuGL::onGpuStencilPath(const GrPath* path, SkPath::FillType fill) {
   1834     SkASSERT(this->caps()->pathRenderingSupport());
   1835 
   1836     GrGLuint id = static_cast<const GrGLPath*>(path)->pathID();
   1837     SkASSERT(NULL != this->drawState()->getRenderTarget());
   1838     SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer());
   1839 
   1840     flushPathStencilSettings(fill);
   1841 
   1842     // Decide how to manipulate the stencil buffer based on the fill rule.
   1843     SkASSERT(!fHWPathStencilSettings.isTwoSided());
   1844 
   1845     GrGLenum fillMode =
   1846         gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face));
   1847     GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face);
   1848     GL_CALL(StencilFillPath(id, fillMode, writeMask));
   1849 }
   1850 
   1851 void GrGpuGL::onGpuDrawPath(const GrPath* path, SkPath::FillType fill) {
   1852     SkASSERT(this->caps()->pathRenderingSupport());
   1853 
   1854     GrGLuint id = static_cast<const GrGLPath*>(path)->pathID();
   1855     SkASSERT(NULL != this->drawState()->getRenderTarget());
   1856     SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer());
   1857     SkASSERT(!fCurrentProgram->hasVertexShader());
   1858 
   1859     flushPathStencilSettings(fill);
   1860     const SkStrokeRec& stroke = path->getStroke();
   1861 
   1862     SkPath::FillType nonInvertedFill = SkPath::ConvertToNonInverseFillType(fill);
   1863     SkASSERT(!fHWPathStencilSettings.isTwoSided());
   1864     GrGLenum fillMode =
   1865         gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face));
   1866     GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face);
   1867 
   1868     if (stroke.isFillStyle() || SkStrokeRec::kStrokeAndFill_Style == stroke.getStyle()) {
   1869         GL_CALL(StencilFillPath(id, fillMode, writeMask));
   1870     }
   1871     if (stroke.needToApply()) {
   1872         GL_CALL(StencilStrokePath(id, 0xffff, writeMask));
   1873     }
   1874 
   1875     if (nonInvertedFill == fill) {
   1876         if (stroke.needToApply()) {
   1877             GL_CALL(CoverStrokePath(id, GR_GL_BOUNDING_BOX));
   1878         } else {
   1879             GL_CALL(CoverFillPath(id, GR_GL_BOUNDING_BOX));
   1880         }
   1881     } else {
   1882         GrDrawState* drawState = this->drawState();
   1883         GrDrawState::AutoViewMatrixRestore avmr;
   1884         SkRect bounds = SkRect::MakeLTRB(0, 0,
   1885                                          SkIntToScalar(drawState->getRenderTarget()->width()),
   1886                                          SkIntToScalar(drawState->getRenderTarget()->height()));
   1887         SkMatrix vmi;
   1888         // mapRect through persp matrix may not be correct
   1889         if (!drawState->getViewMatrix().hasPerspective() && drawState->getViewInverse(&vmi)) {
   1890             vmi.mapRect(&bounds);
    1891             // theoretically we could set bloat = 0; instead we keep it to absorb matrix inversion
    1892             // precision error.
   1893             SkScalar bloat = drawState->getViewMatrix().getMaxScale() * SK_ScalarHalf;
   1894             bounds.outset(bloat, bloat);
   1895         } else {
   1896             avmr.setIdentity(drawState);
   1897         }
   1898 
   1899         this->drawSimpleRect(bounds, NULL);
   1900     }
   1901 }
   1902 
   1903 void GrGpuGL::onGpuDrawPaths(int pathCount, const GrPath** paths,
   1904                              const SkMatrix* transforms,
   1905                              SkPath::FillType fill,
   1906                              SkStrokeRec::Style stroke) {
   1907     SkASSERT(this->caps()->pathRenderingSupport());
   1908     SkASSERT(NULL != this->drawState()->getRenderTarget());
   1909     SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer());
   1910     SkASSERT(!fCurrentProgram->hasVertexShader());
   1911     SkASSERT(stroke != SkStrokeRec::kHairline_Style);
   1912 
   1913     SkAutoMalloc pathData(pathCount * sizeof(GrGLuint));
   1914     SkAutoMalloc transformData(pathCount * sizeof(GrGLfloat) * 6);
   1915     GrGLfloat* transformValues =
   1916         reinterpret_cast<GrGLfloat*>(transformData.get());
   1917     GrGLuint* pathIDs = reinterpret_cast<GrGLuint*>(pathData.get());
   1918 
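             // Flatten each (asserted-affine) SkMatrix into the six floats GR_GL_AFFINE_2D
             // consumes: { scaleX, skewY, skewX, scaleY, transX, transY }.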
   1919     for (int i = 0; i < pathCount; ++i) {
   1920         SkASSERT(transforms[i].asAffine(NULL));
   1921         const SkMatrix& m = transforms[i];
   1922         transformValues[i * 6] = m.getScaleX();
   1923         transformValues[i * 6 + 1] = m.getSkewY();
   1924         transformValues[i * 6 + 2] = m.getSkewX();
   1925         transformValues[i * 6 + 3] = m.getScaleY();
   1926         transformValues[i * 6 + 4] = m.getTranslateX();
   1927         transformValues[i * 6 + 5] = m.getTranslateY();
   1928         pathIDs[i] = static_cast<const GrGLPath*>(paths[i])->pathID();
   1929     }
   1930 
   1931     flushPathStencilSettings(fill);
   1932 
   1933     SkPath::FillType nonInvertedFill =
   1934         SkPath::ConvertToNonInverseFillType(fill);
   1935 
   1936     SkASSERT(!fHWPathStencilSettings.isTwoSided());
   1937     GrGLenum fillMode =
   1938         gr_stencil_op_to_gl_path_rendering_fill_mode(
   1939             fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face));
   1940     GrGLint writeMask =
   1941         fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face);
   1942 
   1943     bool doFill = stroke == SkStrokeRec::kFill_Style
   1944         || stroke == SkStrokeRec::kStrokeAndFill_Style;
   1945     bool doStroke = stroke == SkStrokeRec::kStroke_Style
   1946         || stroke == SkStrokeRec::kStrokeAndFill_Style;
   1947 
   1948     if (doFill) {
   1949         GL_CALL(StencilFillPathInstanced(pathCount, GR_GL_UNSIGNED_INT,
   1950                                          pathIDs, 0,
   1951                                          fillMode, writeMask,
   1952                                          GR_GL_AFFINE_2D, transformValues));
   1953     }
   1954     if (doStroke) {
   1955         GL_CALL(StencilStrokePathInstanced(pathCount, GR_GL_UNSIGNED_INT,
   1956                                            pathIDs, 0,
   1957                                            0xffff, writeMask,
   1958                                            GR_GL_AFFINE_2D, transformValues));
   1959     }
   1960 
   1961     if (nonInvertedFill == fill) {
   1962         if (doStroke) {
   1963             GL_CALL(CoverStrokePathInstanced(
   1964                         pathCount, GR_GL_UNSIGNED_INT, pathIDs, 0,
   1965                         GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES,
   1966                         GR_GL_AFFINE_2D, transformValues));
   1967         } else {
   1968             GL_CALL(CoverFillPathInstanced(
   1969                         pathCount, GR_GL_UNSIGNED_INT, pathIDs, 0,
   1970                         GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES,
   1971                         GR_GL_AFFINE_2D, transformValues));
   1972 
   1973         }
   1974     } else {
   1975         GrDrawState* drawState = this->drawState();
   1976         GrDrawState::AutoViewMatrixRestore avmr;
   1977         SkRect bounds = SkRect::MakeLTRB(0, 0,
   1978                                          SkIntToScalar(drawState->getRenderTarget()->width()),
   1979                                          SkIntToScalar(drawState->getRenderTarget()->height()));
   1980         SkMatrix vmi;
   1981         // mapRect through persp matrix may not be correct
   1982         if (!drawState->getViewMatrix().hasPerspective() && drawState->getViewInverse(&vmi)) {
   1983             vmi.mapRect(&bounds);
   1984             // theoretically could set bloat = 0, instead leave it because of matrix inversion
    1985             // theoretically we could set bloat = 0; instead we keep it to absorb matrix inversion
    1986             // precision error.
   1987             bounds.outset(bloat, bloat);
   1988         } else {
   1989             avmr.setIdentity(drawState);
   1990         }
   1991 
   1992         this->drawSimpleRect(bounds, NULL);
   1993     }
   1994 }
   1995 
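         // Resolves the MSAA renderbuffer into the texture FBO when a resolve is pending. Apple's
         // ES extension takes its bounds from the scissor, while the other MSAA FBO types use
         // BlitFramebuffer restricted to the dirty rect (with the scissor disabled for the desktop
         // EXT flavor, which honors it during blits).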
   1996 void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) {
   1997     GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
   1998     if (rt->needsResolve()) {
    1999         // Some extensions automatically resolve the texture when it is read.
   2000         if (this->glCaps().usesMSAARenderBuffers()) {
   2001             SkASSERT(rt->textureFBOID() != rt->renderFBOID());
   2002             GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
   2003             GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
   2004             // make sure we go through flushRenderTarget() since we've modified
   2005             // the bound DRAW FBO ID.
   2006             fHWBoundRenderTarget = NULL;
   2007             const GrGLIRect& vp = rt->getViewport();
   2008             const SkIRect dirtyRect = rt->getResolveRect();
   2009             GrGLIRect r;
   2010             r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
   2011                             dirtyRect.width(), dirtyRect.height(), target->origin());
   2012 
   2013             GrAutoTRestore<ScissorState> asr;
   2014             if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
   2015                 // Apple's extension uses the scissor as the blit bounds.
   2016                 asr.reset(&fScissorState);
   2017                 fScissorState.fEnabled = true;
   2018                 fScissorState.fRect = dirtyRect;
   2019                 this->flushScissor();
   2020                 GL_CALL(ResolveMultisampleFramebuffer());
   2021             } else {
   2022                 if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
   2023                     // this respects the scissor during the blit, so disable it.
   2024                     asr.reset(&fScissorState);
   2025                     fScissorState.fEnabled = false;
   2026                     this->flushScissor();
   2027                 }
   2028                 int right = r.fLeft + r.fWidth;
   2029                 int top = r.fBottom + r.fHeight;
   2030                 GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
   2031                                         r.fLeft, r.fBottom, right, top,
   2032                                         GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
   2033             }
   2034         }
   2035         rt->flagAsResolved();
   2036     }
   2037 }
   2038 
   2039 namespace {
   2040 
   2041 GrGLenum gr_to_gl_stencil_func(GrStencilFunc basicFunc) {
   2042     static const GrGLenum gTable[] = {
   2043         GR_GL_ALWAYS,           // kAlways_StencilFunc
   2044         GR_GL_NEVER,            // kNever_StencilFunc
   2045         GR_GL_GREATER,          // kGreater_StencilFunc
   2046         GR_GL_GEQUAL,           // kGEqual_StencilFunc
   2047         GR_GL_LESS,             // kLess_StencilFunc
   2048         GR_GL_LEQUAL,           // kLEqual_StencilFunc,
   2049         GR_GL_EQUAL,            // kEqual_StencilFunc,
   2050         GR_GL_NOTEQUAL,         // kNotEqual_StencilFunc,
   2051     };
   2052     GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kBasicStencilFuncCount);
   2053     GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
   2054     GR_STATIC_ASSERT(1 == kNever_StencilFunc);
   2055     GR_STATIC_ASSERT(2 == kGreater_StencilFunc);
   2056     GR_STATIC_ASSERT(3 == kGEqual_StencilFunc);
   2057     GR_STATIC_ASSERT(4 == kLess_StencilFunc);
   2058     GR_STATIC_ASSERT(5 == kLEqual_StencilFunc);
   2059     GR_STATIC_ASSERT(6 == kEqual_StencilFunc);
   2060     GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc);
   2061     SkASSERT((unsigned) basicFunc < kBasicStencilFuncCount);
   2062 
   2063     return gTable[basicFunc];
   2064 }
   2065 
   2066 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
   2067     static const GrGLenum gTable[] = {
   2068         GR_GL_KEEP,        // kKeep_StencilOp
   2069         GR_GL_REPLACE,     // kReplace_StencilOp
   2070         GR_GL_INCR_WRAP,   // kIncWrap_StencilOp
   2071         GR_GL_INCR,        // kIncClamp_StencilOp
   2072         GR_GL_DECR_WRAP,   // kDecWrap_StencilOp
   2073         GR_GL_DECR,        // kDecClamp_StencilOp
   2074         GR_GL_ZERO,        // kZero_StencilOp
   2075         GR_GL_INVERT,      // kInvert_StencilOp
   2076     };
   2077     GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kStencilOpCount);
   2078     GR_STATIC_ASSERT(0 == kKeep_StencilOp);
   2079     GR_STATIC_ASSERT(1 == kReplace_StencilOp);
   2080     GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
   2081     GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
   2082     GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
   2083     GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
   2084     GR_STATIC_ASSERT(6 == kZero_StencilOp);
   2085     GR_STATIC_ASSERT(7 == kInvert_StencilOp);
   2086     SkASSERT((unsigned) op < kStencilOpCount);
   2087     return gTable[op];
   2088 }
   2089 
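         // Applies one face's stencil settings. When glFace is GR_GL_FRONT_AND_BACK the combined
         // (non-separate) entry points are used so this also works without two-sided stencil
         // support; note the pass op is used for both the depth-fail and depth-pass slots of
         // StencilOp.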
   2090 void set_gl_stencil(const GrGLInterface* gl,
   2091                     const GrStencilSettings& settings,
   2092                     GrGLenum glFace,
   2093                     GrStencilSettings::Face grFace) {
   2094     GrGLenum glFunc = gr_to_gl_stencil_func(settings.func(grFace));
   2095     GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace));
   2096     GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace));
   2097 
   2098     GrGLint ref = settings.funcRef(grFace);
   2099     GrGLint mask = settings.funcMask(grFace);
   2100     GrGLint writeMask = settings.writeMask(grFace);
   2101 
   2102     if (GR_GL_FRONT_AND_BACK == glFace) {
   2103         // we call the combined func just in case separate stencil is not
   2104         // supported.
   2105         GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
   2106         GR_GL_CALL(gl, StencilMask(writeMask));
   2107         GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp));
   2108     } else {
   2109         GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
   2110         GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
   2111         GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp));
   2112     }
   2113 }
   2114 }
   2115 
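         // Pushes fStencilSettings to GL when they differ from the cached HW state. Stencil-path
         // draws are skipped here; their stencil state is handled by flushPathStencilSettings().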
   2116 void GrGpuGL::flushStencil(DrawType type) {
   2117     if (kStencilPath_DrawType != type && fHWStencilSettings != fStencilSettings) {
   2118         if (fStencilSettings.isDisabled()) {
   2119             if (kNo_TriState != fHWStencilTestEnabled) {
   2120                 GL_CALL(Disable(GR_GL_STENCIL_TEST));
   2121                 fHWStencilTestEnabled = kNo_TriState;
   2122             }
   2123         } else {
   2124             if (kYes_TriState != fHWStencilTestEnabled) {
   2125                 GL_CALL(Enable(GR_GL_STENCIL_TEST));
   2126                 fHWStencilTestEnabled = kYes_TriState;
   2127             }
   2128         }
   2129         if (!fStencilSettings.isDisabled()) {
   2130             if (this->caps()->twoSidedStencilSupport()) {
   2131                 set_gl_stencil(this->glInterface(),
   2132                                fStencilSettings,
   2133                                GR_GL_FRONT,
   2134                                GrStencilSettings::kFront_Face);
   2135                 set_gl_stencil(this->glInterface(),
   2136                                fStencilSettings,
   2137                                GR_GL_BACK,
   2138                                GrStencilSettings::kBack_Face);
   2139             } else {
   2140                 set_gl_stencil(this->glInterface(),
   2141                                fStencilSettings,
   2142                                GR_GL_FRONT_AND_BACK,
   2143                                GrStencilSettings::kFront_Face);
   2144             }
   2145         }
   2146         fHWStencilSettings = fStencilSettings;
   2147     }
   2148 }
   2149 
   2150 void GrGpuGL::flushAAState(DrawType type) {
    2151 // At least some ATI Linux drivers will render GL_LINES incorrectly when MSAA state is enabled but
    2152 // the target is not multisampled. Single-pixel-wide lines are rendered thicker than one pixel.
   2153 #if 0
   2154     // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern
   2155     #define RT_HAS_MSAA rt->isMultisampled()
   2156 #else
   2157     #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == type)
   2158 #endif
   2159 
   2160     const GrRenderTarget* rt = this->getDrawState().getRenderTarget();
   2161     if (kGL_GrGLStandard == this->glStandard()) {
   2162         // ES doesn't support toggling GL_MULTISAMPLE and doesn't have
   2163         // smooth lines.
   2164         // we prefer smooth lines over multisampled lines
   2165         bool smoothLines = false;
   2166 
   2167         if (kDrawLines_DrawType == type) {
   2168             smoothLines = this->willUseHWAALines();
   2169             if (smoothLines) {
   2170                 if (kYes_TriState != fHWAAState.fSmoothLineEnabled) {
   2171                     GL_CALL(Enable(GR_GL_LINE_SMOOTH));
   2172                     fHWAAState.fSmoothLineEnabled = kYes_TriState;
   2173                     // must disable msaa to use line smoothing
   2174                     if (RT_HAS_MSAA &&
   2175                         kNo_TriState != fHWAAState.fMSAAEnabled) {
   2176                         GL_CALL(Disable(GR_GL_MULTISAMPLE));
   2177                         fHWAAState.fMSAAEnabled = kNo_TriState;
   2178                     }
   2179                 }
   2180             } else {
   2181                 if (kNo_TriState != fHWAAState.fSmoothLineEnabled) {
   2182                     GL_CALL(Disable(GR_GL_LINE_SMOOTH));
   2183                     fHWAAState.fSmoothLineEnabled = kNo_TriState;
   2184                 }
   2185             }
   2186         }
   2187         if (!smoothLines && RT_HAS_MSAA) {
    2188             // FIXME: GL_NV_path_rendering doesn't seem to like MSAA disabled. The paths'
    2189             // convex hulls of each segment appear to get filled.
   2190             bool enableMSAA = kStencilPath_DrawType == type ||
   2191                               this->getDrawState().isHWAntialiasState();
   2192             if (enableMSAA) {
   2193                 if (kYes_TriState != fHWAAState.fMSAAEnabled) {
   2194                     GL_CALL(Enable(GR_GL_MULTISAMPLE));
   2195                     fHWAAState.fMSAAEnabled = kYes_TriState;
   2196                 }
   2197             } else {
   2198                 if (kNo_TriState != fHWAAState.fMSAAEnabled) {
   2199                     GL_CALL(Disable(GR_GL_MULTISAMPLE));
   2200                     fHWAAState.fMSAAEnabled = kNo_TriState;
   2201                 }
   2202             }
   2203         }
   2204     }
   2205 }
   2206 
   2207 void GrGpuGL::flushPathStencilSettings(SkPath::FillType fill) {
   2208     GrStencilSettings pathStencilSettings;
   2209     this->getPathStencilSettingsForFillType(fill, &pathStencilSettings);
   2210     if (fHWPathStencilSettings != pathStencilSettings) {
    2211         // Just the func, ref, and mask are set here. The op and write mask are params to the call
   2212         // that draws the path to the SB (glStencilFillPath)
   2213         GrGLenum func =
   2214             gr_to_gl_stencil_func(pathStencilSettings.func(GrStencilSettings::kFront_Face));
   2215         GL_CALL(PathStencilFunc(func,
   2216                                 pathStencilSettings.funcRef(GrStencilSettings::kFront_Face),
   2217                                 pathStencilSettings.funcMask(GrStencilSettings::kFront_Face)));
   2218 
   2219         fHWPathStencilSettings = pathStencilSettings;
   2220     }
   2221 }
   2222 
   2223 void GrGpuGL::flushBlend(bool isLines,
   2224                          GrBlendCoeff srcCoeff,
   2225                          GrBlendCoeff dstCoeff) {
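             // HW AA lines get their coverage from GL line smoothing, which is applied as fragment
             // alpha, so blending is forced to (src alpha, one minus src alpha) regardless of the
             // requested coefficients.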
   2226     if (isLines && this->willUseHWAALines()) {
   2227         if (kYes_TriState != fHWBlendState.fEnabled) {
   2228             GL_CALL(Enable(GR_GL_BLEND));
   2229             fHWBlendState.fEnabled = kYes_TriState;
   2230         }
   2231         if (kSA_GrBlendCoeff != fHWBlendState.fSrcCoeff ||
   2232             kISA_GrBlendCoeff != fHWBlendState.fDstCoeff) {
   2233             GL_CALL(BlendFunc(gXfermodeCoeff2Blend[kSA_GrBlendCoeff],
   2234                               gXfermodeCoeff2Blend[kISA_GrBlendCoeff]));
   2235             fHWBlendState.fSrcCoeff = kSA_GrBlendCoeff;
   2236             fHWBlendState.fDstCoeff = kISA_GrBlendCoeff;
   2237         }
   2238     } else {
   2239         // any optimization to disable blending should
   2240         // have already been applied and tweaked the coeffs
   2241         // to (1, 0).
   2242         bool blendOff = kOne_GrBlendCoeff == srcCoeff &&
   2243                         kZero_GrBlendCoeff == dstCoeff;
   2244         if (blendOff) {
   2245             if (kNo_TriState != fHWBlendState.fEnabled) {
   2246                 GL_CALL(Disable(GR_GL_BLEND));
   2247                 fHWBlendState.fEnabled = kNo_TriState;
   2248             }
   2249         } else {
   2250             if (kYes_TriState != fHWBlendState.fEnabled) {
   2251                 GL_CALL(Enable(GR_GL_BLEND));
   2252                 fHWBlendState.fEnabled = kYes_TriState;
   2253             }
   2254             if (fHWBlendState.fSrcCoeff != srcCoeff ||
   2255                 fHWBlendState.fDstCoeff != dstCoeff) {
   2256                 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
   2257                                   gXfermodeCoeff2Blend[dstCoeff]));
   2258                 fHWBlendState.fSrcCoeff = srcCoeff;
   2259                 fHWBlendState.fDstCoeff = dstCoeff;
   2260             }
   2261             GrColor blendConst = this->getDrawState().getBlendConstant();
   2262             if ((BlendCoeffReferencesConstant(srcCoeff) ||
   2263                  BlendCoeffReferencesConstant(dstCoeff)) &&
   2264                 (!fHWBlendState.fConstColorValid ||
   2265                  fHWBlendState.fConstColor != blendConst)) {
   2266                 GrGLfloat c[4];
   2267                 GrColorToRGBAFloat(blendConst, c);
   2268                 GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
   2269                 fHWBlendState.fConstColor = blendConst;
   2270                 fHWBlendState.fConstColorValid = true;
   2271             }
   2272         }
   2273     }
   2274 }
   2275 
   2276 static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
   2277     static const GrGLenum gWrapModes[] = {
   2278         GR_GL_CLAMP_TO_EDGE,
   2279         GR_GL_REPEAT,
   2280         GR_GL_MIRRORED_REPEAT
   2281     };
   2282     GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
   2283     GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
   2284     GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
   2285     GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
   2286     return gWrapModes[tm];
   2287 }
   2288 
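         // Binds 'texture' to the given unit and lazily pushes whatever sampler state (filter,
         // wrap modes, swizzle) differs from what was last set on that texture object, first
         // regenerating mipmaps if a mip filter is requested while the mips are dirty.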
   2289 void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
   2290     SkASSERT(NULL != texture);
   2291 
   2292     // If we created a rt/tex and rendered to it without using a texture and now we're texturing
   2293     // from the rt it will still be the last bound texture, but it needs resolving. So keep this
   2294     // out of the "last != next" check.
    2295     GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
   2296     if (NULL != texRT) {
   2297         this->onResolveRenderTarget(texRT);
   2298     }
   2299 
   2300     if (fHWBoundTextures[unitIdx] != texture) {
   2301         this->setTextureUnit(unitIdx);
   2302         GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID()));
   2303         fHWBoundTextures[unitIdx] = texture;
   2304     }
   2305 
   2306     ResetTimestamp timestamp;
   2307     const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
   2308     bool setAll = timestamp < this->getResetTimestamp();
   2309     GrGLTexture::TexParams newTexParams;
   2310 
   2311     static GrGLenum glMinFilterModes[] = {
   2312         GR_GL_NEAREST,
   2313         GR_GL_LINEAR,
   2314         GR_GL_LINEAR_MIPMAP_LINEAR
   2315     };
   2316     static GrGLenum glMagFilterModes[] = {
   2317         GR_GL_NEAREST,
   2318         GR_GL_LINEAR,
   2319         GR_GL_LINEAR
   2320     };
   2321     GrTextureParams::FilterMode filterMode = params.filterMode();
   2322     if (!this->caps()->mipMapSupport() && GrTextureParams::kMipMap_FilterMode == filterMode) {
   2323         filterMode = GrTextureParams::kBilerp_FilterMode;
   2324     }
   2325     newTexParams.fMinFilter = glMinFilterModes[filterMode];
   2326     newTexParams.fMagFilter = glMagFilterModes[filterMode];
   2327 
   2328     if (GrTextureParams::kMipMap_FilterMode == filterMode &&
   2329         texture->mipMapsAreDirty() && !GrPixelConfigIsCompressed(texture->config())) {
   2330         GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D));
   2331         texture->dirtyMipMaps(false);
   2332     }
   2333 
   2334     newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
   2335     newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
   2336     memcpy(newTexParams.fSwizzleRGBA,
   2337            GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()),
   2338            sizeof(newTexParams.fSwizzleRGBA));
   2339     if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
   2340         this->setTextureUnit(unitIdx);
   2341         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   2342                               GR_GL_TEXTURE_MAG_FILTER,
   2343                               newTexParams.fMagFilter));
   2344     }
   2345     if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
   2346         this->setTextureUnit(unitIdx);
   2347         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   2348                               GR_GL_TEXTURE_MIN_FILTER,
   2349                               newTexParams.fMinFilter));
   2350     }
   2351     if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
   2352         this->setTextureUnit(unitIdx);
   2353         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   2354                               GR_GL_TEXTURE_WRAP_S,
   2355                               newTexParams.fWrapS));
   2356     }
   2357     if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
   2358         this->setTextureUnit(unitIdx);
   2359         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
   2360                               GR_GL_TEXTURE_WRAP_T,
   2361                               newTexParams.fWrapT));
   2362     }
   2363     if (this->glCaps().textureSwizzleSupport() &&
   2364         (setAll || memcmp(newTexParams.fSwizzleRGBA,
   2365                           oldTexParams.fSwizzleRGBA,
   2366                           sizeof(newTexParams.fSwizzleRGBA)))) {
   2367         this->setTextureUnit(unitIdx);
   2368         if (this->glStandard() == kGLES_GrGLStandard) {
   2369             // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
   2370             const GrGLenum* swizzle = newTexParams.fSwizzleRGBA;
   2371             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
   2372             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
   2373             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
   2374             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
   2375         } else {
   2376             GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint));
   2377             const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA);
   2378             GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle));
   2379         }
   2380     }
   2381     texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
   2382 }
   2383 
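         // Loads the view matrix, adjusted for the render target's size and origin, into the
         // fixed-function GL projection matrix used by path rendering. The MatrixLoadf call is
         // skipped when the cached fHWProjectionMatrixState already matches.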
   2384 void GrGpuGL::setProjectionMatrix(const SkMatrix& matrix,
   2385                                   const SkISize& renderTargetSize,
   2386                                   GrSurfaceOrigin renderTargetOrigin) {
   2387 
   2388     SkASSERT(this->glCaps().pathRenderingSupport());
   2389 
   2390     if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
   2391         renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
   2392         matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) {
   2393         return;
   2394     }
   2395 
   2396     fHWProjectionMatrixState.fViewMatrix = matrix;
   2397     fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize;
   2398     fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin;
   2399 
   2400     GrGLfloat glMatrix[4 * 4];
   2401     fHWProjectionMatrixState.getRTAdjustedGLMatrix<4>(glMatrix);
   2402     GL_CALL(MatrixLoadf(GR_GL_PROJECTION, glMatrix));
   2403 }
   2404 
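         // Enables object-linear texgen for path rendering on the given texture unit with the
         // supplied coefficients. The cached fHWPathTexGenSettings entry is used to skip the GL
         // call when the unit is already configured identically.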
   2405 void GrGpuGL::enablePathTexGen(int unitIdx,
   2406                                PathTexGenComponents components,
   2407                                const GrGLfloat* coefficients) {
   2408     SkASSERT(this->glCaps().pathRenderingSupport());
   2409     SkASSERT(components >= kS_PathTexGenComponents &&
   2410              components <= kSTR_PathTexGenComponents);
   2411     SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= unitIdx);
   2412 
   2413     if (GR_GL_OBJECT_LINEAR == fHWPathTexGenSettings[unitIdx].fMode &&
   2414         components == fHWPathTexGenSettings[unitIdx].fNumComponents &&
   2415         !memcmp(coefficients, fHWPathTexGenSettings[unitIdx].fCoefficients,
   2416                 3 * components * sizeof(GrGLfloat))) {
   2417         return;
   2418     }
   2419 
   2420     this->setTextureUnit(unitIdx);
   2421 
   2422     fHWPathTexGenSettings[unitIdx].fNumComponents = components;
   2423     GL_CALL(PathTexGen(GR_GL_TEXTURE0 + unitIdx,
   2424                        GR_GL_OBJECT_LINEAR,
   2425                        components,
   2426                        coefficients));
   2427 
   2428     memcpy(fHWPathTexGenSettings[unitIdx].fCoefficients, coefficients,
   2429            3 * components * sizeof(GrGLfloat));
   2430 }
   2431 
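         // Convenience overload: expands an SkMatrix into the row-major coefficient array expected
         // by the overload above, filling only the rows needed for the requested component count.
         // A caller wanting S and T coordinates driven by a 2D texture matrix would, for example,
         // pass kST_PathTexGenComponents here.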
   2432 void GrGpuGL::enablePathTexGen(int unitIdx, PathTexGenComponents components,
   2433                                const SkMatrix& matrix) {
   2434     GrGLfloat coefficients[3 * 3];
   2435     SkASSERT(this->glCaps().pathRenderingSupport());
   2436     SkASSERT(components >= kS_PathTexGenComponents &&
   2437              components <= kSTR_PathTexGenComponents);
   2438 
   2439     coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]);
   2440     coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]);
   2441     coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]);
   2442 
   2443     if (components >= kST_PathTexGenComponents) {
   2444         coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]);
   2445         coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]);
   2446         coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]);
   2447     }
   2448 
   2449     if (components >= kSTR_PathTexGenComponents) {
   2450         coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]);
   2451         coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]);
   2452         coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]);
   2453     }
   2454 
    2455     this->enablePathTexGen(unitIdx, components, coefficients);
   2456 }
   2457 
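         // Disables path texgen on texture units that were configured for a previous draw but are
         // unused by this one. Units in [0, numUsedTexCoordSets) were already written when they
         // were enabled via enablePathTexGen.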
   2458 void GrGpuGL::flushPathTexGenSettings(int numUsedTexCoordSets) {
   2459     SkASSERT(this->glCaps().pathRenderingSupport());
   2460     SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= numUsedTexCoordSets);
   2461 
   2462     // Only write the inactive path tex gens, since active path tex gens were
   2463     // written when they were enabled.
   2464 
   2465     SkDEBUGCODE(
   2466         for (int i = 0; i < numUsedTexCoordSets; i++) {
   2467             SkASSERT(0 != fHWPathTexGenSettings[i].fNumComponents);
   2468         }
   2469     );
   2470 
   2471     for (int i = numUsedTexCoordSets; i < fHWActivePathTexGenSets; i++) {
   2472         SkASSERT(0 != fHWPathTexGenSettings[i].fNumComponents);
   2473 
   2474         this->setTextureUnit(i);
   2475         GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL));
   2476         fHWPathTexGenSettings[i].fNumComponents = 0;
   2477     }
   2478 
   2479     fHWActivePathTexGenSets = numUsedTexCoordSets;
   2480 }
   2481 
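         // Flushes dithering, the color write mask, and face culling from the current draw state,
         // using the cached tri-state and draw-face values to avoid redundant GL calls.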
   2482 void GrGpuGL::flushMiscFixedFunctionState() {
   2483 
   2484     const GrDrawState& drawState = this->getDrawState();
   2485 
   2486     if (drawState.isDitherState()) {
   2487         if (kYes_TriState != fHWDitherEnabled) {
   2488             GL_CALL(Enable(GR_GL_DITHER));
   2489             fHWDitherEnabled = kYes_TriState;
   2490         }
   2491     } else {
   2492         if (kNo_TriState != fHWDitherEnabled) {
   2493             GL_CALL(Disable(GR_GL_DITHER));
   2494             fHWDitherEnabled = kNo_TriState;
   2495         }
   2496     }
   2497 
   2498     if (drawState.isColorWriteDisabled()) {
   2499         if (kNo_TriState != fHWWriteToColor) {
   2500             GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
   2501                               GR_GL_FALSE, GR_GL_FALSE));
   2502             fHWWriteToColor = kNo_TriState;
   2503         }
   2504     } else {
   2505         if (kYes_TriState != fHWWriteToColor) {
   2506             GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
   2507             fHWWriteToColor = kYes_TriState;
   2508         }
   2509     }
   2510 
   2511     if (fHWDrawFace != drawState.getDrawFace()) {
   2512         switch (this->getDrawState().getDrawFace()) {
   2513             case GrDrawState::kCCW_DrawFace:
   2514                 GL_CALL(Enable(GR_GL_CULL_FACE));
   2515                 GL_CALL(CullFace(GR_GL_BACK));
   2516                 break;
   2517             case GrDrawState::kCW_DrawFace:
   2518                 GL_CALL(Enable(GR_GL_CULL_FACE));
   2519                 GL_CALL(CullFace(GR_GL_FRONT));
   2520                 break;
   2521             case GrDrawState::kBoth_DrawFace:
   2522                 GL_CALL(Disable(GR_GL_CULL_FACE));
   2523                 break;
   2524             default:
   2525                 SkFAIL("Unknown draw face.");
   2526         }
   2527         fHWDrawFace = drawState.getDrawFace();
   2528     }
   2529 }
   2530 
   2531 void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) {
   2532     SkASSERT(NULL != renderTarget);
   2533     if (fHWBoundRenderTarget == renderTarget) {
   2534         fHWBoundRenderTarget = NULL;
   2535     }
   2536 }
   2537 
   2538 void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) {
   2539     for (int s = 0; s < fHWBoundTextures.count(); ++s) {
   2540         if (fHWBoundTextures[s] == texture) {
    2541             // Deleting a bound texture implicitly binds 0 to that unit.
   2542             fHWBoundTextures[s] = NULL;
    2543         }
   2544     }
   2545 }
   2546 
   2547 
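         // Allocates a path name for path rendering. A block of 65536 names is reserved from the
         // driver the first time this is called and handed out by fPathNameAllocator; GenPaths is
         // used directly only once that block is exhausted.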
   2548 GrGLuint GrGpuGL::createGLPathObject() {
   2549     if (NULL == fPathNameAllocator.get()) {
   2550         static const int range = 65536;
   2551         GrGLuint firstName;
   2552         GL_CALL_RET(firstName, GenPaths(range));
   2553         fPathNameAllocator.reset(SkNEW_ARGS(GrGLNameAllocator, (firstName, firstName + range)));
   2554     }
   2555 
   2556     GrGLuint name = fPathNameAllocator->allocateName();
   2557     if (0 == name) {
   2558         // Our reserved path names are all in use. Fall back on GenPaths.
   2559         GL_CALL_RET(name, GenPaths(1));
   2560     }
   2561 
   2562     return name;
   2563 }
   2564 
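         // Frees a path name. Names inside fPathNameAllocator's reserved range are returned to the
         // allocator (their path data is cleared but the driver name stays alive for reuse); names
         // from the GenPaths fallback are deleted with DeletePaths.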
   2565 void GrGpuGL::deleteGLPathObject(GrGLuint name) {
   2566     if (NULL == fPathNameAllocator.get() ||
   2567         name < fPathNameAllocator->firstName() ||
   2568         name >= fPathNameAllocator->endName()) {
   2569         // If we aren't inside fPathNameAllocator's range then this name was
   2570         // generated by the GenPaths fallback (or else the name is unallocated).
   2571         GL_CALL(DeletePaths(name, 1));
   2572         return;
   2573     }
   2574 
   2575     // Make the path empty to save memory, but don't free the name in the driver.
   2576     GL_CALL(PathCommands(name, 0, NULL, 0, GR_GL_FLOAT, NULL));
   2577     fPathNameAllocator->free(name);
   2578 }
   2579 
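         // Maps a GrPixelConfig to the GL internal format, external format, and external type used
         // for texture allocation and pixel uploads. Any of the out-params may be NULL. Returns
         // false if the config is not texturable or has no usable format for these caps. A caller
         // might, for example, pass NULL for the external params when only the (sized) internal
         // format is needed.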
   2580 bool GrGpuGL::configToGLFormats(GrPixelConfig config,
   2581                                 bool getSizedInternalFormat,
   2582                                 GrGLenum* internalFormat,
   2583                                 GrGLenum* externalFormat,
   2584                                 GrGLenum* externalType) {
   2585     GrGLenum dontCare;
   2586     if (NULL == internalFormat) {
   2587         internalFormat = &dontCare;
   2588     }
   2589     if (NULL == externalFormat) {
   2590         externalFormat = &dontCare;
   2591     }
   2592     if (NULL == externalType) {
   2593         externalType = &dontCare;
   2594     }
   2595 
    2596     if (!this->glCaps().isConfigTexturable(config)) {
   2597         return false;
   2598     }
   2599 
   2600     switch (config) {
   2601         case kRGBA_8888_GrPixelConfig:
   2602             *internalFormat = GR_GL_RGBA;
   2603             *externalFormat = GR_GL_RGBA;
   2604             if (getSizedInternalFormat) {
   2605                 *internalFormat = GR_GL_RGBA8;
   2606             } else {
   2607                 *internalFormat = GR_GL_RGBA;
   2608             }
   2609             *externalType = GR_GL_UNSIGNED_BYTE;
   2610             break;
   2611         case kBGRA_8888_GrPixelConfig:
   2612             if (this->glCaps().bgraIsInternalFormat()) {
   2613                 if (getSizedInternalFormat) {
   2614                     *internalFormat = GR_GL_BGRA8;
   2615                 } else {
   2616                     *internalFormat = GR_GL_BGRA;
   2617                 }
   2618             } else {
   2619                 if (getSizedInternalFormat) {
   2620                     *internalFormat = GR_GL_RGBA8;
   2621                 } else {
   2622                     *internalFormat = GR_GL_RGBA;
   2623                 }
   2624             }
   2625             *externalFormat = GR_GL_BGRA;
   2626             *externalType = GR_GL_UNSIGNED_BYTE;
   2627             break;
   2628         case kRGB_565_GrPixelConfig:
   2629             *internalFormat = GR_GL_RGB;
   2630             *externalFormat = GR_GL_RGB;
   2631             if (getSizedInternalFormat) {
   2632                 if (this->glStandard() == kGL_GrGLStandard) {
   2633                     return false;
   2634                 } else {
   2635                     *internalFormat = GR_GL_RGB565;
   2636                 }
   2637             } else {
   2638                 *internalFormat = GR_GL_RGB;
   2639             }
   2640             *externalType = GR_GL_UNSIGNED_SHORT_5_6_5;
   2641             break;
   2642         case kRGBA_4444_GrPixelConfig:
   2643             *internalFormat = GR_GL_RGBA;
   2644             *externalFormat = GR_GL_RGBA;
   2645             if (getSizedInternalFormat) {
   2646                 *internalFormat = GR_GL_RGBA4;
   2647             } else {
   2648                 *internalFormat = GR_GL_RGBA;
   2649             }
   2650             *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
   2651             break;
   2652         case kIndex_8_GrPixelConfig:
   2653             // glCompressedTexImage doesn't take external params
   2654             *externalFormat = GR_GL_PALETTE8_RGBA8;
   2655             // no sized/unsized internal format distinction here
   2656             *internalFormat = GR_GL_PALETTE8_RGBA8;
   2657             // unused with CompressedTexImage
   2658             *externalType = GR_GL_UNSIGNED_BYTE;
   2659             break;
   2660         case kAlpha_8_GrPixelConfig:
   2661             if (this->glCaps().textureRedSupport()) {
   2662                 *internalFormat = GR_GL_RED;
   2663                 *externalFormat = GR_GL_RED;
   2664                 if (getSizedInternalFormat) {
   2665                     *internalFormat = GR_GL_R8;
   2666                 } else {
   2667                     *internalFormat = GR_GL_RED;
   2668                 }
   2669                 *externalType = GR_GL_UNSIGNED_BYTE;
   2670             } else {
   2671                 *internalFormat = GR_GL_ALPHA;
   2672                 *externalFormat = GR_GL_ALPHA;
   2673                 if (getSizedInternalFormat) {
   2674                     *internalFormat = GR_GL_ALPHA8;
   2675                 } else {
   2676                     *internalFormat = GR_GL_ALPHA;
   2677                 }
   2678                 *externalType = GR_GL_UNSIGNED_BYTE;
   2679             }
   2680             break;
   2681         case kETC1_GrPixelConfig:
   2682             *internalFormat = GR_GL_COMPRESSED_RGB8_ETC1;
   2683             break;
   2684         case kLATC_GrPixelConfig:
    2685         switch (this->glCaps().latcAlias()) {
   2686                 case GrGLCaps::kLATC_LATCAlias:
   2687                     *internalFormat = GR_GL_COMPRESSED_LUMINANCE_LATC1;
   2688                     break;
   2689                 case GrGLCaps::kRGTC_LATCAlias:
   2690                     *internalFormat = GR_GL_COMPRESSED_RED_RGTC1;
   2691                     break;
   2692                 case GrGLCaps::k3DC_LATCAlias:
   2693                     *internalFormat = GR_GL_COMPRESSED_3DC_X;
   2694                     break;
   2695             }
   2696             break;
   2697         default:
   2698             return false;
   2699     }
   2700     return true;
   2701 }
   2702 
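         // Makes the given texture unit active, skipping the GL call if that unit is already the
         // active one.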
   2703 void GrGpuGL::setTextureUnit(int unit) {
   2704     SkASSERT(unit >= 0 && unit < fHWBoundTextures.count());
   2705     if (unit != fHWActiveTextureUnitIdx) {
   2706         GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
   2707         fHWActiveTextureUnitIdx = unit;
   2708     }
   2709 }
   2710 
   2711 void GrGpuGL::setScratchTextureUnit() {
   2712     // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
   2713     int lastUnitIdx = fHWBoundTextures.count() - 1;
   2714     if (lastUnitIdx != fHWActiveTextureUnitIdx) {
   2715         GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
   2716         fHWActiveTextureUnitIdx = lastUnitIdx;
   2717     }
    2718     // Clear out this field so that if a program does use this unit it will rebind the correct
   2719     // texture.
   2720     fHWBoundTextures[lastUnitIdx] = NULL;
   2721 }
   2722 
   2723 namespace {
   2724 // Determines whether glBlitFramebuffer could be used between src and dst.
   2725 inline bool can_blit_framebuffer(const GrSurface* dst,
   2726                                  const GrSurface* src,
   2727                                  const GrGpuGL* gpu,
   2728                                  bool* wouldNeedTempFBO = NULL) {
   2729     if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
   2730         gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
   2731         gpu->glCaps().usesMSAARenderBuffers()) {
   2732         // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
   2733         // or the rects are not the same (not just the same size but have the same edges).
   2734         if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
   2735             (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
   2736            return false;
   2737         }
   2738         if (NULL != wouldNeedTempFBO) {
   2739             *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget();
   2740         }
   2741         return true;
   2742     } else {
   2743         return false;
   2744     }
   2745 }
   2746 
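         // Determines whether CopyTexSubImage2D could be used to copy from src into dst's texture.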
   2747 inline bool can_copy_texsubimage(const GrSurface* dst,
   2748                                  const GrSurface* src,
   2749                                  const GrGpuGL* gpu,
   2750                                  bool* wouldNeedTempFBO = NULL) {
    2751     // Table 3.9 of the ES2 spec lists the formats supported by CopyTexSubImage, and BGRA
    2752     // isn't among them. There doesn't appear to be any extension that adds it. Perhaps many
    2753     // drivers would allow it to work, but ANGLE does not.
   2754     if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
   2755         (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
   2756         return false;
   2757     }
   2758     const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
   2759     // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
   2760     // then we don't want to copy to the texture but to the MSAA buffer.
   2761     if (NULL != dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
   2762         return false;
   2763     }
   2764     const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
   2765     // If the src is multisampled (and uses an extension where there is a separate MSAA
    2766     // renderbuffer) then it is an invalid operation to call CopyTexSubImage.
   2767     if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
   2768         return false;
   2769     }
   2770     if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
   2771         NULL != dst->asTexture() &&
   2772         dst->origin() == src->origin() &&
   2773         kIndex_8_GrPixelConfig != src->config() &&
   2774         !GrPixelConfigIsCompressed(src->config())) {
   2775         if (NULL != wouldNeedTempFBO) {
   2776             *wouldNeedTempFBO = NULL == src->asRenderTarget();
   2777         }
   2778         return true;
   2779     } else {
   2780         return false;
   2781     }
   2782 }
   2783 
    2784 // Binds the surface to the given FBO target. If a temporary FBO had to be created, its non-zero
    2785 // ID is returned; the viewport that the copy rect is relative to is written to *viewport.
   2786 inline GrGLuint bind_surface_as_fbo(const GrGLInterface* gl,
   2787                                     GrSurface* surface,
   2788                                     GrGLenum fboTarget,
   2789                                     GrGLIRect* viewport) {
   2790     GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
   2791     GrGLuint tempFBOID;
   2792     if (NULL == rt) {
   2793         SkASSERT(NULL != surface->asTexture());
   2794         GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
   2795         GR_GL_CALL(gl, GenFramebuffers(1, &tempFBOID));
   2796         GR_GL_CALL(gl, BindFramebuffer(fboTarget, tempFBOID));
   2797         GR_GL_CALL(gl, FramebufferTexture2D(fboTarget,
   2798                                             GR_GL_COLOR_ATTACHMENT0,
   2799                                             GR_GL_TEXTURE_2D,
   2800                                             texID,
   2801                                             0));
   2802         viewport->fLeft = 0;
   2803         viewport->fBottom = 0;
   2804         viewport->fWidth = surface->width();
   2805         viewport->fHeight = surface->height();
   2806     } else {
   2807         tempFBOID = 0;
   2808         GR_GL_CALL(gl, BindFramebuffer(fboTarget, rt->renderFBOID()));
   2809         *viewport = rt->getViewport();
   2810     }
   2811     return tempFBOID;
   2812 }
   2813 
   2814 }
   2815 
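         // Chooses a dst desc that will let onCopySurface use CopyTexSubImage2D from src when that
         // is viable; otherwise defers to the base class, which sets up the dst to be copied to by
         // blitting or drawing.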
   2816 void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
   2817     // Check for format issues with glCopyTexSubImage2D
   2818     if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
   2819         kBGRA_8888_GrPixelConfig == src->config()) {
   2820         // glCopyTexSubImage2D doesn't work with this config. We'll want to make it a render target
   2821         // in order to call glBlitFramebuffer or to copy to it by rendering.
   2822         INHERITED::initCopySurfaceDstDesc(src, desc);
   2823         return;
   2824     } else if (NULL == src->asRenderTarget()) {
   2825         // We don't want to have to create an FBO just to use glCopyTexSubImage2D. Let the base
   2826         // class handle it by rendering.
   2827         INHERITED::initCopySurfaceDstDesc(src, desc);
   2828         return;
   2829     }
   2830 
   2831     const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
   2832     if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
   2833         // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer.
   2834         INHERITED::initCopySurfaceDstDesc(src, desc);
   2835     } else {
   2836         desc->fConfig = src->config();
   2837         desc->fOrigin = src->origin();
   2838         desc->fFlags = kNone_GrTextureFlags;
   2839     }
   2840 }
   2841 
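         // Copies a rect of src into dst, preferring CopyTexSubImage2D, then BlitFramebuffer, and
         // finally the base class's draw-based copy. Any temporary FBOs created for the copy are
         // deleted before returning, and the cached bound render target is invalidated because the
         // FBO binding is changed.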
   2842 bool GrGpuGL::onCopySurface(GrSurface* dst,
   2843                             GrSurface* src,
   2844                             const SkIRect& srcRect,
   2845                             const SkIPoint& dstPoint) {
   2846     bool inheritedCouldCopy = INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
   2847     bool copied = false;
   2848     bool wouldNeedTempFBO = false;
   2849     if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) &&
   2850         (!wouldNeedTempFBO || !inheritedCouldCopy)) {
   2851         GrGLuint srcFBO;
   2852         GrGLIRect srcVP;
   2853         srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_FRAMEBUFFER, &srcVP);
   2854         GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
   2855         SkASSERT(NULL != dstTex);
   2856         // We modified the bound FBO
   2857         fHWBoundRenderTarget = NULL;
   2858         GrGLIRect srcGLRect;
   2859         srcGLRect.setRelativeTo(srcVP,
   2860                                 srcRect.fLeft,
   2861                                 srcRect.fTop,
   2862                                 srcRect.width(),
   2863                                 srcRect.height(),
   2864                                 src->origin());
   2865 
   2866         this->setScratchTextureUnit();
   2867         GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID()));
   2868         GrGLint dstY;
   2869         if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
   2870             dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
   2871         } else {
   2872             dstY = dstPoint.fY;
   2873         }
   2874         GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0,
   2875                                   dstPoint.fX, dstY,
   2876                                   srcGLRect.fLeft, srcGLRect.fBottom,
   2877                                   srcGLRect.fWidth, srcGLRect.fHeight));
   2878         copied = true;
   2879         if (srcFBO) {
   2880             GL_CALL(DeleteFramebuffers(1, &srcFBO));
   2881         }
   2882     } else if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) &&
   2883                (!wouldNeedTempFBO || !inheritedCouldCopy)) {
   2884         SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
   2885                                             srcRect.width(), srcRect.height());
   2886         bool selfOverlap = false;
   2887         if (dst->isSameAs(src)) {
   2888             selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect);
   2889         }
   2890 
   2891         if (!selfOverlap) {
   2892             GrGLuint dstFBO;
   2893             GrGLuint srcFBO;
   2894             GrGLIRect dstVP;
   2895             GrGLIRect srcVP;
   2896             dstFBO = bind_surface_as_fbo(this->glInterface(), dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP);
   2897             srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_READ_FRAMEBUFFER, &srcVP);
   2898             // We modified the bound FBO
   2899             fHWBoundRenderTarget = NULL;
   2900             GrGLIRect srcGLRect;
   2901             GrGLIRect dstGLRect;
   2902             srcGLRect.setRelativeTo(srcVP,
   2903                                     srcRect.fLeft,
   2904                                     srcRect.fTop,
   2905                                     srcRect.width(),
   2906                                     srcRect.height(),
   2907                                     src->origin());
   2908             dstGLRect.setRelativeTo(dstVP,
   2909                                     dstRect.fLeft,
   2910                                     dstRect.fTop,
   2911                                     dstRect.width(),
   2912                                     dstRect.height(),
   2913                                     dst->origin());
   2914 
   2915             GrAutoTRestore<ScissorState> asr;
   2916             if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
   2917                 // The EXT version applies the scissor during the blit, so disable it.
   2918                 asr.reset(&fScissorState);
   2919                 fScissorState.fEnabled = false;
   2920                 this->flushScissor();
   2921             }
   2922             GrGLint srcY0;
   2923             GrGLint srcY1;
   2924             // Does the blit need to y-mirror or not?
   2925             if (src->origin() == dst->origin()) {
   2926                 srcY0 = srcGLRect.fBottom;
   2927                 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
   2928             } else {
   2929                 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
   2930                 srcY1 = srcGLRect.fBottom;
   2931             }
   2932             GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
   2933                                     srcY0,
   2934                                     srcGLRect.fLeft + srcGLRect.fWidth,
   2935                                     srcY1,
   2936                                     dstGLRect.fLeft,
   2937                                     dstGLRect.fBottom,
   2938                                     dstGLRect.fLeft + dstGLRect.fWidth,
   2939                                     dstGLRect.fBottom + dstGLRect.fHeight,
   2940                                     GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
   2941             if (dstFBO) {
   2942                 GL_CALL(DeleteFramebuffers(1, &dstFBO));
   2943             }
   2944             if (srcFBO) {
   2945                 GL_CALL(DeleteFramebuffers(1, &srcFBO));
   2946             }
   2947             copied = true;
   2948         }
   2949     }
   2950     if (!copied && inheritedCouldCopy) {
   2951         copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint);
   2952         SkASSERT(copied);
   2953     }
   2954     return copied;
   2955 }
   2956 
   2957 bool GrGpuGL::onCanCopySurface(GrSurface* dst,
   2958                                GrSurface* src,
   2959                                const SkIRect& srcRect,
   2960                                const SkIPoint& dstPoint) {
   2961     // This mirrors the logic in onCopySurface.
   2962     if (can_copy_texsubimage(dst, src, this)) {
   2963         return true;
   2964     }
   2965     if (can_blit_framebuffer(dst, src, this)) {
   2966         if (dst->isSameAs(src)) {
   2967             SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
   2968                                                 srcRect.width(), srcRect.height());
    2969             if (!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
   2970                 return true;
   2971             }
   2972         } else {
   2973             return true;
   2974         }
   2975     }
   2976     return INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
   2977 }
   2978 
   2979 void GrGpuGL::didAddGpuTraceMarker() {
   2980     if (this->caps()->gpuTracingSupport()) {
   2981         const GrTraceMarkerSet& markerArray = this->getActiveTraceMarkers();
   2982         SkString markerString = markerArray.toString();
   2983         GL_CALL(PushGroupMarker(0, markerString.c_str()));
   2984     }
   2985 }
   2986 
   2987 void GrGpuGL::didRemoveGpuTraceMarker() {
   2988     if (this->caps()->gpuTracingSupport()) {
   2989         GL_CALL(PopGroupMarker());
   2990     }
   2991 }
   2992 ///////////////////////////////////////////////////////////////////////////////
   2993 
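         // Sets up vertex/index buffer bindings for a draw. On core profiles with a GPU-backed
         // vertex buffer a cached vertex array object is (re)created and bound; otherwise the
         // default vertex array is used and its attribute state is tracked in
         // fDefaultVertexArrayAttribState.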
   2994 GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw(
   2995                                                 GrGpuGL* gpu,
   2996                                                 const GrGLVertexBuffer* vbuffer,
   2997                                                 const GrGLIndexBuffer* ibuffer) {
   2998     SkASSERT(NULL != vbuffer);
   2999     GrGLAttribArrayState* attribState;
   3000 
   3001     // We use a vertex array if we're on a core profile and the verts are in a VBO.
   3002     if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) {
   3003         if (NULL == fVBOVertexArray || fVBOVertexArray->wasDestroyed()) {
   3004             SkSafeUnref(fVBOVertexArray);
   3005             GrGLuint arrayID;
   3006             GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
   3007             int attrCount = gpu->glCaps().maxVertexAttributes();
   3008             fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount));
   3009         }
   3010         attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer);
   3011     } else {
   3012         if (NULL != ibuffer) {
   3013             this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID());
   3014         } else {
   3015             this->setVertexArrayID(gpu, 0);
   3016         }
   3017         int attrCount = gpu->glCaps().maxVertexAttributes();
   3018         if (fDefaultVertexArrayAttribState.count() != attrCount) {
   3019             fDefaultVertexArrayAttribState.resize(attrCount);
   3020         }
   3021         attribState = &fDefaultVertexArrayAttribState;
   3022     }
   3023     return attribState;
   3024 }
   3025