      1 
      2 /*
      3  * Copyright 2011 Google Inc.
      4  *
      5  * Use of this source code is governed by a BSD-style license that can be
      6  * found in the LICENSE file.
      7  */
      8 
      9 
     10 #include "GrContext.h"
     11 
     12 #include "effects/GrSingleTextureEffect.h"
     13 #include "effects/GrConfigConversionEffect.h"
     14 
     15 #include "GrAARectRenderer.h"
     16 #include "GrBufferAllocPool.h"
     17 #include "GrGpu.h"
     18 #include "GrDrawTargetCaps.h"
     19 #include "GrIndexBuffer.h"
     20 #include "GrInOrderDrawBuffer.h"
     21 #include "GrOvalRenderer.h"
     22 #include "GrPathRenderer.h"
     23 #include "GrPathUtils.h"
     24 #include "GrResourceCache.h"
     25 #include "GrSoftwarePathRenderer.h"
     26 #include "GrStencilBuffer.h"
     27 #include "GrTextStrike.h"
     28 #include "SkRTConf.h"
     29 #include "SkRRect.h"
     30 #include "SkStrokeRec.h"
     31 #include "SkTLazy.h"
     32 #include "SkTLS.h"
     33 #include "SkTrace.h"
     34 
     35 // It can be useful to set this to false to test whether a bug is caused by using the
     36 // InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
     37 // debugging simpler.
     38 SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
     39                 "Defers rendering in GrContext via GrInOrderDrawBuffer.");
     40 
     41 #define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)
     42 
     43 #ifdef SK_DEBUG
     44     // change this to a 1 to see notifications when partial coverage fails
     45     #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
     46 #else
     47     #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
     48 #endif
     49 
     50 static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
     51 static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;
     52 
     53 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
     54 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
     55 
     56 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
     57 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
     58 
     59 #define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
     60 
     61 // Glorified typedef to avoid including GrDrawState.h in GrContext.h
     62 class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
     63 
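         // Scoped helper: on destruction it flushes the context if the resource cache requested a
         // flush (OverbudgetCB sets fFlushToReduceCacheSize) while this scope was drawing.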
     64 class GrContext::AutoCheckFlush {
     65 public:
     66     AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }
     67 
     68     ~AutoCheckFlush() {
     69         if (fContext->fFlushToReduceCacheSize) {
     70             fContext->flush();
     71         }
     72     }
     73 
     74 private:
     75     GrContext* fContext;
     76 };
     77 
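         // Caller-side usage sketch (not part of this file; assumes a GL backend and that the
         // caller obtained a native GL interface, e.g. via GrGLCreateNativeInterface()):
         //   GrContext* context = GrContext::Create(kOpenGL_GrBackend,
         //                                          (GrBackendContext) GrGLCreateNativeInterface());
         //   if (NULL != context) { /* ... draw ... */ context->unref(); }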
     78 GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
     79     GrContext* context = SkNEW(GrContext);
     80     if (context->init(backend, backendContext)) {
     81         return context;
     82     } else {
     83         context->unref();
     84         return NULL;
     85     }
     86 }
     87 
     88 GrContext::GrContext() {
     89     fDrawState = NULL;
     90     fGpu = NULL;
     91     fClip = NULL;
     92     fPathRendererChain = NULL;
     93     fSoftwarePathRenderer = NULL;
     94     fTextureCache = NULL;
     95     fFontCache = NULL;
     96     fDrawBuffer = NULL;
     97     fDrawBufferVBAllocPool = NULL;
     98     fDrawBufferIBAllocPool = NULL;
     99     fFlushToReduceCacheSize = false;
    100     fAARectRenderer = NULL;
    101     fOvalRenderer = NULL;
    102     fViewMatrix.reset();
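             // effectively "no override": getMaxTextureSize() returns the smaller of this and the
             // GPU's reported maximum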
    103     fMaxTextureSizeOverride = 1 << 20;
    104 }
    105 
    106 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    107     SkASSERT(NULL == fGpu);
    108 
    109     fGpu = GrGpu::Create(backend, backendContext, this);
    110     if (NULL == fGpu) {
    111         return false;
    112     }
    113 
    114     fDrawState = SkNEW(GrDrawState);
    115     fGpu->setDrawState(fDrawState);
    116 
    117     fTextureCache = SkNEW_ARGS(GrResourceCache,
    118                                (MAX_RESOURCE_CACHE_COUNT,
    119                                 MAX_RESOURCE_CACHE_BYTES));
    120     fTextureCache->setOverbudgetCallback(OverbudgetCB, this);
    121 
    122     fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
    123 
    124     fLastDrawWasBuffered = kNo_BufferedDraw;
    125 
    126     fAARectRenderer = SkNEW(GrAARectRenderer);
    127     fOvalRenderer = SkNEW(GrOvalRenderer);
    128 
    129     fDidTestPMConversions = false;
    130 
    131     this->setupDrawBuffer();
    132 
    133     return true;
    134 }
    135 
    136 GrContext::~GrContext() {
    137     if (NULL == fGpu) {
    138         return;
    139     }
    140 
    141     this->flush();
    142 
    143     for (int i = 0; i < fCleanUpData.count(); ++i) {
    144         (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    145     }
    146 
    147     // Since the gpu can hold scratch textures, give it a chance to let go
    148     // of them before freeing the texture cache
    149     fGpu->purgeResources();
    150 
    151     delete fTextureCache;
    152     fTextureCache = NULL;
    153     delete fFontCache;
    154     delete fDrawBuffer;
    155     delete fDrawBufferVBAllocPool;
    156     delete fDrawBufferIBAllocPool;
    157 
    158     fAARectRenderer->unref();
    159     fOvalRenderer->unref();
    160 
    161     fGpu->unref();
    162     SkSafeUnref(fPathRendererChain);
    163     SkSafeUnref(fSoftwarePathRenderer);
    164     fDrawState->unref();
    165 }
    166 
    167 void GrContext::contextLost() {
    168     this->contextDestroyed();
    169     this->setupDrawBuffer();
    170 }
    171 
    172 void GrContext::contextDestroyed() {
     173     // abandon first so the destructors
     174     // don't try to free the resources in the API.
    175     fGpu->abandonResources();
    176 
    177     // a path renderer may be holding onto resources that
    178     // are now unusable
    179     SkSafeSetNull(fPathRendererChain);
    180     SkSafeSetNull(fSoftwarePathRenderer);
    181 
    182     delete fDrawBuffer;
    183     fDrawBuffer = NULL;
    184 
    185     delete fDrawBufferVBAllocPool;
    186     fDrawBufferVBAllocPool = NULL;
    187 
    188     delete fDrawBufferIBAllocPool;
    189     fDrawBufferIBAllocPool = NULL;
    190 
    191     fAARectRenderer->reset();
    192     fOvalRenderer->reset();
    193 
    194     fTextureCache->purgeAllUnlocked();
    195 
    196     fFontCache->freeAll();
    197     fGpu->markContextDirty();
    198 }
    199 
    200 void GrContext::resetContext(uint32_t state) {
    201     fGpu->markContextDirty(state);
    202 }
    203 
    204 void GrContext::freeGpuResources() {
    205     this->flush();
    206 
    207     fGpu->purgeResources();
    208 
    209     fAARectRenderer->reset();
    210     fOvalRenderer->reset();
    211 
    212     fTextureCache->purgeAllUnlocked();
    213     fFontCache->freeAll();
    214     // a path renderer may be holding onto resources
    215     SkSafeSetNull(fPathRendererChain);
    216     SkSafeSetNull(fSoftwarePathRenderer);
    217 }
    218 
    219 size_t GrContext::getGpuTextureCacheBytes() const {
    220   return fTextureCache->getCachedResourceBytes();
    221 }
    222 
    223 ////////////////////////////////////////////////////////////////////////////////
    224 
    225 GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
    226                                         const GrCacheID& cacheID,
    227                                         const GrTextureParams* params) {
    228     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    229     GrResource* resource = fTextureCache->find(resourceKey);
    230     SkSafeRef(resource);
    231     return static_cast<GrTexture*>(resource);
    232 }
    233 
    234 bool GrContext::isTextureInCache(const GrTextureDesc& desc,
    235                                  const GrCacheID& cacheID,
    236                                  const GrTextureParams* params) const {
    237     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    238     return fTextureCache->hasKey(resourceKey);
    239 }
    240 
    241 void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    242     ASSERT_OWNED_RESOURCE(sb);
    243 
    244     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
    245                                                             sb->height(),
    246                                                             sb->numSamples());
    247     fTextureCache->addResource(resourceKey, sb);
    248 }
    249 
    250 GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
    251                                               int sampleCnt) {
    252     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
    253                                                             height,
    254                                                             sampleCnt);
    255     GrResource* resource = fTextureCache->find(resourceKey);
    256     return static_cast<GrStencilBuffer*>(resource);
    257 }
    258 
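         // Nearest-neighbor stretch of src into dst using 16.16 fixed-point stepping: dx/dy are
         // the source steps per destination pixel, and starting the accumulators at dx>>1 / dy>>1
         // samples the source at destination pixel centers. No filtering is performed.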
    259 static void stretchImage(void* dst,
    260                          int dstW,
    261                          int dstH,
    262                          void* src,
    263                          int srcW,
    264                          int srcH,
    265                          size_t bpp) {
    266     GrFixed dx = (srcW << 16) / dstW;
    267     GrFixed dy = (srcH << 16) / dstH;
    268 
    269     GrFixed y = dy >> 1;
    270 
    271     size_t dstXLimit = dstW*bpp;
    272     for (int j = 0; j < dstH; ++j) {
    273         GrFixed x = dx >> 1;
    274         void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
    275         void* dstRow = (uint8_t*)dst + j*dstW*bpp;
    276         for (size_t i = 0; i < dstXLimit; i += bpp) {
    277             memcpy((uint8_t*) dstRow + i,
    278                    (uint8_t*) srcRow + (x>>16)*bpp,
    279                    bpp);
    280             x += dx;
    281         }
    282         y += dy;
    283     }
    284 }
    285 
    286 namespace {
    287 
    288 // position + local coordinate
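         // each entry is {attribute type, byte offset within the interleaved vertex, binding}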
    289 extern const GrVertexAttrib gVertexAttribs[] = {
    290     {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    291     {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}
    292 };
    293 
    294 };
    295 
    296 // The desired texture is NPOT and tiled but that isn't supported by
    297 // the current hardware. Resize the texture to be a POT
    298 GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
    299                                            const GrCacheID& cacheID,
    300                                            void* srcData,
    301                                            size_t rowBytes,
    302                                            bool filter) {
    303     SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    304     if (NULL == clampedTexture) {
    305         clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
    306 
    307         if (NULL == clampedTexture) {
    308             return NULL;
    309         }
    310     }
    311 
    312     GrTextureDesc rtDesc = desc;
    313     rtDesc.fFlags =  rtDesc.fFlags |
    314                      kRenderTarget_GrTextureFlagBit |
    315                      kNoStencil_GrTextureFlagBit;
    316     rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    317     rtDesc.fHeight = GrNextPow2(desc.fHeight);
    318 
    319     GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
    320 
    321     if (NULL != texture) {
    322         GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    323         GrDrawState* drawState = fGpu->drawState();
    324         drawState->setRenderTarget(texture->asRenderTarget());
    325 
    326         // if filtering is not desired then we want to ensure all
    327         // texels in the resampled image are copies of texels from
    328         // the original.
    329         GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
    330                                                                    GrTextureParams::kNone_FilterMode);
    331         drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);
    332 
    333         drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));
    334 
    335         GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);
    336 
    337         if (arg.succeeded()) {
    338             GrPoint* verts = (GrPoint*) arg.vertices();
    339             verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint));
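                     // Interleaved quad: even GrPoints are positions covering the full POT target,
                     // odd GrPoints are local coords spanning [0,1] so the entire clamped texture
                     // is resampled.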
    340             verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint));
    341             fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
    342         }
    343     } else {
    344         // TODO: Our CPU stretch doesn't filter. But we create separate
     345         // stretched textures when the texture params are either filtered or
     346         // not. Either implement filtered stretch blit on CPU or just create
     347         // one when the FBO case fails.
    348 
    349         rtDesc.fFlags = kNone_GrTextureFlags;
    350         // no longer need to clamp at min RT size.
    351         rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    352         rtDesc.fHeight = GrNextPow2(desc.fHeight);
    353         size_t bpp = GrBytesPerPixel(desc.fConfig);
    354         SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
    355         stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
    356                      srcData, desc.fWidth, desc.fHeight, bpp);
    357 
    358         size_t stretchedRowBytes = rtDesc.fWidth * bpp;
    359 
    360         SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
    361                                                               stretchedRowBytes);
    362         SkASSERT(NULL != texture);
    363     }
    364 
    365     return texture;
    366 }
    367 
    368 GrTexture* GrContext::createTexture(const GrTextureParams* params,
    369                                     const GrTextureDesc& desc,
    370                                     const GrCacheID& cacheID,
    371                                     void* srcData,
    372                                     size_t rowBytes,
    373                                     GrResourceKey* cacheKey) {
    374     SK_TRACE_EVENT0("GrContext::createTexture");
    375 
    376     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    377 
    378     GrTexture* texture;
    379     if (GrTexture::NeedsResizing(resourceKey)) {
    380         texture = this->createResizedTexture(desc, cacheID,
    381                                              srcData, rowBytes,
    382                                              GrTexture::NeedsBilerp(resourceKey));
    383     } else {
     384         texture = fGpu->createTexture(desc, srcData, rowBytes);
    385     }
    386 
    387     if (NULL != texture) {
    388         // Adding a resource could put us overbudget. Try to free up the
    389         // necessary space before adding it.
    390         fTextureCache->purgeAsNeeded(1, texture->sizeInBytes());
    391         fTextureCache->addResource(resourceKey, texture);
    392 
    393         if (NULL != cacheKey) {
    394             *cacheKey = resourceKey;
    395         }
    396     }
    397 
    398     return texture;
    399 }
    400 
    401 static GrTexture* create_scratch_texture(GrGpu* gpu,
    402                                          GrResourceCache* textureCache,
    403                                          const GrTextureDesc& desc) {
    404     GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    405     if (NULL != texture) {
    406         GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
    407         // Adding a resource could put us overbudget. Try to free up the
    408         // necessary space before adding it.
    409         textureCache->purgeAsNeeded(1, texture->sizeInBytes());
    410         // Make the resource exclusive so future 'find' calls don't return it
    411         textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    412     }
    413     return texture;
    414 }
    415 
    416 GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
    417 
    418     SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
    419              !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
    420 
    421     // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    422     SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
    423              !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
    424              (inDesc.fConfig != kAlpha_8_GrPixelConfig));
    425 
    426     if (!fGpu->caps()->reuseScratchTextures() &&
    427         !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
    428         // If we're never recycling this texture we can always make it the right size
    429         return create_scratch_texture(fGpu, fTextureCache, inDesc);
    430     }
    431 
    432     GrTextureDesc desc = inDesc;
    433 
    434     if (kApprox_ScratchTexMatch == match) {
    435         // bin by pow2 with a reasonable min
    436         static const int MIN_SIZE = 16;
    437         desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
    438         desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    439     }
    440 
    441     GrResource* resource = NULL;
    442     int origWidth = desc.fWidth;
    443     int origHeight = desc.fHeight;
    444 
    445     do {
    446         GrResourceKey key = GrTexture::ComputeScratchKey(desc);
    447         // Ensure we have exclusive access to the texture so future 'find' calls don't return it
    448         resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
    449         if (NULL != resource) {
    450             resource->ref();
    451             break;
    452         }
    453         if (kExact_ScratchTexMatch == match) {
    454             break;
    455         }
     456         // We had a cache miss and we are in approx mode, so relax the fit of the flags.
    457 
    458         // We no longer try to reuse textures that were previously used as render targets in
    459         // situations where no RT is needed; doing otherwise can confuse the video driver and
    460         // cause significant performance problems in some cases.
    461         if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
    462             desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
    463         } else {
    464             break;
    465         }
    466 
    467     } while (true);
    468 
    469     if (NULL == resource) {
    470         desc.fFlags = inDesc.fFlags;
    471         desc.fWidth = origWidth;
    472         desc.fHeight = origHeight;
    473         resource = create_scratch_texture(fGpu, fTextureCache, desc);
    474     }
    475 
    476     return static_cast<GrTexture*>(resource);
    477 }
    478 
    479 void GrContext::addExistingTextureToCache(GrTexture* texture) {
    480 
    481     if (NULL == texture) {
    482         return;
    483     }
    484 
    485     // This texture should already have a cache entry since it was once
    486     // attached
    487     SkASSERT(NULL != texture->getCacheEntry());
    488 
    489     // Conceptually, the cache entry is going to assume responsibility
    490     // for the creation ref. Assert refcnt == 1.
    491     SkASSERT(texture->unique());
    492 
    493     if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
    494         // Since this texture came from an AutoScratchTexture it should
    495         // still be in the exclusive pile. Recycle it.
    496         fTextureCache->makeNonExclusive(texture->getCacheEntry());
    497         this->purgeCache();
    498     } else if (texture->getDeferredRefCount() <= 0) {
    499         // When we aren't reusing textures we know this scratch texture
    500         // will never be reused and would be just wasting time in the cache
    501         fTextureCache->makeNonExclusive(texture->getCacheEntry());
    502         fTextureCache->deleteResource(texture->getCacheEntry());
    503     } else {
     504         // In this case fDeferredRefCount > 0 but the cache is the only
     505         // one holding a real ref. Mark the object so that when the deferred
     506         // ref count goes to 0 the texture will be deleted (remember
    507         // in this code path scratch textures aren't getting reused).
    508         texture->setNeedsDeferredUnref();
    509     }
    510 }
    511 
    512 
    513 void GrContext::unlockScratchTexture(GrTexture* texture) {
    514     ASSERT_OWNED_RESOURCE(texture);
    515     SkASSERT(NULL != texture->getCacheEntry());
    516 
    517     // If this is a scratch texture we detached it from the cache
    518     // while it was locked (to avoid two callers simultaneously getting
    519     // the same texture).
    520     if (texture->getCacheEntry()->key().isScratch()) {
    521         if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
    522             fTextureCache->makeNonExclusive(texture->getCacheEntry());
    523             this->purgeCache();
    524         } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
    525             // Only the cache now knows about this texture. Since we're never
    526             // reusing scratch textures (in this code path) it would just be
    527             // wasting time sitting in the cache.
    528             fTextureCache->makeNonExclusive(texture->getCacheEntry());
    529             fTextureCache->deleteResource(texture->getCacheEntry());
    530         } else {
     531             // In this case fRefCnt > 1 || defRefCnt > 0, but we don't really
     532             // want to re-add it to the cache (since it will never be reused).
    533             // Instead, give up the cache's ref and leave the decision up to
    534             // addExistingTextureToCache once its ref count reaches 0. For
    535             // this to work we need to leave it in the exclusive list.
    536             texture->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
    537             // Give up the cache's ref to the texture
    538             texture->unref();
    539         }
    540     }
    541 }
    542 
    543 void GrContext::purgeCache() {
    544     if (NULL != fTextureCache) {
    545         fTextureCache->purgeAsNeeded();
    546     }
    547 }
    548 
    549 bool GrContext::OverbudgetCB(void* data) {
    550     SkASSERT(NULL != data);
    551 
    552     GrContext* context = reinterpret_cast<GrContext*>(data);
    553 
    554     // Flush the InOrderDrawBuffer to possibly free up some textures
    555     context->fFlushToReduceCacheSize = true;
    556 
    557     return true;
    558 }
    559 
    560 
    561 GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
    562                                             void* srcData,
    563                                             size_t rowBytes) {
    564     GrTextureDesc descCopy = descIn;
    565     return fGpu->createTexture(descCopy, srcData, rowBytes);
    566 }
    567 
    568 void GrContext::getTextureCacheLimits(int* maxTextures,
    569                                       size_t* maxTextureBytes) const {
    570     fTextureCache->getLimits(maxTextures, maxTextureBytes);
    571 }
    572 
    573 void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    574     fTextureCache->setLimits(maxTextures, maxTextureBytes);
    575 }
    576 
    577 int GrContext::getMaxTextureSize() const {
    578     return GrMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
    579 }
    580 
    581 int GrContext::getMaxRenderTargetSize() const {
    582     return fGpu->caps()->maxRenderTargetSize();
    583 }
    584 
    585 int GrContext::getMaxSampleCount() const {
    586     return fGpu->caps()->maxSampleCount();
    587 }
    588 
    589 ///////////////////////////////////////////////////////////////////////////////
    590 
    591 GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    592     return fGpu->wrapBackendTexture(desc);
    593 }
    594 
    595 GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    596     return fGpu->wrapBackendRenderTarget(desc);
    597 }
    598 
    599 ///////////////////////////////////////////////////////////////////////////////
    600 
    601 bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
    602                                           int width, int height) const {
    603     const GrDrawTargetCaps* caps = fGpu->caps();
    604     if (!caps->eightBitPaletteSupport()) {
    605         return false;
    606     }
    607 
    608     bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
    609 
    610     if (!isPow2) {
    611         bool tiled = NULL != params && params->isTiled();
    612         if (tiled && !caps->npotTextureTileSupport()) {
    613             return false;
    614         }
    615     }
    616     return true;
    617 }
    618 
    619 
    620 ////////////////////////////////////////////////////////////////////////////////
    621 
    622 void GrContext::clear(const SkIRect* rect,
    623                       const GrColor color,
    624                       bool canIgnoreRect,
    625                       GrRenderTarget* target) {
    626     AutoRestoreEffects are;
    627     AutoCheckFlush acf(this);
    628     this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color,
    629                                                                 canIgnoreRect, target);
    630 }
    631 
    632 void GrContext::drawPaint(const GrPaint& origPaint) {
    633     // set rect to be big enough to fill the space, but not super-huge, so we
    634     // don't overflow fixed-point implementations
    635     SkRect r;
    636     r.setLTRB(0, 0,
    637               SkIntToScalar(getRenderTarget()->width()),
    638               SkIntToScalar(getRenderTarget()->height()));
    639     SkMatrix inverse;
    640     SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    641     AutoMatrix am;
    642 
    643     // We attempt to map r by the inverse matrix and draw that. mapRect will
    644     // map the four corners and bound them with a new rect. This will not
    645     // produce a correct result for some perspective matrices.
    646     if (!this->getMatrix().hasPerspective()) {
    647         if (!fViewMatrix.invert(&inverse)) {
    648             GrPrintf("Could not invert matrix\n");
    649             return;
    650         }
    651         inverse.mapRect(&r);
    652     } else {
    653         if (!am.setIdentity(this, paint.writable())) {
    654             GrPrintf("Could not invert matrix\n");
    655             return;
    656         }
    657     }
    658     // by definition this fills the entire clip, no need for AA
    659     if (paint->isAntiAlias()) {
    660         paint.writable()->setAntiAlias(false);
    661     }
    662     this->drawRect(*paint, r);
    663 }
    664 
    665 #ifdef SK_DEVELOPER
    666 void GrContext::dumpFontCache() const {
    667     fFontCache->dump();
    668 }
    669 #endif
    670 
    671 ////////////////////////////////////////////////////////////////////////////////
    672 
     673 /*  create a triangle strip that strokes the specified rect. There are 8
     674  unique vertices, but we repeat the last 2 to close up. Alternatively we
     675  could use an index array and then only send 8 verts, but it's not clear that
     676  would be faster.
    677  */
    678 static void setStrokeRectStrip(GrPoint verts[10], SkRect rect,
    679                                SkScalar width) {
    680     const SkScalar rad = SkScalarHalf(width);
    681     rect.sort();
    682 
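             // Vertices alternate inner (inset by rad) and outer (outset by rad) corners, walking
             // TL -> TR -> BR -> BL; the first two are repeated at the end to close the strip.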
    683     verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    684     verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    685     verts[2].set(rect.fRight - rad, rect.fTop + rad);
    686     verts[3].set(rect.fRight + rad, rect.fTop - rad);
    687     verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    688     verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    689     verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    690     verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    691     verts[8] = verts[0];
    692     verts[9] = verts[1];
    693 }
    694 
    695 static bool isIRect(const SkRect& r) {
    696     return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
    697            SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
    698 }
    699 
    700 static bool apply_aa_to_rect(GrDrawTarget* target,
    701                              const SkRect& rect,
    702                              SkScalar strokeWidth,
    703                              const SkMatrix& combinedMatrix,
    704                              SkRect* devBoundRect,
    705                              bool* useVertexCoverage) {
    706     // we use a simple coverage ramp to do aa on axis-aligned rects
     707     // we check that the rect will be axis-aligned and that it won't land on
     708     // integer coords.
    709 
    710     // we are keeping around the "tweak the alpha" trick because
    711     // it is our only hope for the fixed-pipe implementation.
    712     // In a shader implementation we can give a separate coverage input
    713     // TODO: remove this ugliness when we drop the fixed-pipe impl
    714     *useVertexCoverage = false;
    715     if (!target->getDrawState().canTweakAlphaForCoverage()) {
    716         if (target->shouldDisableCoverageAAForBlend()) {
    717 #ifdef SK_DEBUG
    718             //GrPrintf("Turning off AA to correctly apply blend.\n");
    719 #endif
    720             return false;
    721         } else {
    722             *useVertexCoverage = true;
    723         }
    724     }
    725     const GrDrawState& drawState = target->getDrawState();
    726     if (drawState.getRenderTarget()->isMultisampled()) {
    727         return false;
    728     }
    729 
    730     if (0 == strokeWidth && target->willUseHWAALines()) {
    731         return false;
    732     }
    733 
    734 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    735     if (strokeWidth >= 0) {
    736 #endif
    737         if (!combinedMatrix.preservesAxisAlignment()) {
    738             return false;
    739         }
    740 
    741 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    742     } else {
    743         if (!combinedMatrix.preservesRightAngles()) {
    744             return false;
    745         }
    746     }
    747 #endif
    748 
    749     combinedMatrix.mapRect(devBoundRect, rect);
    750 
    751     if (strokeWidth < 0) {
    752         return !isIRect(*devBoundRect);
    753     } else {
    754         return true;
    755     }
    756 }
    757 
    758 static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    759     return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
    760            point.fY >= rect.fTop && point.fY <= rect.fBottom;
    761 }
    762 
    763 void GrContext::drawRect(const GrPaint& paint,
    764                          const SkRect& rect,
    765                          const SkStrokeRec* stroke,
    766                          const SkMatrix* matrix) {
    767     SK_TRACE_EVENT0("GrContext::drawRect");
    768 
    769     AutoRestoreEffects are;
    770     AutoCheckFlush acf(this);
    771     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    772 
    773     SkScalar width = stroke == NULL ? -1 : stroke->getWidth();
    774     SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
    775     if (NULL != matrix) {
    776         combinedMatrix.preConcat(*matrix);
    777     }
    778 
    779     // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    780     // cases where the RT is fully inside a stroke.
    781     if (width < 0) {
    782         SkRect rtRect;
    783         target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
    784         SkRect clipSpaceRTRect = rtRect;
    785         bool checkClip = false;
    786         if (NULL != this->getClip()) {
    787             checkClip = true;
    788             clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
    789                                    SkIntToScalar(this->getClip()->fOrigin.fY));
    790         }
    791         // Does the clip contain the entire RT?
    792         if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
    793             SkMatrix invM;
    794             if (!combinedMatrix.invert(&invM)) {
    795                 return;
    796             }
    797             // Does the rect bound the RT?
    798             SkPoint srcSpaceRTQuad[4];
    799             invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
    800             if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
    801                 rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
    802                 rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
    803                 rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
    804                 // Will it blend?
    805                 GrColor clearColor;
    806                 if (paint.isOpaqueAndConstantColor(&clearColor)) {
    807                     target->clear(NULL, clearColor, true);
    808                     return;
    809                 }
    810             }
    811         }
    812     }
    813 
    814     SkRect devBoundRect;
    815     bool useVertexCoverage;
    816     bool needAA = paint.isAntiAlias() &&
    817                   !target->getDrawState().getRenderTarget()->isMultisampled();
    818     bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
    819                                            &useVertexCoverage);
    820     if (doAA) {
    821         GrDrawState::AutoViewMatrixRestore avmr;
    822         if (!avmr.setIdentity(target->drawState())) {
    823             return;
    824         }
    825         if (width >= 0) {
    826             fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
    827                                           combinedMatrix, devBoundRect,
    828                                           stroke, useVertexCoverage);
    829         } else {
    830             // filled AA rect
    831             fAARectRenderer->fillAARect(this->getGpu(), target,
    832                                         rect, combinedMatrix, devBoundRect,
    833                                         useVertexCoverage);
    834         }
    835         return;
    836     }
    837 
    838     if (width >= 0) {
    839         // TODO: consider making static vertex buffers for these cases.
    840         // Hairline could be done by just adding closing vertex to
    841         // unitSquareVertexBuffer()
    842 
    843         static const int worstCaseVertCount = 10;
    844         target->drawState()->setDefaultVertexAttribs();
    845         GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);
    846 
    847         if (!geo.succeeded()) {
    848             GrPrintf("Failed to get space for vertices!\n");
    849             return;
    850         }
    851 
    852         GrPrimitiveType primType;
    853         int vertCount;
    854         GrPoint* vertex = geo.positions();
    855 
    856         if (width > 0) {
    857             vertCount = 10;
    858             primType = kTriangleStrip_GrPrimitiveType;
    859             setStrokeRectStrip(vertex, rect, width);
    860         } else {
    861             // hairline
    862             vertCount = 5;
    863             primType = kLineStrip_GrPrimitiveType;
    864             vertex[0].set(rect.fLeft, rect.fTop);
    865             vertex[1].set(rect.fRight, rect.fTop);
    866             vertex[2].set(rect.fRight, rect.fBottom);
    867             vertex[3].set(rect.fLeft, rect.fBottom);
    868             vertex[4].set(rect.fLeft, rect.fTop);
    869         }
    870 
    871         GrDrawState::AutoViewMatrixRestore avmr;
    872         if (NULL != matrix) {
    873             GrDrawState* drawState = target->drawState();
    874             avmr.set(drawState, *matrix);
    875         }
    876 
    877         target->drawNonIndexed(primType, 0, vertCount);
    878     } else {
    879         // filled BW rect
    880         target->drawSimpleRect(rect, matrix);
    881     }
    882 }
    883 
    884 void GrContext::drawRectToRect(const GrPaint& paint,
    885                                const SkRect& dstRect,
    886                                const SkRect& localRect,
    887                                const SkMatrix* dstMatrix,
    888                                const SkMatrix* localMatrix) {
    889     SK_TRACE_EVENT0("GrContext::drawRectToRect");
    890     AutoRestoreEffects are;
    891     AutoCheckFlush acf(this);
    892     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    893 
    894     target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
    895 }
    896 
    897 namespace {
    898 
    899 extern const GrVertexAttrib gPosUVColorAttribs[] = {
    900     {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    901     {kVec2f_GrVertexAttribType,  sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding },
    902     {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding}
    903 };
    904 
    905 extern const GrVertexAttrib gPosColorAttribs[] = {
    906     {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    907     {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
    908 };
    909 
    910 static void set_vertex_attributes(GrDrawState* drawState,
    911                                   const GrPoint* texCoords,
    912                                   const GrColor* colors,
    913                                   int* colorOffset,
    914                                   int* texOffset) {
    915     *texOffset = -1;
    916     *colorOffset = -1;
    917 
    918     if (NULL != texCoords && NULL != colors) {
    919         *texOffset = sizeof(GrPoint);
    920         *colorOffset = 2*sizeof(GrPoint);
    921         drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    922     } else if (NULL != texCoords) {
    923         *texOffset = sizeof(GrPoint);
    924         drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    925     } else if (NULL != colors) {
    926         *colorOffset = sizeof(GrPoint);
    927         drawState->setVertexAttribs<gPosColorAttribs>(2);
    928     } else {
    929         drawState->setVertexAttribs<gPosColorAttribs>(1);
    930     }
    931 }
    932 
    933 };
    934 
    935 void GrContext::drawVertices(const GrPaint& paint,
    936                              GrPrimitiveType primitiveType,
    937                              int vertexCount,
    938                              const GrPoint positions[],
    939                              const GrPoint texCoords[],
    940                              const GrColor colors[],
    941                              const uint16_t indices[],
    942                              int indexCount) {
    943     SK_TRACE_EVENT0("GrContext::drawVertices");
    944 
    945     GrDrawTarget::AutoReleaseGeometry geo;
    946 
    947     AutoRestoreEffects are;
    948     AutoCheckFlush acf(this);
    949     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    950 
    951     GrDrawState* drawState = target->drawState();
    952 
    953     int colorOffset = -1, texOffset = -1;
    954     set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
    955 
    956     size_t vertexSize = drawState->getVertexSize();
    957     if (sizeof(GrPoint) != vertexSize) {
    958         if (!geo.set(target, vertexCount, 0)) {
    959             GrPrintf("Failed to get space for vertices!\n");
    960             return;
    961         }
    962         void* curVertex = geo.vertices();
    963 
    964         for (int i = 0; i < vertexCount; ++i) {
    965             *((GrPoint*)curVertex) = positions[i];
    966 
    967             if (texOffset >= 0) {
    968                 *(GrPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
    969             }
    970             if (colorOffset >= 0) {
    971                 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
    972             }
    973             curVertex = (void*)((intptr_t)curVertex + vertexSize);
    974         }
    975     } else {
    976         target->setVertexSourceToArray(positions, vertexCount);
    977     }
    978 
    979     // we don't currently apply offscreen AA to this path. Need improved
    980     // management of GrDrawTarget's geometry to avoid copying points per-tile.
    981 
    982     if (NULL != indices) {
    983         target->setIndexSourceToArray(indices, indexCount);
    984         target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    985         target->resetIndexSource();
    986     } else {
    987         target->drawNonIndexed(primitiveType, 0, vertexCount);
    988     }
    989 }
    990 
    991 ///////////////////////////////////////////////////////////////////////////////
    992 
    993 void GrContext::drawRRect(const GrPaint& paint,
    994                           const SkRRect& rect,
    995                           const SkStrokeRec& stroke) {
    996     if (rect.isEmpty()) {
    997        return;
    998     }
    999 
   1000     AutoRestoreEffects are;
   1001     AutoCheckFlush acf(this);
   1002     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
   1003 
   1004     if (!fOvalRenderer->drawSimpleRRect(target, this, paint.isAntiAlias(), rect, stroke)) {
   1005         SkPath path;
   1006         path.addRRect(rect);
   1007         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
   1008     }
   1009 }
   1010 
   1011 ///////////////////////////////////////////////////////////////////////////////
   1012 
   1013 void GrContext::drawOval(const GrPaint& paint,
   1014                          const SkRect& oval,
   1015                          const SkStrokeRec& stroke) {
   1016     if (oval.isEmpty()) {
   1017        return;
   1018     }
   1019 
   1020     AutoRestoreEffects are;
   1021     AutoCheckFlush acf(this);
   1022     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
   1023 
   1024     if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, stroke)) {
   1025         SkPath path;
   1026         path.addOval(oval);
   1027         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
   1028     }
   1029 }
   1030 
   1031 // Can 'path' be drawn as a pair of filled nested rectangles?
   1032 static bool is_nested_rects(GrDrawTarget* target,
   1033                             const SkPath& path,
   1034                             const SkStrokeRec& stroke,
   1035                             SkRect rects[2],
   1036                             bool* useVertexCoverage) {
   1037     SkASSERT(stroke.isFillStyle());
   1038 
   1039     if (path.isInverseFillType()) {
   1040         return false;
   1041     }
   1042 
   1043     const GrDrawState& drawState = target->getDrawState();
   1044 
   1045     // TODO: this restriction could be lifted if we were willing to apply
   1046     // the matrix to all the points individually rather than just to the rect
   1047     if (!drawState.getViewMatrix().preservesAxisAlignment()) {
   1048         return false;
   1049     }
   1050 
   1051     *useVertexCoverage = false;
   1052     if (!target->getDrawState().canTweakAlphaForCoverage()) {
   1053         if (target->shouldDisableCoverageAAForBlend()) {
   1054             return false;
   1055         } else {
   1056             *useVertexCoverage = true;
   1057         }
   1058     }
   1059 
   1060     SkPath::Direction dirs[2];
   1061     if (!path.isNestedRects(rects, dirs)) {
   1062         return false;
   1063     }
   1064 
   1065     if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
   1066         // The two rects need to be wound opposite to each other
   1067         return false;
   1068     }
   1069 
   1070     // Right now, nested rects where the margin is not the same width
   1071     // all around do not render correctly
   1072     const SkScalar* outer = rects[0].asScalars();
   1073     const SkScalar* inner = rects[1].asScalars();
   1074 
   1075     SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
   1076     for (int i = 1; i < 4; ++i) {
   1077         SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
   1078         if (!SkScalarNearlyEqual(margin, temp)) {
   1079             return false;
   1080         }
   1081     }
   1082 
   1083     return true;
   1084 }
   1085 
   1086 void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {
   1087 
   1088     if (path.isEmpty()) {
   1089        if (path.isInverseFillType()) {
   1090            this->drawPaint(paint);
   1091        }
   1092        return;
   1093     }
   1094 
   1095     // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
   1096     // Scratch textures can be recycled after they are returned to the texture
   1097     // cache. This presents a potential hazard for buffered drawing. However,
   1098     // the writePixels that uploads to the scratch will perform a flush so we're
   1099     // OK.
   1100     AutoRestoreEffects are;
   1101     AutoCheckFlush acf(this);
   1102     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
   1103     GrDrawState* drawState = target->drawState();
   1104 
   1105     bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();
   1106 
   1107     if (useCoverageAA && stroke.getWidth() < 0 && !path.isConvex()) {
   1108         // Concave AA paths are expensive - try to avoid them for special cases
   1109         bool useVertexCoverage;
   1110         SkRect rects[2];
   1111 
   1112         if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
   1113             SkMatrix origViewMatrix = drawState->getViewMatrix();
   1114             GrDrawState::AutoViewMatrixRestore avmr;
   1115             if (!avmr.setIdentity(target->drawState())) {
   1116                 return;
   1117             }
   1118 
   1119             fAARectRenderer->fillAANestedRects(this->getGpu(), target,
   1120                                                rects,
   1121                                                origViewMatrix,
   1122                                                useVertexCoverage);
   1123             return;
   1124         }
   1125     }
   1126 
   1127     SkRect ovalRect;
   1128     bool isOval = path.isOval(&ovalRect);
   1129 
   1130     if (!isOval || path.isInverseFillType()
   1131         || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, stroke)) {
   1132         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
   1133     }
   1134 }
   1135 
   1136 void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
   1137                                  const SkStrokeRec& stroke) {
   1138     SkASSERT(!path.isEmpty());
   1139 
    1140     // An assumption here is that the path renderer would use some form of tweaking
    1141     // the src color (either the input alpha or in the frag shader) to implement
    1142     // AA. If we have some future driver-mojo path AA that can do the right
    1143     // thing WRT the blend then we'll need some query on the PR.
   1144     bool useCoverageAA = useAA &&
   1145         !target->getDrawState().getRenderTarget()->isMultisampled() &&
   1146         !target->shouldDisableCoverageAAForBlend();
   1147 
   1148 
   1149     GrPathRendererChain::DrawType type =
   1150         useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
   1151                            GrPathRendererChain::kColor_DrawType;
   1152 
   1153     const SkPath* pathPtr = &path;
   1154     SkPath tmpPath;
   1155     SkStrokeRec strokeRec(stroke);
   1156 
   1157     // Try a 1st time without stroking the path and without allowing the SW renderer
   1158     GrPathRenderer* pr = this->getPathRenderer(*pathPtr, strokeRec, target, false, type);
   1159 
   1160     if (NULL == pr) {
   1161         if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(strokeRec, this->getMatrix(), NULL)) {
   1162             // It didn't work the 1st time, so try again with the stroked path
   1163             if (strokeRec.applyToPath(&tmpPath, *pathPtr)) {
   1164                 pathPtr = &tmpPath;
   1165                 strokeRec.setFillStyle();
   1166                 if (pathPtr->isEmpty()) {
   1167                     return;
   1168                 }
   1169             }
   1170         }
   1171 
   1172         // This time, allow SW renderer
   1173         pr = this->getPathRenderer(*pathPtr, strokeRec, target, true, type);
   1174     }
   1175 
   1176     if (NULL == pr) {
   1177 #ifdef SK_DEBUG
   1178         GrPrintf("Unable to find path renderer compatible with path.\n");
   1179 #endif
   1180         return;
   1181     }
   1182 
   1183     pr->drawPath(*pathPtr, strokeRec, target, useCoverageAA);
   1184 }
   1185 
   1186 ////////////////////////////////////////////////////////////////////////////////
   1187 
   1188 void GrContext::flush(int flagsBitfield) {
   1189     if (NULL == fDrawBuffer) {
   1190         return;
   1191     }
   1192 
   1193     if (kDiscard_FlushBit & flagsBitfield) {
   1194         fDrawBuffer->reset();
   1195     } else {
   1196         fDrawBuffer->flush();
   1197     }
   1198     fFlushToReduceCacheSize = false;
   1199 }
   1200 
   1201 bool GrContext::writeTexturePixels(GrTexture* texture,
   1202                                    int left, int top, int width, int height,
   1203                                    GrPixelConfig config, const void* buffer, size_t rowBytes,
   1204                                    uint32_t flags) {
   1205     SK_TRACE_EVENT0("GrContext::writeTexturePixels");
   1206     ASSERT_OWNED_RESOURCE(texture);
   1207 
   1208     if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
   1209         if (NULL != texture->asRenderTarget()) {
   1210             return this->writeRenderTargetPixels(texture->asRenderTarget(),
   1211                                                  left, top, width, height,
   1212                                                  config, buffer, rowBytes, flags);
   1213         } else {
   1214             return false;
   1215         }
   1216     }
   1217 
   1218     if (!(kDontFlush_PixelOpsFlag & flags)) {
   1219         this->flush();
   1220     }
   1221 
   1222     return fGpu->writeTexturePixels(texture, left, top, width, height,
   1223                                     config, buffer, rowBytes);
   1224 }
   1225 
   1226 bool GrContext::readTexturePixels(GrTexture* texture,
   1227                                   int left, int top, int width, int height,
   1228                                   GrPixelConfig config, void* buffer, size_t rowBytes,
   1229                                   uint32_t flags) {
   1230     SK_TRACE_EVENT0("GrContext::readTexturePixels");
   1231     ASSERT_OWNED_RESOURCE(texture);
   1232 
   1233     GrRenderTarget* target = texture->asRenderTarget();
   1234     if (NULL != target) {
   1235         return this->readRenderTargetPixels(target,
   1236                                             left, top, width, height,
   1237                                             config, buffer, rowBytes,
   1238                                             flags);
   1239     } else {
   1240         // TODO: make this more efficient for cases where we're reading the entire
   1241         //       texture, i.e., use GetTexImage() instead
   1242 
   1243         // create scratch rendertarget and read from that
   1244         GrAutoScratchTexture ast;
   1245         GrTextureDesc desc;
   1246         desc.fFlags = kRenderTarget_GrTextureFlagBit;
   1247         desc.fWidth = width;
   1248         desc.fHeight = height;
   1249         desc.fConfig = config;
   1250         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
   1251         ast.set(this, desc, kExact_ScratchTexMatch);
   1252         GrTexture* dst = ast.texture();
   1253         if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
   1254             this->copyTexture(texture, target, NULL);
   1255             return this->readRenderTargetPixels(target,
   1256                                                 left, top, width, height,
   1257                                                 config, buffer, rowBytes,
   1258                                                 flags);
   1259         }
   1260 
   1261         return false;
   1262     }
   1263 }
   1264 
   1265 #include "SkConfig8888.h"
   1266 
   1267 namespace {
   1268 /**
   1269  * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
   1270  * formats are representable as Config8888 and so the function returns false
   1271  * if the GrPixelConfig has no equivalent Config8888.
   1272  */
   1273 bool grconfig_to_config8888(GrPixelConfig config,
   1274                             bool unpremul,
   1275                             SkCanvas::Config8888* config8888) {
   1276     switch (config) {
   1277         case kRGBA_8888_GrPixelConfig:
   1278             if (unpremul) {
   1279                 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
   1280             } else {
   1281                 *config8888 = SkCanvas::kRGBA_Premul_Config8888;
   1282             }
   1283             return true;
   1284         case kBGRA_8888_GrPixelConfig:
   1285             if (unpremul) {
   1286                 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
   1287             } else {
   1288                 *config8888 = SkCanvas::kBGRA_Premul_Config8888;
   1289             }
   1290             return true;
   1291         default:
   1292             return false;
   1293     }
   1294 }
   1295 
    1296 // Returns a config where the byte positions of the R & B components are swapped relative to
    1297 // the input config. This should only be called with the result of
   1298 // grconfig_to_config8888 as it will fail for other configs.
   1299 SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
   1300     switch (config8888) {
   1301         case SkCanvas::kBGRA_Premul_Config8888:
   1302             return SkCanvas::kRGBA_Premul_Config8888;
   1303         case SkCanvas::kBGRA_Unpremul_Config8888:
   1304             return SkCanvas::kRGBA_Unpremul_Config8888;
   1305         case SkCanvas::kRGBA_Premul_Config8888:
   1306             return SkCanvas::kBGRA_Premul_Config8888;
   1307         case SkCanvas::kRGBA_Unpremul_Config8888:
   1308             return SkCanvas::kBGRA_Unpremul_Config8888;
   1309         default:
   1310             GrCrash("Unexpected input");
    1311             return SkCanvas::kBGRA_Unpremul_Config8888;
   1312     }
   1313 }
   1314 }
   1315 
   1316 bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
   1317                                        int left, int top, int width, int height,
   1318                                        GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
   1319                                        uint32_t flags) {
   1320     SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
   1321     ASSERT_OWNED_RESOURCE(target);
   1322 
   1323     if (NULL == target) {
   1324         target = fRenderTarget.get();
   1325         if (NULL == target) {
   1326             return false;
   1327         }
   1328     }
   1329 
   1330     if (!(kDontFlush_PixelOpsFlag & flags)) {
   1331         this->flush();
   1332     }
   1333 
    1334     // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.
   1335 
   1336     // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
   1337     // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
   1338     bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
   1339                                                  width, height, dstConfig,
   1340                                                  rowBytes);
   1341     // We ignore the preferred config if it is different than our config unless it is an R/B swap.
   1342     // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
   1343     // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
   1344     // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
   1345     // dstConfig.
   1346     GrPixelConfig readConfig = dstConfig;
   1347     bool swapRAndB = false;
   1348     if (GrPixelConfigSwapRAndB(dstConfig) ==
   1349         fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
   1350         readConfig = GrPixelConfigSwapRAndB(readConfig);
   1351         swapRAndB = true;
   1352     }
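             // For example: if dstConfig is kBGRA_8888_GrPixelConfig but the GPU prefers
             // kRGBA_8888_GrPixelConfig for reads from this target, readConfig becomes RGBA and
             // swapRAndB is set; either the scratch draw below or the CPU pass at the end swaps R
             // and B back so the bytes written to 'buffer' still match dstConfig.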
   1353 
   1354     bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
   1355 
   1356     if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
    1357         // The unpremul flag is only allowed for the two 8888 configs (RGBA/BGRA).
   1358         return false;
   1359     }
   1360 
   1361     // If the src is a texture and we would have to do conversions after read pixels, we instead
   1362     // do the conversions by drawing the src to a scratch texture. If we handle any of the
   1363     // conversions in the draw we set the corresponding bool to false so that we don't reapply it
   1364     // on the read back pixels.
   1365     GrTexture* src = target->asTexture();
   1366     GrAutoScratchTexture ast;
   1367     if (NULL != src && (swapRAndB || unpremul || flipY)) {
    1368         // Make the scratch a render target because we don't yet have a robust
    1369         // readTexturePixels; it simply calls back into this function.
   1370         GrTextureDesc desc;
   1371         desc.fFlags = kRenderTarget_GrTextureFlagBit;
   1372         desc.fWidth = width;
   1373         desc.fHeight = height;
   1374         desc.fConfig = readConfig;
   1375         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
   1376 
   1377         // When a full read back is faster than a partial we could always make the scratch exactly
   1378         // match the passed rect. However, if we see many different size rectangles we will trash
   1379         // our texture cache and pay the cost of creating and destroying many textures. So, we only
   1380         // request an exact match when the caller is reading an entire RT.
   1381         ScratchTexMatch match = kApprox_ScratchTexMatch;
   1382         if (0 == left &&
   1383             0 == top &&
   1384             target->width() == width &&
   1385             target->height() == height &&
   1386             fGpu->fullReadPixelsIsFasterThanPartial()) {
   1387             match = kExact_ScratchTexMatch;
   1388         }
   1389         ast.set(this, desc, match);
   1390         GrTexture* texture = ast.texture();
   1391         if (texture) {
   1392             // compute a matrix to perform the draw
   1393             SkMatrix textureMatrix;
    1394             textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
   1395             textureMatrix.postIDiv(src->width(), src->height());
   1396 
   1397             SkAutoTUnref<const GrEffectRef> effect;
   1398             if (unpremul) {
   1399                 effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
   1400                 if (NULL != effect) {
   1401                     unpremul = false; // we no longer need to do this on CPU after the read back.
   1402                 }
   1403             }
   1404             // If we failed to create a PM->UPM effect and have no other conversions to perform then
   1405             // there is no longer any point to using the scratch.
   1406             if (NULL != effect || flipY || swapRAndB) {
   1407                 if (!effect) {
   1408                     effect.reset(GrConfigConversionEffect::Create(
   1409                                                     src,
   1410                                                     swapRAndB,
   1411                                                     GrConfigConversionEffect::kNone_PMConversion,
   1412                                                     textureMatrix));
   1413                 }
   1414                 swapRAndB = false; // we will handle the swap in the draw.
   1415 
   1416                 // We protect the existing geometry here since it may not be
   1417                 // clear to the caller that a draw operation (i.e., drawSimpleRect)
   1418                 // can be invoked in this method
   1419                 GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
   1420                 GrDrawState* drawState = fGpu->drawState();
   1421                 SkASSERT(effect);
   1422                 drawState->addColorEffect(effect);
   1423 
   1424                 drawState->setRenderTarget(texture->asRenderTarget());
   1425                 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
   1426                 fGpu->drawSimpleRect(rect, NULL);
   1427                 // we want to read back from the scratch's origin
   1428                 left = 0;
   1429                 top = 0;
   1430                 target = texture->asRenderTarget();
   1431             }
   1432         }
   1433     }
   1434     if (!fGpu->readPixels(target,
   1435                           left, top, width, height,
   1436                           readConfig, buffer, rowBytes)) {
   1437         return false;
   1438     }
   1439     // Perform any conversions we weren't able to perform using a scratch texture.
   1440     if (unpremul || swapRAndB) {
   1441         // These are initialized to suppress a warning
   1442         SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
   1443         SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;
   1444 
   1445         SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
   1446         grconfig_to_config8888(dstConfig, unpremul, &dstC8888);
   1447 
   1448         if (swapRAndB) {
   1449             SkASSERT(c8888IsValid); // we should only do r/b swap on 8888 configs
   1450             srcC8888 = swap_config8888_red_and_blue(srcC8888);
   1451         }
   1452         SkASSERT(c8888IsValid);
   1453         uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
   1454         SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
   1455                                   b32, rowBytes, srcC8888,
   1456                                   width, height);
   1457     }
   1458     return true;
   1459 }
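         // A hedged usage sketch (hypothetical caller; 'context' and 'rt' are illustrative names,
         // not from this file): read a full 8888 render target back into a tightly packed buffer.
         //     SkAutoTMalloc<uint32_t> pixels(rt->width() * rt->height());
         //     bool ok = context->readRenderTargetPixels(rt, 0, 0, rt->width(), rt->height(),
         //                                               kRGBA_8888_GrPixelConfig, pixels.get(),
         //                                               4 * rt->width(), 0);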
   1460 
   1461 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
   1462     SkASSERT(target);
   1463     ASSERT_OWNED_RESOURCE(target);
   1464     // In the future we may track whether there are any pending draws to this
   1465     // target. We don't today so we always perform a flush. We don't promise
   1466     // this to our clients, though.
   1467     this->flush();
   1468     fGpu->resolveRenderTarget(target);
   1469 }
   1470 
   1471 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
   1472     if (NULL == src || NULL == dst) {
   1473         return;
   1474     }
   1475     ASSERT_OWNED_RESOURCE(src);
   1476 
   1477     // Writes pending to the source texture are not tracked, so a flush
   1478     // is required to ensure that the copy captures the most recent contents
   1479     // of the source texture. See similar behavior in
   1480     // GrContext::resolveRenderTarget.
   1481     this->flush();
   1482 
   1483     GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
   1484     GrDrawState* drawState = fGpu->drawState();
   1485     drawState->setRenderTarget(dst);
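             // The matrix below maps dst device coordinates into the src texture's normalized [0,1]
             // coordinate space, offset (further down) by srcRect's origin so that the draw samples
             // the requested sub-rect of src.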
   1486     SkMatrix sampleM;
   1487     sampleM.setIDiv(src->width(), src->height());
   1488     SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
   1489     if (NULL != topLeft) {
   1490         srcRect.offset(*topLeft);
   1491     }
   1492     SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
   1493     if (!srcRect.intersect(srcBounds)) {
   1494         return;
   1495     }
   1496     sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
   1497     drawState->addColorTextureEffect(src, sampleM);
   1498     SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
   1499     fGpu->drawSimpleRect(dstR, NULL);
   1500 }
   1501 
   1502 bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
   1503                                         int left, int top, int width, int height,
   1504                                         GrPixelConfig srcConfig,
   1505                                         const void* buffer,
   1506                                         size_t rowBytes,
   1507                                         uint32_t flags) {
   1508     SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
   1509     ASSERT_OWNED_RESOURCE(target);
   1510 
   1511     if (NULL == target) {
   1512         target = fRenderTarget.get();
   1513         if (NULL == target) {
   1514             return false;
   1515         }
   1516     }
   1517 
   1518     // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
   1519     // desktop GL).
   1520 
   1521     // We will always call some form of writeTexturePixels and we will pass our flags on to it.
    1522     // Thus, we don't perform a flush here since that call will do it (if the
    1523     // kDontFlush_PixelOpsFlag isn't set.)
   1524 
   1525     // If the RT is also a texture and we don't have to premultiply then take the texture path.
   1526     // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
   1527     // we do below.
   1528 
   1529 #if !defined(SK_BUILD_FOR_MAC)
   1530     // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
   1531     // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
   1532     // HW is affected.
   1533     if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
   1534         fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
   1535         return this->writeTexturePixels(target->asTexture(),
   1536                                         left, top, width, height,
   1537                                         srcConfig, buffer, rowBytes, flags);
   1538     }
   1539 #endif
   1540 
    1541     // We ignore the preferred config unless it is an R/B swap of the src config. In that case
   1542     // we will upload the original src data to a scratch texture but we will spoof it as the swapped
   1543     // config. This scratch will then have R and B swapped. We correct for this by swapping again
   1544     // when drawing the scratch to the dst using a conversion effect.
   1545     bool swapRAndB = false;
   1546     GrPixelConfig writeConfig = srcConfig;
   1547     if (GrPixelConfigSwapRAndB(srcConfig) ==
   1548         fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
   1549         writeConfig = GrPixelConfigSwapRAndB(srcConfig);
   1550         swapRAndB = true;
   1551     }
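             // For example: if srcConfig is kRGBA_8888_GrPixelConfig but the GPU prefers
             // kBGRA_8888_GrPixelConfig for writes, the raw RGBA bytes are uploaded to a scratch
             // texture described as BGRA (so its channels are effectively swapped), and the final
             // draw uses a swapping GrConfigConversionEffect to restore the correct channel order.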
   1552 
   1553     GrTextureDesc desc;
   1554     desc.fWidth = width;
   1555     desc.fHeight = height;
   1556     desc.fConfig = writeConfig;
   1557     GrAutoScratchTexture ast(this, desc);
   1558     GrTexture* texture = ast.texture();
   1559     if (NULL == texture) {
   1560         return false;
   1561     }
   1562 
   1563     SkAutoTUnref<const GrEffectRef> effect;
   1564     SkMatrix textureMatrix;
   1565     textureMatrix.setIDiv(texture->width(), texture->height());
   1566 
    1567     // Temporary buffer, used only if we have to convert the pixels to premul in software.
   1568     SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
   1569 
   1570     if (kUnpremul_PixelOpsFlag & flags) {
   1571         if (!GrPixelConfigIs8888(srcConfig)) {
   1572             return false;
   1573         }
   1574         effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
   1575         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
   1576         if (NULL == effect) {
   1577             SkCanvas::Config8888 srcConfig8888, dstConfig8888;
   1578             SkDEBUGCODE(bool success = )
   1579             grconfig_to_config8888(srcConfig, true, &srcConfig8888);
   1580             SkASSERT(success);
   1581             SkDEBUGCODE(success = )
   1582             grconfig_to_config8888(srcConfig, false, &dstConfig8888);
   1583             SkASSERT(success);
   1584             const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
   1585             tmpPixels.reset(width * height);
   1586             SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
   1587                                       src, rowBytes, srcConfig8888,
   1588                                       width, height);
   1589             buffer = tmpPixels.get();
   1590             rowBytes = 4 * width;
   1591         }
   1592     }
   1593     if (NULL == effect) {
   1594         effect.reset(GrConfigConversionEffect::Create(texture,
   1595                                                       swapRAndB,
   1596                                                       GrConfigConversionEffect::kNone_PMConversion,
   1597                                                       textureMatrix));
   1598     }
   1599 
   1600     if (!this->writeTexturePixels(texture,
   1601                                   0, 0, width, height,
   1602                                   writeConfig, buffer, rowBytes,
   1603                                   flags & ~kUnpremul_PixelOpsFlag)) {
   1604         return false;
   1605     }
   1606 
   1607     // writeRenderTargetPixels can be called in the midst of drawing another
   1608     // object (e.g., when uploading a SW path rendering to the gpu while
   1609     // drawing a rect) so preserve the current geometry.
   1610     SkMatrix matrix;
   1611     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
   1612     GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
   1613     GrDrawState* drawState = fGpu->drawState();
   1614     SkASSERT(effect);
   1615     drawState->addColorEffect(effect);
   1616 
   1617     drawState->setRenderTarget(target);
   1618 
   1619     fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
   1620     return true;
   1621 }
   1622 ////////////////////////////////////////////////////////////////////////////////
   1623 
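         // Configures fDrawState (from the paint when one is provided, otherwise reset to defaults),
         // picks the draw target (the deferred fDrawBuffer when 'buffered' is kYes_BufferedDraw,
         // otherwise fGpu directly, after flushing any previously buffered work), and applies the
         // current clip to that target.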
   1624 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
   1625                                        BufferedDraw buffered,
   1626                                        AutoRestoreEffects* are,
   1627                                        AutoCheckFlush* acf) {
   1628     // All users of this draw state should be freeing up all effects when they're done.
   1629     // Otherwise effects that own resources may keep those resources alive indefinitely.
   1630     SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
   1631 
   1632     if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
   1633         fDrawBuffer->flush();
   1634         fLastDrawWasBuffered = kNo_BufferedDraw;
   1635     }
   1636     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
   1637     if (NULL != paint) {
   1638         SkASSERT(NULL != are);
   1639         SkASSERT(NULL != acf);
   1640         are->set(fDrawState);
   1641         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
   1642 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
   1643         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
   1644             !fGpu->canApplyCoverage()) {
   1645             GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
   1646         }
   1647 #endif
   1648     } else {
   1649         fDrawState->reset(fViewMatrix);
   1650         fDrawState->setRenderTarget(fRenderTarget.get());
   1651     }
   1652     GrDrawTarget* target;
   1653     if (kYes_BufferedDraw == buffered) {
   1654         fLastDrawWasBuffered = kYes_BufferedDraw;
   1655         target = fDrawBuffer;
   1656     } else {
   1657         SkASSERT(kNo_BufferedDraw == buffered);
   1658         fLastDrawWasBuffered = kNo_BufferedDraw;
   1659         target = fGpu;
   1660     }
   1661     fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
   1662                                                      !fClip->fClipStack->isWideOpen());
   1663     target->setClip(fClip);
   1664     SkASSERT(fDrawState == target->drawState());
   1665     return target;
   1666 }
   1667 
   1668 /*
   1669  * This method finds a path renderer that can draw the specified path on
   1670  * the provided target.
    1671  * Due to its expense, the software path renderer has been split out so that it
    1672  * can be individually allowed/disallowed via the "allowSW" boolean.
   1673  */
   1674 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
   1675                                            const SkStrokeRec& stroke,
   1676                                            const GrDrawTarget* target,
   1677                                            bool allowSW,
   1678                                            GrPathRendererChain::DrawType drawType,
   1679                                            GrPathRendererChain::StencilSupport* stencilSupport) {
   1680 
   1681     if (NULL == fPathRendererChain) {
   1682         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
   1683     }
   1684 
   1685     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
   1686                                                              stroke,
   1687                                                              target,
   1688                                                              drawType,
   1689                                                              stencilSupport);
   1690 
   1691     if (NULL == pr && allowSW) {
   1692         if (NULL == fSoftwarePathRenderer) {
   1693             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
   1694         }
   1695         pr = fSoftwarePathRenderer;
   1696     }
   1697 
   1698     return pr;
   1699 }
   1700 
   1701 ////////////////////////////////////////////////////////////////////////////////
   1702 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
   1703     return fGpu->caps()->isConfigRenderable(config, withMSAA);
   1704 }
   1705 
   1706 void GrContext::setupDrawBuffer() {
   1707     SkASSERT(NULL == fDrawBuffer);
   1708     SkASSERT(NULL == fDrawBufferVBAllocPool);
   1709     SkASSERT(NULL == fDrawBufferIBAllocPool);
   1710 
   1711     fDrawBufferVBAllocPool =
   1712         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
   1713                                     DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
   1714                                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
   1715     fDrawBufferIBAllocPool =
   1716         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
   1717                                    DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
   1718                                    DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
   1719 
   1720     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
   1721                                                    fDrawBufferVBAllocPool,
   1722                                                    fDrawBufferIBAllocPool));
   1723 
   1724     fDrawBuffer->setDrawState(fDrawState);
   1725 }
   1726 
   1727 GrDrawTarget* GrContext::getTextTarget() {
   1728     return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
   1729 }
   1730 
   1731 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
   1732     return fGpu->getQuadIndexBuffer();
   1733 }
   1734 
   1735 namespace {
   1736 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
   1737     GrConfigConversionEffect::PMConversion pmToUPM;
   1738     GrConfigConversionEffect::PMConversion upmToPM;
   1739     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
   1740     *pmToUPMValue = pmToUPM;
   1741     *upmToPMValue = upmToPM;
   1742 }
   1743 }
   1744 
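         // The two factories below lazily run the one-time-per-context round-trip test above and only
         // return a conversion effect if the GPU was found to preserve premul <-> unpremul exactly;
         // otherwise they return NULL and callers fall back to converting on the CPU.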
   1745 const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
   1746                                                   bool swapRAndB,
   1747                                                   const SkMatrix& matrix) {
   1748     if (!fDidTestPMConversions) {
   1749         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
   1750         fDidTestPMConversions = true;
   1751     }
   1752     GrConfigConversionEffect::PMConversion pmToUPM =
   1753         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
   1754     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
   1755         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
   1756     } else {
   1757         return NULL;
   1758     }
   1759 }
   1760 
   1761 const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
   1762                                                   bool swapRAndB,
   1763                                                   const SkMatrix& matrix) {
   1764     if (!fDidTestPMConversions) {
   1765         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
   1766         fDidTestPMConversions = true;
   1767     }
   1768     GrConfigConversionEffect::PMConversion upmToPM =
   1769         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
   1770     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
   1771         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
   1772     } else {
   1773         return NULL;
   1774     }
   1775 }
   1776 
   1777 GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) {
   1778     SkASSERT(fGpu->caps()->pathRenderingSupport());
   1779 
   1780     // TODO: now we add to fTextureCache. This should change to fResourceCache.
   1781     GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke);
   1782     GrPath* path = static_cast<GrPath*>(fTextureCache->find(resourceKey));
   1783     if (NULL != path && path->isEqualTo(inPath, stroke)) {
   1784         path->ref();
   1785     } else {
   1786         path = fGpu->createPath(inPath, stroke);
   1787         fTextureCache->purgeAsNeeded(1, path->sizeInBytes());
   1788         fTextureCache->addResource(resourceKey, path);
   1789     }
   1790     return path;
   1791 }
   1792 
   1793 ///////////////////////////////////////////////////////////////////////////////
   1794 #if GR_CACHE_STATS
   1795 void GrContext::printCacheStats() const {
   1796     fTextureCache->printStats();
   1797 }
   1798 #endif
   1799