
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrContext.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrStrokeInfo.h"
#include "GrTextStrike.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
#include "SkGr.h"
#include "SkRTConf.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

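// RAII helper used by the draw entry points below: when it goes out of scope it flushes the
// context if the resource cache has requested a flush (see OverbudgetCB) to reduce its size.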
class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

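// A minimal sketch of client-side creation (hypothetical usage, not taken from this file; the
// backend handle comes from the platform's GL bindings and is cast to GrBackendContext):
//
//     GrContext* ctx = GrContext::Create(kOpenGL_GrBackend, (GrBackendContext) glInterface);
//     if (NULL != ctx) {
//         // ... record draws, then ctx->flush() and ctx->unref() when finished ...
//     }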
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContext* context = SkNEW(GrContext);
    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

GrContext::GrContext() {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fResourceCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
    fGpuTracingEnabled = false;
}

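// Creates the backend-specific GrGpu, then sets up the draw state, the resource/font/layer
// caches, the AA rect and oval renderers, and the deferred draw buffer. Returns false if the
// GrGpu could not be created.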
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fResourceCache = SkNEW_ARGS(GrResourceCache, (MAX_RESOURCE_CACHE_COUNT,
                                                  MAX_RESOURCE_CACHE_BYTES));
    fResourceCache->setOverbudgetCallback(OverbudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (fGpu)));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    return true;
}

GrContext::~GrContext() {
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fResourceCache;
    fResourceCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}

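// Called when the underlying 3D API context has been lost: abandons API-side resources and
// then rebuilds the draw buffer so this GrContext can keep being used.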
void GrContext::contextLost() {
    this->contextDestroyed();
    this->setupDrawBuffer();
}

void GrContext::contextDestroyed() {
    // abandon first so that the destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();

    fFontCache->freeAll();
    fLayerCache->freeAll();
    fGpu->markContextDirty();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    if (NULL != resourceCount) {
        *resourceCount = fResourceCache->getCachedResourceCount();
    }
    if (NULL != resourceBytes) {
        *resourceBytes = fResourceCache->getCachedResourceBytes();
    }
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
    GrCacheable* resource = fResourceCache->find(resourceKey);
    SkSafeRef(resource);
    return static_cast<GrTexture*>(resource);
}

bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
    return fResourceCache->hasKey(resourceKey);
}

void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fResourceCache->addResource(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrCacheable* resource = fResourceCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

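// Nearest-neighbor stretch of a srcW x srcH image into a dstW x dstH buffer using 16.16
// fixed-point stepping; bpp is the size of one pixel in bytes. No filtering is performed.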
static void stretch_image(void* dst,
                          int dstW,
                          int dstH,
                          const void* src,
                          int srcW,
                          int srcH,
                          size_t bpp) {
    SkFixed dx = (srcW << 16) / dstW;
    SkFixed dy = (srcH << 16) / dstH;

    SkFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        SkFixed x = dx >> 1;
        const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
        uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
};

}  // namespace

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT.
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
                                           const GrCacheID& cacheID,
                                           const void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrTextureDesc rtDesc = desc;
    rtDesc.fFlags =  rtDesc.fFlags |
                     kRenderTarget_GrTextureFlagBit |
                     kNoStencil_GrTextureFlagBit;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (NULL != texture) {
        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // if filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
                                                                   GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));

        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);

        if (arg.succeeded()) {
            SkPoint* verts = (SkPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures whether the texture params are filtered or
        // not. Either implement filtered stretch blit on CPU or just create
        // one when the FBO case fails.

        rtDesc.fFlags = kNone_GrTextureFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);

        // We shouldn't be resizing a compressed texture.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        size_t bpp = GrBytesPerPixel(desc.fConfig);
        SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
        SkASSERT(NULL != texture);
    }

    return texture;
}

GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrTextureDesc& desc,
                                    const GrCacheID& cacheID,
                                    const void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTextureImpl::NeedsResizing(resourceKey)) {
        // We do not know how to resize compressed textures.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTextureImpl::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (NULL != texture) {
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        fResourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
        fResourceCache->addResource(resourceKey, texture);

        if (NULL != cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}

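// Creates a brand-new scratch texture, purging the cache as needed to stay on budget, and
// adds it to the cache as an exclusive (hidden) entry so concurrent 'find' calls won't
// return it. The caller receives the creation ref.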
static GrTexture* create_scratch_texture(GrGpu* gpu,
                                         GrResourceCache* resourceCache,
                                         const GrTextureDesc& desc) {
    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    if (NULL != texture) {
        GrResourceKey key = GrTextureImpl::ComputeScratchKey(texture->desc());
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        resourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
        // Make the resource exclusive so future 'find' calls don't return it
        resourceCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    }
    return texture;
}

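// Finds or creates a scratch texture satisfying inDesc. With kApprox_ScratchTexMatch the
// dimensions are rounded up to powers of two (with a minimum of 16) and, on a miss, the
// kNoStencil flag is relaxed before giving up. The returned texture is held exclusively
// until unlockScratchTexture() hands it back.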
GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures() &&
        !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
        // If we're never recycling this texture we can always make it the right size
        return create_scratch_texture(fGpu, fResourceCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrCacheable* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    do {
        GrResourceKey key = GrTextureImpl::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fResourceCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (NULL != resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode, so relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fResourceCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}

void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    SkASSERT(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref. Assert refcnt == 1.
    SkASSERT(texture->unique());

    if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
        // Since this texture came from an AutoScratchTexture it should
        // still be in the exclusive pile. Recycle it.
        fResourceCache->makeNonExclusive(texture->getCacheEntry());
        this->purgeCache();
    } else if (texture->getDeferredRefCount() <= 0) {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would be just wasting time in the cache
        fResourceCache->makeNonExclusive(texture->getCacheEntry());
        fResourceCache->deleteResource(texture->getCacheEntry());
    } else {
        // In this case fDeferredRefCount > 0 but the cache is the only
        // one holding a real ref. Mark the object so that when the deferred
        // ref count goes to 0 the texture will be deleted (remember that
        // in this code path scratch textures aren't getting reused).
        texture->setNeedsDeferredUnref();
    }
}

void GrContext::unlockScratchTexture(GrTexture* texture) {
    ASSERT_OWNED_RESOURCE(texture);
    SkASSERT(NULL != texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (texture->getCacheEntry()->key().isScratch()) {
        if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
            fResourceCache->makeNonExclusive(texture->getCacheEntry());
            this->purgeCache();
        } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
            // Only the cache now knows about this texture. Since we're never
            // reusing scratch textures (in this code path) it would just be
            // wasting time sitting in the cache.
            fResourceCache->makeNonExclusive(texture->getCacheEntry());
            fResourceCache->deleteResource(texture->getCacheEntry());
        } else {
            // In this case (fRefCnt > 1 || defRefCnt > 0) we don't really
            // want to re-add it to the cache (since it will never be reused).
            // Instead, give up the cache's ref and leave the decision up to
            // addExistingTextureToCache once its ref count reaches 0. For
            // this to work we need to leave it in the exclusive list.
            texture->impl()->setFlag((GrTextureFlags) GrTextureImpl::kReturnToCache_FlagBit);
            // Give up the cache's ref to the texture
            texture->unref();
        }
    }
}

void GrContext::purgeCache() {
    if (NULL != fResourceCache) {
        fResourceCache->purgeAsNeeded();
    }
}

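// Overbudget callback installed on the resource cache: rather than purging here, request
// that the next AutoCheckFlush flush the draw buffer, which may let queued-up textures go.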
bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(NULL != data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}

GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    fResourceCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

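// Index8 textures are usable only when the config is texturable at all and, for NPOT
// dimensions that will be tiled, only when the hardware supports NPOT texture tiling.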
bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTargetCaps* caps = fGpu->caps();
    if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
        return false;
    }

    bool isPow2 = SkIsPow2(width) && SkIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != params && params->isTiled();
        if (tiled && !caps->npotTextureTileSupport()) {
            return false;
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* target) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color,
                                                                canIgnoreRect, target);
}

void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}

#ifdef SK_DEVELOPER
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif

////////////////////////////////////////////////////////////////////////////////

/*  create a triangle strip that strokes the specified rect. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an index array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

static bool isIRect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

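// Decides whether an axis-aligned (or right-angle-preserving) rect can be drawn with the
// coverage-ramp AA technique. On success the device-space bounds are stored in devBoundRect;
// *useVertexCoverage is set when the alpha can't simply be tweaked for coverage.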
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check that the rect will be axis-aligned, and that it won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    if (0 == strokeWidth && target->willUseHWAALines()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);

    if (strokeWidth < 0) {
        return !isIRect(*devBoundRect);
    } else {
        return true;
    }
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

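// drawRect: dashed strokes are routed through drawPath, a full-render-target opaque fill is
// turned into a clear, AA rects go to the GrAARectRenderer, and everything else is drawn as
// a triangle strip, line strip, or simple rect.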
void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         const GrStrokeInfo* strokeInfo,
                         const SkMatrix* matrix) {
    if (NULL != strokeInfo && strokeInfo->isDashed()) {
        SkPath path;
        path.addRect(rect);
        this->drawPath(paint, path, *strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);

    SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
    SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix.preConcat(*matrix);
    }

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (NULL != this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!combinedMatrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool useVertexCoverage;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
                                           &useVertexCoverage);

    const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();

    if (doAA) {
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
                                          combinedMatrix, devBoundRect,
                                          strokeRec, useVertexCoverage);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, combinedMatrix, devBoundRect,
                                        useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding a closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        SkPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect, matrix);
    }
}

void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* dstMatrix,
                               const SkMatrix* localMatrix) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);

    target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
}

namespace {

extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
};

extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
};

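// Picks a vertex layout based on which optional per-vertex arrays were supplied and returns
// the byte offsets of the local-coord and color attributes (-1 when an attribute is absent).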
static void set_vertex_attributes(GrDrawState* drawState,
                                  const SkPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (NULL != texCoords && NULL != colors) {
        *texOffset = sizeof(SkPoint);
        *colorOffset = 2*sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    } else if (NULL != texCoords) {
        *texOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    } else if (NULL != colors) {
        *colorOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1);
    }
}

}  // namespace

void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const SkPoint positions[],
                             const SkPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t vertexSize = drawState->getVertexSize();
    if (sizeof(SkPoint) != vertexSize) {
        if (!geo.set(target, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((SkPoint*)curVertex) = positions[i];

            if (texOffset >= 0) {
                *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
            }
            if (colorOffset >= 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        target->setVertexSourceToArray(positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
        target->resetIndexSource();
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rrect,
                          const GrStrokeInfo& strokeInfo) {
    if (rrect.isEmpty()) {
       return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addRRect(rrect);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) {
        SkPath path;
        path.addRRect(rrect);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawDRRect(const GrPaint& paint,
                           const SkRRect& outer,
                           const SkRRect& inner) {
    if (outer.isEmpty()) {
       return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);

    if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
        SkPath path;
        path.addRRect(inner);
        path.addRRect(outer);
        path.setFillType(SkPath::kEvenOdd_FillType);

        GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
        this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const GrStrokeInfo& strokeInfo) {
    if (oval.isEmpty()) {
       return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addOval(oval);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2],
                            bool* useVertexCoverage) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (target->shouldDisableCoverageAAForBlend()) {
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (!SkScalarNearlyEqual(margin, temp)) {
            return false;
        }
    }

    return true;
}

void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {

    if (path.isEmpty()) {
       if (path.isInverseFillType()) {
           this->drawPaint(paint);
       }
       return;
    }

    if (strokeInfo.isDashed()) {
        SkPoint pts[2];
        if (path.isLine(pts)) {
            AutoRestoreEffects are;
            AutoCheckFlush acf(this);
            GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
            GrDrawState* drawState = target->drawState();

            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (avmr.setIdentity(target->drawState())) {
                if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target,
                                                  origViewMatrix)) {
                    return;
                }
            }
        }

        // Filter dashed path into new path with the dashing applied
        const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
        SkTLazy<SkPath> effectPath;
        GrStrokeInfo newStrokeInfo(strokeInfo, false);
        SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
        if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
            this->drawPath(paint, *effectPath.get(), newStrokeInfo);
            return;
        }

        this->drawPath(paint, path, newStrokeInfo);
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER("GrContext::drawPath", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();

    if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        bool useVertexCoverage;
        SkRect rects[2];

        if (is_nested_rects(target, path, strokeRec, rects, &useVertexCoverage)) {
            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target,
                                               rects,
                                               origViewMatrix,
                                               useVertexCoverage);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) {
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

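// Draws a path that had no dedicated fast path: picks a GrPathRenderer, retrying with the
// stroke applied to the geometry and finally allowing the software renderer, then hands the
// path off to it.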
void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const GrStrokeInfo& strokeInfo) {
    SkASSERT(!path.isEmpty());

    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);

    // An assumption here is that the path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !target->getDrawState().getRenderTarget()->isMultisampled() &&
        !target->shouldDisableCoverageAAForBlend();

    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                           GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());

    // Try a first time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
            // It didn't work the first time, so try again with the stroked path
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    return;
                }
            }
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

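// Flushes (or, with kDiscard_FlushBit, discards) any work queued in the deferred draw buffer
// and clears the pending cache-reduction flush request.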
   1303 void GrContext::flush(int flagsBitfield) {
   1304     if (NULL == fDrawBuffer) {
   1305         return;
   1306     }
   1307 
   1308     if (kDiscard_FlushBit & flagsBitfield) {
   1309         fDrawBuffer->reset();
   1310     } else {
   1311         fDrawBuffer->flush();
   1312     }
   1313     fFlushToReduceCacheSize = false;
   1314 }
   1315 
   1316 bool GrContext::writeTexturePixels(GrTexture* texture,
   1317                                    int left, int top, int width, int height,
   1318                                    GrPixelConfig config, const void* buffer, size_t rowBytes,
   1319                                    uint32_t flags) {
   1320     ASSERT_OWNED_RESOURCE(texture);
   1321 
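             // Unpremultiplied data, or configs the GPU can't upload directly, are routed through
             // writeRenderTargetPixels, which can apply the conversion with a draw (provided the
             // texture is also a render target).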
   1322     if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
   1323         if (NULL != texture->asRenderTarget()) {
   1324             return this->writeRenderTargetPixels(texture->asRenderTarget(),
   1325                                                  left, top, width, height,
   1326                                                  config, buffer, rowBytes, flags);
   1327         } else {
   1328             return false;
   1329         }
   1330     }
   1331 
   1332     if (!(kDontFlush_PixelOpsFlag & flags)) {
   1333         this->flush();
   1334     }
   1335 
   1336     return fGpu->writeTexturePixels(texture, left, top, width, height,
   1337                                     config, buffer, rowBytes);
   1338 }
   1339 
   1340 bool GrContext::readTexturePixels(GrTexture* texture,
   1341                                   int left, int top, int width, int height,
   1342                                   GrPixelConfig config, void* buffer, size_t rowBytes,
   1343                                   uint32_t flags) {
   1344     ASSERT_OWNED_RESOURCE(texture);
   1345 
   1346     GrRenderTarget* target = texture->asRenderTarget();
   1347     if (NULL != target) {
   1348         return this->readRenderTargetPixels(target,
   1349                                             left, top, width, height,
   1350                                             config, buffer, rowBytes,
   1351                                             flags);
   1352     } else {
   1353         // TODO: make this more efficient for cases where we're reading the entire
   1354         //       texture, i.e., use GetTexImage() instead
   1355 
   1356         // create scratch rendertarget and read from that
   1357         GrAutoScratchTexture ast;
   1358         GrTextureDesc desc;
   1359         desc.fFlags = kRenderTarget_GrTextureFlagBit;
   1360         desc.fWidth = width;
   1361         desc.fHeight = height;
   1362         desc.fConfig = config;
   1363         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
   1364         ast.set(this, desc, kExact_ScratchTexMatch);
   1365         GrTexture* dst = ast.texture();
   1366         if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
   1367             this->copyTexture(texture, target, NULL);
   1368             return this->readRenderTargetPixels(target,
   1369                                                 left, top, width, height,
   1370                                                 config, buffer, rowBytes,
   1371                                                 flags);
   1372         }
   1373 
   1374         return false;
   1375     }
   1376 }
   1377 
   1378 #include "SkConfig8888.h"
   1379 
   1380 // toggles between RGBA and BGRA
   1381 static SkColorType toggle_colortype32(SkColorType ct) {
   1382     if (kRGBA_8888_SkColorType == ct) {
   1383         return kBGRA_8888_SkColorType;
   1384     } else {
   1385         SkASSERT(kBGRA_8888_SkColorType == ct);
   1386         return kRGBA_8888_SkColorType;
   1387     }
   1388 }
   1389 
   1390 bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
   1391                                        int left, int top, int width, int height,
   1392                                        GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
   1393                                        uint32_t flags) {
   1394     ASSERT_OWNED_RESOURCE(target);
   1395 
   1396     if (NULL == target) {
   1397         target = fRenderTarget.get();
   1398         if (NULL == target) {
   1399             return false;
   1400         }
   1401     }
   1402 
   1403     if (!(kDontFlush_PixelOpsFlag & flags)) {
   1404         this->flush();
   1405     }
   1406 
    1407     // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.
   1408 
    1409     // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    1410     // either do the flipY by drawing into a scratch with a flipped matrix or on the CPU after the read.
   1411     bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
   1412                                                  width, height, dstConfig,
   1413                                                  rowBytes);
   1414     // We ignore the preferred config if it is different than our config unless it is an R/B swap.
   1415     // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
   1416     // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
   1417     // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
   1418     // dstConfig.
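             // For example, if dstConfig is RGBA but the GPU prefers to read back BGRA, readConfig
             // becomes BGRA; the swap applied when drawing into the scratch (or the CPU swap after
             // readPixels below) restores the byte order the caller asked for.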
   1419     GrPixelConfig readConfig = dstConfig;
   1420     bool swapRAndB = false;
   1421     if (GrPixelConfigSwapRAndB(dstConfig) ==
   1422         fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
   1423         readConfig = GrPixelConfigSwapRAndB(readConfig);
   1424         swapRAndB = true;
   1425     }
   1426 
   1427     bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
   1428 
   1429     if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
   1430         // The unpremul flag is only allowed for these two configs.
   1431         return false;
   1432     }
   1433 
   1434     // If the src is a texture and we would have to do conversions after read pixels, we instead
   1435     // do the conversions by drawing the src to a scratch texture. If we handle any of the
   1436     // conversions in the draw we set the corresponding bool to false so that we don't reapply it
   1437     // on the read back pixels.
   1438     GrTexture* src = target->asTexture();
   1439     GrAutoScratchTexture ast;
   1440     if (NULL != src && (swapRAndB || unpremul || flipY)) {
    1441         // Make the scratch a render target because we don't yet have a robust
    1442         // readTexturePixels; it calls back into this function.
   1443         GrTextureDesc desc;
   1444         desc.fFlags = kRenderTarget_GrTextureFlagBit;
   1445         desc.fWidth = width;
   1446         desc.fHeight = height;
   1447         desc.fConfig = readConfig;
   1448         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
   1449 
    1450         // When a full read back is faster than a partial one we could always make the scratch
    1451         // exactly match the passed rect. However, if we see many differently sized rectangles we
    1452         // will thrash our texture cache and pay the cost of creating and destroying many textures.
    1453         // So we only request an exact match when the caller is reading an entire RT.
   1454         ScratchTexMatch match = kApprox_ScratchTexMatch;
   1455         if (0 == left &&
   1456             0 == top &&
   1457             target->width() == width &&
   1458             target->height() == height &&
   1459             fGpu->fullReadPixelsIsFasterThanPartial()) {
   1460             match = kExact_ScratchTexMatch;
   1461         }
   1462         ast.set(this, desc, match);
   1463         GrTexture* texture = ast.texture();
   1464         if (texture) {
    1465             // Compute a matrix that samples the requested rect from the source texture.
    1466             SkMatrix textureMatrix;
    1467             textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
   1468             textureMatrix.postIDiv(src->width(), src->height());
   1469 
   1470             SkAutoTUnref<const GrEffectRef> effect;
   1471             if (unpremul) {
   1472                 effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
   1473                 if (NULL != effect) {
   1474                     unpremul = false; // we no longer need to do this on CPU after the read back.
   1475                 }
   1476             }
   1477             // If we failed to create a PM->UPM effect and have no other conversions to perform then
   1478             // there is no longer any point to using the scratch.
   1479             if (NULL != effect || flipY || swapRAndB) {
   1480                 if (!effect) {
   1481                     effect.reset(GrConfigConversionEffect::Create(
   1482                                                     src,
   1483                                                     swapRAndB,
   1484                                                     GrConfigConversionEffect::kNone_PMConversion,
   1485                                                     textureMatrix));
   1486                 }
   1487                 swapRAndB = false; // we will handle the swap in the draw.
   1488 
   1489                 // We protect the existing geometry here since it may not be
   1490                 // clear to the caller that a draw operation (i.e., drawSimpleRect)
   1491                 // can be invoked in this method
   1492                 GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
   1493                 GrDrawState* drawState = fGpu->drawState();
   1494                 SkASSERT(effect);
   1495                 drawState->addColorEffect(effect);
   1496 
   1497                 drawState->setRenderTarget(texture->asRenderTarget());
   1498                 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
   1499                 fGpu->drawSimpleRect(rect, NULL);
   1500                 // we want to read back from the scratch's origin
   1501                 left = 0;
   1502                 top = 0;
   1503                 target = texture->asRenderTarget();
   1504             }
   1505         }
   1506     }
   1507     if (!fGpu->readPixels(target,
   1508                           left, top, width, height,
   1509                           readConfig, buffer, rowBytes)) {
   1510         return false;
   1511     }
   1512     // Perform any conversions we weren't able to perform using a scratch texture.
   1513     if (unpremul || swapRAndB) {
   1514         SkDstPixelInfo dstPI;
   1515         if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
   1516             return false;
   1517         }
   1518         dstPI.fAlphaType = kUnpremul_SkAlphaType;
   1519         dstPI.fPixels = buffer;
   1520         dstPI.fRowBytes = rowBytes;
   1521 
   1522         SkSrcPixelInfo srcPI;
   1523         srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
   1524         srcPI.fAlphaType = kPremul_SkAlphaType;
   1525         srcPI.fPixels = buffer;
   1526         srcPI.fRowBytes = rowBytes;
   1527 
   1528         return srcPI.convertPixelsTo(&dstPI, width, height);
   1529     }
   1530     return true;
   1531 }
   1532 
   1533 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
   1534     SkASSERT(target);
   1535     ASSERT_OWNED_RESOURCE(target);
   1536     // In the future we may track whether there are any pending draws to this
   1537     // target. We don't today so we always perform a flush. We don't promise
   1538     // this to our clients, though.
   1539     this->flush();
   1540     fGpu->resolveRenderTarget(target);
   1541 }
   1542 
   1543 void GrContext::discardRenderTarget(GrRenderTarget* target) {
   1544     SkASSERT(target);
   1545     ASSERT_OWNED_RESOURCE(target);
   1546     AutoRestoreEffects are;
   1547     AutoCheckFlush acf(this);
   1548     this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->discard(target);
   1549 }
   1550 
   1551 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
   1552     if (NULL == src || NULL == dst) {
   1553         return;
   1554     }
   1555     ASSERT_OWNED_RESOURCE(src);
   1556 
   1557     // Writes pending to the source texture are not tracked, so a flush
   1558     // is required to ensure that the copy captures the most recent contents
   1559     // of the source texture. See similar behavior in
   1560     // GrContext::resolveRenderTarget.
   1561     this->flush();
   1562 
   1563     GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
   1564     GrDrawState* drawState = fGpu->drawState();
   1565     drawState->setRenderTarget(dst);
   1566     SkMatrix sampleM;
   1567     sampleM.setIDiv(src->width(), src->height());
   1568     SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
   1569     if (NULL != topLeft) {
   1570         srcRect.offset(*topLeft);
   1571     }
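             // Clamp the source rect to the source texture's bounds; the preTranslate below then
             // offsets the normalized sampling matrix to the clamped source origin.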
   1572     SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
   1573     if (!srcRect.intersect(srcBounds)) {
   1574         return;
   1575     }
   1576     sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
   1577     drawState->addColorTextureEffect(src, sampleM);
   1578     SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
   1579     fGpu->drawSimpleRect(dstR, NULL);
   1580 }
   1581 
   1582 bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
   1583                                         int left, int top, int width, int height,
   1584                                         GrPixelConfig srcConfig,
   1585                                         const void* buffer,
   1586                                         size_t rowBytes,
   1587                                         uint32_t flags) {
   1588     ASSERT_OWNED_RESOURCE(target);
   1589 
   1590     if (NULL == target) {
   1591         target = fRenderTarget.get();
   1592         if (NULL == target) {
   1593             return false;
   1594         }
   1595     }
   1596 
    1597     // TODO: when the underlying API has a direct way to do this we should use it (e.g. glDrawPixels
    1598     // on desktop GL).
   1599 
    1600     // We will always call some form of writeTexturePixels and we will pass our flags on to it.
    1601     // Thus, we don't perform a flush here since that call will do it (if the kDontFlush_PixelOpsFlag
    1602     // isn't set).
   1603 
    1604     // If the RT is also a texture and we don't have to premultiply then take the texture path.
    1605     // We expect it to be at least as fast since it doesn't use an intermediate texture as the
    1606     // code below does.
   1607 
   1608 #if !defined(SK_BUILD_FOR_MAC)
    1609     // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
    1610     // attached to an FBO. The FBO still sees the old image. TODO: determine which OS versions
    1611     // and/or HW are affected.
   1612     if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
   1613         fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
   1614         return this->writeTexturePixels(target->asTexture(),
   1615                                         left, top, width, height,
   1616                                         srcConfig, buffer, rowBytes, flags);
   1617     }
   1618 #endif
   1619 
   1620     // We ignore the preferred config unless it is a R/B swap of the src config. In that case
   1621     // we will upload the original src data to a scratch texture but we will spoof it as the swapped
   1622     // config. This scratch will then have R and B swapped. We correct for this by swapping again
   1623     // when drawing the scratch to the dst using a conversion effect.
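             // For example, RGBA src data headed for a BGRA-preferring GPU is uploaded to the
             // scratch spoofed as BGRA; the conversion effect swaps R and B again when the scratch
             // is drawn into the target, so the two swaps cancel out.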
   1624     bool swapRAndB = false;
   1625     GrPixelConfig writeConfig = srcConfig;
   1626     if (GrPixelConfigSwapRAndB(srcConfig) ==
   1627         fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
   1628         writeConfig = GrPixelConfigSwapRAndB(srcConfig);
   1629         swapRAndB = true;
   1630     }
   1631 
   1632     GrTextureDesc desc;
   1633     desc.fWidth = width;
   1634     desc.fHeight = height;
   1635     desc.fConfig = writeConfig;
   1636     GrAutoScratchTexture ast(this, desc);
   1637     GrTexture* texture = ast.texture();
   1638     if (NULL == texture) {
   1639         return false;
   1640     }
   1641 
   1642     SkAutoTUnref<const GrEffectRef> effect;
   1643     SkMatrix textureMatrix;
   1644     textureMatrix.setIDiv(texture->width(), texture->height());
   1645 
    1646     // Temp buffer, used only if we must convert the pixels to premul on the CPU below.
   1647     SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
   1648 
   1649     if (kUnpremul_PixelOpsFlag & flags) {
   1650         if (!GrPixelConfigIs8888(srcConfig)) {
   1651             return false;
   1652         }
   1653         effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
   1654         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
   1655         if (NULL == effect) {
   1656             SkSrcPixelInfo srcPI;
   1657             if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
   1658                 return false;
   1659             }
   1660             srcPI.fAlphaType = kUnpremul_SkAlphaType;
   1661             srcPI.fPixels = buffer;
   1662             srcPI.fRowBytes = rowBytes;
   1663 
   1664             tmpPixels.reset(width * height);
   1665 
   1666             SkDstPixelInfo dstPI;
   1667             dstPI.fColorType = srcPI.fColorType;
   1668             dstPI.fAlphaType = kPremul_SkAlphaType;
   1669             dstPI.fPixels = tmpPixels.get();
   1670             dstPI.fRowBytes = 4 * width;
   1671 
   1672             if (!srcPI.convertPixelsTo(&dstPI, width, height)) {
   1673                 return false;
   1674             }
   1675 
   1676             buffer = tmpPixels.get();
   1677             rowBytes = 4 * width;
   1678         }
   1679     }
   1680     if (NULL == effect) {
   1681         effect.reset(GrConfigConversionEffect::Create(texture,
   1682                                                       swapRAndB,
   1683                                                       GrConfigConversionEffect::kNone_PMConversion,
   1684                                                       textureMatrix));
   1685     }
   1686 
   1687     if (!this->writeTexturePixels(texture,
   1688                                   0, 0, width, height,
   1689                                   writeConfig, buffer, rowBytes,
   1690                                   flags & ~kUnpremul_PixelOpsFlag)) {
   1691         return false;
   1692     }
   1693 
   1694     // writeRenderTargetPixels can be called in the midst of drawing another
   1695     // object (e.g., when uploading a SW path rendering to the gpu while
   1696     // drawing a rect) so preserve the current geometry.
   1697     SkMatrix matrix;
   1698     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
   1699     GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
   1700     GrDrawState* drawState = fGpu->drawState();
   1701     SkASSERT(effect);
   1702     drawState->addColorEffect(effect);
   1703 
   1704     drawState->setRenderTarget(target);
   1705 
   1706     fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
   1707     return true;
   1708 }
   1709 ////////////////////////////////////////////////////////////////////////////////
   1710 
   1711 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
   1712                                        BufferedDraw buffered,
   1713                                        AutoRestoreEffects* are,
   1714                                        AutoCheckFlush* acf) {
   1715     // All users of this draw state should be freeing up all effects when they're done.
   1716     // Otherwise effects that own resources may keep those resources alive indefinitely.
   1717     SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
   1718 
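             // If the previous draw went through the deferred buffer and this one will not, flush
             // the buffer first so the deferred draws land ahead of the immediate one.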
   1719     if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
   1720         fDrawBuffer->flush();
   1721         fLastDrawWasBuffered = kNo_BufferedDraw;
   1722     }
   1723     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
   1724     if (NULL != paint) {
   1725         SkASSERT(NULL != are);
   1726         SkASSERT(NULL != acf);
   1727         are->set(fDrawState);
   1728         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
   1729 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
   1730         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
   1731             !fGpu->canApplyCoverage()) {
   1732             GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
   1733         }
   1734 #endif
   1735     } else {
   1736         fDrawState->reset(fViewMatrix);
   1737         fDrawState->setRenderTarget(fRenderTarget.get());
   1738     }
   1739     GrDrawTarget* target;
   1740     if (kYes_BufferedDraw == buffered) {
   1741         fLastDrawWasBuffered = kYes_BufferedDraw;
   1742         target = fDrawBuffer;
   1743     } else {
   1744         SkASSERT(kNo_BufferedDraw == buffered);
   1745         fLastDrawWasBuffered = kNo_BufferedDraw;
   1746         target = fGpu;
   1747     }
   1748     fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
   1749                                                      !fClip->fClipStack->isWideOpen());
   1750     target->setClip(fClip);
   1751     SkASSERT(fDrawState == target->drawState());
   1752     return target;
   1753 }
   1754 
   1755 /*
   1756  * This method finds a path renderer that can draw the specified path on
   1757  * the provided target.
    1758  * Due to its expense, the software path renderer has been split out so it
    1759  * can be individually allowed/disallowed via the "allowSW" boolean.
   1760  */
   1761 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
   1762                                            const SkStrokeRec& stroke,
   1763                                            const GrDrawTarget* target,
   1764                                            bool allowSW,
   1765                                            GrPathRendererChain::DrawType drawType,
   1766                                            GrPathRendererChain::StencilSupport* stencilSupport) {
   1767 
   1768     if (NULL == fPathRendererChain) {
   1769         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
   1770     }
   1771 
   1772     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
   1773                                                              stroke,
   1774                                                              target,
   1775                                                              drawType,
   1776                                                              stencilSupport);
   1777 
   1778     if (NULL == pr && allowSW) {
   1779         if (NULL == fSoftwarePathRenderer) {
   1780             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
   1781         }
   1782         pr = fSoftwarePathRenderer;
   1783     }
   1784 
   1785     return pr;
   1786 }
   1787 
   1788 ////////////////////////////////////////////////////////////////////////////////
   1789 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
   1790     return fGpu->caps()->isConfigRenderable(config, withMSAA);
   1791 }
   1792 
   1793 int GrContext::getRecommendedSampleCount(GrPixelConfig config,
   1794                                          SkScalar dpi) const {
   1795     if (!this->isConfigRenderable(config, true)) {
   1796         return 0;
   1797     }
   1798     int chosenSampleCount = 0;
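             // When the GPU supports path rendering, pick heavier MSAA (16x) at low dpi and lighter
             // MSAA (4x) at 250+ dpi, presumably because smaller pixels make aliasing less visible.
             // If the GPU can't supply that many samples we fall back to 0 below.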
   1799     if (fGpu->caps()->pathRenderingSupport()) {
   1800         if (dpi >= 250.0f) {
   1801             chosenSampleCount = 4;
   1802         } else {
   1803             chosenSampleCount = 16;
   1804         }
   1805     }
   1806     return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
   1807         chosenSampleCount : 0;
   1808 }
   1809 
   1810 void GrContext::setupDrawBuffer() {
   1811     SkASSERT(NULL == fDrawBuffer);
   1812     SkASSERT(NULL == fDrawBufferVBAllocPool);
   1813     SkASSERT(NULL == fDrawBufferIBAllocPool);
   1814 
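             // The vertex and index buffer pools below supply the geometry storage that backs the
             // deferred GrInOrderDrawBuffer.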
   1815     fDrawBufferVBAllocPool =
   1816         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
   1817                                     DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
   1818                                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
   1819     fDrawBufferIBAllocPool =
   1820         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
   1821                                    DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
   1822                                    DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
   1823 
   1824     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
   1825                                                    fDrawBufferVBAllocPool,
   1826                                                    fDrawBufferIBAllocPool));
   1827 
   1828     fDrawBuffer->setDrawState(fDrawState);
   1829 }
   1830 
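         // Text drawing reuses prepareToDraw() with no paint, so it inherits the context's current
         // draw state and (when c_Defer is enabled) the deferred GrInOrderDrawBuffer. A hypothetical
         // caller sketch (illustrative only, not part of this file):
         //   GrDrawTarget* target = context->getTextTarget();
         //   // ... append text geometry to 'target' ...
         //   context->flush();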
   1831 GrDrawTarget* GrContext::getTextTarget() {
   1832     return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
   1833 }
   1834 
   1835 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
   1836     return fGpu->getQuadIndexBuffer();
   1837 }
   1838 
   1839 namespace {
   1840 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
   1841     GrConfigConversionEffect::PMConversion pmToUPM;
   1842     GrConfigConversionEffect::PMConversion upmToPM;
   1843     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
   1844     *pmToUPMValue = pmToUPM;
   1845     *upmToPMValue = upmToPM;
   1846 }
   1847 }
   1848 
   1849 const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
   1850                                                   bool swapRAndB,
   1851                                                   const SkMatrix& matrix) {
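             // The PM<->UPM round-trip test runs at most once per context; its results are cached
             // in fPMToUPMConversion and fUPMToPMConversion and shared with createUPMToPMEffect().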
   1852     if (!fDidTestPMConversions) {
   1853         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
   1854         fDidTestPMConversions = true;
   1855     }
   1856     GrConfigConversionEffect::PMConversion pmToUPM =
   1857         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
   1858     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
   1859         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
   1860     } else {
   1861         return NULL;
   1862     }
   1863 }
   1864 
   1865 const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
   1866                                                   bool swapRAndB,
   1867                                                   const SkMatrix& matrix) {
   1868     if (!fDidTestPMConversions) {
   1869         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
   1870         fDidTestPMConversions = true;
   1871     }
   1872     GrConfigConversionEffect::PMConversion upmToPM =
   1873         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
   1874     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
   1875         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
   1876     } else {
   1877         return NULL;
   1878     }
   1879 }
   1880 
   1881 GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) {
   1882     SkASSERT(fGpu->caps()->pathRenderingSupport());
   1883 
    1884     // Cached GrPaths live in fResourceCache, keyed on the path geometry and stroke.
   1885     GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke);
   1886     GrPath* path = static_cast<GrPath*>(fResourceCache->find(resourceKey));
   1887     if (NULL != path && path->isEqualTo(inPath, stroke)) {
   1888         path->ref();
   1889     } else {
   1890         path = fGpu->createPath(inPath, stroke);
   1891         fResourceCache->purgeAsNeeded(1, path->gpuMemorySize());
   1892         fResourceCache->addResource(resourceKey, path);
   1893     }
   1894     return path;
   1895 }
   1896 
   1897 void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrCacheable* resource) {
   1898     fResourceCache->purgeAsNeeded(1, resource->gpuMemorySize());
   1899     fResourceCache->addResource(resourceKey, resource);
   1900 }
   1901 
   1902 GrCacheable* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
   1903     GrCacheable* resource = fResourceCache->find(resourceKey);
   1904     SkSafeRef(resource);
   1905     return resource;
   1906 }
   1907 
   1908 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
   1909     fGpu->addGpuTraceMarker(marker);
   1910     if (NULL != fDrawBuffer) {
   1911         fDrawBuffer->addGpuTraceMarker(marker);
   1912     }
   1913 }
   1914 
   1915 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
   1916     fGpu->removeGpuTraceMarker(marker);
   1917     if (NULL != fDrawBuffer) {
   1918         fDrawBuffer->removeGpuTraceMarker(marker);
   1919     }
   1920 }
   1921 
   1922 ///////////////////////////////////////////////////////////////////////////////
   1923 #if GR_CACHE_STATS
   1924 void GrContext::printCacheStats() const {
   1925     fResourceCache->printStats();
   1926 }
   1927 #endif
   1928