Home | History | Annotate | Download | only in gpu
      1 
      2 /*
      3  * Copyright 2011 Google Inc.
      4  *
      5  * Use of this source code is governed by a BSD-style license that can be
      6  * found in the LICENSE file.
      7  */
      8 
      9 
     10 #include "GrContext.h"
     11 
     12 #include "effects/GrSingleTextureEffect.h"
     13 #include "effects/GrConfigConversionEffect.h"
     14 
     15 #include "GrBufferAllocPool.h"
     16 #include "GrGpu.h"
     17 #include "GrDrawTargetCaps.h"
     18 #include "GrIndexBuffer.h"
     19 #include "GrInOrderDrawBuffer.h"
     20 #include "GrOvalRenderer.h"
     21 #include "GrPathRenderer.h"
     22 #include "GrPathUtils.h"
     23 #include "GrResourceCache.h"
     24 #include "GrSoftwarePathRenderer.h"
     25 #include "GrStencilBuffer.h"
     26 #include "GrTextStrike.h"
     27 #include "SkRTConf.h"
     28 #include "SkRRect.h"
     29 #include "SkStrokeRec.h"
     30 #include "SkTLazy.h"
     31 #include "SkTLS.h"
     32 #include "SkTrace.h"
     33 
     34 SK_DEFINE_INST_COUNT(GrContext)
     35 SK_DEFINE_INST_COUNT(GrDrawState)
     36 
     37 // It can be useful to set this to false to test whether a bug is caused by using the
     38 // InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
     39 // debugging simpler.
     40 SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
     41                 "Defers rendering in GrContext via GrInOrderDrawBuffer.");
     42 
     43 #define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)
     44 
     45 // When we're using coverage AA but the blend is incompatible (given gpu
     46 // limitations) should we disable AA or draw wrong?
     47 #define DISABLE_COVERAGE_AA_FOR_BLEND 1
     48 
     49 #if GR_DEBUG
     50     // change this to a 1 to see notifications when partial coverage fails
     51     #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
     52 #else
     53     #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
     54 #endif
     55 
     56 static const size_t MAX_TEXTURE_CACHE_COUNT = 2048;
     57 static const size_t MAX_TEXTURE_CACHE_BYTES = GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT * 1024 * 1024;
     58 
     59 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
     60 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
     61 
     62 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
     63 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
     64 
     65 #define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
     66 
     67 // Glorified typedef to avoid including GrDrawState.h in GrContext.h
     68 class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
     69 
     70 GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
     71     GrContext* context = SkNEW(GrContext);
     72     if (context->init(backend, backendContext)) {
     73         return context;
     74     } else {
     75         context->unref();
     76         return NULL;
     77     }
     78 }
     79 
     80 namespace {
     81 void* CreateThreadInstanceCount() {
     82     return SkNEW_ARGS(int, (0));
     83 }
     84 void DeleteThreadInstanceCount(void* v) {
     85     delete reinterpret_cast<int*>(v);
     86 }
     87 #define THREAD_INSTANCE_COUNT \
     88     (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, DeleteThreadInstanceCount)))
     89 }
     90 
     91 GrContext::GrContext() {
     92     ++THREAD_INSTANCE_COUNT;
     93     fDrawState = NULL;
     94     fGpu = NULL;
     95     fClip = NULL;
     96     fPathRendererChain = NULL;
     97     fSoftwarePathRenderer = NULL;
     98     fTextureCache = NULL;
     99     fFontCache = NULL;
    100     fDrawBuffer = NULL;
    101     fDrawBufferVBAllocPool = NULL;
    102     fDrawBufferIBAllocPool = NULL;
    103     fAARectRenderer = NULL;
    104     fOvalRenderer = NULL;
    105     fViewMatrix.reset();
    106     fMaxTextureSizeOverride = 1 << 20;
    107 }
    108 
    109 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    110     GrAssert(NULL == fGpu);
    111 
    112     fGpu = GrGpu::Create(backend, backendContext, this);
    113     if (NULL == fGpu) {
    114         return false;
    115     }
    116 
    117     fDrawState = SkNEW(GrDrawState);
    118     fGpu->setDrawState(fDrawState);
    119 
    120     fTextureCache = SkNEW_ARGS(GrResourceCache,
    121                                (MAX_TEXTURE_CACHE_COUNT,
    122                                 MAX_TEXTURE_CACHE_BYTES));
    123     fTextureCache->setOverbudgetCallback(OverbudgetCB, this);
    124 
    125     fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
    126 
    127     fLastDrawWasBuffered = kNo_BufferedDraw;
    128 
    129     fAARectRenderer = SkNEW(GrAARectRenderer);
    130     fOvalRenderer = SkNEW(GrOvalRenderer);
    131 
    132     fDidTestPMConversions = false;
    133 
    134     this->setupDrawBuffer();
    135 
    136     return true;
    137 }
    138 
    139 int GrContext::GetThreadInstanceCount() {
    140     return THREAD_INSTANCE_COUNT;
    141 }
    142 
    143 GrContext::~GrContext() {
    144     for (int i = 0; i < fCleanUpData.count(); ++i) {
    145         (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    146     }
    147 
    148     if (NULL == fGpu) {
    149         return;
    150     }
    151 
    152     this->flush();
    153 
    154     // Since the gpu can hold scratch textures, give it a chance to let go
    155     // of them before freeing the texture cache
    156     fGpu->purgeResources();
    157 
    158     delete fTextureCache;
    159     fTextureCache = NULL;
    160     delete fFontCache;
    161     delete fDrawBuffer;
    162     delete fDrawBufferVBAllocPool;
    163     delete fDrawBufferIBAllocPool;
    164 
    165     fAARectRenderer->unref();
    166     fOvalRenderer->unref();
    167 
    168     fGpu->unref();
    169     GrSafeUnref(fPathRendererChain);
    170     GrSafeUnref(fSoftwarePathRenderer);
    171     fDrawState->unref();
    172 
    173     --THREAD_INSTANCE_COUNT;
    174 }
    175 
    176 void GrContext::contextLost() {
    177     this->contextDestroyed();
    178     this->setupDrawBuffer();
    179 }
    180 
    181 void GrContext::contextDestroyed() {
    182     // abandon first to so destructors
    183     // don't try to free the resources in the API.
    184     fGpu->abandonResources();
    185 
    186     // a path renderer may be holding onto resources that
    187     // are now unusable
    188     GrSafeSetNull(fPathRendererChain);
    189     GrSafeSetNull(fSoftwarePathRenderer);
    190 
    191     delete fDrawBuffer;
    192     fDrawBuffer = NULL;
    193 
    194     delete fDrawBufferVBAllocPool;
    195     fDrawBufferVBAllocPool = NULL;
    196 
    197     delete fDrawBufferIBAllocPool;
    198     fDrawBufferIBAllocPool = NULL;
    199 
    200     fAARectRenderer->reset();
    201     fOvalRenderer->reset();
    202 
    203     fTextureCache->purgeAllUnlocked();
    204     fFontCache->freeAll();
    205     fGpu->markContextDirty();
    206 }
    207 
    208 void GrContext::resetContext(uint32_t state) {
    209     fGpu->markContextDirty(state);
    210 }
    211 
    212 void GrContext::freeGpuResources() {
    213     this->flush();
    214 
    215     fGpu->purgeResources();
    216 
    217     fAARectRenderer->reset();
    218     fOvalRenderer->reset();
    219 
    220     fTextureCache->purgeAllUnlocked();
    221     fFontCache->freeAll();
    222     // a path renderer may be holding onto resources
    223     GrSafeSetNull(fPathRendererChain);
    224     GrSafeSetNull(fSoftwarePathRenderer);
    225 }
    226 
    227 size_t GrContext::getGpuTextureCacheBytes() const {
    228   return fTextureCache->getCachedResourceBytes();
    229 }
    230 
    231 ////////////////////////////////////////////////////////////////////////////////
    232 
    233 GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
    234                                         const GrCacheID& cacheID,
    235                                         const GrTextureParams* params) {
    236     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    237     GrResource* resource = fTextureCache->find(resourceKey);
    238     SkSafeRef(resource);
    239     return static_cast<GrTexture*>(resource);
    240 }
    241 
    242 bool GrContext::isTextureInCache(const GrTextureDesc& desc,
    243                                  const GrCacheID& cacheID,
    244                                  const GrTextureParams* params) const {
    245     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    246     return fTextureCache->hasKey(resourceKey);
    247 }
    248 
    249 void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    250     ASSERT_OWNED_RESOURCE(sb);
    251 
    252     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
    253                                                             sb->height(),
    254                                                             sb->numSamples());
    255     fTextureCache->addResource(resourceKey, sb);
    256 }
    257 
    258 GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
    259                                               int sampleCnt) {
    260     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
    261                                                             height,
    262                                                             sampleCnt);
    263     GrResource* resource = fTextureCache->find(resourceKey);
    264     return static_cast<GrStencilBuffer*>(resource);
    265 }
    266 
    267 static void stretchImage(void* dst,
    268                          int dstW,
    269                          int dstH,
    270                          void* src,
    271                          int srcW,
    272                          int srcH,
    273                          int bpp) {
    274     GrFixed dx = (srcW << 16) / dstW;
    275     GrFixed dy = (srcH << 16) / dstH;
    276 
    277     GrFixed y = dy >> 1;
    278 
    279     int dstXLimit = dstW*bpp;
    280     for (int j = 0; j < dstH; ++j) {
    281         GrFixed x = dx >> 1;
    282         void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
    283         void* dstRow = (uint8_t*)dst + j*dstW*bpp;
    284         for (int i = 0; i < dstXLimit; i += bpp) {
    285             memcpy((uint8_t*) dstRow + i,
    286                    (uint8_t*) srcRow + (x>>16)*bpp,
    287                    bpp);
    288             x += dx;
    289         }
    290         y += dy;
    291     }
    292 }
    293 
    294 namespace {
    295 
    296 // position + local coordinate
    297 extern const GrVertexAttrib gVertexAttribs[] = {
    298     {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    299     {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}
    300 };
    301 
    302 };
    303 
    304 // The desired texture is NPOT and tiled but that isn't supported by
    305 // the current hardware. Resize the texture to be a POT
    306 GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
    307                                            const GrCacheID& cacheID,
    308                                            void* srcData,
    309                                            size_t rowBytes,
    310                                            bool filter) {
    311     SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    312     if (NULL == clampedTexture) {
    313         clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
    314 
    315         if (NULL == clampedTexture) {
    316             return NULL;
    317         }
    318     }
    319 
    320     GrTextureDesc rtDesc = desc;
    321     rtDesc.fFlags =  rtDesc.fFlags |
    322                      kRenderTarget_GrTextureFlagBit |
    323                      kNoStencil_GrTextureFlagBit;
    324     rtDesc.fWidth  = GrNextPow2(GrMax(desc.fWidth, 64));
    325     rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
    326 
    327     GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
    328 
    329     if (NULL != texture) {
    330         GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    331         GrDrawState* drawState = fGpu->drawState();
    332         drawState->setRenderTarget(texture->asRenderTarget());
    333 
    334         // if filtering is not desired then we want to ensure all
    335         // texels in the resampled image are copies of texels from
    336         // the original.
    337         GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
    338                                                                    GrTextureParams::kNone_FilterMode);
    339         drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);
    340 
    341         drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));
    342 
    343         GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);
    344 
    345         if (arg.succeeded()) {
    346             GrPoint* verts = (GrPoint*) arg.vertices();
    347             verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint));
    348             verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint));
    349             fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
    350         }
    351     } else {
    352         // TODO: Our CPU stretch doesn't filter. But we create separate
    353         // stretched textures when the texture params is either filtered or
    354         // not. Either implement filtered stretch blit on CPU or just create
    355         // one when FBO case fails.
    356 
    357         rtDesc.fFlags = kNone_GrTextureFlags;
    358         // no longer need to clamp at min RT size.
    359         rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    360         rtDesc.fHeight = GrNextPow2(desc.fHeight);
    361         int bpp = GrBytesPerPixel(desc.fConfig);
    362         SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
    363         stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
    364                      srcData, desc.fWidth, desc.fHeight, bpp);
    365 
    366         size_t stretchedRowBytes = rtDesc.fWidth * bpp;
    367 
    368         SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
    369                                                               stretchedRowBytes);
    370         GrAssert(NULL != texture);
    371     }
    372 
    373     return texture;
    374 }
    375 
    376 GrTexture* GrContext::createTexture(const GrTextureParams* params,
    377                                     const GrTextureDesc& desc,
    378                                     const GrCacheID& cacheID,
    379                                     void* srcData,
    380                                     size_t rowBytes) {
    381     SK_TRACE_EVENT0("GrContext::createTexture");
    382 
    383     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    384 
    385     GrTexture* texture;
    386     if (GrTexture::NeedsResizing(resourceKey)) {
    387         texture = this->createResizedTexture(desc, cacheID,
    388                                              srcData, rowBytes,
    389                                              GrTexture::NeedsBilerp(resourceKey));
    390     } else {
    391         texture= fGpu->createTexture(desc, srcData, rowBytes);
    392     }
    393 
    394     if (NULL != texture) {
    395         // Adding a resource could put us overbudget. Try to free up the
    396         // necessary space before adding it.
    397         fTextureCache->purgeAsNeeded(1, texture->sizeInBytes());
    398         fTextureCache->addResource(resourceKey, texture);
    399     }
    400 
    401     return texture;
    402 }
    403 
    404 static GrTexture* create_scratch_texture(GrGpu* gpu,
    405                                          GrResourceCache* textureCache,
    406                                          const GrTextureDesc& desc) {
    407     GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    408     if (NULL != texture) {
    409         GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
    410         // Adding a resource could put us overbudget. Try to free up the
    411         // necessary space before adding it.
    412         textureCache->purgeAsNeeded(1, texture->sizeInBytes());
    413         // Make the resource exclusive so future 'find' calls don't return it
    414         textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    415     }
    416     return texture;
    417 }
    418 
    419 GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
    420 
    421     GrAssert((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
    422              !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
    423 
    424     // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    425     GrAssert(this->isConfigRenderable(kAlpha_8_GrPixelConfig) ||
    426              !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
    427              (inDesc.fConfig != kAlpha_8_GrPixelConfig));
    428 
    429     if (!fGpu->caps()->reuseScratchTextures()) {
    430         // If we're never recycling scratch textures we can
    431         // always make them the right size
    432         return create_scratch_texture(fGpu, fTextureCache, inDesc);
    433     }
    434 
    435     GrTextureDesc desc = inDesc;
    436 
    437     if (kApprox_ScratchTexMatch == match) {
    438         // bin by pow2 with a reasonable min
    439         static const int MIN_SIZE = 16;
    440         desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
    441         desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    442     }
    443 
    444     GrResource* resource = NULL;
    445     int origWidth = desc.fWidth;
    446     int origHeight = desc.fHeight;
    447 
    448     do {
    449         GrResourceKey key = GrTexture::ComputeScratchKey(desc);
    450         // Ensure we have exclusive access to the texture so future 'find' calls don't return it
    451         resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
    452         if (NULL != resource) {
    453             resource->ref();
    454             break;
    455         }
    456         if (kExact_ScratchTexMatch == match) {
    457             break;
    458         }
    459         // We had a cache miss and we are in approx mode, relax the fit of the flags.
    460 
    461         // We no longer try to reuse textures that were previously used as render targets in
    462         // situations where no RT is needed; doing otherwise can confuse the video driver and
    463         // cause significant performance problems in some cases.
    464         if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
    465             desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
    466         } else {
    467             break;
    468         }
    469 
    470     } while (true);
    471 
    472     if (NULL == resource) {
    473         desc.fFlags = inDesc.fFlags;
    474         desc.fWidth = origWidth;
    475         desc.fHeight = origHeight;
    476         resource = create_scratch_texture(fGpu, fTextureCache, desc);
    477     }
    478 
    479     return static_cast<GrTexture*>(resource);
    480 }
    481 
    482 void GrContext::addExistingTextureToCache(GrTexture* texture) {
    483 
    484     if (NULL == texture) {
    485         return;
    486     }
    487 
    488     // This texture should already have a cache entry since it was once
    489     // attached
    490     GrAssert(NULL != texture->getCacheEntry());
    491 
    492     // Conceptually, the cache entry is going to assume responsibility
    493     // for the creation ref.
    494     GrAssert(texture->unique());
    495 
    496     // Since this texture came from an AutoScratchTexture it should
    497     // still be in the exclusive pile
    498     fTextureCache->makeNonExclusive(texture->getCacheEntry());
    499 
    500     if (fGpu->caps()->reuseScratchTextures()) {
    501         this->purgeCache();
    502     } else {
    503         // When we aren't reusing textures we know this scratch texture
    504         // will never be reused and would be just wasting time in the cache
    505         fTextureCache->deleteResource(texture->getCacheEntry());
    506     }
    507 }
    508 
    509 
    510 void GrContext::unlockScratchTexture(GrTexture* texture) {
    511     ASSERT_OWNED_RESOURCE(texture);
    512     GrAssert(NULL != texture->getCacheEntry());
    513 
    514     // If this is a scratch texture we detached it from the cache
    515     // while it was locked (to avoid two callers simultaneously getting
    516     // the same texture).
    517     if (texture->getCacheEntry()->key().isScratch()) {
    518         fTextureCache->makeNonExclusive(texture->getCacheEntry());
    519         this->purgeCache();
    520     }
    521 }
    522 
    523 void GrContext::purgeCache() {
    524     if (NULL != fTextureCache) {
    525         fTextureCache->purgeAsNeeded();
    526     }
    527 }
    528 
    529 bool GrContext::OverbudgetCB(void* data) {
    530     GrAssert(NULL != data);
    531 
    532     GrContext* context = reinterpret_cast<GrContext*>(data);
    533 
    534     // Flush the InOrderDrawBuffer to possibly free up some textures
    535     context->flush();
    536 
    537     // TODO: actually track flush's behavior rather than always just
    538     // returning true.
    539     return true;
    540 }
    541 
    542 
    543 GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
    544                                             void* srcData,
    545                                             size_t rowBytes) {
    546     GrTextureDesc descCopy = descIn;
    547     return fGpu->createTexture(descCopy, srcData, rowBytes);
    548 }
    549 
    550 void GrContext::getTextureCacheLimits(int* maxTextures,
    551                                       size_t* maxTextureBytes) const {
    552     fTextureCache->getLimits(maxTextures, maxTextureBytes);
    553 }
    554 
    555 void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    556     fTextureCache->setLimits(maxTextures, maxTextureBytes);
    557 }
    558 
    559 int GrContext::getMaxTextureSize() const {
    560     return GrMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
    561 }
    562 
    563 int GrContext::getMaxRenderTargetSize() const {
    564     return fGpu->caps()->maxRenderTargetSize();
    565 }
    566 
    567 int GrContext::getMaxSampleCount() const {
    568     return fGpu->caps()->maxSampleCount();
    569 }
    570 
    571 ///////////////////////////////////////////////////////////////////////////////
    572 
    573 GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    574     return fGpu->wrapBackendTexture(desc);
    575 }
    576 
    577 GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    578     return fGpu->wrapBackendRenderTarget(desc);
    579 }
    580 
    581 ///////////////////////////////////////////////////////////////////////////////
    582 
    583 bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
    584                                           int width, int height) const {
    585     const GrDrawTargetCaps* caps = fGpu->caps();
    586     if (!caps->eightBitPaletteSupport()) {
    587         return false;
    588     }
    589 
    590     bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
    591 
    592     if (!isPow2) {
    593         bool tiled = NULL != params && params->isTiled();
    594         if (tiled && !caps->npotTextureTileSupport()) {
    595             return false;
    596         }
    597     }
    598     return true;
    599 }
    600 
    601 
    602 ////////////////////////////////////////////////////////////////////////////////
    603 
    604 void GrContext::clear(const SkIRect* rect,
    605                       const GrColor color,
    606                       GrRenderTarget* target) {
    607     AutoRestoreEffects are;
    608     this->prepareToDraw(NULL, BUFFERED_DRAW, &are)->clear(rect, color, target);
    609 }
    610 
    611 void GrContext::drawPaint(const GrPaint& origPaint) {
    612     // set rect to be big enough to fill the space, but not super-huge, so we
    613     // don't overflow fixed-point implementations
    614     SkRect r;
    615     r.setLTRB(0, 0,
    616               SkIntToScalar(getRenderTarget()->width()),
    617               SkIntToScalar(getRenderTarget()->height()));
    618     SkMatrix inverse;
    619     SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    620     AutoMatrix am;
    621 
    622     // We attempt to map r by the inverse matrix and draw that. mapRect will
    623     // map the four corners and bound them with a new rect. This will not
    624     // produce a correct result for some perspective matrices.
    625     if (!this->getMatrix().hasPerspective()) {
    626         if (!fViewMatrix.invert(&inverse)) {
    627             GrPrintf("Could not invert matrix\n");
    628             return;
    629         }
    630         inverse.mapRect(&r);
    631     } else {
    632         if (!am.setIdentity(this, paint.writable())) {
    633             GrPrintf("Could not invert matrix\n");
    634             return;
    635         }
    636     }
    637     // by definition this fills the entire clip, no need for AA
    638     if (paint->isAntiAlias()) {
    639         paint.writable()->setAntiAlias(false);
    640     }
    641     this->drawRect(*paint, r);
    642 }
    643 
    644 ////////////////////////////////////////////////////////////////////////////////
    645 
    646 namespace {
    647 inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    648     return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
    649 }
    650 }
    651 
    652 ////////////////////////////////////////////////////////////////////////////////
    653 
    654 /*  create a triangle strip that strokes the specified triangle. There are 8
    655  unique vertices, but we repreat the last 2 to close up. Alternatively we
    656  could use an indices array, and then only send 8 verts, but not sure that
    657  would be faster.
    658  */
    659 static void setStrokeRectStrip(GrPoint verts[10], SkRect rect,
    660                                SkScalar width) {
    661     const SkScalar rad = SkScalarHalf(width);
    662     rect.sort();
    663 
    664     verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    665     verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    666     verts[2].set(rect.fRight - rad, rect.fTop + rad);
    667     verts[3].set(rect.fRight + rad, rect.fTop - rad);
    668     verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    669     verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    670     verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    671     verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    672     verts[8] = verts[0];
    673     verts[9] = verts[1];
    674 }
    675 
    676 static bool isIRect(const SkRect& r) {
    677     return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
    678            SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
    679 }
    680 
    681 static bool apply_aa_to_rect(GrDrawTarget* target,
    682                              const SkRect& rect,
    683                              SkScalar strokeWidth,
    684                              const SkMatrix* matrix,
    685                              SkMatrix* combinedMatrix,
    686                              SkRect* devRect,
    687                              bool* useVertexCoverage) {
    688     // we use a simple coverage ramp to do aa on axis-aligned rects
    689     // we check if the rect will be axis-aligned, and the rect won't land on
    690     // integer coords.
    691 
    692     // we are keeping around the "tweak the alpha" trick because
    693     // it is our only hope for the fixed-pipe implementation.
    694     // In a shader implementation we can give a separate coverage input
    695     // TODO: remove this ugliness when we drop the fixed-pipe impl
    696     *useVertexCoverage = false;
    697     if (!target->getDrawState().canTweakAlphaForCoverage()) {
    698         if (disable_coverage_aa_for_blend(target)) {
    699 #if GR_DEBUG
    700             //GrPrintf("Turning off AA to correctly apply blend.\n");
    701 #endif
    702             return false;
    703         } else {
    704             *useVertexCoverage = true;
    705         }
    706     }
    707     const GrDrawState& drawState = target->getDrawState();
    708     if (drawState.getRenderTarget()->isMultisampled()) {
    709         return false;
    710     }
    711 
    712     if (0 == strokeWidth && target->willUseHWAALines()) {
    713         return false;
    714     }
    715 
    716 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    717     if (strokeWidth >= 0) {
    718 #endif
    719         if (!drawState.getViewMatrix().preservesAxisAlignment()) {
    720             return false;
    721         }
    722 
    723         if (NULL != matrix && !matrix->preservesAxisAlignment()) {
    724             return false;
    725         }
    726 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    727     } else {
    728         if (!drawState.getViewMatrix().preservesAxisAlignment() &&
    729             !drawState.getViewMatrix().preservesRightAngles()) {
    730             return false;
    731         }
    732 
    733         if (NULL != matrix && !matrix->preservesRightAngles()) {
    734             return false;
    735         }
    736     }
    737 #endif
    738 
    739     *combinedMatrix = drawState.getViewMatrix();
    740     if (NULL != matrix) {
    741         combinedMatrix->preConcat(*matrix);
    742 
    743 #if GR_DEBUG
    744 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    745         if (strokeWidth >= 0) {
    746 #endif
    747             GrAssert(combinedMatrix->preservesAxisAlignment());
    748 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    749         } else {
    750             GrAssert(combinedMatrix->preservesRightAngles());
    751         }
    752 #endif
    753 #endif
    754     }
    755 
    756     combinedMatrix->mapRect(devRect, rect);
    757 
    758     if (strokeWidth < 0) {
    759         return !isIRect(*devRect);
    760     } else {
    761         return true;
    762     }
    763 }
    764 
    765 void GrContext::drawRect(const GrPaint& paint,
    766                          const SkRect& rect,
    767                          SkScalar width,
    768                          const SkMatrix* matrix) {
    769     SK_TRACE_EVENT0("GrContext::drawRect");
    770 
    771     AutoRestoreEffects are;
    772     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
    773 
    774     SkRect devRect;
    775     SkMatrix combinedMatrix;
    776     bool useVertexCoverage;
    777     bool needAA = paint.isAntiAlias() &&
    778                   !target->getDrawState().getRenderTarget()->isMultisampled();
    779     bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
    780                                            &combinedMatrix, &devRect,
    781                                            &useVertexCoverage);
    782     if (doAA) {
    783         GrDrawState::AutoViewMatrixRestore avmr;
    784         if (!avmr.setIdentity(target->drawState())) {
    785             return;
    786         }
    787         if (width >= 0) {
    788             fAARectRenderer->strokeAARect(this->getGpu(), target,
    789                                           rect, combinedMatrix, devRect,
    790                                           width, useVertexCoverage);
    791         } else {
    792             // filled AA rect
    793             fAARectRenderer->fillAARect(this->getGpu(), target,
    794                                         rect, combinedMatrix, devRect,
    795                                         useVertexCoverage);
    796         }
    797         return;
    798     }
    799 
    800     if (width >= 0) {
    801         // TODO: consider making static vertex buffers for these cases.
    802         // Hairline could be done by just adding closing vertex to
    803         // unitSquareVertexBuffer()
    804 
    805         static const int worstCaseVertCount = 10;
    806         target->drawState()->setDefaultVertexAttribs();
    807         GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);
    808 
    809         if (!geo.succeeded()) {
    810             GrPrintf("Failed to get space for vertices!\n");
    811             return;
    812         }
    813 
    814         GrPrimitiveType primType;
    815         int vertCount;
    816         GrPoint* vertex = geo.positions();
    817 
    818         if (width > 0) {
    819             vertCount = 10;
    820             primType = kTriangleStrip_GrPrimitiveType;
    821             setStrokeRectStrip(vertex, rect, width);
    822         } else {
    823             // hairline
    824             vertCount = 5;
    825             primType = kLineStrip_GrPrimitiveType;
    826             vertex[0].set(rect.fLeft, rect.fTop);
    827             vertex[1].set(rect.fRight, rect.fTop);
    828             vertex[2].set(rect.fRight, rect.fBottom);
    829             vertex[3].set(rect.fLeft, rect.fBottom);
    830             vertex[4].set(rect.fLeft, rect.fTop);
    831         }
    832 
    833         GrDrawState::AutoViewMatrixRestore avmr;
    834         if (NULL != matrix) {
    835             GrDrawState* drawState = target->drawState();
    836             avmr.set(drawState, *matrix);
    837         }
    838 
    839         target->drawNonIndexed(primType, 0, vertCount);
    840     } else {
    841         // filled BW rect
    842         target->drawSimpleRect(rect, matrix);
    843     }
    844 }
    845 
    846 void GrContext::drawRectToRect(const GrPaint& paint,
    847                                const SkRect& dstRect,
    848                                const SkRect& localRect,
    849                                const SkMatrix* dstMatrix,
    850                                const SkMatrix* localMatrix) {
    851     SK_TRACE_EVENT0("GrContext::drawRectToRect");
    852     AutoRestoreEffects are;
    853     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
    854 
    855     target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
    856 }
    857 
    858 namespace {
    859 
    860 extern const GrVertexAttrib gPosUVColorAttribs[] = {
    861     {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    862     {kVec2f_GrVertexAttribType,  sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding },
    863     {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding}
    864 };
    865 
    866 extern const GrVertexAttrib gPosColorAttribs[] = {
    867     {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    868     {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
    869 };
    870 
    871 static void set_vertex_attributes(GrDrawState* drawState,
    872                                   const GrPoint* texCoords,
    873                                   const GrColor* colors,
    874                                   int* colorOffset,
    875                                   int* texOffset) {
    876     *texOffset = -1;
    877     *colorOffset = -1;
    878 
    879     if (NULL != texCoords && NULL != colors) {
    880         *texOffset = sizeof(GrPoint);
    881         *colorOffset = 2*sizeof(GrPoint);
    882         drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    883     } else if (NULL != texCoords) {
    884         *texOffset = sizeof(GrPoint);
    885         drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    886     } else if (NULL != colors) {
    887         *colorOffset = sizeof(GrPoint);
    888         drawState->setVertexAttribs<gPosColorAttribs>(2);
    889     } else {
    890         drawState->setVertexAttribs<gPosColorAttribs>(1);
    891     }
    892 }
    893 
    894 };
    895 
    896 void GrContext::drawVertices(const GrPaint& paint,
    897                              GrPrimitiveType primitiveType,
    898                              int vertexCount,
    899                              const GrPoint positions[],
    900                              const GrPoint texCoords[],
    901                              const GrColor colors[],
    902                              const uint16_t indices[],
    903                              int indexCount) {
    904     SK_TRACE_EVENT0("GrContext::drawVertices");
    905 
    906     GrDrawTarget::AutoReleaseGeometry geo;
    907 
    908     AutoRestoreEffects are;
    909     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
    910 
    911     GrDrawState* drawState = target->drawState();
    912 
    913     int colorOffset = -1, texOffset = -1;
    914     set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
    915 
    916     size_t vertexSize = drawState->getVertexSize();
    917     if (sizeof(GrPoint) != vertexSize) {
    918         if (!geo.set(target, vertexCount, 0)) {
    919             GrPrintf("Failed to get space for vertices!\n");
    920             return;
    921         }
    922         void* curVertex = geo.vertices();
    923 
    924         for (int i = 0; i < vertexCount; ++i) {
    925             *((GrPoint*)curVertex) = positions[i];
    926 
    927             if (texOffset >= 0) {
    928                 *(GrPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
    929             }
    930             if (colorOffset >= 0) {
    931                 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
    932             }
    933             curVertex = (void*)((intptr_t)curVertex + vertexSize);
    934         }
    935     } else {
    936         target->setVertexSourceToArray(positions, vertexCount);
    937     }
    938 
    939     // we don't currently apply offscreen AA to this path. Need improved
    940     // management of GrDrawTarget's geometry to avoid copying points per-tile.
    941 
    942     if (NULL != indices) {
    943         target->setIndexSourceToArray(indices, indexCount);
    944         target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    945         target->resetIndexSource();
    946     } else {
    947         target->drawNonIndexed(primitiveType, 0, vertexCount);
    948     }
    949 }
    950 
    951 ///////////////////////////////////////////////////////////////////////////////
    952 
    953 void GrContext::drawRRect(const GrPaint& paint,
    954                           const SkRRect& rect,
    955                           const SkStrokeRec& stroke) {
    956     if (rect.isEmpty()) {
    957        return;
    958     }
    959 
    960     AutoRestoreEffects are;
    961     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
    962 
    963     bool useAA = paint.isAntiAlias() &&
    964                  !target->getDrawState().getRenderTarget()->isMultisampled() &&
    965                  !disable_coverage_aa_for_blend(target);
    966 
    967     if (!fOvalRenderer->drawSimpleRRect(target, this, useAA, rect, stroke)) {
    968         SkPath path;
    969         path.addRRect(rect);
    970         this->internalDrawPath(target, useAA, path, stroke);
    971     }
    972 }
    973 
    974 ///////////////////////////////////////////////////////////////////////////////
    975 
    976 void GrContext::drawOval(const GrPaint& paint,
    977                          const SkRect& oval,
    978                          const SkStrokeRec& stroke) {
    979     if (oval.isEmpty()) {
    980        return;
    981     }
    982 
    983     AutoRestoreEffects are;
    984     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
    985 
    986     bool useAA = paint.isAntiAlias() &&
    987                  !target->getDrawState().getRenderTarget()->isMultisampled() &&
    988                  !disable_coverage_aa_for_blend(target);
    989 
    990     if (!fOvalRenderer->drawOval(target, this, useAA, oval, stroke)) {
    991         SkPath path;
    992         path.addOval(oval);
    993         this->internalDrawPath(target, useAA, path, stroke);
    994     }
    995 }
    996 
    997 // Can 'path' be drawn as a pair of filled nested rectangles?
    998 static bool is_nested_rects(GrDrawTarget* target,
    999                             const SkPath& path,
   1000                             const SkStrokeRec& stroke,
   1001                             SkRect rects[2],
   1002                             bool* useVertexCoverage) {
   1003     SkASSERT(stroke.isFillStyle());
   1004 
   1005     if (path.isInverseFillType()) {
   1006         return false;
   1007     }
   1008 
   1009     const GrDrawState& drawState = target->getDrawState();
   1010 
   1011     // TODO: this restriction could be lifted if we were willing to apply
   1012     // the matrix to all the points individually rather than just to the rect
   1013     if (!drawState.getViewMatrix().preservesAxisAlignment()) {
   1014         return false;
   1015     }
   1016 
   1017     *useVertexCoverage = false;
   1018     if (!target->getDrawState().canTweakAlphaForCoverage()) {
   1019         if (disable_coverage_aa_for_blend(target)) {
   1020             return false;
   1021         } else {
   1022             *useVertexCoverage = true;
   1023         }
   1024     }
   1025 
   1026     SkPath::Direction dirs[2];
   1027     if (!path.isNestedRects(rects, dirs)) {
   1028         return false;
   1029     }
   1030 
   1031     if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
   1032         // The two rects need to be wound opposite to each other
   1033         return false;
   1034     }
   1035 
   1036     // Right now, nested rects where the margin is not the same width
   1037     // all around do not render correctly
   1038     const SkScalar* outer = rects[0].asScalars();
   1039     const SkScalar* inner = rects[1].asScalars();
   1040 
   1041     SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
   1042     for (int i = 1; i < 4; ++i) {
   1043         SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
   1044         if (!SkScalarNearlyEqual(margin, temp)) {
   1045             return false;
   1046         }
   1047     }
   1048 
   1049     return true;
   1050 }
   1051 
   1052 void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {
   1053 
   1054     if (path.isEmpty()) {
   1055        if (path.isInverseFillType()) {
   1056            this->drawPaint(paint);
   1057        }
   1058        return;
   1059     }
   1060 
   1061     // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
   1062     // Scratch textures can be recycled after they are returned to the texture
   1063     // cache. This presents a potential hazard for buffered drawing. However,
   1064     // the writePixels that uploads to the scratch will perform a flush so we're
   1065     // OK.
   1066     AutoRestoreEffects are;
   1067     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
   1068 
   1069     bool useAA = paint.isAntiAlias() && !target->getDrawState().getRenderTarget()->isMultisampled();
   1070     if (useAA && stroke.getWidth() < 0 && !path.isConvex()) {
   1071         // Concave AA paths are expensive - try to avoid them for special cases
   1072         bool useVertexCoverage;
   1073         SkRect rects[2];
   1074 
   1075         if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
   1076             SkMatrix origViewMatrix = target->getDrawState().getViewMatrix();
   1077             GrDrawState::AutoViewMatrixRestore avmr;
   1078             if (!avmr.setIdentity(target->drawState())) {
   1079                 return;
   1080             }
   1081 
   1082             fAARectRenderer->fillAANestedRects(this->getGpu(), target,
   1083                                                rects,
   1084                                                origViewMatrix,
   1085                                                useVertexCoverage);
   1086             return;
   1087         }
   1088     }
   1089 
   1090     SkRect ovalRect;
   1091     bool isOval = path.isOval(&ovalRect);
   1092 
   1093     if (!isOval || path.isInverseFillType()
   1094         || !fOvalRenderer->drawOval(target, this, useAA, ovalRect, stroke)) {
   1095         this->internalDrawPath(target, useAA, path, stroke);
   1096     }
   1097 }
   1098 
   1099 void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
   1100                                  const SkStrokeRec& stroke) {
   1101     SkASSERT(!path.isEmpty());
   1102 
   1103     // An Assumption here is that path renderer would use some form of tweaking
   1104     // the src color (either the input alpha or in the frag shader) to implement
   1105     // aa. If we have some future driver-mojo path AA that can do the right
   1106     // thing WRT to the blend then we'll need some query on the PR.
   1107     if (disable_coverage_aa_for_blend(target)) {
   1108 #if GR_DEBUG
   1109         //GrPrintf("Turning off AA to correctly apply blend.\n");
   1110 #endif
   1111         useAA = false;
   1112     }
   1113 
   1114     GrPathRendererChain::DrawType type = useAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
   1115                                                  GrPathRendererChain::kColor_DrawType;
   1116 
   1117     const SkPath* pathPtr = &path;
   1118     SkPath tmpPath;
   1119     SkStrokeRec strokeRec(stroke);
   1120 
   1121     // Try a 1st time without stroking the path and without allowing the SW renderer
   1122     GrPathRenderer* pr = this->getPathRenderer(*pathPtr, strokeRec, target, false, type);
   1123 
   1124     if (NULL == pr) {
   1125         if (!strokeRec.isHairlineStyle()) {
   1126             // It didn't work the 1st time, so try again with the stroked path
   1127             if (strokeRec.applyToPath(&tmpPath, *pathPtr)) {
   1128                 pathPtr = &tmpPath;
   1129                 strokeRec.setFillStyle();
   1130             }
   1131         }
   1132         if (pathPtr->isEmpty()) {
   1133             return;
   1134         }
   1135 
   1136         // This time, allow SW renderer
   1137         pr = this->getPathRenderer(*pathPtr, strokeRec, target, true, type);
   1138     }
   1139 
   1140     if (NULL == pr) {
   1141 #if GR_DEBUG
   1142         GrPrintf("Unable to find path renderer compatible with path.\n");
   1143 #endif
   1144         return;
   1145     }
   1146 
   1147     pr->drawPath(*pathPtr, strokeRec, target, useAA);
   1148 }
   1149 
   1150 ////////////////////////////////////////////////////////////////////////////////
   1151 
   1152 void GrContext::flush(int flagsBitfield) {
   1153     if (NULL == fDrawBuffer) {
   1154         return;
   1155     }
   1156 
   1157     if (kDiscard_FlushBit & flagsBitfield) {
   1158         fDrawBuffer->reset();
   1159     } else {
   1160         fDrawBuffer->flush();
   1161     }
   1162 }
   1163 
   1164 bool GrContext::writeTexturePixels(GrTexture* texture,
   1165                                    int left, int top, int width, int height,
   1166                                    GrPixelConfig config, const void* buffer, size_t rowBytes,
   1167                                    uint32_t flags) {
   1168     SK_TRACE_EVENT0("GrContext::writeTexturePixels");
   1169     ASSERT_OWNED_RESOURCE(texture);
   1170 
   1171     if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
   1172         if (NULL != texture->asRenderTarget()) {
   1173             return this->writeRenderTargetPixels(texture->asRenderTarget(),
   1174                                                  left, top, width, height,
   1175                                                  config, buffer, rowBytes, flags);
   1176         } else {
   1177             return false;
   1178         }
   1179     }
   1180 
   1181     if (!(kDontFlush_PixelOpsFlag & flags)) {
   1182         this->flush();
   1183     }
   1184 
   1185     return fGpu->writeTexturePixels(texture, left, top, width, height,
   1186                                     config, buffer, rowBytes);
   1187 }
   1188 
   1189 bool GrContext::readTexturePixels(GrTexture* texture,
   1190                                   int left, int top, int width, int height,
   1191                                   GrPixelConfig config, void* buffer, size_t rowBytes,
   1192                                   uint32_t flags) {
   1193     SK_TRACE_EVENT0("GrContext::readTexturePixels");
   1194     ASSERT_OWNED_RESOURCE(texture);
   1195 
   1196     // TODO: code read pixels for textures that aren't also rendertargets
   1197     GrRenderTarget* target = texture->asRenderTarget();
   1198     if (NULL != target) {
   1199         return this->readRenderTargetPixels(target,
   1200                                             left, top, width, height,
   1201                                             config, buffer, rowBytes,
   1202                                             flags);
   1203     } else {
   1204         return false;
   1205     }
   1206 }
   1207 
   1208 #include "SkConfig8888.h"
   1209 
   1210 namespace {
   1211 /**
   1212  * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
   1213  * formats are representable as Config8888 and so the function returns false
   1214  * if the GrPixelConfig has no equivalent Config8888.
   1215  */
   1216 bool grconfig_to_config8888(GrPixelConfig config,
   1217                             bool unpremul,
   1218                             SkCanvas::Config8888* config8888) {
   1219     switch (config) {
   1220         case kRGBA_8888_GrPixelConfig:
   1221             if (unpremul) {
   1222                 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
   1223             } else {
   1224                 *config8888 = SkCanvas::kRGBA_Premul_Config8888;
   1225             }
   1226             return true;
   1227         case kBGRA_8888_GrPixelConfig:
   1228             if (unpremul) {
   1229                 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
   1230             } else {
   1231                 *config8888 = SkCanvas::kBGRA_Premul_Config8888;
   1232             }
   1233             return true;
   1234         default:
   1235             return false;
   1236     }
   1237 }
   1238 
   1239 // It returns a configuration with where the byte position of the R & B components are swapped in
   1240 // relation to the input config. This should only be called with the result of
   1241 // grconfig_to_config8888 as it will fail for other configs.
   1242 SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
   1243     switch (config8888) {
   1244         case SkCanvas::kBGRA_Premul_Config8888:
   1245             return SkCanvas::kRGBA_Premul_Config8888;
   1246         case SkCanvas::kBGRA_Unpremul_Config8888:
   1247             return SkCanvas::kRGBA_Unpremul_Config8888;
   1248         case SkCanvas::kRGBA_Premul_Config8888:
   1249             return SkCanvas::kBGRA_Premul_Config8888;
   1250         case SkCanvas::kRGBA_Unpremul_Config8888:
   1251             return SkCanvas::kBGRA_Unpremul_Config8888;
   1252         default:
   1253             GrCrash("Unexpected input");
   1254             return SkCanvas::kBGRA_Unpremul_Config8888;;
   1255     }
   1256 }
   1257 }
   1258 
   1259 bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
   1260                                        int left, int top, int width, int height,
   1261                                        GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
   1262                                        uint32_t flags) {
   1263     SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
   1264     ASSERT_OWNED_RESOURCE(target);
   1265 
   1266     if (NULL == target) {
   1267         target = fRenderTarget.get();
   1268         if (NULL == target) {
   1269             return false;
   1270         }
   1271     }
   1272 
   1273     if (!(kDontFlush_PixelOpsFlag & flags)) {
   1274         this->flush();
   1275     }
   1276 
   1277     // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.
   1278 
   1279     // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
   1280     // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
   1281     bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
   1282                                                  width, height, dstConfig,
   1283                                                  rowBytes);
   1284     // We ignore the preferred config if it is different than our config unless it is an R/B swap.
   1285     // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
   1286     // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
   1287     // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
   1288     // dstConfig.
   1289     GrPixelConfig readConfig = dstConfig;
   1290     bool swapRAndB = false;
   1291     if (GrPixelConfigSwapRAndB(dstConfig) ==
   1292         fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
   1293         readConfig = GrPixelConfigSwapRAndB(readConfig);
   1294         swapRAndB = true;
   1295     }
   1296 
   1297     bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
   1298 
   1299     if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
   1300         // The unpremul flag is only allowed for these two configs.
   1301         return false;
   1302     }
   1303 
   1304     // If the src is a texture and we would have to do conversions after read pixels, we instead
   1305     // do the conversions by drawing the src to a scratch texture. If we handle any of the
   1306     // conversions in the draw we set the corresponding bool to false so that we don't reapply it
   1307     // on the read back pixels.
   1308     GrTexture* src = target->asTexture();
   1309     GrAutoScratchTexture ast;
   1310     if (NULL != src && (swapRAndB || unpremul || flipY)) {
   1311         // Make the scratch a render target because we don't have a robust readTexturePixels as of
   1312         // yet. It calls this function.
   1313         GrTextureDesc desc;
   1314         desc.fFlags = kRenderTarget_GrTextureFlagBit;
   1315         desc.fWidth = width;
   1316         desc.fHeight = height;
   1317         desc.fConfig = readConfig;
   1318         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
   1319 
   1320         // When a full read back is faster than a partial we could always make the scratch exactly
   1321         // match the passed rect. However, if we see many different size rectangles we will trash
   1322         // our texture cache and pay the cost of creating and destroying many textures. So, we only
   1323         // request an exact match when the caller is reading an entire RT.
   1324         ScratchTexMatch match = kApprox_ScratchTexMatch;
   1325         if (0 == left &&
   1326             0 == top &&
   1327             target->width() == width &&
   1328             target->height() == height &&
   1329             fGpu->fullReadPixelsIsFasterThanPartial()) {
   1330             match = kExact_ScratchTexMatch;
   1331         }
   1332         ast.set(this, desc, match);
   1333         GrTexture* texture = ast.texture();
   1334         if (texture) {
   1335             // Compute a matrix that maps the drawn rect onto the subrect of src being read.
   1336             SkMatrix textureMatrix;
   1337             textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
   1338             textureMatrix.postIDiv(src->width(), src->height());
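                    // Worked example (hypothetical numbers, added for clarity): with left=10, top=20
                    // and a 256x128 src, a local draw coordinate (x, y) samples the src at normalized
                    // texture coordinates ((x + 10) / 256, (y + 20) / 128), i.e. inside the requested
                    // subrect.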
   1339 
   1340             SkAutoTUnref<const GrEffectRef> effect;
   1341             if (unpremul) {
   1342                 effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
   1343                 if (NULL != effect) {
   1344                     unpremul = false; // we no longer need to do this on CPU after the read back.
   1345                 }
   1346             }
   1347             // If we failed to create a PM->UPM effect and have no other conversions to perform then
   1348             // there is no longer any point to using the scratch.
   1349             if (NULL != effect || flipY || swapRAndB) {
   1350                 if (!effect) {
   1351                     effect.reset(GrConfigConversionEffect::Create(
   1352                                                     src,
   1353                                                     swapRAndB,
   1354                                                     GrConfigConversionEffect::kNone_PMConversion,
   1355                                                     textureMatrix));
   1356                 }
   1357                 swapRAndB = false; // we will handle the swap in the draw.
   1358 
   1359                 // We protect the existing geometry here since it may not be
   1360                 // clear to the caller that a draw operation (i.e., drawSimpleRect)
   1361                 // can be invoked in this method
   1362                 GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
   1363                 GrDrawState* drawState = fGpu->drawState();
   1364                 GrAssert(effect);
   1365                 drawState->addColorEffect(effect);
   1366 
   1367                 drawState->setRenderTarget(texture->asRenderTarget());
   1368                 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
   1369                 fGpu->drawSimpleRect(rect, NULL);
   1370                 // we want to read back from the scratch's origin
   1371                 left = 0;
   1372                 top = 0;
   1373                 target = texture->asRenderTarget();
   1374             }
   1375         }
   1376     }
   1377     if (!fGpu->readPixels(target,
   1378                           left, top, width, height,
   1379                           readConfig, buffer, rowBytes)) {
   1380         return false;
   1381     }
   1382     // Perform any conversions we weren't able to perform using a scratch texture.
   1383     if (unpremul || swapRAndB) {
   1384         // These are initialized to suppress a warning
   1385         SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
   1386         SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;
   1387 
   1388         SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
   1389         grconfig_to_config8888(dstConfig, unpremul, &dstC8888);
   1390 
   1391         if (swapRAndB) {
   1392             GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
   1393             srcC8888 = swap_config8888_red_and_blue(srcC8888);
   1394         }
   1395         GrAssert(c8888IsValid);
   1396         uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
   1397         SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
   1398                                   b32, rowBytes, srcC8888,
   1399                                   width, height);
   1400     }
   1401     return true;
   1402 }
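        // Usage sketch for readRenderTargetPixels (illustrative only; "context" and "rt" stand in for
        // a valid GrContext* and GrRenderTarget*):
        //     SkAutoMalloc storage(rt->width() * rt->height() * 4);
        //     context->readRenderTargetPixels(rt, 0, 0, rt->width(), rt->height(),
        //                                     kSkia8888_GrPixelConfig, storage.get(), 0,
        //                                     GrContext::kUnpremul_PixelOpsFlag);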
   1403 
   1404 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
   1405     GrAssert(target);
   1406     ASSERT_OWNED_RESOURCE(target);
   1407     // In the future we may track whether there are any pending draws to this
   1408     // target. We don't today so we always perform a flush. We don't promise
   1409     // this to our clients, though.
   1410     this->flush();
   1411     fGpu->resolveRenderTarget(target);
   1412 }
   1413 
   1414 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
   1415     if (NULL == src || NULL == dst) {
   1416         return;
   1417     }
   1418     ASSERT_OWNED_RESOURCE(src);
   1419 
   1420     // Writes pending to the source texture are not tracked, so a flush
   1421     // is required to ensure that the copy captures the most recent contents
   1422     // of the source texture. See similar behavior in
   1423     // GrContext::resolveRenderTarget.
   1424     this->flush();
   1425 
   1426     GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
   1427     GrDrawState* drawState = fGpu->drawState();
   1428     drawState->setRenderTarget(dst);
   1429     SkMatrix sampleM;
   1430     sampleM.setIDiv(src->width(), src->height());
   1431     SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
   1432     if (NULL != topLeft) {
   1433         srcRect.offset(*topLeft);
   1434     }
   1435     SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
   1436     if (!srcRect.intersect(srcBounds)) {
   1437         return;
   1438     }
   1439     sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
   1440     drawState->addColorTextureEffect(src, sampleM);
   1441     SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
   1442     fGpu->drawSimpleRect(dstR, NULL);
   1443 }
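        // Usage sketch for copyTexture (illustrative; "srcTex" and "dstRT" are hypothetical names):
        //     context->copyTexture(srcTex, dstRT, NULL);       // copy starting at the source origin
        //     SkIPoint offset = SkIPoint::Make(16, 16);
        //     context->copyTexture(srcTex, dstRT, &offset);    // copy starting at (16, 16) in src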
   1444 
   1445 bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
   1446                                         int left, int top, int width, int height,
   1447                                         GrPixelConfig srcConfig,
   1448                                         const void* buffer,
   1449                                         size_t rowBytes,
   1450                                         uint32_t flags) {
   1451     SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
   1452     ASSERT_OWNED_RESOURCE(target);
   1453 
   1454     if (NULL == target) {
   1455         target = fRenderTarget.get();
   1456         if (NULL == target) {
   1457             return false;
   1458         }
   1459     }
   1460 
   1461     // TODO: when the underlying API has a direct way to do this we should use it (e.g. glDrawPixels on
   1462     // desktop GL).
   1463 
   1464     // We will always call some form of writeTexturePixels and we will pass our flags on to it.
   1465     // Thus, we don't perform a flush here since that call will do it (if the kDontFlush flag isn't
   1466     // set).
   1467 
   1468     // If the RT is also a texture and we don't have to premultiply then take the texture path.
   1469     // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
   1470     // we do below.
   1471 
   1472 #if !GR_MAC_BUILD
   1473     // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
   1474     // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
   1475     // HW is affected.
   1476     if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
   1477         fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
   1478         return this->writeTexturePixels(target->asTexture(),
   1479                                         left, top, width, height,
   1480                                         srcConfig, buffer, rowBytes, flags);
   1481     }
   1482 #endif
   1483 
   1484     // We ignore the preferred config unless it is a R/B swap of the src config. In that case
   1485     // we will upload the original src data to a scratch texture but we will spoof it as the swapped
   1486     // config. This scratch will then have R and B swapped. We correct for this by swapping again
   1487     // when drawing the scratch to the dst using a conversion effect.
   1488     bool swapRAndB = false;
   1489     GrPixelConfig writeConfig = srcConfig;
   1490     if (GrPixelConfigSwapRAndB(srcConfig) ==
   1491         fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
   1492         writeConfig = GrPixelConfigSwapRAndB(srcConfig);
   1493         swapRAndB = true;
   1494     }
   1495 
   1496     GrTextureDesc desc;
   1497     desc.fWidth = width;
   1498     desc.fHeight = height;
   1499     desc.fConfig = writeConfig;
   1500     GrAutoScratchTexture ast(this, desc);
   1501     GrTexture* texture = ast.texture();
   1502     if (NULL == texture) {
   1503         return false;
   1504     }
   1505 
   1506     SkAutoTUnref<const GrEffectRef> effect;
   1507     SkMatrix textureMatrix;
   1508     textureMatrix.setIDiv(texture->width(), texture->height());
   1509 
   1510     // Temp buffer used only if we have to convert the pixels to premul on the CPU below.
   1511     SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
   1512 
   1513     if (kUnpremul_PixelOpsFlag & flags) {
   1514         if (!GrPixelConfigIs8888(srcConfig)) {
   1515             return false;
   1516         }
   1517         effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
   1518         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
   1519         if (NULL == effect) {
   1520             SkCanvas::Config8888 srcConfig8888, dstConfig8888;
   1521             GR_DEBUGCODE(bool success = )
   1522             grconfig_to_config8888(srcConfig, true, &srcConfig8888);
   1523             GrAssert(success);
   1524             GR_DEBUGCODE(success = )
   1525             grconfig_to_config8888(srcConfig, false, &dstConfig8888);
   1526             GrAssert(success);
   1527             const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
   1528             tmpPixels.reset(width * height);
   1529             SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
   1530                                       src, rowBytes, srcConfig8888,
   1531                                       width, height);
   1532             buffer = tmpPixels.get();
   1533             rowBytes = 4 * width;
   1534         }
   1535     }
   1536     if (NULL == effect) {
   1537         effect.reset(GrConfigConversionEffect::Create(texture,
   1538                                                       swapRAndB,
   1539                                                       GrConfigConversionEffect::kNone_PMConversion,
   1540                                                       textureMatrix));
   1541     }
   1542 
   1543     if (!this->writeTexturePixels(texture,
   1544                                   0, 0, width, height,
   1545                                   writeConfig, buffer, rowBytes,
   1546                                   flags & ~kUnpremul_PixelOpsFlag)) {
   1547         return false;
   1548     }
   1549 
   1550     // writeRenderTargetPixels can be called in the midst of drawing another
   1551     // object (e.g., when uploading a SW path rendering to the gpu while
   1552     // drawing a rect) so preserve the current geometry.
   1553     SkMatrix matrix;
   1554     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
   1555     GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
   1556     GrDrawState* drawState = fGpu->drawState();
   1557     GrAssert(effect);
   1558     drawState->addColorEffect(effect);
   1559 
   1560     drawState->setRenderTarget(target);
   1561 
   1562     fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
   1563     return true;
   1564 }
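        // Usage sketch for writeRenderTargetPixels (illustrative; "context", "rt", "pixels", "w", and
        // "h" are assumed, and rowBytes of 0 is taken to mean tightly packed rows):
        //     context->writeRenderTargetPixels(rt, 0, 0, w, h, kRGBA_8888_GrPixelConfig,
        //                                      pixels, 0, GrContext::kUnpremul_PixelOpsFlag);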
   1565 ////////////////////////////////////////////////////////////////////////////////
   1566 
   1567 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
   1568                                        BufferedDraw buffered,
   1569                                        AutoRestoreEffects* are) {
   1570     // All users of this draw state should be freeing up all effects when they're done.
   1571     // Otherwise effects that own resources may keep those resources alive indefinitely.
   1572     GrAssert(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
   1573 
   1574     if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
   1575         fDrawBuffer->flush();
   1576         fLastDrawWasBuffered = kNo_BufferedDraw;
   1577     }
   1578     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
   1579     if (NULL != paint) {
   1580         GrAssert(NULL != are);
   1581         are->set(fDrawState);
   1582         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
   1583 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
   1584         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
   1585             !fGpu->canApplyCoverage()) {
   1586             GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
   1587         }
   1588 #endif
   1589     } else {
   1590         fDrawState->reset(fViewMatrix);
   1591         fDrawState->setRenderTarget(fRenderTarget.get());
   1592     }
   1593     GrDrawTarget* target;
   1594     if (kYes_BufferedDraw == buffered) {
   1595         fLastDrawWasBuffered = kYes_BufferedDraw;
   1596         target = fDrawBuffer;
   1597     } else {
   1598         GrAssert(kNo_BufferedDraw == buffered);
   1599         fLastDrawWasBuffered = kNo_BufferedDraw;
   1600         target = fGpu;
   1601     }
   1602     fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
   1603                                                      !fClip->fClipStack->isWideOpen());
   1604     target->setClip(fClip);
   1605     GrAssert(fDrawState == target->drawState());
   1606     return target;
   1607 }
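        // Typical internal caller pattern for prepareToDraw (sketch mirroring the draw helpers
        // elsewhere in this file; "paint" and "rect" are hypothetical):
        //     AutoRestoreEffects are;
        //     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
        //     target->drawSimpleRect(rect, NULL);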
   1608 
   1609 /*
   1610  * This method finds a path renderer that can draw the specified path on
   1611  * the provided target.
   1612  * Due to its expense, the software path renderer has been split out so it
   1613  * can be individually allowed/disallowed via the "allowSW" boolean.
   1614  */
   1615 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
   1616                                            const SkStrokeRec& stroke,
   1617                                            const GrDrawTarget* target,
   1618                                            bool allowSW,
   1619                                            GrPathRendererChain::DrawType drawType,
   1620                                            GrPathRendererChain::StencilSupport* stencilSupport) {
   1621 
   1622     if (NULL == fPathRendererChain) {
   1623         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
   1624     }
   1625 
   1626     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
   1627                                                              stroke,
   1628                                                              target,
   1629                                                              drawType,
   1630                                                              stencilSupport);
   1631 
   1632     if (NULL == pr && allowSW) {
   1633         if (NULL == fSoftwarePathRenderer) {
   1634             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
   1635         }
   1636         pr = fSoftwarePathRenderer;
   1637     }
   1638 
   1639     return pr;
   1640 }
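        // Usage sketch for getPathRenderer (illustrative; "path", "stroke", and "target" are assumed):
        // pick a renderer for a color draw, allowing the software fallback:
        //     GrPathRenderer* pr = this->getPathRenderer(path, stroke, target, true,
        //                                                GrPathRendererChain::kColor_DrawType, NULL);
        //     if (NULL == pr) { /* no renderer could handle the path */ }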
   1641 
   1642 ////////////////////////////////////////////////////////////////////////////////
   1643 
   1644 bool GrContext::isConfigRenderable(GrPixelConfig config) const {
   1645     return fGpu->isConfigRenderable(config);
   1646 }
   1647 
   1648 static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
   1649     intptr_t mask = 1 << shift;
   1650     if (pred) {
   1651         bits |= mask;
   1652     } else {
   1653         bits &= ~mask;
   1654     }
   1655     return bits;
   1656 }
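        // For example, setOrClear(0x5, 1, true) == 0x7 and setOrClear(0x5, 0, false) == 0x4: bit
        // "shift" is set when "pred" is non-zero and cleared otherwise; the other bits are untouched.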
   1657 
   1658 void GrContext::setupDrawBuffer() {
   1659 
   1660     GrAssert(NULL == fDrawBuffer);
   1661     GrAssert(NULL == fDrawBufferVBAllocPool);
   1662     GrAssert(NULL == fDrawBufferIBAllocPool);
   1663 
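            // With the constants defined at the top of this file, the pools are sized for four 32KB
            // (1 << 15 byte) vertex buffers and four 2KB (1 << 11 byte) index buffers.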
   1664     fDrawBufferVBAllocPool =
   1665         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
   1666                                     DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
   1667                                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
   1668     fDrawBufferIBAllocPool =
   1669         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
   1670                                    DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
   1671                                    DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
   1672 
   1673     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
   1674                                                    fDrawBufferVBAllocPool,
   1675                                                    fDrawBufferIBAllocPool));
   1676 
   1677     fDrawBuffer->setDrawState(fDrawState);
   1678 }
   1679 
   1680 GrDrawTarget* GrContext::getTextTarget() {
   1681     return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL);
   1682 }
   1683 
   1684 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
   1685     return fGpu->getQuadIndexBuffer();
   1686 }
   1687 
   1688 namespace {
   1689 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
   1690     GrConfigConversionEffect::PMConversion pmToUPM;
   1691     GrConfigConversionEffect::PMConversion upmToPM;
   1692     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
   1693     *pmToUPMValue = pmToUPM;
   1694     *upmToPMValue = upmToPM;
   1695 }
   1696 }
   1697 
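        // Note (summary added for clarity): TestForPreservingPMConversions runs at most once per
        // context and records which premul<->unpremul conversion formulas, if any, round-trip
        // losslessly on this GPU. When none do, createPMToUPMEffect/createUPMToPMEffect return NULL
        // and the read/write paths above fall back to converting on the CPU.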
   1698 const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
   1699                                                   bool swapRAndB,
   1700                                                   const SkMatrix& matrix) {
   1701     if (!fDidTestPMConversions) {
   1702         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
   1703         fDidTestPMConversions = true;
   1704     }
   1705     GrConfigConversionEffect::PMConversion pmToUPM =
   1706         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
   1707     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
   1708         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
   1709     } else {
   1710         return NULL;
   1711     }
   1712 }
   1713 
   1714 const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
   1715                                                   bool swapRAndB,
   1716                                                   const SkMatrix& matrix) {
   1717     if (!fDidTestPMConversions) {
   1718         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
   1719         fDidTestPMConversions = true;
   1720     }
   1721     GrConfigConversionEffect::PMConversion upmToPM =
   1722         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
   1723     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
   1724         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
   1725     } else {
   1726         return NULL;
   1727     }
   1728 }
   1729 
   1730 ///////////////////////////////////////////////////////////////////////////////
   1731 #if GR_CACHE_STATS
   1732 void GrContext::printCacheStats() const {
   1733     fTextureCache->printStats();
   1734 }
   1735 #endif
   1736