Home | History | Annotate | Download | only in gpu
      1 /*
      2  * Copyright 2015 Google Inc.
      3  *
      4  * Use of this source code is governed by a BSD-style license that can be
      5  * found in the LICENSE file.
      6  */
      7 
      8 #include "GrBatchAtlas.h"
      9 #include "GrBatchTarget.h"
     10 #include "GrGpu.h"
     11 #include "GrRectanizer.h"
     12 #include "GrTracing.h"
     13 #include "GrVertexBuffer.h"
     14 
     15 static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) {
     16     loc->fX += offset.fX;
     17     loc->fY += offset.fY;
     18 }
     19 
     20 static GrBatchAtlas::AtlasID create_id(int index, int generation) {
     21     // Generation ID can roll over because we only check for equality
     22     SkASSERT(index < (1 << 16));
     23     return generation << 16 | index;
     24 }
     25 
     26 // The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots.
     27 // The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot
// manages the lifetime of its data using two tokens, a last ref token and a last upload token.
     29 // Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the
     30 // GrRectanizer), it can no longer be used unless the last ref on the GrPlot has already been
     31 // flushed through to the gpu.
     32 
// A BatchPlot is one rectangular sub-region of the atlas texture. It hands
// out placements via its GrRectanizer, keeps a lazily allocated CPU staging
// buffer (fData) mirroring its texels, and tracks the dirty sub-rect that
// still needs uploading to the GPU texture.
class BatchPlot : public SkRefCnt {
public:
    typedef GrBatchAtlas::BatchToken BatchToken;
    SK_DECLARE_INST_COUNT(BatchPlot);
    SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);

    // index() refers to the index of the plot in the owning GrAtlas's plot array.  genID() is a
    // monotonically incrementing number which is bumped every time the cpu backing store is
    // wiped, or when the plot itself is evicted from the atlas(ie, there is continuity in genID()
    // across atlas spills)
    int index() const { return fIndex; }
    int genID() const { return fGenID; }
    GrBatchAtlas::AtlasID id() { return fID; }

    // Backing texture shared by all plots of the owning atlas (not owned).
    GrTexture* texture() const { return fTexture; }

    // Reserves a width x height region in this plot and copies 'image' into
    // the CPU staging buffer. On success 'loc' is returned in full-atlas
    // coordinates and the dirty rect grows to cover the new subimage.
    // Returns false (leaving everything untouched) if the rectanizer has no
    // room. 'rowBytes' is the source stride; each of the 'height' rows copies
    // 'rowBytes' bytes, so callers must keep rowBytes <= fBytesPerPixel *
    // width (callers in this file pass exactly fBPP * width).
    bool addSubImage(int width, int height, const void* image, SkIPoint16* loc, size_t rowBytes)  {
        if (!fRects->addRect(width, height, loc)) {
            return false;
        }

        // Lazily allocate the zero-filled staging buffer on first use.
        if (!fData) {
            fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
                                                                     fHeight));
        }
        const unsigned char* imagePtr = (const unsigned char*)image;
        // point ourselves at the right starting spot
        unsigned char* dataPtr = fData;
        dataPtr += fBytesPerPixel * fWidth * loc->fY;
        dataPtr += fBytesPerPixel * loc->fX;
        // copy into the data buffer
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, rowBytes);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }

        fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);
        // Convert 'loc' from plot-local to atlas coordinates for the caller.
        adjust_for_offset(loc, fOffset);
        SkDEBUGCODE(fDirty = true;)

        return true;
    }

    // to manage the lifetime of a plot, we use two tokens.  We use last upload token to know when
    // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't need
    // to issue a new upload even if we update the cpu backing store.  We use lastref to determine
    // when we can evict a plot from the cache, ie if the last ref has already flushed through
    // the gpu then we can reuse the plot
    BatchToken lastUploadToken() const { return fLastUpload; }
    BatchToken lastUseToken() const { return fLastUse; }
    void setLastUploadToken(BatchToken batchToken) {
        // Tokens are monotonic: uploads may never be scheduled in the past.
        SkASSERT(batchToken >= fLastUpload);
        fLastUpload = batchToken;
    }
    void setLastUseToken(BatchToken batchToken) {
        SkASSERT(batchToken >= fLastUse);
        fLastUse = batchToken;
    }

    // Pushes the dirty portion of the staging buffer into the backing
    // texture, then clears the dirty rect. Only valid while dirty.
    void uploadToTexture(GrBatchTarget::TextureUploader uploader)  {
        // We should only be issuing uploads if we are in fact dirty
        SkASSERT(fDirty && fData && fTexture);
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
        size_t rowBytes = fBytesPerPixel * fRects->width();
        // Advance to the first dirty texel in the staging buffer.
        const unsigned char* dataPtr = fData;
        dataPtr += rowBytes * fDirtyRect.fTop;
        dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
        uploader.writeTexturePixels(fTexture,
                                    fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
                                    fDirtyRect.width(), fDirtyRect.height(),
                                    fTexture->config(), dataPtr, rowBytes);
        fDirtyRect.setEmpty();
        SkDEBUGCODE(fDirty = false;)
    }

    // Evicts everything in the plot: resets the rectanizer, bumps the
    // generation (invalidating every AtlasID previously handed out for this
    // plot), and zeroes the staging buffer if one was ever allocated.
    void resetRects() {
        SkASSERT(fRects);
        fRects->reset();
        fGenID++;
        fID = create_id(fIndex, fGenID);

        // zero out the plot
        if (fData) {
            sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
        }

        fDirtyRect.setEmpty();
        SkDEBUGCODE(fDirty = false;)
    }

    // Position of the plot in the atlas's plot grid (in plot units, not texels).
    int x() const { return fX; }
    int y() const { return fY; }

private:
    // Plots are only created (and then init()-ed) by GrBatchAtlas.
    // fIndex/fGenID start at -1 (wraps to max for these uint32_t fields);
    // init() assigns the real values.
    BatchPlot()
        : fLastUpload(0)
        , fLastUse(0)
        , fIndex(-1)
        , fGenID(-1)
        , fID(0)
        , fData(NULL)
        , fWidth(0)
        , fHeight(0)
        , fX(0)
        , fY(0)
        , fTexture(NULL)
        , fRects(NULL)
        , fAtlas(NULL)
        , fBytesPerPixel(1)
    #ifdef SK_DEBUG
        , fDirty(false)
    #endif
    {
        fOffset.set(0, 0);
    }

    ~BatchPlot() {
        sk_free(fData);
        fData = NULL;
        delete fRects;
    }

    // One-time setup performed by GrBatchAtlas right after construction.
    // (offX, offY) is the plot's position in the plot grid; fOffset becomes
    // the corresponding texel offset into the backing texture.
    // NOTE(review): fTexture is stored unreffed — assumes the owning atlas
    // keeps the texture alive for the plot's lifetime.
    void init(GrBatchAtlas* atlas, GrTexture* texture, int index, uint32_t generation,
              int offX, int offY, int width, int height, size_t bpp) {
        fIndex = index;
        fGenID = generation;
        fID = create_id(index, generation);
        fWidth = width;
        fHeight = height;
        fX = offX;
        fY = offY;
        fRects = GrRectanizer::Factory(width, height);
        fAtlas = atlas;
        fOffset.set(offX * width, offY * height);
        fBytesPerPixel = bpp;
        fData = NULL;
        fDirtyRect.setEmpty();
        SkDEBUGCODE(fDirty = false;)
        fTexture = texture;
    }

    BatchToken fLastUpload;
    BatchToken fLastUse;

    uint32_t fIndex;
    uint32_t fGenID;
    GrBatchAtlas::AtlasID fID;
    unsigned char* fData;       // lazily allocated CPU staging buffer
    int fWidth;
    int fHeight;
    int fX;                     // grid column (in plot units)
    int fY;                     // grid row (in plot units)
    GrTexture* fTexture;        // backing texture, not owned
    GrRectanizer* fRects;       // owned; deleted in the destructor
    GrBatchAtlas* fAtlas;       // owning atlas, not owned
    SkIPoint16 fOffset;        // the offset of the plot in the backing texture
    size_t fBytesPerPixel;
    SkIRect fDirtyRect;         // region of fData not yet uploaded
    SkDEBUGCODE(bool fDirty;)

    friend class GrBatchAtlas;

    typedef SkRefCnt INHERITED;
};
    198 
    199 ////////////////////////////////////////////////////////////////////////////////
    200 
    201 class GrPlotUploader : public GrBatchTarget::Uploader {
    202 public:
    203     GrPlotUploader(BatchPlot* plot)
    204         : INHERITED(plot->lastUploadToken())
    205         , fPlot(SkRef(plot)) {
    206         SkASSERT(plot);
    207     }
    208 
    209     void upload(GrBatchTarget::TextureUploader uploader) override {
    210         fPlot->uploadToTexture(uploader);
    211     }
    212 
    213 private:
    214     SkAutoTUnref<BatchPlot> fPlot;
    215 
    216     typedef GrBatchTarget::Uploader INHERITED;
    217 };
    218 
    219 ///////////////////////////////////////////////////////////////////////////////
    220 
// Carves 'texture' into a numPlotsX x numPlotsY grid of equally sized
// BatchPlots and seeds the LRU plot list. The atlas adopts the caller's ref
// on 'texture': no ref is taken here, and it is unreffed in the destructor.
GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
    : fTexture(texture)
    , fNumPlotsX(numPlotsX)
    , fNumPlotsY(numPlotsY)
    , fPlotWidth(texture->width() / numPlotsX)
    , fPlotHeight(texture->height() / numPlotsY)
    , fAtlasGeneration(kInvalidAtlasGeneration + 1) {
    // BulkUseTokenUpdater can address at most kMaxPlots plots.
    SkASSERT(fNumPlotsX * fNumPlotsY <= BulkUseTokenUpdater::kMaxPlots);
    // The plot grid must tile the texture exactly.
    SkASSERT(fPlotWidth * fNumPlotsX == texture->width());
    SkASSERT(fPlotHeight * fNumPlotsY == texture->height());

    // We currently do not support compressed atlases...
    SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig));

    // set up allocated plots
    fBPP = GrBytesPerPixel(texture->desc().fConfig);
    fPlotArray = SkNEW_ARRAY(SkAutoTUnref<BatchPlot>, (fNumPlotsX * fNumPlotsY));

    SkAutoTUnref<BatchPlot>* currPlot = fPlotArray;
    // (r, c) walk fPlotArray in storage order while (x, y) walk the grid from
    // the far corner back toward the origin; a plot's id is its slot in
    // fPlotArray, not a function of its (x, y) position. All plots start at
    // generation 1 (generation 0 is never handed out).
    for (int y = fNumPlotsY - 1, r = 0; y >= 0; --y, ++r) {
        for (int x = fNumPlotsX - 1, c = 0; x >= 0; --x, ++c) {
            int id = r * fNumPlotsX + c;
            currPlot->reset(SkNEW(BatchPlot));
            (*currPlot)->init(this, texture, id, 1, x, y, fPlotWidth, fPlotHeight, fBPP);

            // build LRU list
            fPlotList.addToHead(currPlot->get());
            ++currPlot;
        }
    }
}
    252 
GrBatchAtlas::~GrBatchAtlas() {
    // Release the texture ref adopted in the constructor, then destroy the
    // plot array (each SkAutoTUnref element drops its ref on its BatchPlot).
    SkSafeUnref(fTexture);
    SkDELETE_ARRAY(fPlotArray);
}
    257 
    258 void GrBatchAtlas::processEviction(AtlasID id) {
    259     for (int i = 0; i < fEvictionCallbacks.count(); i++) {
    260         (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
    261     }
    262 }
    263 
    264 void GrBatchAtlas::makeMRU(BatchPlot* plot) {
    265     if (fPlotList.head() == plot) {
    266         return;
    267     }
    268 
    269     fPlotList.remove(plot);
    270     fPlotList.addToHead(plot);
    271 }
    272 
// Marks 'plot' most-recently-used and makes sure an upload of its latest
// contents is scheduled with the batch target, piggy-backing on an already
// pending upload when one exists. Writes the plot's AtlasID to *id.
inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, BatchPlot* plot) {
    this->makeMRU(plot);

    // If our most recent upload has already occurred then we have to insert a new
    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
    // This new update will piggy back on that previously scheduled update.
    if (batchTarget->isIssued(plot->lastUploadToken())) {
        plot->setLastUploadToken(batchTarget->asapToken());
        SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot)));
        batchTarget->upload(uploader);
    }
    *id = plot->id();
}
    286 
// Attempts to place a width x height subimage into the atlas. On success,
// *id receives the AtlasID of the hosting plot and *loc the subimage's
// atlas coordinates. Placement is tried in three stages:
//   1) every existing plot, in most-recently-used order;
//   2) recycling the LRU plot if the gpu has already consumed its last use;
//   3) spinning off the LRU plot to the batch target and replacing it.
// Returns false only when the LRU plot is still referenced by the current
// (unflushed) token, in which case the caller must flush and retry.
bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget,
                              int width, int height, const void* image, SkIPoint16* loc) {
    // We should already have a texture, TODO clean this up
    SkASSERT(fTexture && width <= fPlotWidth && height <= fPlotHeight);

    // now look through all allocated plots for one we can share, in Most Recently Refed order
    GrBatchPlotList::Iter plotIter;
    plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart);
    BatchPlot* plot;
    while ((plot = plotIter.get())) {
        if (plot->addSubImage(width, height, image, loc, fBPP * width)) {
            this->updatePlot(batchTarget, id, plot);
            return true;
        }
        plotIter.next();
    }

    // If the above fails, then see if the least recently refed plot has already been flushed to the
    // gpu
    plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart);
    plot = plotIter.get();
    SkASSERT(plot);
    if (batchTarget->isIssued(plot->lastUseToken())) {
        this->processEviction(plot->id());
        plot->resetRects();
        // resetRects() just emptied the plot and width/height fit a plot
        // (asserted above), so this insertion cannot fail.
        SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width);
        SkASSERT(verify);
        this->updatePlot(batchTarget, id, plot);
        fAtlasGeneration++;
        return true;
    }

    // The least recently refed plot hasn't been flushed to the gpu yet, however, if we have flushed
    // it to the batch target than we can reuse it.  Our last ref token is guaranteed to be less
    // than or equal to the current token.  If its 'less than' the current token, than we can spin
    // off the plot(ie let the batch target manage it) and create a new plot in its place in our
    // array.  If it is equal to the currentToken, then the caller has to flush draws to the batch
    // target so we can spin off the plot
    if (plot->lastUseToken() == batchTarget->currentToken()) {
        return false;
    }

    // We take an extra ref here so our plot isn't deleted when we reset its index in the array.
    plot->ref();
    int index = plot->index();
    int x = plot->x();
    int y = plot->y();
    int generation = plot->genID();

    this->processEviction(plot->id());
    fPlotList.remove(plot);
    // Replace the spun-off plot with a fresh one in the same array slot and
    // grid position, carrying a bumped generation.
    SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()];
    newPlot.reset(SkNEW(BatchPlot));
    newPlot->init(this, fTexture, index, ++generation, x, y, fPlotWidth, fPlotHeight, fBPP);

    fPlotList.addToHead(newPlot.get());
    SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc, fBPP * width);
    SkASSERT(verify);
    // Schedule the new plot's first upload at the current token; the
    // uploader holds its own ref on the plot until the upload runs.
    newPlot->setLastUploadToken(batchTarget->currentToken());
    SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (newPlot)));
    batchTarget->upload(uploader);
    *id = newPlot->id();
    // Drop our temporary ref on the old plot; any refs held by in-flight
    // uploaders keep it alive until its pending work is flushed.
    plot->unref();
    fAtlasGeneration++;
    return true;
}
    353 
    354 bool GrBatchAtlas::hasID(AtlasID id) {
    355     int index = GetIndexFromID(id);
    356     SkASSERT(index < fNumPlotsX * fNumPlotsY);
    357     return fPlotArray[index]->genID() == GetGenerationFromID(id);
    358 }
    359 
    360 void GrBatchAtlas::setLastUseToken(AtlasID id, BatchToken batchToken) {
    361     SkASSERT(this->hasID(id));
    362     int index = GetIndexFromID(id);
    363     SkASSERT(index < fNumPlotsX * fNumPlotsY);
    364     this->makeMRU(fPlotArray[index]);
    365     fPlotArray[index]->setLastUseToken(batchToken);
    366 }
    367 
    368 void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater, BatchToken batchToken) {
    369     int count = updater.fPlotsToUpdate.count();
    370     for (int i = 0; i < count; i++) {
    371         BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]];
    372         this->makeMRU(plot);
    373         plot->setLastUseToken(batchToken);
    374     }
    375 }
    376