/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrBufferAllocPool.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "SkSafeMath.h"
#include "SkTraceEvent.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;

// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)

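// Unmaps the block's buffer, tracing how much of the mapped range was left
// unwritten (fBytesFree as a fraction of the buffer's full size).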
#define UNMAP_BUFFER(block)                                                               \
do {                                                                                      \
    TRACE_EVENT_INSTANT1("skia.gpu",                                                      \
                         "GrBufferAllocPool Unmapping Buffer",                            \
                         TRACE_EVENT_SCOPE_THREAD,                                        \
                         "percent_unwritten",                                             \
                         (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
    (block).fBuffer->unmap();                                                             \
} while (false)

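// A minimal usage sketch (illustrative only; the variable names below are
// not from this file and real call sites live elsewhere in the GPU backend):
//
//     GrVertexBufferAllocPool pool(gpu);
//     const GrBuffer* buffer;
//     int startVertex;
//     void* verts = pool.makeSpace(vertexStride, vertexCount, &buffer, &startVertex);
//     // ... write vertexCount * vertexStride bytes through verts ...
//     pool.unmap();  // flush pending data before the GPU reads it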
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrBufferType bufferType, size_t blockSize)
        : fBlocks(8) {

    fGpu = SkRef(gpu);
    fCpuData = nullptr;
    fBufferType = bufferType;
    fBufferPtr = nullptr;
    fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
}

void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
    sk_free(fCpuData);
    fGpu->unref();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);      // delete all the cpu-side memory
    VALIDATE();
}

void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isMapped()) {
            UNMAP_BUFFER(block);
        } else {
            size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
            this->flushCpuData(fBlocks.back(), flushSize);
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

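// Debug-only invariant checks: only the newest block may be mapped,
// fBufferPtr aliases either that mapping or the CPU staging buffer, and
// per-block byte accounting must sum to fBytesInUse (unless some underlying
// buffer was destroyed out from under us).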
#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isMapped()) {
            GrBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->mapPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        if (fBlocks[i].fBuffer->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

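// Returns a pointer to `size` bytes aligned to `alignment`, writing the
// backing buffer and the allocation's byte offset within it to the out
// params. Returns nullptr if a new block could not be created.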
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

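    // First try to carve the allocation out of the current block: round the
    // write cursor up to `alignment` and zero the padding so uninitialized
    // bytes never reach the GPU.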
    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((size + pad) <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

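// Like makeSpace(), but flexible about the amount returned: if at least
// `minSize` aligned bytes remain in the current block, the caller gets all
// of the block's remaining aligned space; otherwise a fresh block of
// `fallbackSize` bytes is allocated and handed over whole. The byte count
// actually granted is written to `actualSize`.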
void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          const GrBuffer** buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((minSize + pad) <= back.fBytesFree) {
            // Consume padding first, to make subsequent alignment math easier
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            back.fBytesFree -= pad;
            fBytesInUse += pad;

            // Give caller all remaining space in this block (but aligned correctly)
            size_t size = GrSizeAlignDown(back.fBytesFree, alignment);
            *offset = usedBytes;
            *buffer = back.fBuffer;
            *actualSize = size;
            back.fBytesFree -= size;
            fBytesInUse += size;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(fallbackSize)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    *actualSize = fallbackSize;
    back.fBytesFree -= fallbackSize;
    fBytesInUse += fallbackSize;
    VALIDATE();
    return fBufferPtr;
}

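// Returns the most recently allocated `bytes` to the pool (LIFO). Blocks
// that become completely unused are unmapped and destroyed along the way.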
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we mapped a buffer to satisfy the makeSpace and we're
            // releasing beyond it, then unmap it.
            if (block.fBuffer->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

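// Appends a new block of at least max(requestSize, fMinBlockSize) bytes,
// flushing any data still pending in the previous block. The new buffer is
// mapped when that is cheap (CPU-backed) or worthwhile (size exceeds the map
// threshold); otherwise writes are staged through fCpuData.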
bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = SkTMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->gpuMemorySize();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isMapped()) {
            UNMAP_BUFFER(prev);
        } else {
            this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
        attemptMap = size > fBufferMapThreshold;
    }

    if (attemptMap) {
        fBufferPtr = block.fBuffer->map();
    }

    if (!fBufferPtr) {
        fBufferPtr = this->resetCpuData(block.fBytesFree);
    }

    VALIDATE(true);

    return true;
}

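// Pops and unrefs the newest block. The caller must have unmapped it first;
// popping invalidates fBufferPtr.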
void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();

    SkASSERT(!block.fBuffer->isMapped());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

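// (Re)allocates the CPU-side staging memory. The allocation is zeroed when
// the GPU requires cleared upload data, and freed entirely when newSize is 0.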
void* GrBufferAllocPool::resetCpuData(size_t newSize) {
    sk_free(fCpuData);
    if (newSize) {
        if (fGpu->caps()->mustClearUploadedBufferData()) {
            fCpuData = sk_calloc_throw(newSize);
        } else {
            fCpuData = sk_malloc_throw(newSize);
        }
    } else {
        fCpuData = nullptr;
    }
    return fCpuData;
}

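// Pushes CPU-staged data into the block's GPU buffer, preferring map +
// memcpy when mapping is supported and the flush is large enough to clear
// the map threshold, and falling back to updateData() otherwise.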
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    GrBuffer* buffer = block.fBuffer;
    SkASSERT(buffer);
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuData == fBufferPtr);
    SkASSERT(flushSize <= buffer->gpuMemorySize());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fBufferMapThreshold) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {

    auto resourceProvider = fGpu->getContext()->contextPriv().resourceProvider();

    // Shouldn't have to use this flag (https://bug.skia.org/4156)
    static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
    return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, kFlags);
}

////////////////////////////////////////////////////////////////////////////////

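// The vertex pool deals in whole vertices: vertexSize doubles as the
// alignment, so every byte offset divides evenly into a start vertex.
// A minimal sketch (the names below are illustrative, not from this file):
//
//     const GrBuffer* vb;
//     int startVertex;
//     SkPoint* pts = static_cast<SkPoint*>(
//             pool.makeSpace(sizeof(SkPoint), 4, &vb, &startVertex));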
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount, const GrBuffer** buffer,
                                                int* startVertex, int* actualVertexCount) {

    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

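// The index pool assumes 16-bit indices throughout: allocations are sized
// and aligned in units of sizeof(uint16_t), and offsets convert directly
// into start indices.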
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
}

void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               const GrBuffer** buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(minIndexCount >= 0);
    SkASSERT(fallbackIndexCount >= minIndexCount);
    SkASSERT(buffer);
    SkASSERT(startIndex);
    SkASSERT(actualIndexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t),
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));

    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}