// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the FencedAllocator class.
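//
// A rough usage sketch (sizes are hypothetical; callers typically obtain the
// token passed to FreePendingToken() from CommandBufferHelper::InsertToken()):
//
//   FencedAllocator allocator(buffer_size, helper);
//   FencedAllocator::Offset offset = allocator.Alloc(256);
//   if (offset != FencedAllocator::kInvalidOffset) {
//     // ... write data at |offset|, issue commands that consume it ...
//     allocator.FreePendingToken(offset, helper->InsertToken());
//   }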

#include "gpu/command_buffer/client/fenced_allocator.h"

#include <algorithm>

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

namespace {

// Allocation alignment, must be a power of two.
const unsigned int kAllocAlignment = 16;

// Round down to the largest multiple of kAllocAlignment no greater than
// |size|.
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAllocAlignment - 1);
}

// Round up to the smallest multiple of kAllocAlignment no smaller than
// |size|.
unsigned int RoundUp(unsigned int size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}
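
// For example, with kAllocAlignment == 16: RoundDown(30) == 16,
// RoundUp(30) == 32, and RoundUp(32) == 32.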

}  // namespace

#ifndef _MSC_VER
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
#endif

FencedAllocator::FencedAllocator(unsigned int size,
                                 CommandBufferHelper *helper)
    : helper_(helper),
      bytes_in_use_(0) {
  Block block = { FREE, 0, RoundDown(size), kUnusedToken };
  blocks_.push_back(block);
}

FencedAllocator::~FencedAllocator() {
  // Free blocks pending tokens.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
  }
  // These checks are not valid if the service has crashed or lost the context.
  // DCHECK_EQ(blocks_.size(), 1u);
  // DCHECK_EQ(blocks_[0].state, FREE);
}

// Looks for a non-allocated block that is big enough. Searches the FREE
// blocks first (for direct usage), first-fit, then the FREE_PENDING_TOKEN
// blocks, waiting on their tokens. The current implementation isn't smart
// about optimizing what to wait for: it just walks the blocks in order
// (first-fit as well).
FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
  // A size of 0 is not allowed because it would succeed only sometimes, which
  // would be inconsistent. Example: after Alloc(SizeOfBuffer) has consumed the
  // whole buffer, Alloc(0) would fail.
  if (size == 0) {
    return kInvalidOffset;
  }

  // Round up the allocation size to ensure alignment.
  size = RoundUp(size);

  // Try first to allocate in a free block.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE && block.size >= size) {
      return AllocInBlock(i, size);
    }
  }

  // No free block is available. Look for blocks pending tokens, and wait for
  // them to be re-usable.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state != FREE_PENDING_TOKEN)
      continue;
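    // Waiting frees the block and may collapse it with free neighbors, so the
    // block at the returned index can be bigger than the one we found.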
    i = WaitForTokenAndFreeBlock(i);
    if (blocks_[i].size >= size)
      return AllocInBlock(i, size);
  }
  return kInvalidOffset;
}

// Looks for the corresponding block, marks it FREE, and collapses it with its
// free neighbors if necessary.
void FencedAllocator::Free(FencedAllocator::Offset offset) {
  BlockIndex index = GetBlockByOffset(offset);
  DCHECK_NE(blocks_[index].state, FREE);
  Block &block = blocks_[index];

  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;

  block.state = FREE;
  CollapseFreeBlock(index);
}

// Looks for the corresponding block and marks it FREE_PENDING_TOKEN.
void FencedAllocator::FreePendingToken(
    FencedAllocator::Offset offset, int32 token) {
  BlockIndex index = GetBlockByOffset(offset);
  Block &block = blocks_[index];
  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;
  block.state = FREE_PENDING_TOKEN;
  block.token = token;
}

// Gets the size of the largest block marked as FREE.
unsigned int FencedAllocator::GetLargestFreeSize() {
  FreeUnused();
  unsigned int max_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE)
      max_size = std::max(max_size, block.size);
  }
  return max_size;
}

// Gets the size of the largest contiguous run of blocks that are either FREE
// or FREE_PENDING_TOKEN.
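// For example, for the sequence [FREE 16][IN_USE 32][FREE 16][PENDING 48],
// this returns 64 (16 + 48).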
unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
  unsigned int max_size = 0;
  unsigned int current_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == IN_USE) {
      max_size = std::max(max_size, current_size);
      current_size = 0;
    } else {
      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
      current_size += block.size;
    }
  }
  return std::max(max_size, current_size);
}

// Makes sure that:
// - there is at least one block.
// - there are no contiguous FREE blocks (they should have been collapsed).
// - the successive offsets match the block sizes, and they are in order.
bool FencedAllocator::CheckConsistency() {
  if (blocks_.size() < 1) return false;
  for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
    Block &current = blocks_[i];
    Block &next = blocks_[i + 1];
    // This test is NOT included in the next one, because offset is unsigned.
    if (next.offset <= current.offset)
      return false;
    if (next.offset != current.offset + current.size)
      return false;
    if (current.state == FREE && next.state == FREE)
      return false;
  }
  return true;
}

// Returns false if all blocks are FREE, in which case they have been
// coalesced into a single block; returns true otherwise.
bool FencedAllocator::InUse() {
  return blocks_.size() != 1 || blocks_[0].state != FREE;
}

// Collapses the block with the next one, then with the previous one. Provided
// the structure is consistent, those are the only blocks eligible for
// collapsing. Returns the index of the resulting block.
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
  if (index + 1 < blocks_.size()) {
    Block &next = blocks_[index + 1];
    if (next.state == FREE) {
      blocks_[index].size += next.size;
      blocks_.erase(blocks_.begin() + index + 1);
    }
  }
  if (index > 0) {
    Block &prev = blocks_[index - 1];
    if (prev.state == FREE) {
      prev.size += blocks_[index].size;
      blocks_.erase(blocks_.begin() + index);
      --index;
    }
  }
  return index;
}

// Waits for the block's token, then marks the block as FREE and collapses it
// with its free neighbors.
FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
    BlockIndex index) {
  Block &block = blocks_[index];
  DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
  helper_->WaitForToken(block.token);
  block.state = FREE;
  return CollapseFreeBlock(index);
}

// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
  int32 last_token_read = helper_->last_token_read();
  for (unsigned int i = 0; i < blocks_.size();) {
    Block& block = blocks_[i];
    if (block.state == FREE_PENDING_TOKEN && block.token <= last_token_read) {
      block.state = FREE;
      i = CollapseFreeBlock(i);
    } else {
      ++i;
    }
  }
}

// If the block is exactly the requested size, simply marks it IN_USE;
// otherwise splits it and marks the first part (of the requested size) IN_USE.
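// For example, allocating 32 bytes from a FREE block of 64 leaves
// [IN_USE 32][FREE 32].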
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                      unsigned int size) {
  Block &block = blocks_[index];
  DCHECK_GE(block.size, size);
  DCHECK_EQ(block.state, FREE);
  Offset offset = block.offset;
  bytes_in_use_ += size;
  if (block.size == size) {
    block.state = IN_USE;
    return offset;
  }
  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken };
  block.state = IN_USE;
  block.size = size;
  // This is done last because it may invalidate the |block| reference.
  blocks_.insert(blocks_.begin() + index + 1, newblock);
  return offset;
}


// The blocks are in offset order, so we can do a binary search.
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
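  // Only the |offset| field of this dummy block matters to OffsetCmp().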
  Block templ = { IN_USE, offset, 0, kUnusedToken };
  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                            templ, OffsetCmp());
  DCHECK(it != blocks_.end() && it->offset == offset);
  return it - blocks_.begin();
}

}  // namespace gpu