// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the FencedAllocator class.

#include "gpu/command_buffer/client/fenced_allocator.h"

#include <algorithm>

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

namespace {

// Allocation alignment; must be a power of two.
const unsigned int kAllocAlignment = 16;

// Round down to the largest multiple of kAllocAlignment no greater than |size|.
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAllocAlignment - 1);
}

// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
unsigned int RoundUp(unsigned int size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}
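
// For illustration, with kAllocAlignment == 16:
//   RoundDown(20) == 16, RoundDown(32) == 32
//   RoundUp(20) == 32, RoundUp(32) == 32
// The mask trick is valid only because kAllocAlignment is a power of two.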

}  // namespace

#ifndef _MSC_VER
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
#endif

FencedAllocator::FencedAllocator(unsigned int size,
                                 CommandBufferHelper *helper)
    : helper_(helper) {
  Block block = { FREE, 0, RoundDown(size), kUnusedToken };
  blocks_.push_back(block);
}
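
// Illustrative usage sketch (not part of the original file; assumes |helper|
// points to the CommandBufferHelper passed to the constructor and
// |buffer_size| is the size of the managed buffer):
//   FencedAllocator allocator(buffer_size, helper);
//   FencedAllocator::Offset offset = allocator.Alloc(256);
//   if (offset != FencedAllocator::kInvalidOffset) {
//     // ... encode commands that read from [offset, offset + 256) ...
//     allocator.FreePendingToken(offset, helper->InsertToken());
//   }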

FencedAllocator::~FencedAllocator() {
  // Free blocks pending tokens.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
  }
  // These checks are not valid if the service has crashed or lost the context.
  // GPU_DCHECK_EQ(blocks_.size(), 1u);
  // GPU_DCHECK_EQ(blocks_[0].state, FREE);
}

// Looks for a non-allocated block that is big enough. Searches the FREE
// blocks first (for direct usage), first-fit, then the FREE_PENDING_TOKEN
// blocks, waiting for them. The current implementation isn't smart about
// optimizing what to wait for; it just walks the blocks in order (first-fit
// as well).
FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
  // A size of 0 is not allowed because it would be inconsistent to have it
  // succeed only sometimes. Example: Alloc(SizeOfBuffer), Alloc(0).
  if (size == 0) {
    return kInvalidOffset;
  }

  // Round up the allocation size to ensure alignment.
  size = RoundUp(size);

  // Try first to allocate in a free block.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE && block.size >= size) {
      return AllocInBlock(i, size);
    }
  }

  // No free block is available. Look for blocks pending tokens, and wait for
  // them to be re-usable.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state != FREE_PENDING_TOKEN)
      continue;
    i = WaitForTokenAndFreeBlock(i);
    if (blocks_[i].size >= size)
      return AllocInBlock(i, size);
  }
  return kInvalidOffset;
}
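
// For example (illustrative only): given blocks [FREE:16][IN_USE:32][FREE:64],
// Alloc(32) skips the 16-byte FREE block (too small) and splits the 64-byte
// one; only when no FREE block fits does it block on a pending token.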

// Looks for the corresponding block, marks it FREE, and collapses it if
// necessary.
void FencedAllocator::Free(FencedAllocator::Offset offset) {
  BlockIndex index = GetBlockByOffset(offset);
  GPU_DCHECK_NE(blocks_[index].state, FREE);
  blocks_[index].state = FREE;
  CollapseFreeBlock(index);
}

// Looks for the corresponding block and marks it FREE_PENDING_TOKEN.
void FencedAllocator::FreePendingToken(
    FencedAllocator::Offset offset, int32 token) {
  BlockIndex index = GetBlockByOffset(offset);
  Block &block = blocks_[index];
  block.state = FREE_PENDING_TOKEN;
  block.token = token;
}
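
// Typical caller pattern (a sketch; InsertToken() is the CommandBufferHelper
// call that inserts a fence token into the command stream):
//   int32 token = helper->InsertToken();
//   allocator.FreePendingToken(offset, token);
// The block becomes reusable once the service has read past |token|, either
// lazily via FreeUnused() or by blocking in WaitForTokenAndFreeBlock().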

// Gets the size of the largest block marked as FREE, after freeing any blocks
// whose tokens have already been read.
unsigned int FencedAllocator::GetLargestFreeSize() {
  FreeUnused();
  unsigned int max_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE)
      max_size = std::max(max_size, block.size);
  }
  return max_size;
}

// Gets the size of the largest run of contiguous blocks that are either FREE
// or FREE_PENDING_TOKEN.
unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
  unsigned int max_size = 0;
  unsigned int current_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == IN_USE) {
      max_size = std::max(max_size, current_size);
      current_size = 0;
    } else {
      GPU_DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
      current_size += block.size;
    }
  }
  return std::max(max_size, current_size);
}
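
// For example (illustrative only): for blocks
//   [FREE:16][IN_USE:32][FREE:16][FREE_PENDING_TOKEN:48]
// GetLargestFreeSize() returns 16 (assuming the pending token has not been
// read yet), while GetLargestFreeOrPendingSize() returns 16 + 48 = 64: the
// trailing FREE and FREE_PENDING_TOKEN blocks form one contiguous run.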

// Makes sure that:
// - there is at least one block.
// - there are no contiguous FREE blocks (they should have been collapsed).
// - the successive offsets match the block sizes, and they are in order.
bool FencedAllocator::CheckConsistency() {
  if (blocks_.size() < 1) return false;
  for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
    Block &current = blocks_[i];
    Block &next = blocks_[i + 1];
    // This test is NOT redundant with the next one: because offsets are
    // unsigned, current.offset + current.size can wrap around.
    if (next.offset <= current.offset)
      return false;
    if (next.offset != current.offset + current.size)
      return false;
    if (current.state == FREE && next.state == FREE)
      return false;
  }
  return true;
}

bool FencedAllocator::InUse() {
  return blocks_.size() != 1 || blocks_[0].state != FREE;
}

// Collapses the block with the next one, then with the previous one. Provided
// the structure is consistent, those are the only blocks eligible for
// collapse.
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
  if (index + 1 < blocks_.size()) {
    Block &next = blocks_[index + 1];
    if (next.state == FREE) {
      blocks_[index].size += next.size;
      blocks_.erase(blocks_.begin() + index + 1);
    }
  }
  if (index > 0) {
    Block &prev = blocks_[index - 1];
    if (prev.state == FREE) {
      prev.size += blocks_[index].size;
      blocks_.erase(blocks_.begin() + index);
      --index;
    }
  }
  return index;
}
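
// For example (illustrative only): with blocks [FREE:16][IN_USE:32][FREE:16],
// freeing the middle block collapses all three into a single [FREE:64] block:
// it is first merged with its FREE successor, then absorbed into its FREE
// predecessor.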

// Waits for the block's token, then marks the block as FREE and collapses it.
FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
    BlockIndex index) {
  Block &block = blocks_[index];
  GPU_DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
  helper_->WaitForToken(block.token);
  block.state = FREE;
  return CollapseFreeBlock(index);
}

// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
  int32 last_token_read = helper_->last_token_read();
  for (unsigned int i = 0; i < blocks_.size();) {
    Block &block = blocks_[i];
    if (block.state == FREE_PENDING_TOKEN && block.token <= last_token_read) {
      block.state = FREE;
      i = CollapseFreeBlock(i);
    } else {
      ++i;
    }
  }
}

// If the block is exactly the requested size, simply marks it IN_USE;
// otherwise splits it and marks the first part (of the requested size) IN_USE.
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                      unsigned int size) {
  Block &block = blocks_[index];
  GPU_DCHECK_GE(block.size, size);
  GPU_DCHECK_EQ(block.state, FREE);
  Offset offset = block.offset;
  if (block.size == size) {
    block.state = IN_USE;
    return offset;
  }
  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken };
  block.state = IN_USE;
  block.size = size;
  // Inserting the new block is done last because it may invalidate the |block|
  // reference (the vector can reallocate).
  blocks_.insert(blocks_.begin() + index + 1, newblock);
  return offset;
}
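
// For example (illustrative only): allocating 32 bytes from a 96-byte FREE
// block at offset 64 leaves [IN_USE offset=64 size=32][FREE offset=96 size=64].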

// The blocks are in offset order, so we can do a binary search.
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
  Block templ = { IN_USE, offset, 0, kUnusedToken };
  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                            templ, OffsetCmp());
  GPU_DCHECK(it != blocks_.end() && it->offset == offset);
  return it - blocks_.begin();
}

}  // namespace gpu