// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// A class to manage a growing transfer buffer.

#include "gpu/command_buffer/client/transfer_buffer.h"

#include <algorithm>

#include "base/logging.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

AlignedRingBuffer::~AlignedRingBuffer() {
}

TransferBuffer::TransferBuffer(
    CommandBufferHelper* helper)
    : helper_(helper),
      result_size_(0),
      default_buffer_size_(0),
      min_buffer_size_(0),
      max_buffer_size_(0),
      alignment_(0),
      size_to_flush_(0),
      bytes_since_last_flush_(0),
      buffer_id_(-1),
      result_buffer_(NULL),
      result_shm_offset_(0),
      usable_(true) {
}

TransferBuffer::~TransferBuffer() {
  Free();
}

bool TransferBuffer::Initialize(
    unsigned int default_buffer_size,
    unsigned int result_size,
    unsigned int min_buffer_size,
    unsigned int max_buffer_size,
    unsigned int alignment,
    unsigned int size_to_flush) {
  result_size_ = result_size;
  default_buffer_size_ = default_buffer_size;
  min_buffer_size_ = min_buffer_size;
  max_buffer_size_ = max_buffer_size;
  alignment_ = alignment;
  size_to_flush_ = size_to_flush;
  ReallocateRingBuffer(default_buffer_size_ - result_size);
  return HaveBuffer();
}

void TransferBuffer::Free() {
  if (HaveBuffer()) {
    helper_->Finish();
    helper_->command_buffer()->DestroyTransferBuffer(buffer_id_);
    buffer_id_ = -1;
    buffer_.ptr = NULL;
    buffer_.size = 0;
    result_buffer_ = NULL;
    result_shm_offset_ = 0;
    ring_buffer_.reset();
    bytes_since_last_flush_ = 0;
  }
}

bool TransferBuffer::HaveBuffer() const {
  return buffer_id_ != -1;
}

RingBuffer::Offset TransferBuffer::GetOffset(void* pointer) const {
  return ring_buffer_->GetOffset(pointer);
}

void TransferBuffer::FreePendingToken(void* p, unsigned int token) {
  ring_buffer_->FreePendingToken(p, token);
  if (bytes_since_last_flush_ >= size_to_flush_ && size_to_flush_ > 0) {
    helper_->Flush();
    bytes_since_last_flush_ = 0;
  }
}

void TransferBuffer::AllocateRingBuffer(unsigned int size) {
  for (; size >= min_buffer_size_; size /= 2) {
    int32 id = -1;
    gpu::Buffer buffer =
        helper_->command_buffer()->CreateTransferBuffer(size, &id);
    if (id != -1) {
      buffer_ = buffer;
      ring_buffer_.reset(new AlignedRingBuffer(
          alignment_,
          id,
          result_size_,
          buffer_.size - result_size_,
          helper_,
          static_cast<char*>(buffer_.ptr) + result_size_));
      buffer_id_ = id;
      result_buffer_ = buffer_.ptr;
      result_shm_offset_ = 0;
      return;
    }
    // We failed, so don't try anything larger than this.
    max_buffer_size_ = size / 2;
  }
  usable_ = false;
}

// Returns the integer i such that 2^i <= n < 2^(i+1).
static int Log2Floor(uint32 n) {
  if (n == 0)
    return -1;
  int log = 0;
  uint32 value = n;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    uint32 x = value >> shift;
    if (x != 0) {
      value = x;
      log += shift;
    }
  }
  DCHECK_EQ(value, 1u);
  return log;
}

// Returns the integer i such that 2^(i-1) < n <= 2^i.
static int Log2Ceiling(uint32 n) {
  if (n == 0) {
    return -1;
  } else {
    // Log2Floor returns -1 for 0, so the following works correctly for n=1.
    return 1 + Log2Floor(n - 1);
  }
}

static unsigned int ComputePOTSize(unsigned int dimension) {
  return (dimension == 0) ? 0 : 1 << Log2Ceiling(dimension);
}
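
// Worked example of the helpers above: Log2Floor(5) == 2 (2^2 <= 5 < 2^3),
// Log2Ceiling(5) == 3 (2^2 < 5 <= 2^3), so ComputePOTSize(5) == 8, the
// smallest power of two that is >= 5. ReallocateRingBuffer below uses this
// to round the requested buffer size up to a power of two before clamping
// it to the configured min/max buffer sizes.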

void TransferBuffer::ReallocateRingBuffer(unsigned int size) {
  // What size buffer would we ask for if we needed a new one?
  unsigned int needed_buffer_size = ComputePOTSize(size + result_size_);
  needed_buffer_size = std::max(needed_buffer_size, min_buffer_size_);
  needed_buffer_size = std::max(needed_buffer_size, default_buffer_size_);
  needed_buffer_size = std::min(needed_buffer_size, max_buffer_size_);

  if (usable_ && (!HaveBuffer() || needed_buffer_size > buffer_.size)) {
    if (HaveBuffer()) {
      Free();
    }
    AllocateRingBuffer(needed_buffer_size);
  }
}

void* TransferBuffer::AllocUpTo(
    unsigned int size, unsigned int* size_allocated) {
  DCHECK(size_allocated);

  ReallocateRingBuffer(size);

  if (!HaveBuffer()) {
    return NULL;
  }

  unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
  *size_allocated = std::min(max_size, size);
  bytes_since_last_flush_ += *size_allocated;
  return ring_buffer_->Alloc(*size_allocated);
}

void* TransferBuffer::Alloc(unsigned int size) {
  ReallocateRingBuffer(size);

  if (!HaveBuffer()) {
    return NULL;
  }

  unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
  if (size > max_size) {
    return NULL;
  }

  bytes_since_last_flush_ += size;
  return ring_buffer_->Alloc(size);
}

void* TransferBuffer::GetResultBuffer() {
  ReallocateRingBuffer(result_size_);
  return result_buffer_;
}

int TransferBuffer::GetResultOffset() {
  ReallocateRingBuffer(result_size_);
  return result_shm_offset_;
}

int TransferBuffer::GetShmId() {
  ReallocateRingBuffer(result_size_);
  return buffer_id_;
}

unsigned int TransferBuffer::GetCurrentMaxAllocationWithoutRealloc() const {
  return HaveBuffer() ? ring_buffer_->GetLargestFreeOrPendingSize() : 0;
}

unsigned int TransferBuffer::GetMaxAllocation() const {
  return HaveBuffer() ? max_buffer_size_ - result_size_ : 0;
}

void ScopedTransferBufferPtr::Release() {
  if (buffer_) {
    transfer_buffer_->FreePendingToken(buffer_, helper_->InsertToken());
    buffer_ = NULL;
    size_ = 0;
  }
}

void ScopedTransferBufferPtr::Reset(unsigned int new_size) {
  Release();
  // NOTE: We allocate buffers of size 0 so that HaveBuffer will be true, so
  // that address will return a pointer just like malloc, and so that GetShmId
  // will be valid. That has the side effect that we'll insert a token on
  // free. We could add code to skip the token for a zero-size buffer, but it
  // doesn't seem worth the complication.
  buffer_ = transfer_buffer_->AllocUpTo(new_size, &size_);
}

}  // namespace gpu
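
// A minimal usage sketch of the API defined above, assuming a
// CommandBufferHelper* named |helper| is already set up; the sizes are
// arbitrary illustrative values, not recommendations:
//
//   TransferBuffer transfer_buffer(helper);
//   transfer_buffer.Initialize(64 * 1024,    // default_buffer_size
//                              4,            // result_size
//                              4 * 1024,     // min_buffer_size
//                              1024 * 1024,  // max_buffer_size
//                              16,           // alignment
//                              256 * 1024);  // size_to_flush
//
//   unsigned int size_allocated = 0;
//   void* ptr = transfer_buffer.AllocUpTo(1000, &size_allocated);
//   if (ptr) {
//     // Fill |ptr| with up to |size_allocated| bytes, then issue commands
//     // that reference transfer_buffer.GetShmId() and
//     // transfer_buffer.GetOffset(ptr).
//     transfer_buffer.FreePendingToken(ptr, helper->InsertToken());
//   }
//   transfer_buffer.Free();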