// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
#define GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_

#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "gpu/command_buffer/client/ring_buffer.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/gpu_export.h"

namespace gpu {

class CommandBufferHelper;

// Wraps RingBufferWrapper to provide aligned allocations.
class AlignedRingBuffer : public RingBufferWrapper {
 public:
  AlignedRingBuffer(
      unsigned int alignment,
      int32 shm_id,
      RingBuffer::Offset base_offset,
      unsigned int size,
      CommandBufferHelper* helper,
      void* base)
      : RingBufferWrapper(base_offset, size, helper, base),
        alignment_(alignment),
        shm_id_(shm_id) {
  }
  ~AlignedRingBuffer();

  // Hides Alloc from RingBufferWrapper; rounds the requested size up to the
  // buffer's alignment before allocating.
  void* Alloc(unsigned int size) {
    return RingBufferWrapper::Alloc(RoundToAlignment(size));
  }

  int32 GetShmId() const {
    return shm_id_;
  }

 private:
  unsigned int RoundToAlignment(unsigned int size) {
    return (size + alignment_ - 1) & ~(alignment_ - 1);
  }

  unsigned int alignment_;
  int32 shm_id_;
};

// Interface for managing the transfer buffer.
class GPU_EXPORT TransferBufferInterface {
 public:
  TransferBufferInterface() { }
  virtual ~TransferBufferInterface() { }

  virtual bool Initialize(
      unsigned int buffer_size,
      unsigned int result_size,
      unsigned int min_buffer_size,
      unsigned int max_buffer_size,
      unsigned int alignment,
      unsigned int size_to_flush) = 0;

  virtual int GetShmId() = 0;
  virtual void* GetResultBuffer() = 0;
  virtual int GetResultOffset() = 0;

  virtual void Free() = 0;

  virtual bool HaveBuffer() const = 0;

  // Allocates up to size bytes.
  virtual void* AllocUpTo(unsigned int size, unsigned int* size_allocated) = 0;

  // Allocates size bytes.
  // Note: Alloc will fail if it cannot return size bytes.
  virtual void* Alloc(unsigned int size) = 0;

  virtual RingBuffer::Offset GetOffset(void* pointer) const = 0;

  virtual void FreePendingToken(void* p, unsigned int token) = 0;
};

// Class that manages the transfer buffer.
class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
 public:
  explicit TransferBuffer(CommandBufferHelper* helper);
  virtual ~TransferBuffer();

  // Overridden from TransferBufferInterface.
  virtual bool Initialize(
      unsigned int default_buffer_size,
      unsigned int result_size,
      unsigned int min_buffer_size,
      unsigned int max_buffer_size,
      unsigned int alignment,
      unsigned int size_to_flush) OVERRIDE;
  virtual int GetShmId() OVERRIDE;
  virtual void* GetResultBuffer() OVERRIDE;
  virtual int GetResultOffset() OVERRIDE;
  virtual void Free() OVERRIDE;
  virtual bool HaveBuffer() const OVERRIDE;
  virtual void* AllocUpTo(
      unsigned int size, unsigned int* size_allocated) OVERRIDE;
  virtual void* Alloc(unsigned int size) OVERRIDE;
  virtual RingBuffer::Offset GetOffset(void* pointer) const OVERRIDE;
  virtual void FreePendingToken(void* p, unsigned int token) OVERRIDE;

  // These are for testing.
  unsigned int GetCurrentMaxAllocationWithoutRealloc() const;
  unsigned int GetMaxAllocation() const;

 private:
  // Tries to reallocate the ring buffer if it's not large enough for size.
  void ReallocateRingBuffer(unsigned int size);

  void AllocateRingBuffer(unsigned int size);

  CommandBufferHelper* helper_;
  scoped_ptr<AlignedRingBuffer> ring_buffer_;

  // Size reserved for results.
  unsigned int result_size_;

  // Default size. The size we want when starting or re-allocating.
  unsigned int default_buffer_size_;

  // Minimum size we'll consider a successful allocation.
  unsigned int min_buffer_size_;

  // Maximum size we'll let the buffer grow to.
  unsigned int max_buffer_size_;

  // Alignment for allocations.
  unsigned int alignment_;

  // Size at which to do an async flush. 0 = never.
  unsigned int size_to_flush_;

  // Number of bytes since we last flushed.
  unsigned int bytes_since_last_flush_;

  // The current buffer.
  gpu::Buffer buffer_;

  // Id of the buffer. -1 = no buffer.
  int32 buffer_id_;

  // Address of the result area.
  void* result_buffer_;

  // Offset to the result area.
  uint32 result_shm_offset_;

  // False if we failed to allocate min_buffer_size.
  bool usable_;
};

// A class that manages the lifetime of a transfer buffer allocation.
class GPU_EXPORT ScopedTransferBufferPtr {
 public:
  ScopedTransferBufferPtr(
      unsigned int size,
      CommandBufferHelper* helper,
      TransferBufferInterface* transfer_buffer)
      : buffer_(NULL),
        size_(0),
        helper_(helper),
        transfer_buffer_(transfer_buffer) {
    Reset(size);
  }

  ~ScopedTransferBufferPtr() {
    Release();
  }

  bool valid() const {
    return buffer_ != NULL;
  }

  unsigned int size() const {
    return size_;
  }

  int shm_id() const {
    return transfer_buffer_->GetShmId();
  }

  RingBuffer::Offset offset() const {
    return transfer_buffer_->GetOffset(buffer_);
  }

  void* address() const {
    return buffer_;
  }

  void Release();

  void Reset(unsigned int new_size);

 private:
  void* buffer_;
  unsigned int size_;
  CommandBufferHelper* helper_;
  TransferBufferInterface* transfer_buffer_;
  DISALLOW_COPY_AND_ASSIGN(ScopedTransferBufferPtr);
};

template <typename T>
class ScopedTransferBufferArray : public ScopedTransferBufferPtr {
 public:
  ScopedTransferBufferArray(
      unsigned int num_elements,
      CommandBufferHelper* helper,
      TransferBufferInterface* transfer_buffer)
      : ScopedTransferBufferPtr(
            num_elements * sizeof(T), helper, transfer_buffer) {
  }

  T* elements() {
    return static_cast<T*>(address());
  }

  unsigned int num_elements() const {
    return size() / sizeof(T);
  }
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
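
// Example usage (an illustrative sketch only; `helper`, `data`, `data_size`,
// and all numeric parameters below are hypothetical values chosen for the
// example, not values required by this header):
//
//   gpu::TransferBuffer transfer_buffer(helper);
//   transfer_buffer.Initialize(
//       1024 * 1024,       // default buffer size
//       256,               // bytes reserved for results
//       64 * 1024,         // minimum acceptable buffer size
//       16 * 1024 * 1024,  // maximum buffer size
//       16,                // allocation alignment
//       0);                // 0 = never flush based on size
//
//   {
//     gpu::ScopedTransferBufferPtr ptr(data_size, helper, &transfer_buffer);
//     if (ptr.valid()) {
//       memcpy(ptr.address(), data, ptr.size());
//       // Issue a command that references ptr.shm_id() and ptr.offset().
//     }
//   }  // The allocation is released when ptr goes out of scope.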