      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
      6 #define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
      7 
      8 #include "base/memory/scoped_vector.h"
      9 #include "gpu/command_buffer/client/fenced_allocator.h"
     10 #include "gpu/command_buffer/common/buffer.h"
     11 #include "gpu/command_buffer/common/types.h"
     12 #include "gpu/gpu_export.h"
     13 
     14 namespace gpu {
     15 
     16 class CommandBufferHelper;
     17 
// Manages a single shared memory segment. All allocation bookkeeping is
// delegated to a FencedAllocatorWrapper over the segment's memory.
class GPU_EXPORT MemoryChunk {
 public:
  // shm_id: the shared memory id for this segment.
  // shm: the buffer (pointer + size) backing this chunk.
  // helper: used by the allocator to wait on tokens.
  MemoryChunk(int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper);

  // Gets the size of the largest free block that is available without waiting.
  unsigned int GetLargestFreeSizeWithoutWaiting() {
    return allocator_.GetLargestFreeSize();
  }

  // Gets the size of the largest free block that can be allocated if the
  // caller can wait (i.e. including blocks freed pending a token).
  unsigned int GetLargestFreeSizeWithWaiting() {
    return allocator_.GetLargestFreeOrPendingSize();
  }

  // Gets the size of the chunk, in bytes.
  unsigned int GetSize() const {
    return shm_.size;
  }

  // The shared memory id for this chunk.
  int32 shm_id() const {
    return shm_id_;
  }

  // Allocates a block of memory. If the buffer is out of directly available
  // memory, this function may wait until memory that was freed "pending a
  // token" can be re-used.
  //
  // Parameters:
  //   size: the size of the memory block to allocate.
  //
  // Returns:
  //   the pointer to the allocated memory block, or NULL if out of
  //   memory.
  void* Alloc(unsigned int size) {
    return allocator_.Alloc(size);
  }

  // Gets the offset of a memory block within this chunk given its address.
  // It translates NULL to FencedAllocator::kInvalidOffset.
  unsigned int GetOffset(void* pointer) {
    return allocator_.GetOffset(pointer);
  }

  // Frees a block of memory, making it immediately available for re-use.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer) {
    allocator_.Free(pointer);
  }

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, unsigned int token) {
    allocator_.FreePendingToken(pointer, token);
  }

  // Frees any blocks whose tokens have passed.
  void FreeUnused() {
    allocator_.FreeUnused();
  }

  // Returns true if pointer is in the address range of this chunk.
  bool IsInChunk(void* pointer) const {
    return pointer >= shm_.ptr &&
           pointer < reinterpret_cast<const int8*>(shm_.ptr) + shm_.size;
  }

  // Returns true if any memory in this chunk is in use.
  bool InUse() {
    return allocator_.InUse();
  }

 private:
  int32 shm_id_;        // Shared memory id of the backing segment.
  gpu::Buffer shm_;     // Backing buffer (pointer + size).
  FencedAllocatorWrapper allocator_;  // Sub-allocates within shm_.

  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};
    105 
// Manages MemoryChunks: a growable collection of shared memory segments from
// which mapped-memory blocks are allocated.
class GPU_EXPORT MappedMemoryManager {
 public:
  // helper: used to create chunks and to wait on tokens when freeing
  // memory pending a token.
  explicit MappedMemoryManager(CommandBufferHelper* helper);

  ~MappedMemoryManager();

  // The granularity new chunk sizes are rounded up to, in bytes.
  unsigned int chunk_size_multiple() const {
    return chunk_size_multiple_;
  }

  void set_chunk_size_multiple(unsigned int multiple) {
    chunk_size_multiple_ = multiple;
  }

  // Allocates a block of memory
  // Parameters:
  //   size: size of memory to allocate.
  //   shm_id: pointer to variable to receive the shared memory id.
  //   shm_offset: pointer to variable to receive the shared memory offset.
  // Returns:
  //   pointer to allocated block of memory. NULL if failure.
  void* Alloc(
      unsigned int size, int32* shm_id, unsigned int* shm_offset);

  // Frees a block of memory, making it immediately available for re-use.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer);

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, int32 token);

  // Frees any shared memory that is not in use.
  void FreeUnused();

  // Number of managed chunks. Used for testing only.
  size_t num_chunks() {
    return chunks_.size();
  }

 private:
  typedef ScopedVector<MemoryChunk> MemoryChunkVector;

  // Size a chunk is rounded up to.
  unsigned int chunk_size_multiple_;
  CommandBufferHelper* helper_;  // Not owned.
  MemoryChunkVector chunks_;     // Owns the chunks (ScopedVector deletes them).

  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};
    163 
    164 }  // namespace gpu
    165 
    166 #endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
    167 
    168