/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_FRAMEWORK_TRACKING_ALLOCATOR_H_
#define TENSORFLOW_FRAMEWORK_TRACKING_ALLOCATOR_H_

#include <unordered_map>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

// TrackingAllocator is a wrapper for an Allocator. It keeps a running
// count of the number of bytes allocated through the wrapper. It is
// used by the Executor to "charge" allocations to particular Op
// executions. Each Op gets a separate TrackingAllocator wrapper
// around the underlying allocator.
//
// The implementation assumes the invariant that all calls to
// AllocateRaw by an Op (or work items spawned by the Op) will occur
// before the Op's Compute method returns. Thus the high watermark is
// established once Compute returns.
//
// DeallocateRaw can be called long after the Op has finished,
// e.g. when an output tensor is deallocated, and the wrapper cannot
// be deleted until the last of these calls has occurred. The
// TrackingAllocator keeps track of outstanding calls using a
// reference count, and deletes itself once the last call has been
// received and the high watermark has been retrieved.
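//
// A minimal usage sketch (illustrative only; `base`, `tracking`, and
// `buf` are hypothetical names, and the Executor performs these steps
// internally):
//
//   Allocator* base = cpu_allocator();
//   TrackingAllocator* tracking =
//       new TrackingAllocator(base, /*track_ids=*/false);
//   // Allocations made during the Op's Compute are charged here.
//   void* buf = tracking->AllocateRaw(Allocator::kAllocatorAlignment, 1024);
//   // Once Compute returns, read out the statistics and drop the
//   // Executor's reference; the wrapper frees itself after the last
//   // outstanding pointer is deallocated.
//   auto records = tracking->GetRecordsAndUnRef();
//   tracking->DeallocateRaw(buf);  // May arrive long after Compute.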

// AllocRecord stores the size of an allocation and the timestamp (in
// microseconds) at which it was made.
struct AllocRecord {
  AllocRecord(int64 a_bytes, int64 a_micros)
      : alloc_bytes(a_bytes), alloc_micros(a_micros) {}
  AllocRecord() : AllocRecord(0, 0) {}

  int64 alloc_bytes;
  int64 alloc_micros;
};

class TrackingAllocator : public Allocator {
 public:
  explicit TrackingAllocator(Allocator* allocator, bool track_ids);
  string Name() override { return allocator_->Name(); }
  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    return AllocateRaw(alignment, num_bytes, AllocationAttributes());
  }
  void* AllocateRaw(size_t alignment, size_t num_bytes,
                    const AllocationAttributes& allocation_attr) override;
  void DeallocateRaw(void* ptr) override;
  bool TracksAllocationSizes() override;
  size_t RequestedSize(const void* ptr) override;
  size_t AllocatedSize(const void* ptr) override;
  int64 AllocationId(const void* ptr) override;
  void GetStats(AllocatorStats* stats) override;
  void ClearStats() override;

  // If the underlying allocator tracks allocation sizes, this returns
  // a tuple where the first value is the total number of bytes
  // allocated through this wrapper, the second value is the high
  // watermark of bytes allocated through this wrapper, and the third
  // value is the number of bytes allocated through this wrapper that
  // are still alive. If the underlying allocator does not track
  // allocation sizes, the first value is the total number of bytes
  // requested through this wrapper and the second and third values
  // are 0.
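  //
  // A hypothetical reading of the tuple (variable names are
  // illustrative):
  //
  //   size_t total_bytes, high_watermark, live_bytes;
  //   std::tie(total_bytes, high_watermark, live_bytes) =
  //       tracking->GetSizes();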
  std::tuple<size_t, size_t, size_t> GetSizes();
  // After GetRecordsAndUnRef is called, the only further calls allowed
  // on this wrapper are calls to DeallocateRaw with pointers that
  // were allocated by this wrapper and have not yet been
  // deallocated. After this call completes and all allocated pointers
  // have been deallocated, the wrapper will delete itself.
  gtl::InlinedVector<AllocRecord, 4> GetRecordsAndUnRef();
  // Returns a copy of the allocation records collected so far.
  gtl::InlinedVector<AllocRecord, 4> GetCurrentRecords();
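  //
  // A sketch of consuming the records (illustrative; assumes
  // `tracking` points to a live TrackingAllocator):
  //
  //   for (const AllocRecord& rec : tracking->GetCurrentRecords()) {
  //     LOG(INFO) << rec.alloc_bytes << " bytes at " << rec.alloc_micros;
  //   }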

 protected:
  ~TrackingAllocator() override {}

 private:
  bool UnRef() EXCLUSIVE_LOCKS_REQUIRED(mu_);

  Allocator* allocator_;  // not owned.
  mutex mu_;
  // the number of calls to AllocateRaw that have not yet been matched
  // by a corresponding call to DeallocateRaw, plus 1 if the Executor
  // has not yet read out the high watermark.
  int ref_ GUARDED_BY(mu_);
  // the current number of outstanding bytes that have been allocated
  // by this wrapper, or 0 if the underlying allocator does not track
  // allocation sizes.
  size_t allocated_ GUARDED_BY(mu_);
  // the maximum number of outstanding bytes that have been allocated
  // by this wrapper, or 0 if the underlying allocator does not track
  // allocation sizes.
  size_t high_watermark_ GUARDED_BY(mu_);
  // the total number of bytes that have been allocated by this
  // wrapper if the underlying allocator tracks allocation sizes,
  // otherwise the total number of bytes that have been requested
  // through this wrapper.
  size_t total_bytes_ GUARDED_BY(mu_);

  gtl::InlinedVector<AllocRecord, 4> allocations_ GUARDED_BY(mu_);

  // Track allocations locally if requested in the constructor and the
  // underlying allocator doesn't already do it for us.
  const bool track_sizes_locally_;
  struct Chunk {
    size_t requested_size;
    size_t allocated_size;
    int64 allocation_id;
  };
  std::unordered_map<const void*, Chunk> in_use_ GUARDED_BY(mu_);
  int64 next_allocation_id_ GUARDED_BY(mu_);
};

}  // end namespace tensorflow

#endif  // TENSORFLOW_FRAMEWORK_TRACKING_ALLOCATOR_H_