/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HEAP_SIMULATOR_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_HEAP_SIMULATOR_H_

#include <memory>
#include <set>
#include <utility>
#include <vector>

#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
#include "tensorflow/compiler/xla/service/logical_buffer.h"
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"

namespace xla {

// Forward declare classes defined below.
class HeapAlgorithm;

// HeapSimulator assigns buffer offsets by running a simulation of a regular
// memory heap with Alloc and Free calls.  It only works for completely
// sequential instruction sequences.  Unlike regular heaps, we have the
// advantage that the sequence of Alloc and Free calls is known up-front; we
// don't need to return the assignment of buffer offsets until the very end.
class HeapSimulator {
 public:
  // Chunk represents a contiguous piece of memory.  Each LogicalBuffer will be
  // associated with a chunk in the assignment result.
  struct Chunk {
    int64 offset;
    int64 size;

    int64 chunk_end() const { return offset + size; }
  };

  // Result represents the result of the heap simulation.
  struct Result {
    // The assignment of buffers to chunks.
    tensorflow::gtl::FlatMap<const LogicalBuffer*, Chunk> chunk_map;

    // The total size in bytes of the heap, containing all assigned chunks.
    int64 heap_size = 0;

    // The total size in bytes of heap fragmentation.
    int64 fragmentation_size = 0;

    // A trace of heap simulation events.
    HeapSimulatorTrace debug_trace;
  };

  // The different options to be passed to the Run() APIs.
  struct Options {
    Options()
        : may_reuse_operand_buffers(true),
          alloc_constants(false),
          buffers_to_assign(nullptr) {}

    // Whether a buffer about to be Free()-ed can be recycled for a newly
    // allocated one, collapsing the Free()+Alloc() pair (default true).
    bool may_reuse_operand_buffers;
    // Whether to issue Alloc() and Free() calls for constants (default false).
    bool alloc_constants;
    // If 'buffers_to_assign' is provided, only those buffers are assigned
    // offsets; otherwise all buffers defined by the instructions are assigned.
    const tensorflow::gtl::FlatSet<const LogicalBuffer*>* buffers_to_assign;
  };
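
  // For example (a sketch), to make the simulator also assign offsets to
  // constants:
  //
  //   HeapSimulator::Options options;
  //   options.alloc_constants = true;
  //   // Pass 'options' to one of the Run() calls below.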

  // Run the heap simulation with the given algorithm, assuming the given
  // module_sequence, which must contain a topologically-consistent total
  // ordering of all instructions within each computation. The result is invalid
  // if instructions are not run in exactly this sequence.
  //
  // Running heap simulation on the whole module tends to save memory, compared
  // to running on a per-computation basis, since we can re-use buffer space for
  // called sub-computations.
  //
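  // For example (a hedged sketch, not code from this header; assumes the
  // caller has already built 'module', 'module_sequence',
  // 'points_to_analysis', and 'size_fn', and that MakeUnique and
  // TF_ASSIGN_OR_RETURN are in scope):
  //
  //   TF_ASSIGN_OR_RETURN(
  //       HeapSimulator::Result result,
  //       HeapSimulator::Run(
  //           MakeUnique<DecreasingSizeRunsHeap>(
  //               MakeUnique<LazyBestFitHeap>(/*alignment=*/64)),
  //           module, module_sequence, points_to_analysis, size_fn));
  //   // result.chunk_map maps each LogicalBuffer to its assigned Chunk.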
  static StatusOr<Result> Run(
      std::unique_ptr<HeapAlgorithm> algorithm, const HloModule& module,
      const SequentialHloOrdering::HloModuleSequence& module_sequence,
      const TuplePointsToAnalysis& points_to_analysis,
      const LogicalBuffer::SizeFunction& size_fn,
      const Options& options = Options());

  // Same as above, but runs on a single computation. The 'instruction_sequence'
  // must contain a topologically-consistent total ordering of all instructions
  // in the computation. The result is invalid if instructions are not run in
  // exactly this sequence.
  static StatusOr<Result> Run(
      std::unique_ptr<HeapAlgorithm> algorithm,
      const HloComputation& computation,
      const std::vector<const HloInstruction*>& instruction_sequence,
      const TuplePointsToAnalysis& points_to_analysis,
      const LogicalBuffer::SizeFunction& size_fn,
      const Options& options = Options());

 private:
  // If 'module_sequence' is non-null, it is used to find kCall and kWhile
  // sub-computations, and the heap simulation for those sub-computations will
  // be run recursively. I.e. the simulation is run over the whole module.
  HeapSimulator(
      std::unique_ptr<HeapAlgorithm> algorithm,
      const LogicalBuffer::SizeFunction& size_fn, const Options& options,
      const SequentialHloOrdering::HloModuleSequence* module_sequence);
  ~HeapSimulator();

  Status RunComputation(
      const HloComputation& computation,
      const std::vector<const HloInstruction*>& instruction_sequence,
      const TuplePointsToAnalysis& points_to_analysis);

  bool IgnoreBuffer(const LogicalBuffer* buffer) const;
  void Alloc(const LogicalBuffer* buffer, const HloInstruction* instruction);
  void Free(const LogicalBuffer* buffer, const HloInstruction* instruction);
  void ShareBuffer(const LogicalBuffer* buffer, const LogicalBuffer* shared,
                   const HloInstruction* instruction);
  Result Finish();

  void FillDebugTrace(HeapSimulatorTrace::Event::Kind kind,
                      const LogicalBuffer* buffer,
                      const HloInstruction* instruction,
                      const LogicalBuffer* shared_with_canonical);

  const std::unique_ptr<HeapAlgorithm> no_fragmentation_stats_;
  const std::unique_ptr<HeapAlgorithm> algorithm_;
  const LogicalBuffer::SizeFunction size_fn_;
  const Options options_;
  const SequentialHloOrdering::HloModuleSequence* module_sequence_;

  // In addition to Alloc and Free, the heap simulator exposes a concept of
  // buffer sharing.  When ShareBuffer is called, instead of allocating new
  // space for the buffer, it associates the buffer with a previously allocated
  // (or shared) buffer.  Each group of mutually-shared buffers points to a
  // single SharedGroup instance, which is a shared control block.
  //
  // This forced buffer sharing is hidden from the underlying heap algorithm,
  // which only sees a regular Alloc call on the canonical buffer.  The
  // corresponding Free call is delayed until the liveness of all shared buffers
  // in the group has expired, which is tracked via the refcount.  The results
  // are post-processed in Finish to add chunks for shared buffers.
  //
  // The shared_buffers_ map associates each shared buffer (including the
  // canonical) to its SharedGroup control block.
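  //
  // For example (illustrative only): after Alloc(a) and ShareBuffer(b, a),
  // the underlying algorithm has seen a single Alloc for the canonical buffer
  // a, and the group's refcount is 2.  Each of Free(b) and Free(a) decrements
  // the refcount; only the final decrement issues the algorithm's Free, and
  // Finish then copies a's chunk to b in the result.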
  struct SharedGroup {
    const LogicalBuffer* canonical = nullptr;
    int64 refcount = 0;
  };
  tensorflow::gtl::FlatMap<const LogicalBuffer*, std::shared_ptr<SharedGroup>>
      shared_buffers_;

  // Hold some sets for error-checking the sequence of Alloc and Free calls.
  tensorflow::gtl::FlatSet<const LogicalBuffer*> allocated_buffers_;
  tensorflow::gtl::FlatSet<const LogicalBuffer*> freed_buffers_;

  // Debugging information filled in while the heap simulator runs.
  HeapSimulatorTrace debug_trace_;
};

// Abstract base class describing a heap simulation algorithm that assigns
// offsets to buffers.  A sequence of Alloc / Free calls will be made, with the
// same semantics as a regular memory heap.  Finish will be called at the end to
// collect the simulation results.
class HeapAlgorithm {
 public:
  using Chunk = HeapSimulator::Chunk;
  using Result = HeapSimulator::Result;

  virtual ~HeapAlgorithm() = default;

  // Alloc allocates a buffer of 'size' bytes.
  virtual void Alloc(const LogicalBuffer* buffer, int64 size) = 0;

  // Free de-allocates a previously allocated buffer.
  virtual void Free(const LogicalBuffer* buffer, int64 size) = 0;

  // Finish collects the buffer offset assignment results.  Finish may only be
  // called once, after all Alloc and Free calls.
  virtual Result Finish() = 0;
};
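
// Example of implementing the HeapAlgorithm interface (a hedged sketch, not
// part of this header): a trivial bump allocator that never reuses freed
// space, so the final heap_size is simply the sum of all allocation sizes.
//
//   class BumpHeap : public HeapAlgorithm {
//    public:
//     void Alloc(const LogicalBuffer* buffer, int64 size) override {
//       result_.chunk_map[buffer] = Chunk{result_.heap_size, size};
//       result_.heap_size += size;
//     }
//     void Free(const LogicalBuffer* buffer, int64 size) override {}
//     Result Finish() override { return result_; }
//
//    private:
//     Result result_;
//   };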

// NoFragmentationStatsHeap computes the heap size assuming no fragmentation;
// this is the absolute minimum size for a given instruction sequence.  The
// result.chunk_map returned in Finish is always empty, since we only collect
// stats, and don't actually compute chunk assignments.
class NoFragmentationStatsHeap : public HeapAlgorithm {
 public:
  NoFragmentationStatsHeap() = default;
  ~NoFragmentationStatsHeap() override = default;

  void Alloc(const LogicalBuffer* buffer, int64 size) override;
  void Free(const LogicalBuffer* buffer, int64 size) override;
  Result Finish() override;

 private:
  int64 current_heap_size_ = 0;
  int64 max_heap_size_ = 0;
};
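
// For instance (a sketch; reuses the Run() arguments described above), running
// the simulator with this algorithm yields a fragmentation-free lower bound on
// the memory needed for a computation:
//
//   TF_ASSIGN_OR_RETURN(
//       HeapSimulator::Result result,
//       HeapSimulator::Run(MakeUnique<NoFragmentationStatsHeap>(), computation,
//                          instruction_sequence, points_to_analysis, size_fn));
//   int64 min_bytes = result.heap_size;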

// DecreasingSizeRunsHeap collects runs of Alloc and Free calls, sorts them by
// decreasing size, and delegates the actual calls to another heap algorithm.
// This greedy heuristic tends to reduce fragmentation for all algorithms.
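//
// For example (illustrative): a run of consecutive allocations
// Alloc(a, 8), Alloc(b, 32), Alloc(c, 16) is forwarded to the wrapped
// algorithm as Alloc(b, 32), Alloc(c, 16), Alloc(a, 8); runs of Free calls
// are reordered the same way.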
class DecreasingSizeRunsHeap : public HeapAlgorithm {
 public:
  explicit DecreasingSizeRunsHeap(std::unique_ptr<HeapAlgorithm> algorithm)
      : algorithm_(std::move(algorithm)) {}
  ~DecreasingSizeRunsHeap() override {}

  void Alloc(const LogicalBuffer* buffer, int64 size) override;
  void Free(const LogicalBuffer* buffer, int64 size) override;
  Result Finish() override;

 private:
  // A single Alloc or Free operation that we've buffered in run_.
  struct Op {
    const LogicalBuffer* buffer;
    int64 size;
  };

  // Current collection mode; kInit means no ops have been collected yet.
  enum Mode { kInit, kAlloc, kFree };

  void SetMode(Mode mode);
  void CallAndDrainRun();

  const std::unique_ptr<HeapAlgorithm> algorithm_;
  std::vector<Op> run_;
  Mode mode_ = kInit;
};

// LazyBestFitHeap is a variant of the traditional best-fit heap.  This is a
// greedy heuristic, based on the idea that delaying offset assignment helps
// reduce fragmentation.  Here's an example of a "bad" offset assignment, where
// a tiny buffer A prevents adjacent free chunks from being coalesced:
//    BAD: |  free  |A|  free  |
// If we could have delayed the assignment of A, we might have ended up with:
//   GOOD: |      free       |A|
//
// In general it's hard to say whether GOOD is better than BAD; as heuristics,
// we try to leave large contiguous chunks free, and we try to avoid growing
// the overall heap size unless necessary.
//
// Just like regular best-fit, in Alloc we look for the smallest free chunk that
// fits the requested size.  Unlike regular best-fit, we postpone offset
// assignment for buffers that cannot re-use existing free chunks (and force us
// to grow the heap); these buffers are "lazily" assigned offsets in Free.
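//
// For example (illustrative only): if Alloc(a, 16) finds no free chunk of at
// least 16 bytes, buffer a is recorded with the sentinel offset
// kLazyAllocOffset; its real offset is chosen later, in Free, once more is
// known about the surrounding free chunks.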
class LazyBestFitHeap : public HeapAlgorithm {
 public:
  explicit LazyBestFitHeap(int64 alignment) : alignment_(alignment) {}
  ~LazyBestFitHeap() override {}

  void Alloc(const LogicalBuffer* buffer, int64 size) override;
  void Free(const LogicalBuffer* buffer, int64 size) override;
  Result Finish() override;

 private:
  // Sentinel value used to indicate a chunk that wasn't assigned an offset in
  // Alloc, and will instead be assigned an offset in Free.
  enum { kLazyAllocOffset = -1 };

  struct OrderChunkByIncreasingSize {
    bool operator()(const Chunk& a, const Chunk& b) const {
      if (a.size != b.size) return a.size < b.size;
      return a.offset < b.offset;
    }
  };

  void AddFreeChunk(int64 offset, int64 size);

  const int64 alignment_;
  Result result_;

  // Maintain the set of free chunks, ordered by increasing size.
  std::set<Chunk, OrderChunkByIncreasingSize> free_;
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_HEAP_SIMULATOR_H_