/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_SCHEDULE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_SCHEDULE_H_

#include <memory>
#include <vector>

#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
#include "tensorflow/compiler/xla/statusor.h"

namespace xla {
namespace gpu {

// Determines the schedule of HLO instructions, represented by the total order
// of thunk launches, and the partial order of HLO instructions. The HLO
// instructions are only partially ordered, despite the total ordering of thunk
// launches, because thunks may be scheduled onto concurrent streams. This
// schedule is used by BufferAssigner to determine buffer liveness (i.e. to
// minimize allocations), and also by ThunkSchedule to determine the thunk
// launch order.
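//
// Example usage (a rough sketch rather than code from this module; it assumes
// the stream assignment was produced by AssignStreams() from
// stream_assignment.h and that the target machine uses 8-byte pointers):
//
//   std::unique_ptr<StreamAssignment> streams = AssignStreams(module);
//   TF_ASSIGN_OR_RETURN(
//       std::unique_ptr<HloSchedule> schedule,
//       HloSchedule::Build(module, *streams, /*pointer_size=*/8));
//   const std::vector<const HloInstruction*>& launch_order =
//       schedule->ThunkLaunchOrder();
//   std::unique_ptr<HloOrdering> ordering = schedule->ConsumeHloOrdering();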
class HloSchedule {
 public:
  // Constructs an HloSchedule for the given module, based on the given stream
  // assignment.
  static StatusOr<std::unique_ptr<HloSchedule>> Build(
      const HloModule& module, const StreamAssignment& stream_assignment,
      int64 pointer_size);

  // Returns the total order of thunk launches, represented in terms of HLO
  // instructions.
  const std::vector<const HloInstruction*>& ThunkLaunchOrder() const {
    return thunk_launch_order_;
  }

  // Returns the partial order of HLO instructions. This method may only be
  // called once: it transfers ownership of the ordering to the caller, so
  // subsequent calls return a null pointer. The order is based on the total
  // order of thunk launches, the stream assignment, and the data dependencies
  // in the HLO DAG.
  std::unique_ptr<HloOrdering> ConsumeHloOrdering() {
    return std::move(hlo_ordering_);
  }

 private:
  HloSchedule();

  std::vector<const HloInstruction*> thunk_launch_order_;
  std::unique_ptr<HloOrdering> hlo_ordering_;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_SCHEDULE_H_