/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GEMM_THUNK_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GEMM_THUNK_H_

#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
namespace gpu {

// This class stores everything that StreamExecutor needs to launch a BLAS
// gemm. It is generated by IrEmitter.
//
// This is thread-compatible.
class GemmThunk : public Thunk {
 public:
  // Constructs a thunk that computes "output = lhs <dot> rhs" using BLAS gemm.
  // transpose_lhs and transpose_rhs indicate whether gemm should transpose the
  // lhs and rhs operands. hlo_instruction is as in Thunk.
  GemmThunk(const BufferAllocation::Slice& lhs_buffer,
            const BufferAllocation::Slice& rhs_buffer,
            const BufferAllocation::Slice& output_buffer,
            const Shape& lhs_shape, const Shape& rhs_shape,
            const Shape& output_shape, bool transpose_lhs, bool transpose_rhs,
            const HloInstruction* hlo_instruction);

  GemmThunk(const GemmThunk&) = delete;
  GemmThunk& operator=(const GemmThunk&) = delete;

  // Does the gemm operation for the thunk on "stream", which must be non-null.
  tensorflow::Status ExecuteOnStream(
      const BufferAllocations& buffer_allocations,
      perftools::gputools::Stream* stream) override;

  // Returns true if we'll perform autotuning if run on the given stream. If
  // so, we want the GPU to be quiescent during autotuning, so as not to
  // introduce noise in our results.
  bool ShouldHaltAllActivityBeforeRunning(
      perftools::gputools::Stream* stream) override {
    // We autotune on a device exactly when we have no cached result for it
    // yet, i.e. when its name is absent from autotune_results_.
    return autotune_results_.count(
               stream->parent()->GetDeviceDescription().name()) == 0;
  }

 private:
  const BufferAllocation::Slice lhs_buffer_;
  const BufferAllocation::Slice rhs_buffer_;
  const BufferAllocation::Slice output_buffer_;

  const Shape lhs_shape_;
  const Shape rhs_shape_;
  const Shape output_shape_;

  const bool transpose_lhs_;
  const bool transpose_rhs_;

  // Maps device names (StreamExecutor::DeviceDescription::name()) to autotune
  // results. The map's value is the best algorithm we've found for this thunk
  // on this device, or an error if none of the algorithms worked and we should
  // use the regular gemm without an algorithm.
  std::unordered_map<string,
                     StatusOr<::perftools::gputools::blas::AlgorithmType>>
      autotune_results_;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GEMM_THUNK_H_
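
// Illustrative usage sketch (not part of the original header): roughly how a
// caller such as IrEmitter / GpuExecutable might build and run this thunk.
// The slice, shape, instruction, stream, and buffer_allocations variables
// below are hypothetical placeholders, and the snippet assumes it runs inside
// a function returning tensorflow::Status so TF_RETURN_IF_ERROR applies.
//
//   GemmThunk thunk(lhs_slice, rhs_slice, output_slice,
//                   lhs_shape, rhs_shape, output_shape,
//                   /*transpose_lhs=*/false, /*transpose_rhs=*/false,
//                   dot_instruction);
//   // The executor quiesces the GPU first if this run will autotune.
//   if (thunk.ShouldHaltAllActivityBeforeRunning(stream)) {
//     // ... wait for all previously enqueued work to finish ...
//   }
//   TF_RETURN_IF_ERROR(thunk.ExecuteOnStream(buffer_allocations, stream));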