/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_DEVICE_MEMORY_ALLOCATOR_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_DEVICE_MEMORY_ALLOCATOR_H_

#include <vector>

#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/platform/types.h"

namespace xla {

// Interface for device memory allocators used within the XLA service. An
// allocator is responsible for allocating memory on all devices of a
// particular platform.
class DeviceMemoryAllocator {
 public:
  // Parameter platform indicates which platform the allocator allocates
  // memory on. Must be non-null.
  explicit DeviceMemoryAllocator(const perftools::gputools::Platform* platform)
      : platform_(platform) {}
  virtual ~DeviceMemoryAllocator() {}

  // 'retry_on_failure': If false and the first attempt to allocate the memory
  // fails, the allocation should return immediately without retrying. An
  // example use case is optional scratch space, where an allocation failure
  // has only a performance impact.
  // Allocate() should return a null pointer for a size-0 allocation.
  // Deallocate() must be a no-op for null pointers.
  virtual StatusOr<perftools::gputools::DeviceMemoryBase> Allocate(
      int device_ordinal, uint64 size, bool retry_on_failure = true) = 0;
  virtual tensorflow::Status Deallocate(
      int device_ordinal, perftools::gputools::DeviceMemoryBase* mem) = 0;
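
  // Example (a minimal sketch, not part of this interface): a hypothetical
  // caller allocating optional scratch space without retries. `allocator`,
  // `device_ordinal`, and `scratch_size` are assumed caller-side names.
  //
  //   StatusOr<perftools::gputools::DeviceMemoryBase> scratch_or =
  //       allocator->Allocate(device_ordinal, scratch_size,
  //                           /*retry_on_failure=*/false);
  //   if (scratch_or.ok()) {
  //     perftools::gputools::DeviceMemoryBase scratch =
  //         scratch_or.ValueOrDie();
  //     // ... run the faster code path using the scratch buffer ...
  //     TF_RETURN_IF_ERROR(allocator->Deallocate(device_ordinal, &scratch));
  //   } else {
  //     // ... fall back to the scratch-free path; correctness is unaffected,
  //     // only performance ...
  //   }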

  // Returns the platform that the allocator allocates memory on.
  const perftools::gputools::Platform* platform() const { return platform_; }

  // Can we call Deallocate() as soon as a computation has been scheduled on
  // a stream, or do we have to wait for the computation to complete first?
  virtual bool AllowsAsynchronousDeallocation() const = 0;
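
  // Example (a minimal sketch; `stream`, `allocator`, `device_ordinal`, and
  // `buffer` are hypothetical caller-side names, and `stream` is assumed to
  // be a perftools::gputools::Stream*):
  //
  //   stream->ThenLaunch(...);  // enqueue work that reads from `buffer`
  //   if (!allocator->AllowsAsynchronousDeallocation()) {
  //     // This allocator requires the computation to finish first.
  //     stream->BlockHostUntilDone();
  //   }
  //   TF_RETURN_IF_ERROR(allocator->Deallocate(device_ordinal, &buffer));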

 protected:
  const perftools::gputools::Platform* platform_;
};

// Default memory allocator for a platform that uses
// StreamExecutor::Allocate/Deallocate.
class StreamExecutorMemoryAllocator : public DeviceMemoryAllocator {
 public:
  StreamExecutorMemoryAllocator(
      const perftools::gputools::Platform* platform,
      tensorflow::gtl::ArraySlice<perftools::gputools::StreamExecutor*>
          stream_executors);

  StatusOr<perftools::gputools::DeviceMemoryBase> Allocate(
      int device_ordinal, uint64 size, bool retry_on_failure = true) override;
  tensorflow::Status Deallocate(
      int device_ordinal, perftools::gputools::DeviceMemoryBase* mem) override;

  bool AllowsAsynchronousDeallocation() const override;

 private:
  StatusOr<perftools::gputools::StreamExecutor*> GetStreamExecutor(
      int device_ordinal);

  // A vector, indexed by device ordinal, of StreamExecutors for each device of
  // the allocator's platform type. If an element is nullptr, the device with
  // that ordinal is not supported by XLA.
  std::vector<perftools::gputools::StreamExecutor*> stream_executors_;
};
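
// Example (a minimal sketch of constructing the default allocator; it assumes
// the PlatformUtil::GetDefaultPlatform() and
// PlatformUtil::GetStreamExecutors() helpers declared in
// tensorflow/compiler/xla/service/platform_util.h):
//
//   TF_ASSIGN_OR_RETURN(perftools::gputools::Platform* platform,
//                       PlatformUtil::GetDefaultPlatform());
//   TF_ASSIGN_OR_RETURN(
//       std::vector<perftools::gputools::StreamExecutor*> executors,
//       PlatformUtil::GetStreamExecutors(platform));
//   StreamExecutorMemoryAllocator allocator(platform, executors);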

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_DEVICE_MEMORY_ALLOCATOR_H_