// tensorflow/compiler/xla/service/shaped_buffer.cc
      1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 
     16 #include "tensorflow/compiler/xla/service/shaped_buffer.h"
     17 
     18 #include <set>
     19 #include <string>
     20 #include <utility>
     21 
     22 #include "tensorflow/compiler/xla/layout_util.h"
     23 #include "tensorflow/compiler/xla/ptr_util.h"
     24 #include "tensorflow/compiler/xla/shape_util.h"
     25 #include "tensorflow/compiler/xla/status_macros.h"
     26 #include "tensorflow/compiler/xla/types.h"
     27 #include "tensorflow/compiler/xla/util.h"
     28 #include "tensorflow/core/lib/strings/stringprintf.h"
     29 #include "tensorflow/core/platform/logging.h"
     30 
     31 namespace se = ::perftools::gputools;
     32 
     33 namespace xla {
     34 
     35 using ::tensorflow::strings::Appendf;
     36 
// Constructs a ShapedBuffer with a buffer table mirroring `on_device_shape`.
// No device memory is allocated here; `platform` and `device_ordinal` record
// where buffers (added later by callers) are expected to live.
ShapedBuffer::ShapedBuffer(const Shape& on_host_shape,
                           const Shape& on_device_shape,
                           const se::Platform* platform, int device_ordinal)
    : on_host_shape_(on_host_shape),
      on_device_shape_(on_device_shape),
      platform_(platform),
      device_ordinal_(device_ordinal),
      // buffers_ retains a pointer to the shape it was built from, so it must
      // point at the member copy on_device_shape_ (which shares this object's
      // lifetime), never at the caller's argument.
      buffers_(&on_device_shape_) {}
     45 
// Move constructor: steals s's shapes and buffer table. s is left with
// moved-from members; only operations without preconditions (assign,
// destroy) should be performed on it afterwards.
ShapedBuffer::ShapedBuffer(ShapedBuffer&& s)
    : on_host_shape_(std::move(s.on_host_shape_)),
      on_device_shape_(std::move(s.on_device_shape_)),
      platform_(s.platform_),
      device_ordinal_(s.device_ordinal_),
      buffers_(std::move(s.buffers_)) {
  // s.buffers_ has a pointer to s.on_device_shape_. When we move s.buffers_
  // into buffers_, we also need to update this pointer so that buffers_ doesn't
  // point into s.
  buffers_.replace_shape_ptr(&on_device_shape_);
}
     57 
     58 ShapedBuffer& ShapedBuffer::operator=(ShapedBuffer&& s) {
     59   on_host_shape_ = std::move(s.on_host_shape_);
     60   on_device_shape_ = std::move(s.on_device_shape_);
     61   platform_ = s.platform_;
     62   device_ordinal_ = s.device_ordinal_;
     63   buffers_ = std::move(s.buffers_);
     64   // buffers_ has a pointer to its on_device_shape_. When we move s.buffers_
     65   // into buffers_, we also need to update this pointer so that buffers_ doesn't
     66   // point into s.
     67   buffers_.replace_shape_ptr(&on_device_shape_);
     68   return *this;
     69 }
     70 
     71 void ShapedBuffer::clear() {
     72   for (auto& pair : buffers_) {
     73     // A default constructed DeviceMemoryBase is a null pointer.
     74     pair.second = se::DeviceMemoryBase();
     75   }
     76 }
     77 
// Returns a multi-line debug string: a header with the platform name, device
// ordinal, and both host- and device-side shapes (with layout), followed by
// one line per subshape of the on-device shape showing its buffer pointer
// and size.
string ShapedBuffer::ToString() const {
  string s = tensorflow::strings::StrCat(
      "ShapedBuffer(", platform_->Name(), ":", device_ordinal(),
      "), on-host shape=" + ShapeUtil::HumanStringWithLayout(on_host_shape()),
      ", on-device shape=" +
          ShapeUtil::HumanStringWithLayout(on_device_shape()),
      ":\n");
  ShapeUtil::ForEachSubshape(
      on_device_shape(),
      [this, &s](const Shape& subshape, const ShapeIndex& index) {
        // Tuples are labeled "tuple"; leaf shapes are printed with layout.
        string shape_str;
        if (ShapeUtil::IsTuple(subshape)) {
          shape_str = "tuple";
        } else {
          shape_str = ShapeUtil::HumanStringWithLayout(subshape);
        }
        const se::DeviceMemoryBase& memory = buffer(index);
        // Indent two spaces per level of tuple nesting (index.size()).
        Appendf(&s, "  %s%p (%lld bytes) : %s\n",
                string(index.size() * 2, ' ').c_str(), memory.opaque(),
                memory.size(), shape_str.c_str());
      });
  return s;
}
    101 
    102 std::ostream& operator<<(std::ostream& out, const ShapedBuffer& buffer) {
    103   out << buffer.ToString();
    104   return out;
    105 }
    106 
/* static */
// Takes ownership of the device memory referenced by `shaped_buffer`,
// wrapping it in a ScopedShapedBuffer that deallocates through `allocator`
// on destruction. On return, `shaped_buffer` holds only null buffers, so it
// no longer references (and will never free) the memory.
StatusOr<std::unique_ptr<ScopedShapedBuffer>> ScopedShapedBuffer::MakeScoped(
    ShapedBuffer* shaped_buffer, DeviceMemoryAllocator* allocator) {
  auto scoped_buffer = WrapUnique(new ScopedShapedBuffer(
      shaped_buffer->on_host_shape(), shaped_buffer->on_device_shape(),
      allocator, shaped_buffer->device_ordinal()));
  // Copy the buffer table first, then null out the source's entries so the
  // same memory never has two owners that would both deallocate it.
  scoped_buffer->buffers_ = shaped_buffer->buffers();
  shaped_buffer->clear();

  // std::move is needed: the unique_ptr must be moved into the StatusOr.
  return std::move(scoped_buffer);
}
    118 
// Constructs an empty ScopedShapedBuffer for the given shapes. The platform
// is taken from `allocator`, which is also retained for deallocating the
// buffers when this object is destroyed. No memory is allocated here.
ScopedShapedBuffer::ScopedShapedBuffer(const Shape& on_host_shape,
                                       const Shape& on_device_shape,
                                       DeviceMemoryAllocator* allocator,
                                       int device_ordinal)
    : ShapedBuffer(on_host_shape, on_device_shape, allocator->platform(),
                   device_ordinal),
      allocator_(allocator) {}
    126 
// Adopts the buffers of `shaped_buffer` (taken by value and moved from),
// which will be deallocated through `allocator` on destruction.
ScopedShapedBuffer::ScopedShapedBuffer(ShapedBuffer shaped_buffer,
                                       DeviceMemoryAllocator* allocator)
    : ShapedBuffer(std::move(shaped_buffer)), allocator_(allocator) {}
    130 
ScopedShapedBuffer::~ScopedShapedBuffer() {
  // Deallocate all non-null buffers. A buffer may appear in more than one spot
  // in the shape (eg, a tuple with a repeated element) so keep track of what
  // has been deallocated.
  std::set<void*> deallocated_opaques;
  for (auto& pair : buffers_) {
    se::DeviceMemoryBase& memory_base = pair.second;
    if (!memory_base.is_null() &&
        deallocated_opaques.count(memory_base.opaque()) == 0) {
      deallocated_opaques.insert(memory_base.opaque());
      // TF_CHECK_OK aborts the process if deallocation fails; a destructor
      // must not throw, so a hard failure is the chosen policy here.
      TF_CHECK_OK(
          this->allocator_->Deallocate(this->device_ordinal(), &memory_base));
    }
  }
}
    146 
    147 std::unique_ptr<ShapedBuffer> ScopedShapedBuffer::release() {
    148   auto shaped_buffer = MakeUnique<ShapedBuffer>(std::move(*this));
    149   buffers_ = ShapeTree<perftools::gputools::DeviceMemoryBase>();
    150   return shaped_buffer;
    151 }
    152 
    153 }  // namespace xla
    154