/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_TF2XLA_XLA_COMPILATION_DEVICE_H_
#define TENSORFLOW_COMPILER_TF2XLA_XLA_COMPILATION_DEVICE_H_

#include <memory>

#include "tensorflow/compiler/tf2xla/xla_resource.h"
#include "tensorflow/compiler/xla/client/computation_builder.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/public/session_options.h"

namespace tensorflow {

// The class is defined in xla_compilation_device.cc; it is forward-declared
// here only so that XlaCompilationDevice's allocator_ member can be declared.
class XlaCompilationAllocator;

// This is a 'dummy' TensorFlow device that is only used to execute a
// subgraph of XLA compilation Ops to construct a compiled version
// of the subgraph's computation. It has a 'dummy' allocator that
// backs each Tensor with metadata indicating the computation the
// Tensor represents.
//
// We deliberately don't register a device factory because we *never*
// want placement to put Ops on a compilation device. The device is created
// manually, not using a factory.
//
// XLA compilation is not thread-safe. OpKernels registered on the
// XlaCompilationDevice must not use threads or concurrency.
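//
// Illustrative sketch of manual construction (an assumed usage pattern, not
// an API defined in this header; the device type string is an assumption):
//
//   SessionOptions options;
//   std::unique_ptr<XlaCompilationDevice> device(
//       new XlaCompilationDevice(options, DeviceType("XLA_CPU_JIT")));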
class XlaCompilationDevice : public LocalDevice {
 public:
  XlaCompilationDevice(const SessionOptions& options, DeviceType type);

  ~XlaCompilationDevice() override;

  Allocator* GetAllocator(AllocatorAttributes attr) override;

  void Compute(OpKernel* op_kernel, OpKernelContext* context) override;

  Status Sync() override;

  Status MakeTensorFromProto(const TensorProto& tensor_proto,
                             const AllocatorAttributes alloc_attrs,
                             Tensor* tensor) override;

 private:
  std::unique_ptr<XlaCompilationAllocator> allocator_;
};

// An XlaExpression wraps an XLA computation. Each Tensor on an
// XlaCompilationDevice contains an XlaExpression, and the shape of the Tensor
// matches the shape of the subcomputation referenced by the
// ComputationDataHandle. Each expression is either a constant, or a function
// of previously-compiled expressions.
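//
// Illustrative sketch of the two flavors of expression (assumed usage; the
// surrounding xla::ComputationBuilder and tf2xla kernel plumbing are not
// shown):
//
//   XlaExpression compiled;
//   compiled.set_handle(builder->ConstantR0<float>(42.0f));
//
//   Tensor value(DT_FLOAT, TensorShape({}));
//   value.scalar<float>()() = 42.0f;
//   XlaExpression constant;
//   constant.set_constant_value(value);  // host-memory scalar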
class XlaExpression {
 public:
  XlaExpression();

  // set_handle() stores the XLA handle of the computation that the expression
  // represents; handle() returns it.
  void set_handle(const xla::ComputationDataHandle& h);
  const xla::ComputationDataHandle& handle() const { return handle_; }

  void set_constant_value(Tensor value);
  bool has_constant_value() const { return has_constant_value_; }
  const Tensor& constant_value() const { return constant_value_; }

  void set_resource(XlaResource* resource) { resource_ = resource; }
  XlaResource* resource() const { return resource_; }

 private:
  // The XLA handle of the expression's computation.
  xla::ComputationDataHandle handle_;

  // If this expression is a constant with a known value, 'constant_value' is a
  // host-memory Tensor containing the value. Used to avoid invoking XLA for
  // expressions that are trivially constant.
  bool has_constant_value_ = false;
  Tensor constant_value_;

  XlaResource* resource_ = nullptr;  // Not owned.
};

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_TF2XLA_XLA_COMPILATION_DEVICE_H_