/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_
#define TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

// Forward declarations.  In particular, we forward declare protos so that
// their symbols can be removed from .so exports.
class AllocationDescription;
class Allocator;
class OpKernelContext;
class TensorBuffer;
class TensorCApi;
class TensorDescription;
class TensorProto;
class VariantTensorData;
namespace batch_util {
Status CopyElementToSlice(Tensor element, Tensor* parent, int64 index);
}  // namespace batch_util
/// @ingroup core
/// Represents an n-dimensional array of values.
class Tensor {
 public:
  /// \brief Creates a 1-dimensional, 0-element float tensor.
  ///
  /// The returned Tensor is not a scalar (shape {}), but is instead
  /// an empty one-dimensional Tensor (shape {0}, NumElements() ==
  /// 0). Since it has no elements, it does not need to be assigned a
  /// value and is initialized by default (IsInitialized() is
  /// true). If this is undesirable, consider creating a one-element
  /// scalar which does require initialization:
  ///
  /// ```c++
  ///
  ///     Tensor(DT_FLOAT, TensorShape({}))
  ///
  /// ```
  Tensor();

  /// \brief Creates a Tensor of the given `type` and `shape`.  If
  /// LogMemory::IsEnabled() the allocation is logged as coming from
  /// an unknown kernel and step. Calling the Tensor constructor
  /// directly from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// The underlying buffer is allocated using a `CPUAllocator`.
  Tensor(DataType type, const TensorShape& shape);

  /// \brief Creates a tensor with the input `type` and `shape`, using
  /// the allocator `a` to allocate the underlying buffer. If
  /// LogMemory::IsEnabled() the allocation is logged as coming from
  /// an unknown kernel and step. Calling the Tensor constructor
  /// directly from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// `a` must outlive this Tensor.
  Tensor(Allocator* a, DataType type, const TensorShape& shape);
  /// \brief Creates a tensor with the input `type` and `shape`, using
  /// the allocator `a` and the specified `allocation_attr` to
  /// allocate the underlying buffer. If the kernel and step are known,
  /// `allocation_attr.allocation_will_be_logged` should be set to true
  /// and LogMemory::RecordTensorAllocation should be called after the
  /// tensor is constructed. Calling the Tensor constructor directly
  /// from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// `a` must outlive this Tensor.
  Tensor(Allocator* a, DataType type, const TensorShape& shape,
         const AllocationAttributes& allocation_attr);
  /// \brief Creates an empty Tensor of the given data type.
  ///
  /// Like Tensor(), returns a 1-dimensional, 0-element Tensor with
  /// IsInitialized() returning true. See the Tensor() documentation
  /// for details.
  explicit Tensor(DataType type);

  /// Copy constructor.
  Tensor(const Tensor& other);

  /// \brief Move constructor. After this call, `other` is safely destructible
  /// and can be assigned to, but other calls on it (e.g. shape manipulation)
  /// are not valid.
  Tensor(Tensor&& other);

  ~Tensor();
  /// Returns the data type.
  DataType dtype() const { return shape_.data_type(); }

  /// Returns the shape of the tensor.
  const TensorShape& shape() const { return shape_; }

  /// \brief Convenience accessor for the tensor shape.
  ///
  /// For all shape accessors, see comments for relevant methods of
  /// `TensorShape` in `tensor_shape.h`.
  int dims() const { return shape().dims(); }

  /// Convenience accessor for the tensor shape.
  int64 dim_size(int d) const { return shape().dim_size(d); }

  /// Convenience accessor for the tensor shape.
  int64 NumElements() const { return shape().num_elements(); }

  bool IsSameSize(const Tensor& b) const {
    return shape().IsSameSize(b.shape());
  }
  // True iff the two tensors use the same underlying refcounted storage.
  bool SharesBufferWith(const Tensor& b) const;

  /// \brief Has this Tensor been initialized?
  ///
  /// Zero-element Tensors are always considered initialized, even if they
  /// have never been assigned to and do not have any memory allocated.
  bool IsInitialized() const;

  /// Returns the estimated memory usage of this tensor.
  size_t TotalBytes() const;

  // Returns the size of allocated memory for this tensor.
  size_t AllocatedBytes() const;
  /// Returns true iff this tensor is aligned.
  bool IsAligned() const {
#if EIGEN_MAX_ALIGN_BYTES == 0
    return true;
#else
    void* ptr = base<void>();
    return reinterpret_cast<intptr_t>(ptr) % EIGEN_MAX_ALIGN_BYTES == 0;
#endif
  }
    167 
    168   /// Assign operator. This tensor shares other's underlying storage.
    169   Tensor& operator=(const Tensor& other) {
    170     CopyFromInternal(other, other.shape());
    171     return *this;
    172   }
    173 
    174   /// Move operator.  See move constructor for details.
    175   Tensor& operator=(Tensor&& other);
    176 
  /// \brief Copy the other tensor into this tensor and reshape it.
  ///
  /// This tensor shares other's underlying storage. Returns `true`
  /// iff `other.shape()` has the same number of elements as the given
  /// `shape`.
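  ///
  /// A minimal usage sketch (illustrative; the shapes here are arbitrary,
  /// not from the original header):
  ///
  /// ```c++
  ///
  ///     Tensor src(DT_FLOAT, TensorShape({2, 3}));
  ///     Tensor dst;
  ///     // 6 elements can be reshaped to {3, 2}: succeeds.
  ///     CHECK(dst.CopyFrom(src, TensorShape({3, 2})));
  ///     // 6 elements cannot be reshaped to {4, 2}: returns false.
  ///     CHECK(!dst.CopyFrom(src, TensorShape({4, 2})));
  ///
  /// ```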
  bool CopyFrom(const Tensor& other,
                const TensorShape& shape) TF_MUST_USE_RESULT {
    if (other.NumElements() != shape.num_elements()) return false;
    CopyFromInternal(other, shape);
    return true;
  }
  /// \brief Slice this tensor along the first dimension.
  ///
  /// I.e., the returned tensor satisfies
  ///     returned[i, ...] == this[dim0_start + i, ...].
  /// The returned tensor shares the underlying tensor buffer with this
  /// tensor.
  ///
  /// NOTE: The returned tensor may not satisfy the same alignment
  /// requirement as this tensor depending on the shape. The caller
  /// must check the returned tensor's alignment before calling certain
  /// methods that have alignment requirements (e.g., `flat()`, `tensor()`).
  ///
  /// REQUIRES: `dims()` >= 1
  /// REQUIRES: `0 <= dim0_start <= dim0_limit <= dim_size(0)`
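  ///
  /// For example (an illustrative sketch; the shape here is arbitrary):
  ///
  /// ```c++
  ///
  ///     Tensor t(DT_FLOAT, TensorShape({10, 20}));
  ///     Tensor rows = t.Slice(2, 6);  // shape {4, 20}, shares t's buffer
  ///     // The slice may be unaligned; check before calling flat()/tensor().
  ///     if (rows.IsAligned()) {
  ///       auto mat = rows.matrix<float>();
  ///     }
  ///
  /// ```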
  Tensor Slice(int64 dim0_start, int64 dim0_limit) const;

  /// \brief Parse `other` and construct the tensor.
  ///
  /// Returns `true` iff the parsing succeeds. If the parsing fails,
  /// the state of `*this` is unchanged.
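  ///
  /// A minimal sketch (illustrative; assumes `proto` holds a serialized
  /// tensor, e.g. one produced by `AsProtoField()` or
  /// `AsProtoTensorContent()`):
  ///
  /// ```c++
  ///
  ///     Tensor parsed;
  ///     if (!parsed.FromProto(proto)) {
  ///       // `parsed` is unchanged; handle the malformed proto here.
  ///     }
  ///
  /// ```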
  bool FromProto(const TensorProto& other) TF_MUST_USE_RESULT;
  bool FromProto(Allocator* a, const TensorProto& other) TF_MUST_USE_RESULT;

  /// \brief Fills in `proto` with `*this` tensor's content.
  ///
  /// `AsProtoField()` fills in the repeated field for `proto.dtype()`, while
  /// `AsProtoTensorContent()` encodes the content in `proto.tensor_content()`
  /// in a compact form.
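  ///
  /// An illustrative round trip (sketch; assumes `original` is an
  /// initialized Tensor):
  ///
  /// ```c++
  ///
  ///     TensorProto proto;
  ///     original.AsProtoTensorContent(&proto);  // compact encoding
  ///     Tensor restored;
  ///     CHECK(restored.FromProto(proto));
  ///
  /// ```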
  void AsProtoField(TensorProto* proto) const;
  void AsProtoTensorContent(TensorProto* proto) const;

  /// \brief Return the tensor data as an `Eigen::Tensor` with the type and
  /// sizes of this `Tensor`.
  ///
  /// Use these methods when you know the data type and the number of
  /// dimensions of the Tensor and you want an `Eigen::Tensor`
  /// automatically sized to the `Tensor` sizes. The implementation
  /// `CHECK`-fails if either the type or the sizes mismatch.
  ///
  /// Example:
  ///
  /// ```c++
  ///
  ///     typedef float T;
  ///     Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
  ///     auto mat = my_mat.matrix<T>();    // 2D Eigen::Tensor, 3 x 5.
  ///     auto mat = my_mat.tensor<T, 2>(); // 2D Eigen::Tensor, 3 x 5.
  ///     auto vec = my_mat.vec<T>();       // CHECK fails as my_mat is 2D.
  ///     auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
  ///     auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
  ///
  /// ```
  template <typename T>
  typename TTypes<T>::Vec vec() {
    return tensor<T, 1>();
  }

  template <typename T>
  typename TTypes<T>::Matrix matrix() {
    return tensor<T, 2>();
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor tensor();

  /// \brief Return the tensor data as an `Eigen::Tensor` with the
  /// same size but a bitwise cast to the specified dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// NOTE: this is the same as `tensor()` except a bitcast is allowed.
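  ///
  /// For example (an illustrative sketch; relies on `sizeof(uint32)`
  /// equaling `sizeof(float)`):
  ///
  /// ```c++
  ///
  ///     Tensor t(DT_FLOAT, TensorShape({2, 2}));
  ///     // View the float data bitwise as uint32, e.g. for a raw copy.
  ///     auto bits = t.bit_casted_tensor<uint32, 2>();
  ///
  /// ```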
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor bit_casted_tensor();

  /// \brief Return the tensor data as an `Eigen::Tensor` with the
  /// last dimension elements converted into single elements of a larger type.
  ///
  /// For example, this is useful for kernels that can treat NCHW_VECT_C int8
  /// tensors as NCHW int32 tensors. `sizeof(T)` should equal the size of
  /// the original element type multiplied by the number of elements in the
  /// original last dimension. NDIMS should be 1 less than the original number
  /// of dimensions.
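  ///
  /// For example (an illustrative sketch; four int8 values are re-read as
  /// one int32, so `sizeof(int32) == 4 * sizeof(int8)` and
  /// NDIMS == dims() - 1):
  ///
  /// ```c++
  ///
  ///     Tensor t(DT_INT8, TensorShape({2, 3, 4}));
  ///     auto packed = t.reinterpret_last_dimension<int32, 2>();  // 2 x 3
  ///
  /// ```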
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor reinterpret_last_dimension();

  /// \brief Return the tensor data as an `Eigen::Tensor` of the data type and a
  /// specified shape.
  ///
  /// These methods allow you to access the data with the dimensions
  /// and sizes of your choice.  You do not need to know the number of
  /// dimensions of the Tensor to call them.  However, they `CHECK` that
  /// the type matches and that the requested dimensions create an
  /// `Eigen::Tensor` with the same number of elements as the tensor.
  ///
  /// Example:
  ///
  /// ```c++
  ///
  ///     typedef float T;
  ///     Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
  ///     // 1D Eigen::Tensor, size 60:
  ///     auto flat = my_ten.flat<T>();
  ///     // 2D Eigen::Tensor 12 x 5:
  ///     auto inner = my_ten.flat_inner_dims<T>();
  ///     // 2D Eigen::Tensor 4 x 15:
  ///     auto outer = my_ten.shaped<T, 2>({4, 15});
  ///     // CHECK fails, bad num elements:
  ///     auto outer = my_ten.shaped<T, 2>({4, 8});
  ///     // 3D Eigen::Tensor 6 x 5 x 2:
  ///     auto weird = my_ten.shaped<T, 3>({6, 5, 2});
  ///     // CHECK fails, type mismatch:
  ///     auto bad   = my_ten.flat<int32>();
  ///
  /// ```
  template <typename T>
  typename TTypes<T>::Flat flat() {
    return shaped<T, 1>({NumElements()});
  }

  template <typename T>
  typename TTypes<T>::UnalignedFlat unaligned_flat() {
    return unaligned_shaped<T, 1>({NumElements()});
  }

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
  /// Tensor dimensions but the last NDIMS-1 into the first dimension of the
  /// result. If NDIMS > dims() then leading dimensions of size 1 will be
  /// added to make the output rank NDIMS.
  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::Tensor flat_inner_dims();

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
  /// Tensor dimensions but the first NDIMS-1 into the last dimension of the
  /// result. If NDIMS > dims() then trailing dimensions of size 1 will be
  /// added to make the output rank NDIMS.
  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::Tensor flat_outer_dims();

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing
  /// the first `begin` Tensor dimensions into the first dimension of the
  /// result and the last dims() - `begin` - NDIMS Tensor dimensions into the
  /// last dimension of the result. If `begin` < 0 then the |`begin`| leading
  /// dimensions of size 1 will be added. If `begin` + NDIMS > dims() then
  /// `begin` + NDIMS - dims() trailing dimensions of size 1 will be added.
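  ///
  /// For example (an illustrative sketch; the shape is arbitrary):
  ///
  /// ```c++
  ///
  ///     Tensor t(DT_FLOAT, TensorShape({2, 3, 5, 7}));
  ///     // A 2-D view anchored at dimension 1: the one leading dimension
  ///     // folds into the first output dimension and the one trailing
  ///     // dimension folds into the last, giving a 6 x 35 Eigen::Tensor.
  ///     auto m = t.flat_inner_outer_dims<float, 2>(1);
  ///
  /// ```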
  template <typename T, size_t NDIMS = 3>
  typename TTypes<T, NDIMS>::Tensor flat_inner_outer_dims(int64 begin);

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor shaped(gtl::ArraySlice<int64> new_sizes);

  /// \brief Return the tensor data as an `Eigen::Tensor` with the new
  /// shape specified in `new_sizes` and cast to a new dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// The allowed bitcast is the only difference from `shaped()`.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor bit_casted_shaped(
      gtl::ArraySlice<int64> new_sizes);

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::UnalignedTensor unaligned_shaped(
      gtl::ArraySlice<int64> new_sizes);

  /// \brief Return the Tensor data as a `TensorMap` of fixed size 1:
  /// `TensorMap<TensorFixedSize<T, 1>>`.
  ///
  /// Using `scalar()` allows the compiler to perform optimizations as
  /// the size of the tensor is known at compile time.
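  ///
  /// For example (illustrative):
  ///
  /// ```c++
  ///
  ///     Tensor t(DT_FLOAT, TensorShape({}));
  ///     t.scalar<float>()() = 42.0f;  // element access via operator()
  ///     float v = t.scalar<float>()();
  ///
  /// ```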
  template <typename T>
  typename TTypes<T>::Scalar scalar();

  /// Const versions of all the methods above.
  template <typename T>
  typename TTypes<T>::ConstVec vec() const {
    return tensor<T, 1>();
  }

  template <typename T>
  typename TTypes<T>::ConstMatrix matrix() const {
    return tensor<T, 2>();
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor tensor() const;

  /// \brief Return the tensor data as an `Eigen::Tensor` with the
  /// same size but a bitwise cast to the specified dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// NOTE: this is the same as `tensor()` except a bitcast is allowed.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor bit_casted_tensor() const;

  /// \brief Return the tensor data as an `Eigen::Tensor` with the
  /// last dimension elements converted into single elements of a larger type.
  ///
  /// For example, this is useful for kernels that can treat NCHW_VECT_C int8
  /// tensors as NCHW int32 tensors. `sizeof(T)` should equal the size of
  /// the original element type multiplied by the number of elements in the
  /// original last dimension. NDIMS should be 1 less than the original number
  /// of dimensions.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor reinterpret_last_dimension() const;

  template <typename T>
  typename TTypes<T>::ConstFlat flat() const {
    return shaped<T, 1>({NumElements()});
  }

  template <typename T>
  typename TTypes<T>::UnalignedConstFlat unaligned_flat() const {
    return unaligned_shaped<T, 1>({NumElements()});
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  /// \brief Return the tensor data as an `Eigen::Tensor` with the new
  /// shape specified in `new_sizes` and cast to a new dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// The allowed bitcast is the only difference from `shaped()`.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor bit_casted_shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::UnalignedConstTensor unaligned_shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  template <typename T>
  typename TTypes<T>::ConstScalar scalar() const;

  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::ConstTensor flat_inner_dims() const;

  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::ConstTensor flat_outer_dims() const;

  template <typename T, size_t NDIMS = 3>
  typename TTypes<T, NDIMS>::ConstTensor flat_inner_outer_dims(
      int64 begin) const;

  /// Render the first `max_entries` values in `*this` into a string.
  string SummarizeValue(int64 max_entries) const;

  /// A human-readable summary of the tensor suitable for debugging.
  string DebugString() const;

  /// Fill in the `TensorDescription` proto with metadata about the
  /// tensor that is useful for monitoring and debugging.
  void FillDescription(TensorDescription* description) const;

  /// \brief Returns a `StringPiece` mapping the current tensor's buffer.
  ///
  /// The returned `StringPiece` may point to a memory location on devices
  /// that the CPU cannot address directly.
  ///
  /// NOTE: The underlying tensor buffer is refcounted, so the lifetime
  /// of the contents mapped by the `StringPiece` matches the lifetime of
  /// the buffer; callers should arrange to make sure the buffer does
  /// not get destroyed while the `StringPiece` is still in use.
  ///
  /// REQUIRES: `DataTypeCanUseMemcpy(dtype())`.
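  ///
  /// A minimal sketch (illustrative; assumes a CPU-resident DT_FLOAT tensor):
  ///
  /// ```c++
  ///
  ///     Tensor t(DT_FLOAT, TensorShape({4}));
  ///     StringPiece bytes = t.tensor_data();  // 4 * sizeof(float) bytes
  ///     char copy[4 * sizeof(float)];
  ///     // `bytes` is valid only while `t` keeps the buffer alive.
  ///     memcpy(copy, bytes.data(), bytes.size());
  ///
  /// ```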
  StringPiece tensor_data() const;

  /// Copy the other tensor into this tensor, reshape it, and reinterpret the
  /// buffer's datatype.
  ///
  /// This tensor shares other's underlying storage.
  void UnsafeCopyFromInternal(const Tensor&, DataType dtype,
                              const TensorShape&);

 private:
  // Returns true if the refcount on buf_ and any possible underlying root
  // buffer is one.
  bool RefCountIsOne() const;
  void CheckType(DataType expected_dtype) const;
  void CheckTypeAndIsAligned(DataType expected_dtype) const;
  void CheckIsAlignedAndSingleElement() const;
  void set_dtype(DataType t) { shape_.set_data_type(t); }

  // TensorShape's InlinedVector.
  static gtl::InlinedVector<int64, 4> ComputeFlatInnerDims(
      gtl::ArraySlice<int64> orig, int64 num_out_dims);
  static gtl::InlinedVector<int64, 4> ComputeFlatOuterDims(
      gtl::ArraySlice<int64> orig, int64 num_out_dims);

  TensorShape shape_;
  TensorBuffer* buf_;

  friend class DMAHelper;
  friend class TensorCApi;
  friend class TensorReference;       // For access to buf_
  friend class VariableOp;            // For access to set_shape
  friend class AutoReloadVariableOp;  // For access to set_shape
  friend class TensorTestHelper;      // For access to set_shape
  friend class OpKernelContext;       // For access to RefCountIsOne().
  template <typename Device, typename T>
  friend class AssignVariableOp;  // For access to RefCountIsOne().
  template <typename Device, typename T>
  friend Status PrepareToUpdateVariable(
      OpKernelContext* ctx, Tensor* tensor);  // For access to RefCountIsOne().
  friend Status batch_util::CopyElementToSlice(
      Tensor element, Tensor* parent,
      int64 index);                // For access to RefCountIsOne().
  friend class NumpyTensorBuffer;  // For access to the private constructor
                                   // taking the buffer.

  // Creates a tensor with the input datatype, shape and buf.
  //
  // Acquires a ref on buf that belongs to this Tensor.
  Tensor(DataType type, const TensorShape& shape, TensorBuffer* buf);

  bool CanUseDMA() const;

  // Only needed by variable op to set the shape of an uninitialized
  // Tensor.
  // TODO: Remove this when we have a better story for detecting
  // uninitialized tensors.
  void set_shape(const TensorShape& shape) {
    DataType dt = dtype();
    shape_ = shape;
    set_dtype(dt);
  }

  void CopyFromInternal(const Tensor& other, const TensorShape& shape);

  template <typename T>
  T* base() const;

  template <size_t NDIMS>
  void FillDimsAndValidateCompatibleShape(
      gtl::ArraySlice<int64> new_sizes,
      Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const;

  template <typename T, size_t NDIMS>
  void FillDimsAndValidateCompatibleShape(
      gtl::ArraySlice<int64> new_sizes,
      Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const;
};

// Implementation details

// START_SKIP_DOXYGEN

// Interface to access the raw ref-counted data buffer.
class TensorBuffer : public core::RefCounted {
 public:
  ~TensorBuffer() override {}

  // data() points to a memory region of size() bytes.
  virtual void* data() const = 0;
  virtual size_t size() const = 0;

  // If this TensorBuffer is a sub-buffer of another TensorBuffer,
  // returns that TensorBuffer. Otherwise, returns this.
  virtual TensorBuffer* root_buffer() = 0;

  // Fill metadata about the allocation into the proto.
  virtual void FillAllocationDescription(
      AllocationDescription* proto) const = 0;

  template <typename T>
  T* base() const {
    return reinterpret_cast<T*>(data());
  }

  // Whether this TensorBuffer owns the underlying memory.
  virtual bool OwnsMemory() const { return true; }
};

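// An illustrative sketch (not part of TensorFlow) of a minimal TensorBuffer
// subclass that wraps caller-owned memory; real implementations also fill in
// allocation metadata and manage ownership:
//
//   class WrappedBuffer : public TensorBuffer {
//    public:
//     WrappedBuffer(void* data, size_t size) : data_(data), size_(size) {}
//     void* data() const override { return data_; }
//     size_t size() const override { return size_; }
//     TensorBuffer* root_buffer() override { return this; }
//     void FillAllocationDescription(
//         AllocationDescription* proto) const override {}
//     bool OwnsMemory() const override { return false; }
//
//    private:
//     void* const data_;
//     const size_t size_;
//   };
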
template <typename T>
T* Tensor::base() const {
  return buf_ == nullptr ? nullptr : buf_->base<T>();
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::tensor() {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  return typename TTypes<T, NDIMS>::Tensor(base<T>(),
                                           shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::tensor() const {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(),
                                                shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::bit_casted_tensor() {
  CHECK(IsAligned());
  return typename TTypes<T, NDIMS>::Tensor(base<T>(),
                                           shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::bit_casted_tensor() const {
  CHECK(IsAligned());
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(),
                                                shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::reinterpret_last_dimension() {
  if (NDIMS == dims()) {
    return tensor<T, NDIMS>();
  }
  CHECK(IsAligned());
  CHECK_EQ(NDIMS, dims() - 1);
  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  for (int d = 0; d < NDIMS; ++d) {
    dims[d] = shape_.dim_sizes()[d];
  }
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::reinterpret_last_dimension()
    const {
  if (NDIMS == dims()) {
    return tensor<T, NDIMS>();
  }
  CHECK(IsAligned());
  CHECK_EQ(NDIMS, dims() - 1);
  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  for (int d = 0; d < NDIMS; ++d) {
    dims[d] = shape_.dim_sizes()[d];
  }
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(), dims);
}

template <size_t NDIMS>
void Tensor::FillDimsAndValidateCompatibleShape(
    gtl::ArraySlice<int64> new_sizes,
    Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const {
  CHECK_EQ(NDIMS, new_sizes.size());
  int64 new_num_elements = 1;
  for (size_t d = 0; d < NDIMS; d++) {
    new_num_elements *= new_sizes[d];
    (*dims)[d] = new_sizes[d];
  }
  CHECK_EQ(new_num_elements, NumElements());
}

template <typename T, size_t NDIMS>
void Tensor::FillDimsAndValidateCompatibleShape(
    gtl::ArraySlice<int64> new_sizes,
    Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const {
  CHECK_EQ(NDIMS, new_sizes.size());
  int64 new_num_elements = 1;
  for (size_t d = 0; d < NDIMS; d++) {
    new_num_elements *= new_sizes[d];
    (*dims)[d] = new_sizes[d];
  }
  const int element_size = DataTypeSize(BaseType(dtype()));
  if (element_size > 0) {
    CHECK_EQ(new_num_elements * sizeof(T), NumElements() * element_size);
  } else {
    // DataTypeSize() returns 0 for some data types. In this case, assume that T
    // has the same size as the buffer type.
    // NOTE: If we can be sure that DataTypeSize() does not return 0 for all POD
    // types, then we should check DataTypeToEnum<T>::v() == dtype(). Or simply
    // check if `element_size > 0` to err when bit cast is attempted on Tensor
    // of unknown data type size.
    CHECK_EQ(new_num_elements, NumElements());
  }
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::bit_casted_shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape<T>(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::UnalignedTensor Tensor::unaligned_shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CheckType(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::UnalignedTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CheckType(DataTypeToEnum<T>::v());
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::ConstTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::bit_casted_shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape<T>(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::ConstTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::UnalignedConstTensor Tensor::unaligned_shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CheckType(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::UnalignedConstTensor(base<T>(), dims);
}

template <typename T>
typename TTypes<T>::Scalar Tensor::scalar() {
  CheckIsAlignedAndSingleElement();
  return typename TTypes<T>::Scalar(base<T>());
}

template <typename T>
typename TTypes<T>::ConstScalar Tensor::scalar() const {
  CheckIsAlignedAndSingleElement();
  return typename TTypes<T>::ConstScalar(base<T>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_inner_dims() {
  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_outer_dims() {
  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_inner_outer_dims(int64 begin) {
  gtl::InlinedVector<int64, 4> flat_outer =
      ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);
  return shaped<T, NDIMS>(ComputeFlatInnerDims(flat_outer, NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_inner_dims() const {
  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_outer_dims() const {
  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_inner_outer_dims(
    int64 begin) const {
  gtl::InlinedVector<int64, 4> flat_outer =
      ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);
  return shaped<T, NDIMS>(ComputeFlatInnerDims(flat_outer, NDIMS));
}

inline Tensor::Tensor(const Tensor& other)
    : shape_(other.shape()), buf_(other.buf_) {
  if (buf_) buf_->Ref();
}

inline Tensor::Tensor(Tensor&& other)
    : shape_(std::move(other.shape())), buf_(other.buf_) {
  other.buf_ = nullptr;
}

inline Tensor& Tensor::operator=(Tensor&& other) {
  // Avoid self-assignment, since we might destroy our underlying buffer.
  if (&other != this) {
    shape_ = std::move(other.shape_);
    if (buf_) buf_->Unref();
    buf_ = other.buf_;
    other.buf_ = nullptr;
  }
  return *this;
}

// END_SKIP_DOXYGEN

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_