/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/tensor_array.h"

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/aggregate_ops_cpu.h"

namespace tensorflow {

typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;

namespace tensor_array {

#define TENSOR_ARRAY_WRITE_OR_ADD(Device, T)                                \
  template <>                                                               \
  Status AddToTensor<Device, T>(OpKernelContext * ctx, Tensor * sum,        \
                                const Tensor* current, const Tensor* add) { \
    functor::Add2Functor<Device, T> add_functor;                            \
    add_functor(ctx->template eigen_device<Device>(), sum->flat<T>(),       \
                current->flat<T>(), add->flat<T>());                        \
    return Status::OK();                                                    \
  }
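
// For illustration only (not part of the build): instantiating the macro as
// TENSOR_ARRAY_WRITE_OR_ADD(CPUDevice, float) yields roughly the following
// specialization, which delegates the element-wise sum to Add2Functor from
// the aggregate ops:
//
//   template <>
//   Status AddToTensor<CPUDevice, float>(OpKernelContext* ctx, Tensor* sum,
//                                        const Tensor* current,
//                                        const Tensor* add) {
//     functor::Add2Functor<CPUDevice, float> add_functor;
//     add_functor(ctx->eigen_device<CPUDevice>(), sum->flat<float>(),
//                 current->flat<float>(), add->flat<float>());
//     return Status::OK();
//   }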

#define TENSOR_ARRAY_WRITE_OR_ADD_CPU(T) TENSOR_ARRAY_WRITE_OR_ADD(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_CPU)
#undef TENSOR_ARRAY_WRITE_OR_ADD_CPU

#if GOOGLE_CUDA

#define TENSOR_ARRAY_WRITE_OR_ADD_GPU(T) TENSOR_ARRAY_WRITE_OR_ADD(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
TF_CALL_complex64(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
TF_CALL_complex128(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
#undef TENSOR_ARRAY_WRITE_OR_ADD_GPU

#endif  // GOOGLE_CUDA

#undef TENSOR_ARRAY_WRITE_OR_ADD

#define TENSOR_ARRAY_SET_ZERO(Device, T)                                      \
  template <>                                                                 \
  Status TensorSetZero<Device, T>(OpKernelContext * ctx, Tensor * value) {    \
    functor::SetZeroFunctor<Device, T> set_zero_functor;                      \
    set_zero_functor(ctx->template eigen_device<Device>(), value->flat<T>()); \
    return Status::OK();                                                      \
  }
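
// Likewise for illustration: TENSOR_ARRAY_SET_ZERO(CPUDevice, float) expands
// to a TensorSetZero<CPUDevice, float> specialization that hands
// value->flat<float>() to SetZeroFunctor, which overwrites the buffer with
// zeros on the given device.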

#define TENSOR_ARRAY_SET_ZERO_CPU(T) TENSOR_ARRAY_SET_ZERO(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_CPU)
#undef TENSOR_ARRAY_SET_ZERO_CPU

#if GOOGLE_CUDA

#define TENSOR_ARRAY_SET_ZERO_GPU(T) TENSOR_ARRAY_SET_ZERO(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_GPU);
TF_CALL_complex64(TENSOR_ARRAY_SET_ZERO_GPU);
TF_CALL_complex128(TENSOR_ARRAY_SET_ZERO_GPU);
#undef TENSOR_ARRAY_SET_ZERO_GPU

#endif  // GOOGLE_CUDA

#undef TENSOR_ARRAY_SET_ZERO

}  // namespace tensor_array

std::atomic<int64> TensorArray::tensor_array_counter{0};
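
// Note: the counter above gives each dynamically created TensorArray a
// process-unique id; the creation kernel (defined elsewhere) is assumed to
// fold it into the array's handle name so handles never collide.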

Status TensorArray::CopyShapesFrom(TensorArray* rhs) {
  mutex_lock l(mu_);
  mutex_lock l_rhs(rhs->mu_);
  TF_RETURN_IF_ERROR(LockedReturnIfClosed());
  TF_RETURN_IF_ERROR(rhs->LockedReturnIfClosed());
  if (tensors_.size() != rhs->tensors_.size()) {
    return errors::InvalidArgument(
        "TensorArray sizes do not match during CopyShapesFrom: ",
        handle_.vec<string>()(1), " has size ", tensors_.size(), " but rhs ",
        rhs->handle_.vec<string>()(1), " has size ", rhs->tensors_.size());
  }
  for (std::size_t i = 0; i < tensors_.size(); ++i) {
    // Skip "soft copy" of indices which have not been written.
    if (!rhs->tensors_[i].written) continue;

    // Copy the shape over.
    tensors_[i].shape = rhs->tensors_[i].shape;
    // Mark as written.  Reads will know that if written is true while both
    // read and cleared are false, they should return zeros of the
    // appropriate shape.  Future aggregating writes will only use the shape
    // for validation.
    tensors_[i].written = true;
  }

  return Status::OK();
}
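
// Usage sketch (an assumption about the caller, not code from this file):
// the gradient-array creation path can mirror the shapes of everything
// already written to an existing array.  With hypothetical pointers `grad`
// and `source`:
//
//   TensorArray* source = ...;  // the forward array, already written to
//   TensorArray* grad = ...;    // freshly created gradient array
//   TF_RETURN_IF_ERROR(grad->CopyShapesFrom(source));
//
// Afterwards, reads from `grad` at indices marked written (but never read or
// cleared) return zeros of the copied shape, per the comment above.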

}  // namespace tensorflow