/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_KERNELS_SCATTER_FUNCTOR_GPU_CU_H_
#define TENSORFLOW_KERNELS_SCATTER_FUNCTOR_GPU_CU_H_

#if GOOGLE_CUDA

#define EIGEN_USE_GPU

#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/scatter_functor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"
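// Note: cuda_kernel_helper.h supplies the CUDA_1D_KERNEL_LOOP grid-stride
// loop macro, GetCudaLaunchConfig, ldg(), and the CudaAtomic{Add,Sub,Mul,Div}
// wrappers used below.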

namespace tensorflow {

typedef Eigen::GpuDevice GPUDevice;

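// Kernel for scatter update ops: for each element i of 'updates', computes
// the destination row from indices[i / update_block] and applies 'op' at the
// corresponding offset of 'params'. Out-of-range indices are skipped.
// Duplicate entries in 'indices' are combined atomically for ADD/SUB/MUL/DIV;
// for ASSIGN, concurrent writes to the same row keep whichever value lands
// last, so the result on duplicates is nondeterministic.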
template <typename T, typename Index, scatter_op::UpdateOp op>
__global__ void ScatterOpCustomKernel(T* params, const T* updates,
                                      const Index* indices,
                                      Index first_dim_size, Index updates_size,
                                      Index indices_size) {
  // Number of elements in each indexed row of 'params' / 'updates'.
  Index update_block = updates_size / indices_size;
  CUDA_1D_KERNEL_LOOP(i, updates_size) {
    // Use Index (not int) for derived offsets so 64-bit index types are not
    // truncated on large tensors.
    Index indices_i = i / update_block;
    Index updates_i = i;
    Index param_first_index = indices[indices_i];
    if (!(param_first_index >= 0 && param_first_index < first_dim_size)) {
      // Ignore indices that are out of range.
      continue;
    }
    Index params_i = param_first_index * update_block + (i % update_block);
    switch (op) {
      case scatter_op::UpdateOp::ASSIGN: {
        params[params_i] = ldg(updates + updates_i);
        break;
      }
      case scatter_op::UpdateOp::ADD: {
        CudaAtomicAdd(params + params_i, ldg(updates + updates_i));
        break;
      }
      case scatter_op::UpdateOp::SUB: {
        CudaAtomicSub(params + params_i, ldg(updates + updates_i));
        break;
      }
      case scatter_op::UpdateOp::MUL: {
        CudaAtomicMul(params + params_i, ldg(updates + updates_i));
        break;
      }
      case scatter_op::UpdateOp::DIV: {
        CudaAtomicDiv(params + params_i, ldg(updates + updates_i));
        break;
      }
    }
  }
}

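// Worked example (illustrative values): with params of shape [4, 3]
// (first_dim_size = 4), updates of shape [2, 3] (updates_size = 6), and
// indices = {3, 1} (indices_size = 2), update_block = 6 / 2 = 3. The thread
// with i = 4 reads updates[4], looks up indices[4 / 3] = indices[1] = 1, and
// writes to params_i = 1 * 3 + (4 % 3) = 4, i.e. row 1, column 1 of params.
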
namespace functor {
// Specialization for a GPU device.
template <typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterFunctor<GPUDevice, T, Index, op> {
  Index operator()(OpKernelContext* c, const GPUDevice& d,
                   typename TTypes<T>::Matrix params,
                   typename TTypes<T>::ConstMatrix updates,
                   typename TTypes<Index>::ConstFlat indices) {
    // TODO(b/31801742): Implement the indices range check. The hard part is
    // returning a value after the check, since we do not want to do a
    // device-to-host memcpy while the stream is running.
    const Index first_dim_size = params.dimension(0);
    const Index indices_size = indices.size();
    const Index updates_size = updates.size();
    CudaLaunchConfig config = GetCudaLaunchConfig(updates_size, d);
    ScatterOpCustomKernel<T, Index, op>
        <<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
            params.data(), updates.data(), indices.data(), first_dim_size,
            updates_size, indices_size);
    // -1 means "no out-of-range index found"; until the TODO above is
    // resolved, out-of-range indices are silently skipped by the kernel.
    return -1;
  }
};
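
// Usage sketch (hypothetical; the real call sites are the scatter op kernels
// that include this header, and the tensor names below are illustrative):
//
//   functor::ScatterFunctor<GPUDevice, float, int32,
//                           scatter_op::UpdateOp::ADD> scatter;
//   scatter(context, context->eigen_device<GPUDevice>(),
//           params_tensor.matrix<float>(), updates_tensor.matrix<float>(),
//           indices_tensor.flat<int32>());
//
// The return value is always -1 on GPU (see the TODO above), whereas the CPU
// specialization returns the first out-of-range index it encounters.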

}  // namespace functor
}  // namespace tensorflow

#endif  // GOOGLE_CUDA

#endif  // TENSORFLOW_KERNELS_SCATTER_FUNCTOR_GPU_CU_H_