/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#if GOOGLE_CUDA

#define EIGEN_USE_GPU

#include "tensorflow/core/kernels/population_count_op.h"

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"

namespace tensorflow {

typedef Eigen::GpuDevice GPUDevice;

namespace functor {

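// Generic kernel: CUDA_1D_KERNEL_LOOP is a grid-stride loop over [0, size),
// ldg() issues a read-only cached load, and __popc() counts the set bits of
// a 32-bit integer (narrower types are promoted before the count).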
template <typename T>
__global__ void PopulationCountKernel(const int size, const T* input,
                                      uint8* output) {
  CUDA_1D_KERNEL_LOOP(i, size) { output[i] = __popc(ldg(input + i)); }
}

template <>
__global__ void PopulationCountKernel<int8>(const int size, const int8* input,
                                            uint8* output) {
  // __popc operates on 32-bit values, so a negative int8 is sign-extended
  // and picks up 24 extra set bits (e.g. int8(-1) becomes 0xFFFFFFFF and
  // __popc returns 32 instead of 8). Load through a uint8 pointer to avoid
  // the sign extension.
  CUDA_1D_KERNEL_LOOP(i, size) {
    output[i] = __popc(ldg(reinterpret_cast<const uint8*>(input + i)));
  }
}

template <>
__global__ void PopulationCountKernel<int16>(const int size,
                                             const int16* input,
                                             uint8* output) {
  // As with int8: a negative int16 sign-extends to 32 bits, adding 16 set
  // bits, so load through a uint16 pointer instead.
  CUDA_1D_KERNEL_LOOP(i, size) {
    output[i] = __popc(ldg(reinterpret_cast<const uint16*>(input + i)));
  }
}

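// int64 does not fit in 32 bits, so use __popcll, the 64-bit population
// count intrinsic.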
template <>
__global__ void PopulationCountKernel<int64>(const int size, const int64* input,
                                             uint8* output) {
  CUDA_1D_KERNEL_LOOP(i, size) { output[i] = __popcll(ldg(input + i)); }
}

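// Defines the GPU specialization of the PopulationCount functor for type T:
// it computes a 1-D launch configuration for the flat input and launches
// PopulationCountKernel<T> on the device's stream.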
#define DEFINE_GPU_SPECS(T)                                               \
  template <>                                                             \
  void PopulationCount<GPUDevice, T>::operator()(                         \
      OpKernelContext* c, typename TTypes<T>::ConstFlat input,            \
      TTypes<uint8>::Flat output) {                                       \
    const GPUDevice& d = c->eigen_device<GPUDevice>();                    \
    int64 total_count = input.size();                                     \
    CudaLaunchConfig config = GetCudaLaunchConfig(total_count, d);        \
    PopulationCountKernel<T>                                              \
        <<<config.block_count, config.thread_per_block, 0, d.stream()>>>( \
            total_count, input.data(), output.data());                    \
  }

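// Instantiate the functor for each integer type the op supports on GPU.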
TF_CALL_uint8(DEFINE_GPU_SPECS);
TF_CALL_int8(DEFINE_GPU_SPECS);
TF_CALL_uint16(DEFINE_GPU_SPECS);
TF_CALL_int16(DEFINE_GPU_SPECS);
TF_CALL_int32(DEFINE_GPU_SPECS);
TF_CALL_int64(DEFINE_GPU_SPECS);

#undef DEFINE_GPU_SPECS

}  // namespace functor

}  // namespace tensorflow

#endif  // GOOGLE_CUDA