/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#if GOOGLE_CUDA

#define EIGEN_USE_GPU

#include "tensorflow/core/kernels/spacetodepth_op.h"

#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"

namespace tensorflow {

typedef Eigen::GpuDevice GPUDevice;

// Space2Depth kernel for FORMAT_NHWC.
// See 'spacetodepth_op.h' for a more detailed description.
template <typename dtype>
__global__ void S2D_NHWC(const int32 nthreads, const dtype* input_ptr,
                         const int block_size, const int batch_size,
                         const int input_height, const int input_width,
                         const int input_depth, const int output_height,
                         const int output_width, const int output_depth,
                         dtype* output_ptr) {
  CUDA_1D_KERNEL_LOOP(inp_idx, nthreads) {
    // inp_idx = d + input_depth * (w + input_width * (h + input_height * b))
    const int d = inp_idx % input_depth;
    const int inp_idx2 = inp_idx / input_depth;
    const int w = inp_idx2 % input_width;
    const int inp_idx3 = inp_idx2 / input_width;
    const int h = inp_idx3 % input_height;
    const int b = inp_idx3 / input_height;

    const int out_h = h / block_size;
    const int offset_h = h % block_size;
    const int out_w = w / block_size;
    const int offset_w = w % block_size;
    const int offset_d = (offset_h * block_size + offset_w) * input_depth;
    const int out_d = d + offset_d;
    const int out_idx =
        out_d +
        output_depth * (out_w + output_width * (out_h + output_height * b));
    *(output_ptr + out_idx) = ldg(input_ptr + inp_idx);
  }
}

// Space2Depth kernel for FORMAT_NCHW.
// See 'spacetodepth_op.h' for a more detailed description.
template <typename dtype>
__global__ void S2D_NCHW(const int32 nthreads,
                         const dtype* __restrict__ input_ptr,
                         const int block_size, const int output_width,
                         const int input_depth_by_output_height,
                         dtype* __restrict__ output_ptr) {
  CUDA_1D_KERNEL_LOOP(input_idx, nthreads) {
    // We assume both the input and output are packed NCHW tensors.
    // input_idx represents an index within the flattened input tensor.
    // We can consider the block width and height as extra tensor dimensions,
    // then isolate the relevant components of input_idx and recombine them to
    // form output_idx. The layout transform performed is:
    //   n, iC, oY, bY, oX, bX    (== input_idx)   to
    //   n, bY, bX, iC, oY, oX    (== output_idx).
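    //
    // As a worked example (sizes chosen purely for illustration, not drawn
    // from any particular op invocation): with block_size = 2,
    // output_width = 2, and input_depth_by_output_height = 2 (one input
    // channel, two output rows), input_idx = 13 decomposes below into
    // bX = 1, oX = 0, bY = 1, n = 0, iC_oY = 1, and recombines into
    //   output_idx = 0 + (((0 * 2 + 1) * 2 + 1) * 2 + 1) * 2 = 14.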

    const int n_iC_oY_bY_oX = input_idx / block_size;
    const int bX = input_idx - n_iC_oY_bY_oX * block_size;

    const int n_iC_oY_bY = n_iC_oY_bY_oX / output_width;
    const int oX = n_iC_oY_bY_oX - n_iC_oY_bY * output_width;

    const int n_iC_oY = n_iC_oY_bY / block_size;
    const int bY = n_iC_oY_bY - n_iC_oY * block_size;

    const int n = n_iC_oY / input_depth_by_output_height;
    const int iC_oY = n_iC_oY - n * input_depth_by_output_height;

    const int output_idx = oX + (((n * block_size + bY) * block_size + bX) *
                                     input_depth_by_output_height +
                                 iC_oY) *
                                    output_width;

    *(output_ptr + output_idx) = ldg(input_ptr + input_idx);
  }
}

// Space2Depth kernel for FORMAT_NCHW using a loop over block area.
// See 'spacetodepth_op.h' for functional specification.
template <typename dtype, int block_size>
__global__ void S2D_NCHW_LOOP(const int32 nthreads,
                              const dtype* __restrict__ input,
                              const int output_width, const int input_width,
                              const int input_depth_by_output_area,
                              const int output_depth_by_output_area,
                              dtype* __restrict__ output) {
  CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) {
    // We will be converting the image from ordering:
    //   n, iC, oY, bY, oX, bX   (== input index)  to
    //   n, bY, bX, iC, oY, oX   (== output index)

    // We assume thread_idx encodes n_iC_oY_oX, and use an unrolled loop over
    // the bY and bX coordinates within the block. This kernel gets a small
    // performance improvement compared with S2D_NCHW due to a denser access
    // pattern on the input side. (Note: the equivalent D2S kernel gets a
    // larger improvement, as a denser pattern on the output side makes more
    // difference.)

    const int n_iC_oY = thread_idx / output_width;
    const int oX = thread_idx - n_iC_oY * output_width;
    const int n = thread_idx / input_depth_by_output_area;
    const int iC_oY_oX = thread_idx - n * input_depth_by_output_area;

    // Recombine the components and apply to the input and output pointers.
    auto input_ptr = input + (n_iC_oY * input_width + oX) * block_size;
    auto output_ptr = output + n * output_depth_by_output_area + iC_oY_oX;

    // Copy a patch of data to the output batch image.
#pragma unroll
    for (int bY = 0; bY < block_size; ++bY) {
#pragma unroll
      for (int bX = 0; bX < block_size; ++bX) {
        output_ptr[(bY * block_size + bX) * input_depth_by_output_area] =
            ldg(input_ptr + bY * input_width + bX);
      }
    }
  }
}
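// To illustrate S2D_NCHW_LOOP above with hypothetical sizes: take
// block_size = 2, output_width = 2, input_width = 4, a single input channel,
// and a 2x2 output (so input_depth_by_output_area = 4 and
// output_depth_by_output_area = 16). The thread with thread_idx = 3
// (n = 0, iC = 0, oY = 1, oX = 1) reads the 2x2 input patch at flat offsets
// {10, 11, 14, 15} and writes it to output offsets {3, 7, 11, 15}, i.e. one
// element into each of the four output channels generated from that input
// channel.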

// Specialization of SpaceToDepthOpFunctor for a GPUDevice.
namespace functor {
template <typename T>
struct SpaceToDepthOpFunctor<GPUDevice, T, FORMAT_NHWC> {
  void operator()(const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
                  int block_size, typename TTypes<T, 4>::Tensor output) {
    const int batch_size = output.dimension(0);
    const int input_height = input.dimension(1);
    const int input_width = input.dimension(2);
    const int input_depth = input.dimension(3);
    const int output_height = output.dimension(1);
    const int output_width = output.dimension(2);
    const int output_depth = output.dimension(3);

    const int total_count =
        batch_size * input_height * input_width * input_depth;
    CudaLaunchConfig config = GetCudaLaunchConfig(total_count, d);
    S2D_NHWC<<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
        config.virtual_thread_count, input.data(), block_size, batch_size,
        input_height, input_width, input_depth, output_height, output_width,
        output_depth, output.data());
  }
  void operator()(const GPUDevice& d, typename TTypes<T, 5>::ConstTensor input,
                  int block_size, typename TTypes<T, 5>::Tensor output) {
    LOG(FATAL) << "5-D tensors should not be used with NHWC format";
  }
};

template <typename T>
struct SpaceToDepthOpFunctor<GPUDevice, T, FORMAT_NCHW> {
  void operator()(const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
                  int block_size, typename TTypes<T, 4>::Tensor output) {
    const int batch_size = output.dimension(0);
    const int input_depth = input.dimension(1);
    const int output_depth = output.dimension(1);
    const int output_height = output.dimension(2);
    const int output_width = output.dimension(3);
    const int output_area = output_width * output_height;
    const int output_depth_by_output_area = output_depth * output_area;

    // We improve performance by generating instantiations of the loop kernel
    // for the most common block sizes.
    if (block_size <= 4) {
      const int input_width = input.dimension(3);
      const int input_depth_by_output_area = input_depth * output_area;
      const int total_count = batch_size * input_depth_by_output_area;
      CudaLaunchConfig config = GetCudaLaunchConfig(total_count, d);
      switch (block_size) {
        case 2:
          return S2D_NCHW_LOOP<T, 2>
              <<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
                  total_count, input.data(), output_width, input_width,
                  input_depth_by_output_area, output_depth_by_output_area,
                  output.data());
        case 3:
          return S2D_NCHW_LOOP<T, 3>
              <<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
                  total_count, input.data(), output_width, input_width,
                  input_depth_by_output_area, output_depth_by_output_area,
                  output.data());
        case 4:
          return S2D_NCHW_LOOP<T, 4>
              <<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
                  total_count, input.data(), output_width, input_width,
                  input_depth_by_output_area, output_depth_by_output_area,
                  output.data());
      }
    }

    // Other block sizes are processed by the generic kernel.
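    // Note the differing thread counts: the loop kernel above launches one
    // thread per (batch, input channel, output pixel) tuple, with each
    // thread copying a whole block_size x block_size patch, whereas the
    // generic kernel below launches one thread per output element.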
    const int total_count = batch_size * output_depth_by_output_area;
    CudaLaunchConfig config = GetCudaLaunchConfig(total_count, d);
    S2D_NCHW<<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
        config.virtual_thread_count, input.data(), block_size, output_width,
        input_depth * output_height, output.data());
  }
  void operator()(const GPUDevice& d, typename TTypes<T, 5>::ConstTensor input,
                  int block_size, typename TTypes<T, 5>::Tensor output) {
    LOG(FATAL) << "5-D tensors should not be used with NCHW format";
  }
};
}  // end namespace functor

// Instantiate the GPU implementations for float.
template struct functor::SpaceToDepthOpFunctor<GPUDevice, float, FORMAT_NCHW>;
template struct functor::SpaceToDepthOpFunctor<GPUDevice, float, FORMAT_NHWC>;

// NCHW_VECT_C with 4 x qint8 can be treated as NCHW int32.
template struct functor::SpaceToDepthOpFunctor<GPUDevice, int32, FORMAT_NCHW>;

}  // end namespace tensorflow

#endif  // GOOGLE_CUDA