/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/util/tensor_format.h"

namespace tensorflow {
namespace {

class DepthToSpaceOp : public XlaOpKernel {
 public:
  explicit DepthToSpaceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
    string data_format_str;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("data_format", &data_format_str));
    OP_REQUIRES(ctx, FormatFromString(data_format_str, &data_format_),
                errors::InvalidArgument("Invalid data format"));

    OP_REQUIRES(ctx,
                data_format_ == FORMAT_NCHW || data_format_ == FORMAT_NHWC,
                errors::InvalidArgument("Unsupported data format ",
                                        ToString(data_format_),
                                        "; expected formats NHWC or NCHW"));

    OP_REQUIRES_OK(ctx, ctx->GetAttr("block_size", &block_size_));
    OP_REQUIRES(
        ctx, block_size_ > 1,
        errors::InvalidArgument("Block size should be > 1: ", block_size_));
  }

  void Compile(XlaOpKernelContext* ctx) override {
    const TensorShape input_tensor_shape = ctx->InputShape(0);
    int input_rank = input_tensor_shape.dims();
    static const int kRequiredDims = 4;
    OP_REQUIRES(ctx, kRequiredDims == input_rank,
                errors::InvalidArgument("Input rank should be ", kRequiredDims,
                                        "; got: ", input_rank));
    const gtl::InlinedVector<int64, 4> input_shape =
        input_tensor_shape.dim_sizes();

    xla::ComputationBuilder* b = ctx->builder();
    xla::ComputationDataHandle input = ctx->Input(0);

    int feature_dim = GetTensorFeatureDimIndex(input_rank, data_format_);
    int num_spatial_dims = GetTensorSpatialDims(input_rank, data_format_);

    std::vector<int64> reshaped_shape;
    std::vector<int64> transpose_order;
    std::vector<int64> output_shape;
    reshaped_shape.reserve(input_rank);
    transpose_order.reserve(input_rank);
    output_shape.reserve(input_rank);
    if (data_format_ == FORMAT_NHWC) {
      reshaped_shape.push_back(input_shape[0]);
      for (int i = 0; i < num_spatial_dims; ++i) {
        reshaped_shape.push_back(input_shape[1 + i]);
      }
      int64 block_elems = 1;
      for (int i = 0; i < num_spatial_dims; ++i) {
        reshaped_shape.push_back(block_size_);
        block_elems *= block_size_;
      }
      reshaped_shape.push_back(input_shape[feature_dim] / block_elems);

      transpose_order.push_back(0);
      for (int i = 0; i < num_spatial_dims; ++i) {
        transpose_order.push_back(i + 1);
        transpose_order.push_back(i + 1 + num_spatial_dims);
      }
      transpose_order.push_back(feature_dim + num_spatial_dims);

      output_shape.push_back(input_shape[0]);
      for (int i = 0; i < num_spatial_dims; ++i) {
        output_shape.push_back(input_shape[1 + i] * block_size_);
      }
      output_shape.push_back(input_shape[feature_dim] / block_elems);
    } else {
      // NCHW format.
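      // For reference, the NCHW path below builds the analogues of the NHWC
      // shapes documented in the step comments further down:
      //
      //   reshaped:          [batch, block_size_, block_size_,
      //                       depth / (block_size_ * block_size_),
      //                       input_shape[2], input_shape[3]]
      //   permuted_reshaped: [batch, depth / (block_size_ * block_size_),
      //                       input_shape[2], block_size_,
      //                       input_shape[3], block_size_]
      //   output:            [batch, depth / (block_size_ * block_size_),
      //                       input_shape[2] * block_size_,
      //                       input_shape[3] * block_size_]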
      reshaped_shape.push_back(input_shape[0]);
      int64 block_elems = 1;
      for (int i = 0; i < num_spatial_dims; ++i) {
        reshaped_shape.push_back(block_size_);
        block_elems *= block_size_;
      }
      reshaped_shape.push_back(input_shape[feature_dim] / block_elems);
      for (int i = 0; i < num_spatial_dims; ++i) {
        reshaped_shape.push_back(input_shape[2 + i]);
      }

      transpose_order.push_back(0);
      transpose_order.push_back(1 + num_spatial_dims);
      for (int i = 0; i < num_spatial_dims; ++i) {
        transpose_order.push_back(2 + num_spatial_dims + i);
        transpose_order.push_back(1 + i);
      }

      output_shape.push_back(input_shape[0]);
      output_shape.push_back(input_shape[feature_dim] / block_elems);
      for (int i = 0; i < num_spatial_dims; ++i) {
        output_shape.push_back(input_shape[2 + i] * block_size_);
      }
    }

    // Note: the comments below are given in NHWC format; NCHW is similar
    // with a different dimension order.
    // 1. Reshape `input` to `reshaped` of shape:
    //
    //    [batch,
    //     input_shape[1],
    //     input_shape[2],
    //     block_size_,
    //     block_size_,
    //     depth / (block_size_ * block_size_)]
    OP_REQUIRES(ctx,
                input_shape[feature_dim] % (block_size_ * block_size_) == 0,
                errors::InvalidArgument(
                    "Input depth dimension (", input_shape[feature_dim],
                    ") is not divisible by square of the block size (",
                    block_size_, ")"));

    xla::ComputationDataHandle reshaped = b->Reshape(input, reshaped_shape);

    // 2. Permute dimensions of `reshaped` to produce
    //    `permuted_reshaped` of shape:
    //
    //    [batch,
    //     input_shape[1],
    //     block_size_,
    //     input_shape[2],
    //     block_size_,
    //     depth / (block_size_ * block_size_)]
    xla::ComputationDataHandle permuted_reshaped =
        b->Transpose(reshaped, transpose_order);

    // 3. Reshape `permuted_reshaped` to flatten the block dimensions into the
    //    spatial dimensions, producing an output tensor of shape:
    //
    //    [batch,
    //     input_shape[1] * block_size_,
    //     input_shape[2] * block_size_,
    //     depth / (block_size_ * block_size_)]
    xla::ComputationDataHandle output =
        b->Reshape(permuted_reshaped, output_shape);

    ctx->SetOutput(0, output);
  }

 private:
  TensorFormat data_format_;
  int block_size_;
};

REGISTER_XLA_OP(Name("DepthToSpace"), DepthToSpaceOp);

}  // namespace
}  // namespace tensorflow
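// Worked example of the NHWC decomposition above (illustrative values chosen
// here, not taken from this file): with block_size_ = 2 and an input of shape
// [1, 3, 5, 8],
//   1. `reshaped` has shape          [1, 3, 5, 2, 2, 2],
//   2. `permuted_reshaped` has shape [1, 3, 2, 5, 2, 2]
//      (transpose order {0, 1, 3, 2, 4, 5}),
//   3. `output` has shape            [1, 6, 10, 2].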