/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <cstring>
#include <memory>
#include <vector>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace mirror_pad {
namespace {

// Sentinel value for an unset padding mode/offset.
const int kUnsetOffset = -1;

// Wrapper for data used by the op.
struct OpData {
  // Memoized fill results for subarrays, keyed by the state
  // (dimension to fill, flat index into the input tensor).
  // Each entry stores the [start, end) interval in the output array that
  // holds the padded result for that subarray.
  std::vector<std::pair<int, int>> cache;
};
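
// For illustration: once Eval fills the subarray for a state (dim, flat
// index), its cache entry might read (8, 12), meaning output elements
// [8, 12) already hold that subarray; a later visit to the same state can
// memcpy those four elements instead of recursing again. (The numbers here
// are hypothetical.)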

// Wrapper for params passed to the Eval<T> function.
template <typename T>
struct EvalData {
  OpData* op_data = nullptr;
  const TfLiteTensor* padding_matrix = nullptr;
  const TfLiteIntArray* input_dims = nullptr;
  // Holds the number of elements (stride) at each dimension: the value at the
  // last dimension is 1, and at the second-to-last it is the size of the last
  // dimension.
  const std::vector<int>* dimension_num_elements = nullptr;
  const T* input_data = nullptr;

  int offset = kUnsetOffset;
  T* output_data = nullptr;
  int input_size = 0;
  int output_size = 0;
  int num_dims = 0;
};

// Helper that reads the left and right padding values for dimension `offset`
// from the flattened padding data.
template <typename T>
inline void GetPadding(const T* data, int offset, int64_t* left_pad,
                       int64_t* right_pad) {
  *left_pad = static_cast<int64_t>(*(data + offset * 2));
  *right_pad = static_cast<int64_t>(*(data + offset * 2 + 1));
}

inline void GetPadding(const TfLiteTensor* padding_matrix, int dimension,
                       int64_t* left_pad, int64_t* right_pad) {
  switch (padding_matrix->type) {
    case kTfLiteInt32:
      GetPadding(padding_matrix->data.i32, dimension, left_pad, right_pad);
      break;
    case kTfLiteInt64:
      GetPadding(padding_matrix->data.i64, dimension, left_pad, right_pad);
      break;
    default:
      // Unsupported padding type; leave the outputs untouched.
      return;
  }
}
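
// The padding matrix is a [num_dims, 2] tensor stored row-major, so row d is
// {left_pad, right_pad} for dimension d. For example (values illustrative
// only), flattened data {1, 1, 2, 2} for a 2-D input pads dimension 0 by one
// element on each side and dimension 1 by two elements on each side.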

template <typename T>
int Eval(EvalData<T>* eval_data, int current_dim, int flat_index,
         int output_index) {
  if (current_dim == eval_data->num_dims) {
    // Base case: all dimensions have been consumed, so copy a single element.
    if (output_index >= eval_data->output_size) {
      return output_index;
    }
    eval_data->output_data[output_index] = eval_data->input_data[flat_index];
    return output_index + 1;
  }
  // Check if the value has been computed already.
  const int cache_index = current_dim * eval_data->input_size + flat_index;
  auto& cache_entry = eval_data->op_data->cache[cache_index];
  if (cache_entry.first != -1) {
    // The cache value is a [start, end) interval in the output; copy the
    // interval directly.
    const int count = cache_entry.second - cache_entry.first;
    memcpy(eval_data->output_data + output_index,
           eval_data->output_data + cache_entry.first, count * sizeof(T));
    return output_index + count;
  }
  cache_entry.first = output_index;
  int64_t left_pad = 0, right_pad = 0;
  const int multiplier = (*eval_data->dimension_num_elements)[current_dim];
  const TfLiteTensor* padding_matrix = eval_data->padding_matrix;
  const auto offset = eval_data->offset;
  auto* dims = eval_data->input_dims;

  GetPadding(padding_matrix, current_dim, &left_pad, &right_pad);
  // Left padding.
  for (int i = left_pad + offset - 1; i >= offset && left_pad > 0;
       --i, --left_pad) {
    output_index = Eval(eval_data, current_dim + 1, flat_index + i * multiplier,
                        output_index);
  }
  // Original values.
  for (int i = 0; i < dims->data[current_dim]; ++i) {
    output_index = Eval(eval_data, current_dim + 1, flat_index + i * multiplier,
                        output_index);
  }
  // Right padding.
  for (int i = dims->data[current_dim] - (1 + offset); i >= 0 && right_pad > 0;
       --i, --right_pad) {
    output_index = Eval(eval_data, current_dim + 1, flat_index + i * multiplier,
                        output_index);
  }
  cache_entry.second = output_index;
  return output_index;
}
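
// Worked example (illustrative): for a 1-D input [1, 2, 3] padded by [2, 2],
// REFLECT mode (offset = 1) mirrors around the border element and yields
// [3, 2, 1, 2, 3, 2, 1], while SYMMETRIC mode (offset = 0) repeats the border
// element and yields [2, 1, 1, 2, 3, 3, 2].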

// Returns the shape of the final output after padding.
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> GetPaddedOutputShape(
    const TfLiteTensor* input, const TfLiteTensor* padding_matrix) {
  const int input_dims = NumDimensions(input);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(input_dims), TfLiteIntArrayFree);

  int64_t left_pad = 0, right_pad = 0;
  for (int i = 0; i < input_dims; ++i) {
    GetPadding(padding_matrix, i, &left_pad, &right_pad);
    shape->data[i] = SizeOfDimension(input, i) + left_pad + right_pad;
  }
  return shape;
}
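
// For example (values illustrative), an input of shape [2, 3] with padding
// matrix {{1, 1}, {2, 2}} produces an output of shape [4, 7]: each dimension
// grows by its left plus right padding.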

}  // namespace

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input_tensor = GetInput(context, node, 0);
  const TfLiteTensor* padding_matrix = GetInput(context, node, 1);
  auto* params =
      reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data);
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);

  if (params == nullptr) {
    return kTfLiteError;
  }
  const int input_dims = NumDimensions(input_tensor);

  TfLiteTensor* output_tensor = GetOutput(context, node, 0);
  if (IsDynamicTensor(output_tensor)) {
    auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
    if (output_size == nullptr) {
      return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(
        context->ResizeTensor(context, output_tensor, output_size.release()));
  }

  // Compute the number of elements (stride) at each dimension.
  std::vector<int> dimension_num_elements(input_dims, 1);
  for (int i = input_dims - 2; i >= 0; i--) {
    dimension_num_elements[i] =
        dimension_num_elements[i + 1] * input_tensor->dims->data[i + 1];
  }
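  // For example (illustrative), input dims of [2, 3, 4] yield strides of
  // [12, 4, 1]: moving one step along dimension d advances the flat index by
  // dimension_num_elements[d].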
  const int input_size = NumElements(input_tensor);

  // REFLECT mode (offset = 1) excludes the border element when mirroring;
  // SYMMETRIC mode (offset = 0) includes it.
  const int offset =
      params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 0
                                                                           : 1;
  TfLiteStatus status = kTfLiteOk;
  int output_index = 0;
  // Reset the cache array.
  std::fill(op_data->cache.begin(), op_data->cache.end(),
            std::make_pair(-1, -1));
#define TF_LITE_MIRROR_PAD(type)                              \
  EvalData<type> eval_data;                                   \
  eval_data.input_data = GetTensorData<type>(input_tensor);   \
  eval_data.input_dims = input_tensor->dims;                  \
  eval_data.input_size = input_size;                          \
  eval_data.dimension_num_elements = &dimension_num_elements; \
  eval_data.num_dims = input_dims;                            \
  eval_data.offset = offset;                                  \
  eval_data.op_data = op_data;                                \
  eval_data.output_data = GetTensorData<type>(output_tensor); \
  eval_data.output_size = NumElements(output_tensor);         \
  eval_data.padding_matrix = padding_matrix;                  \
  Eval<type>(&eval_data, 0, 0, output_index);

  switch (output_tensor->type) {
    case kTfLiteFloat32: {
      TF_LITE_MIRROR_PAD(float);
      break;
    }
    case kTfLiteInt32: {
      TF_LITE_MIRROR_PAD(int32_t);
      break;
    }
    case kTfLiteUInt8: {
      TF_LITE_MIRROR_PAD(uint8_t);
      break;
    }
    case kTfLiteInt64: {
      TF_LITE_MIRROR_PAD(int64_t);
      break;
    }
    default:
      status = kTfLiteError;
      break;
  }
#undef TF_LITE_MIRROR_PAD
  return status;
}

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return new OpData;
}

void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input_tensor = GetInput(context, node, 0);
  const TfLiteTensor* padding_matrix = GetInput(context, node, 1);
  TfLiteTensor* output_tensor = GetOutput(context, node, 0);
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2);
  TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
                    NumDimensions(input_tensor));

  // The cache is keyed by (dimension, flat input index), so it needs one slot
  // per such pair.
  int num_elements = NumElements(input_tensor) * NumDimensions(input_tensor);
  op_data->cache.resize(num_elements + 1);
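  // For instance (illustrative), a 2-D input with 6 elements gets a cache of
  // 2 * 6 + 1 entries, and the state (dim = 1, flat index = 4) maps to slot
  // 1 * 6 + 4 = 10, matching cache_index in Eval.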

  if (!IsConstantTensor(padding_matrix)) {
    SetTensorToDynamic(output_tensor);
    return kTfLiteOk;
  }
  // We have constant padding, so we can infer the output size.
  auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
  if (output_size == nullptr) {
    return kTfLiteError;
  }
  return context->ResizeTensor(context, output_tensor, output_size.release());
}

}  // namespace mirror_pad

TfLiteRegistration* Register_MIRROR_PAD() {
  static TfLiteRegistration r = {mirror_pad::Init, mirror_pad::Free,
                                 mirror_pad::Prepare, mirror_pad::Eval};
  return &r;
}

}  // namespace builtin
}  // namespace ops
}  // namespace tflite