/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <string.h>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace reshape {

constexpr int kInputTensor = 0;
constexpr int kShapeTensor = 1;
constexpr int kOutputTensor = 0;

TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node,
                          TfLiteIntArray* output_shape) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // TensorFlow's Reshape allows one of the shape components to have the
  // special -1 value, meaning it will be calculated automatically based on the
  // input. Here we calculate what that dimension should be so that the number
  // of output elements is the same as the number of input elements.
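  // For example, with a [2, 3, 4] input (24 elements) and a requested shape
  // of {4, -1}, the -1 resolves to 24 / 4 = 6, giving an output of [4, 6].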
  int num_input_elements = NumElements(input);

  int num_output_elements = 1;
  int stretch_dim = -1;
  for (int i = 0; i < output_shape->size; ++i) {
    int value = output_shape->data[i];
    if (value == -1) {
      TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
      stretch_dim = i;
    } else {
      num_output_elements *= value;
    }
  }
  if (stretch_dim != -1) {
    // Guard against division by zero when the remaining dimensions contain a
    // 0 (e.g. a zero-element input reshaped with both a 0 and a -1).
    TF_LITE_ENSURE(context, num_output_elements != 0);
    output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
    num_output_elements *= output_shape->data[stretch_dim];
  }

  TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
  return context->ResizeTensor(context, output, output_shape);
}

// Reads the requested output shape from the 1-D int32 shape input tensor.
TfLiteIntArray* GetOutputShapeFromTensor(TfLiteContext* context,
                                         TfLiteNode* node) {
  const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);

  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape->dims->data[0]);
  for (int i = 0; i < output_shape->size; ++i) {
    output_shape->data[i] = shape->data.i32[i];
  }

  return output_shape;
}

// Reads the requested output shape from the `TfLiteReshapeParams` attached to
// the node's builtin data.
TfLiteIntArray* GetOutputShapeFromParam(TfLiteContext* context,
                                        TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data);

  // This path is only taken when the shape tensor is absent or unusable, so
  // fall back to the shape parameter in `TfLiteReshapeParams`.
  int num_dimensions = params->num_dimensions;
  if (num_dimensions == 1 && params->shape[0] == 0) {
    // Legacy tflite models use a shape parameter of [0] to indicate scalars,
    // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers
    // during toco conversion.
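    // For example, a legacy model whose reshape op carries shape == {0}
    // produces a rank-0 (scalar) output here rather than a zero-element
    // vector.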
    num_dimensions = 0;
  }
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions);
  for (int i = 0; i < num_dimensions; ++i) {
    output_shape->data[i] = params->shape[i];
  }

  return output_shape;
}

// Check if the shape tensor is valid. Shapes should be int32 vectors.
bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
  return (shape->dims->size == 1 && shape->type == kTfLiteInt32);
}

TfLiteIntArray* GetOutputShape(TfLiteContext* context, TfLiteNode* node) {
  if (NumInputs(node) == 2 && ShapeIsVector(context, node)) {
    return GetOutputShapeFromTensor(context, node);
  } else {
    return GetOutputShapeFromParam(context, node);
  }
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // Always postpone sizing string tensors, even if we could in principle
  // calculate their shapes now. String tensors don't benefit from having their
  // shapes precalculated because the actual memory can only be allocated after
  // we know all the content.
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  if (output->type != kTfLiteString) {
    if (NumInputs(node) == 1 ||
        IsConstantTensor(GetInput(context, node, kShapeTensor))) {
      TF_LITE_ENSURE_OK(
          context, ResizeOutput(context, node, GetOutputShape(context, node)));
    } else {
      SetTensorToDynamic(output);
    }
  }
  return kTfLiteOk;
}

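// How the two entry points interact, on a hypothetical graph: if the shape
// input is a constant tensor holding, say, {2, -1}, Prepare() above resizes
// the output once and Eval() below only copies data. If the shape is produced
// by another op at runtime, the output is marked dynamic and Eval() must
// resize it on every invocation.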
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // There are two ways in which 'output' can be dynamic: it is a string
  // tensor, or its shape could not be calculated during Prepare(). In either
  // case, we now have all the information needed to calculate its shape.
  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(
        context, ResizeOutput(context, node, GetOutputShape(context, node)));
  }

  // Note that string tensors are always "dynamic" in the sense that their size
  // is not known until we have all the content. This applies even when their
  // shape is known ahead of time. As a result, a string tensor is never given
  // any memory by ResizeOutput(), and we need to do it manually here. Since
  // reshape doesn't change the data, the output tensor needs exactly as many
  // bytes as the input tensor.
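  // For example, reshaping a string tensor holding 3 strings from [3] to
  // [1, 3] copies the serialized string buffer verbatim below; only the shape
  // metadata changes.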
  if (output->type == kTfLiteString) {
    auto bytes_required = input->bytes;
    TfLiteTensorRealloc(bytes_required, output);
    output->bytes = bytes_required;
  }

  memcpy(output->data.raw, input->data.raw, input->bytes);

  return kTfLiteOk;
}

}  // namespace reshape

TfLiteRegistration* Register_RESHAPE() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 reshape::Prepare, reshape::Eval};
  return &r;
}
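
// A minimal usage sketch (assuming the standard interpreter APIs from
// tensorflow/lite/model.h and tensorflow/lite/kernels/register.h; the model
// path is hypothetical). BuiltinOpResolver maps the RESHAPE builtin code to
// the registration above:
//
//   tflite::ops::builtin::BuiltinOpResolver resolver;
//   auto model = tflite::FlatBufferModel::BuildFromFile("reshape.tflite");
//   std::unique_ptr<tflite::Interpreter> interpreter;
//   tflite::InterpreterBuilder(*model, resolver)(&interpreter);
//   interpreter->AllocateTensors();  // Runs reshape::Prepare().
//   interpreter->Invoke();           // Runs reshape::Eval().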

}  // namespace builtin
}  // namespace ops
}  // namespace tflite