/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace squared_difference {

constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;

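// Per-node state allocated in Init and released in Free. The only thing the
// kernel needs to remember between Prepare and Eval is whether the two inputs
// have different shapes and therefore require broadcasting.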
struct OpData {
  bool requires_broadcast;
};

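// Element-wise squared difference: (input1 - input2)^2.
// For example, with input1 = {1, 4} and input2 = {3, 1} the result is
// {(1 - 3)^2, (4 - 1)^2} = {4, 9}.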
template <typename T>
T SquaredDifference(T input1, T input2) {
  const T difference = input1 - input2;
  return difference * difference;
}

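// Allocates the per-node OpData. Called once when the node is created; the
// returned pointer is stored in node->user_data.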
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* data = new OpData;
  data->requires_broadcast = false;
  return data;
}

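// Releases the OpData allocated in Init.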
void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

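// Validates the node, decides whether broadcasting is needed, and resizes the
// output tensor: both inputs must share a type, and the output shape is either
// a copy of the (common) input shape or the broadcast of the two input shapes.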
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
  output->type = input2->type;

  data->requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  return context->ResizeTensor(context, output, output_size);
}

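// Runs the squared-difference computation for one element type, dispatching to
// the broadcasting reference implementation when the input shapes differ and
// to the plain element-wise implementation when they match.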
template <typename T>
void EvalSquaredDifference(TfLiteContext* context, TfLiteNode* node,
                           const OpData* data, const TfLiteTensor* input1,
                           const TfLiteTensor* input2, TfLiteTensor* output) {
  if (data->requires_broadcast) {
    reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
        GetTensorShape(input1), GetTensorData<T>(input1),
        GetTensorShape(input2), GetTensorData<T>(input2),
        GetTensorShape(output), GetTensorData<T>(output), SquaredDifference<T>);
  } else {
    reference_ops::BinaryFunction<T, T, T>(
        GetTensorShape(input1), GetTensorData<T>(input1),
        GetTensorShape(input2), GetTensorData<T>(input2),
        GetTensorShape(output), GetTensorData<T>(output), SquaredDifference<T>);
  }
}

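// Dispatches on the output type: float32 and int32 are supported; any other
// type is reported as an error.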
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  if (output->type == kTfLiteFloat32) {
    EvalSquaredDifference<float>(context, node, data, input1, input2, output);
  } else if (output->type == kTfLiteInt32) {
    EvalSquaredDifference<int32_t>(context, node, data, input1, input2, output);
  } else {
    context->ReportError(
        context,
        "SquaredDifference currently supports only FLOAT32 and INT32, got %d.",
        output->type);
    return kTfLiteError;
  }

  return kTfLiteOk;
}

}  // namespace squared_difference

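// Returns the TfLiteRegistration that wires Init/Free/Prepare/Eval together so
// the op can be registered as the builtin SQUARED_DIFFERENCE operator (for
// example by the builtin op resolver).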
TfLiteRegistration* Register_SQUARED_DIFFERENCE() {
  static TfLiteRegistration r = {
      squared_difference::Init, squared_difference::Free,
      squared_difference::Prepare, squared_difference::Eval};
  return &r;
}

}  // namespace builtin
}  // namespace ops
}  // namespace tflite
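// Illustrative usage sketch (an assumption for clarity, not part of this
// file's build): a client assembling its own resolver could register this
// kernel with
//   tflite::MutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_SQUARED_DIFFERENCE,
//                       tflite::ops::builtin::Register_SQUARED_DIFFERENCE());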