/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model.h"

namespace tflite {
namespace {

using ::testing::ElementsAreArray;

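// Test fixture that wraps a single ADD operator in a minimal model: two
// input tensors, one output tensor, and the fused activation passed through
// AddOptions.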
class BaseAddOpModel : public SingleOpModel {
 public:
  BaseAddOpModel(const TensorData& input1, const TensorData& input2,
                 const TensorData& output,
                 ActivationFunctionType activation_type) {
    input1_ = AddInput(input1);
    input2_ = AddInput(input2);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                 CreateAddOptions(builder_, activation_type).Union());
    BuildInterpreter({GetShape(input1_), GetShape(input2_)});
  }

  int input1() { return input1_; }
  int input2() { return input2_; }

 protected:
  int input1_;
  int input2_;
  int output_;
};

class FloatAddOpModel : public BaseAddOpModel {
 public:
  using BaseAddOpModel::BaseAddOpModel;

  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};

class IntegerAddOpModel : public BaseAddOpModel {
 public:
  using BaseAddOpModel::BaseAddOpModel;

  std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
};

class QuantizedAddOpModel : public BaseAddOpModel {
 public:
  using BaseAddOpModel::BaseAddOpModel;

  template <typename integer_dtype>
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
                                     GetScale(output_), GetZeroPoint(output_));
  }

  std::vector<float> GetDequantizedOutputInt16() {
    return Dequantize<int16_t>(ExtractVector<int16_t>(output_),
                               GetScale(output_), GetZeroPoint(output_));
  }
};

// For quantized Add, the error should not exceed one quantization step.
// uint8/int8 map [min, max] onto 256 levels, i.e. 255 steps, so the step is
// (max - min) / 255; e.g. GetTolerance(-1.0, 1.0) == 2.0f / 255 ≈ 0.0078.
float GetTolerance(float min, float max) {
  const float kQuantizedStep = (max - min) / 255.0f;
  return kQuantizedStep;
}

// Same idea for int16, which divides [min, max] into 32767 steps here.
float GetToleranceInt16(float min, float max) {
  const float kQuantizedStep = (max - min) / 32767.f;
  return kQuantizedStep;
}

TEST(FloatAddOpModel, NoActivation) {
  FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}

TEST(FloatAddOpModel, ActivationRELU_N1_TO_1) {
  FloatAddOpModel m(
      {TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
      {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.0, 0.4, 1.0, 1.0}));
}

TEST(FloatAddOpModel, VariousInputShapes) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    FloatAddOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
    m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 1.1, 0.1});
    m.Invoke();
    EXPECT_THAT(m.GetOutput(),
                ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 2.2, 2.1}))
        << "With shape number " << i;
  }
}

TEST(FloatAddOpModel, WithBroadcast) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    FloatAddOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {}},  // always a scalar
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
    m.PopulateTensor<float>(m.input2(), {0.1});
    m.Invoke();
    EXPECT_THAT(
        m.GetOutput(),
        ElementsAreArray(ArrayFloatNear({-1.9, 0.3, 0.8, 0.9, 1.2, 2.1})))
        << "With shape number " << i;
  }
}

TEST(IntegerAddOpModel, NoActivation) {
  IntegerAddOpModel m({TensorType_INT32, {1, 2, 2, 1}},
                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
                      ActivationFunctionType_NONE);
  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-19, 4, 10, 13}));
}

TEST(IntegerAddOpModel, ActivationRELU_N1_TO_1) {
  IntegerAddOpModel m({TensorType_INT32, {1, 2, 2, 1}},
                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
                      ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 1, 1, 1}));
}

TEST(IntegerAddOpModel, VariousInputShapes) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    IntegerAddOpModel m({TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
    m.Invoke();
    // Note: the original expectation spelled this value as the octal
    // literal `04`; write it as a plain decimal 4.
    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-19, 4, 10, 13, 22, 21}))
        << "With shape number " << i;
  }
}

TEST(IntegerAddOpModel, WithBroadcast) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    IntegerAddOpModel m({TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, {}},  // always a scalar
                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int32_t>(m.input2(), {1});
    m.Invoke();
    // The output is int32, so compare exactly; a floating-point
    // tolerance matcher is unnecessary here.
    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-19, 3, 8, 9, 12, 21}))
        << "With shape number " << i;
  }
}

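// Helper templated on the quantized tensor type and its C++ storage type so
// the same no-activation cases can be instantiated for uint8 and int8 below.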
template <TensorType tensor_type, typename integer_dtype>
void QuantizedTestsNoActivation() {
  float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
  std::vector<std::vector<float>> inputs1 = {
      {0.1, 0.2, 0.3, 0.4}, {-0.8, 0.2, 0.4, 0.7}, {-0.8, 0.2, 0.7, 0.3}};
  std::vector<std::vector<float>> inputs2 = {
      {0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.8}, {0.6, 0.4, -0.8, 0.5}};
  std::vector<std::vector<float>> results = {
      {0.7, 0.6, 0.6, 0.5}, {-0.2, 0.6, 0.9, -0.1}, {-0.2, 0.6, -0.1, 0.8}};
  for (int i = 0; i < inputs1.size(); ++i) {
    QuantizedAddOpModel m({tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {}, -1.0, 1.0},
                          ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<integer_dtype>(m.input1(), inputs1[i]);
    m.QuantizeAndPopulate<integer_dtype>(m.input2(), inputs2[i]);
    m.Invoke();
    EXPECT_THAT(
        m.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(results[i], kQuantizedTolerance)))
        << "With test number " << i;
  }
}

TEST(QuantizedAddOpModel, QuantizedTestsNoActivationUInt8) {
  QuantizedTestsNoActivation<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedAddOpModel, QuantizedTestsNoActivationInt8) {
  QuantizedTestsNoActivation<TensorType_INT8, int8_t>();
}

TEST(QuantizedAddOpModel, QuantizedTestsNoActivationInt16) {
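  // TFLite int16 quantization is symmetric with a zero point of 0, so the
  // largest representable value is 32767/32768 of the nominal range; kMax
  // reflects that rather than a full 1.0.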
  const float kMin = -1.f;
  const float kMax = 32767.f / 32768.f;
  float kQuantizedTolerance = GetToleranceInt16(kMin, kMax);
  std::vector<std::vector<float>> inputs1 = {
      {0.1, 0.2, 0.3, 0.4}, {-0.8, 0.2, 0.4, 0.7}, {-0.8, 0.2, 0.7, 0.3}};
  std::vector<std::vector<float>> inputs2 = {
      {0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.8}, {0.6, 0.4, -0.8, 0.5}};
  std::vector<std::vector<float>> results = {
      {0.7, 0.6, 0.6, 0.5}, {-0.2, 0.6, 0.9, -0.1}, {-0.2, 0.6, -0.1, 0.8}};
  for (int i = 0; i < inputs1.size(); ++i) {
    QuantizedAddOpModel m({TensorType_INT16, {1, 2, 2, 1}, kMin, kMax},
                          {TensorType_INT16, {1, 2, 2, 1}, kMin, kMax},
                          {TensorType_INT16, {}, kMin, kMax},
                          ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<int16_t>(m.input1(), inputs1[i]);
    m.QuantizeAndPopulate<int16_t>(m.input2(), inputs2[i]);
    m.Invoke();
    EXPECT_THAT(
        m.GetDequantizedOutputInt16(),
        ElementsAreArray(ArrayFloatNear(results[i], kQuantizedTolerance)))
        << "With test number " << i;
  }
}

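// RELU_N1_TO_1 clamps the sum to [-1, 1]; e.g. in the first case below,
// 0.9 + 0.9 = 1.8 is expected to come out as 1.0.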
template <TensorType tensor_type, typename integer_dtype>
void QuantizedTestsActivationRELU_N1_TO_1() {
  float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
  std::vector<std::vector<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7},
                                             {-0.8, 0.2, 0.7, 0.3}};
  std::vector<std::vector<float>> inputs2 = {{0.6, 0.4, 0.9, -0.8},
                                             {0.6, 0.4, -0.8, 0.5}};
  std::vector<std::vector<float>> results = {{-0.2, 0.6, 1.0, -0.1},
                                             {-0.2, 0.6, -0.1, 0.8}};
  for (int i = 0; i < inputs1.size(); ++i) {
    QuantizedAddOpModel m({tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {}, -1.0, 1.0},
                          ActivationFunctionType_RELU_N1_TO_1);
    m.QuantizeAndPopulate<integer_dtype>(m.input1(), inputs1[i]);
    m.QuantizeAndPopulate<integer_dtype>(m.input2(), inputs2[i]);
    m.Invoke();
    EXPECT_THAT(
        m.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(results[i], kQuantizedTolerance)))
        << "With test number " << i;
  }
}

TEST(QuantizedAddOpModel, QuantizedTestsActivationRELU_N1_TO_1UInt8) {
  QuantizedTestsActivationRELU_N1_TO_1<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedAddOpModel, QuantizedTestsActivationRELU_N1_TO_1Int8) {
  QuantizedTestsActivationRELU_N1_TO_1<TensorType_INT8, int8_t>();
}

template <TensorType tensor_type, typename integer_dtype>
void QuantizedVariousInputShapes() {
  float kQuantizedTolerance = GetTolerance(-3.0, 3.0);
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedAddOpModel m({tensor_type, test_shapes[i], -3.0, 3.0},
                          {tensor_type, test_shapes[i], -3.0, 3.0},
                          {tensor_type, {}, -3.0, 3.0},
                          ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<integer_dtype>(m.input1(),
                                         {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
    m.QuantizeAndPopulate<integer_dtype>(m.input2(),
                                         {0.1, 0.3, 0.3, 0.5, 1.1, 0.1});
    m.Invoke();
    EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
                ElementsAreArray(ArrayFloatNear({-1.9, 0.5, 1.0, 1.3, 2.2, 2.1},
                                                kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

TEST(QuantizedAddOpModel, QuantizedVariousInputShapesUInt8) {
  QuantizedVariousInputShapes<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedAddOpModel, QuantizedVariousInputShapesInt8) {
  QuantizedVariousInputShapes<TensorType_INT8, int8_t>();
}

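// Broadcasts a rank-0 (scalar) operand against each test shape; the second
// loop swaps the operand order to cover the scalar in either input position.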
template <TensorType tensor_type, typename integer_dtype>
void QuantizedWithScalarBroadcast() {
  float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedAddOpModel model_fixture(
        {tensor_type, test_shapes[i], -3.f, 3.f}, {tensor_type, {}, -3.f, 3.f},
        {tensor_type, {}, -3.f, 3.f}, ActivationFunctionType_NONE);
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input1(), {-2.0f, 0.2f, 0.7f, 0.8f, 1.1f, 2.0f});
    model_fixture.QuantizeAndPopulate<integer_dtype>(model_fixture.input2(),
                                                     {0.1f});
    model_fixture.Invoke();
    EXPECT_THAT(
        model_fixture.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear({-1.9f, 0.3f, 0.8f, 0.9f, 1.2f, 2.1f},
                                        kQuantizedTolerance)))
        << "With shape number " << i;
  }
  // Re-run with exchanged inputs.
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedAddOpModel model_fixture(
        {tensor_type, {}, -3.f, 3.f}, {tensor_type, test_shapes[i], -3.f, 3.f},
        {tensor_type, {}, -3.f, 3.f}, ActivationFunctionType_NONE);
    model_fixture.QuantizeAndPopulate<integer_dtype>(model_fixture.input1(),
                                                     {0.1f});
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input2(), {-2.0f, 0.2f, 0.7f, 0.8f, 1.1f, 2.0f});
    model_fixture.Invoke();
    EXPECT_THAT(
        model_fixture.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear({-1.9f, 0.3f, 0.8f, 0.9f, 1.2f, 2.1f},
                                        kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

TEST(QuantizedAddOpModel, QuantizedWithScalarBroadcastUInt8) {
  QuantizedWithScalarBroadcast<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedAddOpModel, QuantizedWithScalarBroadcastInt8) {
  QuantizedWithScalarBroadcast<TensorType_INT8, int8_t>();
}

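// Broadcasts along interior dimensions rather than a scalar: each test shape
// is broadcast against base_shape {2, 3, 1, 2} per numpy-style rules, e.g.
// {2, 3, 1, 2} + {1, 1, 3, 2} -> {2, 3, 3, 2} (the 36 values in
// test_outputs[0]).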
template <TensorType tensor_type, typename integer_dtype>
void QuantizedWithMixedBroadcast() {
  float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
  const std::vector<int> base_shape = {2, 3, 1, 2};
  std::vector<std::vector<int>> test_shapes = {
      {1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
  std::vector<std::vector<float>> test_outputs = {
      {-0.1f, 2.6f,  -0.7f, 2.8f, 0.7f,  3.0f, 1.1f,  0.8f, 0.5f,
       1.0f,  1.9f,  1.4f,  1.0f, -0.8f, 0.4f, -0.6f, 1.8f, -0.2f,
       1.4f,  3.0f,  0.8f,  3.0f, 2.2f,  3.0f, -1.4f, 0.3f, -2.0f,
       0.5f,  -0.6f, 0.9f,  0.9f, -1.9f, 0.3f, -1.7f, 1.7f, -1.3f},
      {-0.1f, 2.6f, 0.5f, 1.0f, 1.8f, -0.2f, 1.4f, 3.0f, -2.0f, 0.5f, 1.7f,
       -1.3f},
      {-0.1f, 2.5f,  0.0f, 2.6f, -0.7f, 1.9f, 1.1f,  0.7f, 1.2f,
       0.8f,  0.5f,  0.1f, 1.0f, -0.9f, 1.1f, -0.8f, 0.4f, -1.5f,
       1.7f,  3.0f,  2.2f, 3.0f, 2.1f,  3.0f, -1.1f, 0.5f, -0.6f,
       1.0f,  -0.7f, 0.9f, 1.2f, -1.7f, 1.7f, -1.2f, 1.6f, -1.3f},
      {-0.1f, 2.5f, 1.2f, 0.8f, 0.4f, -1.5f, 1.7f, 3.0f, -0.6f, 1.0f, 1.6f,
       -1.3f}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedAddOpModel model_fixture({tensor_type, base_shape, -3.f, 3.f},
                                      {tensor_type, test_shapes[i], -3.f, 3.f},
                                      {tensor_type, {}, -3.f, 3.f},
                                      ActivationFunctionType_NONE);
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input1(), {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f, 1.2f,
                                 2.8f, -1.6f, 0.0f, 0.7f, -2.2f});
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input2(), {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f});
    model_fixture.Invoke();
    EXPECT_THAT(
        model_fixture.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(test_outputs[i], kQuantizedTolerance)))
        << "With shape number " << i;
  }
  // Re-run with exchanged inputs.
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedAddOpModel model_fixture({tensor_type, test_shapes[i], -3.f, 3.f},
                                      {tensor_type, base_shape, -3.f, 3.f},
                                      {tensor_type, {}, -3.f, 3.f},
                                      ActivationFunctionType_NONE);
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input1(), {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f});
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input2(), {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f, 1.2f,
                                 2.8f, -1.6f, 0.0f, 0.7f, -2.2f});
    model_fixture.Invoke();
    EXPECT_THAT(
        model_fixture.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(test_outputs[i], kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

TEST(QuantizedAddOpModel, QuantizedWithMixedBroadcastUInt8) {
  QuantizedWithMixedBroadcast<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedAddOpModel, QuantizedWithMixedBroadcastInt8) {
  QuantizedWithMixedBroadcast<TensorType_INT8, int8_t>();
}

}  // namespace
}  // namespace tflite
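
// A custom main() rather than gtest's default, so TFLite logging can be
// routed to stderr before the tests run.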
int main(int argc, char** argv) {
  ::tflite::LogToStderr();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}