/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SVDF.h"

#include "NeuralNetworksWrapper.h"
#include "gmock/gmock-matchers.h"
#include "gtest/gtest.h"

using ::testing::FloatNear;
using ::testing::Matcher;

namespace android {
namespace nn {
namespace wrapper {

namespace {

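// Builds one FloatNear matcher per expected value so that a whole output
// tensor can be compared against golden data with an absolute tolerance, e.g.
//   EXPECT_THAT(svdf.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));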
std::vector<Matcher<float>> ArrayFloatNear(const std::vector<float>& values,
                                           float max_abs_error = 1.e-6) {
  std::vector<Matcher<float>> matchers;
  matchers.reserve(values.size());
  for (const float& v : values) {
    matchers.emplace_back(FloatNear(v, max_abs_error));
  }
  return matchers;
}

}  // namespace

using ::testing::ElementsAreArray;

static float svdf_input[] = {0.12609188,  -0.46347019, -0.89598465,
                             0.12609188,  -0.46347019, -0.89598465,

                             0.14278367,  -1.64410412, -0.75222826,
                             0.14278367,  -1.64410412, -0.75222826,

                             0.49837467,  0.19278903,  0.26584083,
                             0.49837467,  0.19278903,  0.26584083,

                             -0.11186574, 0.13164264,  -0.05349274,
                             -0.11186574, 0.13164264,  -0.05349274,

                             -0.68892461, 0.37783599,  0.18263303,
                             -0.68892461, 0.37783599,  0.18263303,

                             -0.81299269, -0.86831826, 1.43940818,
                             -0.81299269, -0.86831826, 1.43940818,

                             -1.45006323, -0.82251364, -1.69082689,
                             -1.45006323, -0.82251364, -1.69082689,

                             0.03966608,  -0.24936394, -0.77526885,
                             0.03966608,  -0.24936394, -0.77526885,

                             0.11771342,  -0.23761693, -0.65898693,
                             0.11771342,  -0.23761693, -0.65898693,

                             -0.89477462, 1.67204106,  -0.53235275,
                             -0.89477462, 1.67204106,  -0.53235275};

static float svdf_input_rank2[] = {
    0.12609188,  -0.46347019, -0.89598465,
    0.35867718,  0.36897406,  0.73463392,

    0.14278367,  -1.64410412, -0.75222826,
    -0.57290924, 0.12729003,  0.7567004,

    0.49837467,  0.19278903,  0.26584083,
    0.17660543,  0.52949083,  -0.77931279,

    -0.11186574, 0.13164264,  -0.05349274,
    -0.72674477, -0.5683046,  0.55900657,

    -0.68892461, 0.37783599,  0.18263303,
    -0.63690937, 0.44483393,  -0.71817774,

    -0.81299269, -0.86831826, 1.43940818,
    -0.95760226, 1.82078898,  0.71135032,

    -1.45006323, -0.82251364, -1.69082689,
    -1.65087092, -1.89238167, 1.54172635,

    0.03966608,  -0.24936394, -0.77526885,
    2.06740379,  -1.51439476, 1.43768692,

    0.11771342,  -0.23761693, -0.65898693,
    0.31088525,  -1.55601168, -0.87661445,

    -0.89477462, 1.67204106,  -0.53235275,
    -0.6230064,  0.29819036,  1.06939757,
};

static float svdf_golden_output[] = {
    0.014899,    -0.0517661, -0.143725, -0.00271883,
    0.014899,    -0.0517661, -0.143725, -0.00271883,

    0.068281,    -0.162217,  -0.152268, 0.00323521,
    0.068281,    -0.162217,  -0.152268, 0.00323521,

    -0.0317821,  -0.0333089, 0.0609602, 0.0333759,
    -0.0317821,  -0.0333089, 0.0609602, 0.0333759,

    -0.00623099, -0.077701,  -0.391193, -0.0136691,
    -0.00623099, -0.077701,  -0.391193, -0.0136691,

    0.201551,    -0.164607,  -0.179462, -0.0592739,
    0.201551,    -0.164607,  -0.179462, -0.0592739,

    0.0886511,   -0.0875401, -0.269283, 0.0281379,
    0.0886511,   -0.0875401, -0.269283, 0.0281379,

    -0.201174,   -0.586145,  -0.628624, -0.0330412,
    -0.201174,   -0.586145,  -0.628624, -0.0330412,

    -0.0839096,  -0.299329,  0.108746,  0.109808,
    -0.0839096,  -0.299329,  0.108746,  0.109808,

    0.419114,    -0.237824,  -0.422627, 0.175115,
    0.419114,    -0.237824,  -0.422627, 0.175115,

    0.36726,     -0.522303,  -0.456502, -0.175475,
    0.36726,     -0.522303,  -0.456502, -0.175475};

static float svdf_golden_output_rank_2[] = {
    -0.09623547, -0.10193135, 0.11083051,  -0.0347917,
    0.1141196,   0.12965347,  -0.12652366, 0.01007236,

    -0.16396809, -0.21247184, 0.11259045,  -0.04156673,
    0.10132131,  -0.06143532, -0.00924693, 0.10084561,

    0.01257364,  0.0506071,   -0.19287863, -0.07162561,
    -0.02033747, 0.22673416,  0.15487903,  0.02525555,

    -0.1411963,  -0.37054959, 0.01774767,  0.05867489,
    0.09607603,  -0.0141301,  -0.08995658, 0.12867066,

    -0.27142537, -0.16955489, 0.18521598,  -0.12528358,
    0.00331409,  0.11167502,  0.02218599,  -0.07309391,

    0.09593632,  -0.28361851, -0.0773851,  0.17199151,
    -0.00075242, 0.33691186,  -0.1536046,  0.16572715,

    -0.27916506, -0.27626723, 0.42615682,  0.3225764,
    -0.37472126, -0.55655634, -0.05013514, 0.289112,

    -0.24418658, 0.07540751,  -0.1940318,  -0.08911639,
    0.00732617,  0.46737891,  0.26449674,  0.24888524,

    -0.17225097, -0.54660404, -0.38795233, 0.08389944,
    0.07736043,  -0.28260678, 0.15666828,  1.14949894,

    -0.57454878, -0.64704704, 0.73235172,  -0.34616736,
    0.21120001,  -0.22927976, 0.02455296,  -0.35906726,
};

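// X-macro listing the SVDF input and weight tensors in the order the op
// consumes them. Instantiating it with a one-argument macro (for example
// FOR_ALL_INPUT_AND_WEIGHT_TENSORS(DefineTensor)) stamps that macro out once
// per tensor name.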
#define FOR_ALL_INPUT_AND_WEIGHT_TENSORS(ACTION) \
  ACTION(Input)                                  \
  ACTION(WeightsFeature)                         \
  ACTION(WeightsTime)                            \
  ACTION(Bias)                                   \
  ACTION(StateIn)

// For all output and intermediate states
#define FOR_ALL_OUTPUT_TENSORS(ACTION) \
  ACTION(StateOut)                     \
  ACTION(Output)

// Helper class that builds and runs a single-operation NNAPI model for the
// SVDF op, mirroring the SVDFOpModel test harness used for the TFLite op.
class SVDFOpModel {
 public:
  SVDFOpModel(uint32_t batches, uint32_t units, uint32_t input_size,
              uint32_t memory_size, uint32_t rank)
      : batches_(batches),
        units_(units),
        input_size_(input_size),
        memory_size_(memory_size),
        rank_(rank) {
    std::vector<std::vector<uint32_t>> input_shapes{
        {batches_, input_size_},                    // Input tensor
        {units_ * rank_, input_size_},              // weights_feature tensor
        {units_ * rank_, memory_size_},             // weights_time tensor
        {units_},                                   // bias tensor
        {batches_, memory_size_ * units_ * rank_},  // state in tensor
    };
    std::vector<uint32_t> inputs;
    auto it = input_shapes.begin();

    // Input and weights
#define AddInput(X)                                   \
  OperandType X##OpndTy(Type::TENSOR_FLOAT32, *it++); \
  inputs.push_back(model_.addOperand(&X##OpndTy));
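// AddInput(Input), for example, expands to:
//   OperandType InputOpndTy(Type::TENSOR_FLOAT32, *it++);
//   inputs.push_back(model_.addOperand(&InputOpndTy));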

    FOR_ALL_INPUT_AND_WEIGHT_TENSORS(AddInput);

#undef AddInput

    // Parameters
    OperandType RankParamTy(Type::INT32, {});
    inputs.push_back(model_.addOperand(&RankParamTy));
    OperandType ActivationParamTy(Type::INT32, {});
    inputs.push_back(model_.addOperand(&ActivationParamTy));

    // Output and other intermediate state
    std::vector<std::vector<uint32_t>> output_shapes{
        {batches_, memory_size_ * units_ * rank_},  // state out tensor
        {batches_, units_},                         // output tensor
    };
    std::vector<uint32_t> outputs;

    auto it2 = output_shapes.begin();

#define AddOutput(X)                                   \
  OperandType X##OpndTy(Type::TENSOR_FLOAT32, *it2++); \
  outputs.push_back(model_.addOperand(&X##OpndTy));

    FOR_ALL_OUTPUT_TENSORS(AddOutput);

#undef AddOutput

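    // Zero-fill the input and input-state buffers up front; the weight and
    // bias vectors are populated later through the Set* methods.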
    Input_.insert(Input_.end(), batches_ * input_size_, 0.f);
    StateIn_.insert(StateIn_.end(), batches_ * units_ * rank_ * memory_size_,
                    0.f);

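    // Number of elements in a tensor with the given dimensions.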
    auto multiAll = [](const std::vector<uint32_t>& dims) -> uint32_t {
      uint32_t sz = 1;
      for (uint32_t d : dims) {
        sz *= d;
      }
      return sz;
    };

    it2 = output_shapes.begin();

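    // Pre-size the output buffers (StateOut_ and Output_) with zeros so that
    // setOutput() in Invoke() can be handed correctly sized storage.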
#define ReserveOutput(X) X##_.insert(X##_.end(), multiAll(*it2++), 0.f);

    FOR_ALL_OUTPUT_TENSORS(ReserveOutput);

#undef ReserveOutput

    model_.addOperation(ANEURALNETWORKS_SVDF, inputs, outputs);
    model_.identifyInputsAndOutputs(inputs, outputs);

    model_.finish();
  }

  void Invoke() {
    ASSERT_TRUE(model_.isValid());

    Compilation compilation(&model_);
    compilation.finish();
    Execution execution(&compilation);

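    // Carry state across invocations: the state written to StateOut_ by the
    // previous Invoke() becomes this invocation's StateIn_.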
    StateIn_.swap(StateOut_);

#define SetInputOrWeight(X)                                     \
  ASSERT_EQ(execution.setInput(SVDF::k##X##Tensor, X##_.data(), \
                               sizeof(float) * X##_.size()),    \
            Result::NO_ERROR);

    FOR_ALL_INPUT_AND_WEIGHT_TENSORS(SetInputOrWeight);

#undef SetInputOrWeight

#define SetOutput(X)                                             \
  EXPECT_TRUE(X##_.data() != nullptr);                           \
  ASSERT_EQ(execution.setOutput(SVDF::k##X##Tensor, X##_.data(), \
                                sizeof(float) * X##_.size()),    \
            Result::NO_ERROR);

    FOR_ALL_OUTPUT_TENSORS(SetOutput);

#undef SetOutput

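    // The last two SVDF inputs are scalar parameters: the rank and the fused
    // activation function (kTfLiteActNone here, i.e. no activation applied).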
    ASSERT_EQ(execution.setInput(SVDF::kRankParam, &rank_, sizeof(rank_)),
              Result::NO_ERROR);

    int activation = TfLiteFusedActivation::kTfLiteActNone;
    ASSERT_EQ(execution.setInput(SVDF::kActivationParam, &activation,
                                 sizeof(activation)),
              Result::NO_ERROR);

    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
  }

#define DefineSetter(X)                          \
  void Set##X(const std::vector<float>& f) {     \
    X##_.insert(X##_.end(), f.begin(), f.end()); \
  }

  FOR_ALL_INPUT_AND_WEIGHT_TENSORS(DefineSetter);

#undef DefineSetter

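  // Overwrites the input tensor starting at `offset` with the values in
  // [begin, end); the tests use this to load one time step of input data.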
  void SetInput(int offset, float* begin, float* end) {
    for (; begin != end; begin++, offset++) {
      Input_[offset] = *begin;
    }
  }

  // Resets the state of the SVDF op by filling it with 0's.
  void ResetState() {
    std::fill(StateIn_.begin(), StateIn_.end(), 0.f);
    std::fill(StateOut_.begin(), StateOut_.end(), 0.f);
  }

  // Returns the output tensor of the SVDF op.
  const std::vector<float>& GetOutput() const { return Output_; }

  int input_size() const { return input_size_; }
  int num_units() const { return units_; }
  int num_batches() const { return batches_; }

 private:
  Model model_;

  const uint32_t batches_;
  const uint32_t units_;
  const uint32_t input_size_;
  const uint32_t memory_size_;
  const uint32_t rank_;

#define DefineTensor(X) std::vector<float> X##_;

  FOR_ALL_INPUT_AND_WEIGHT_TENSORS(DefineTensor);
  FOR_ALL_OUTPUT_TENSORS(DefineTensor);

#undef DefineTensor
};

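// Feeds a 10-step input sequence (2 batches x 3 values per step) through a
// rank-1 SVDF with 4 units and a memory depth of 10, checking each step's
// output against the golden values above.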
TEST(SVDFOpTest, BlackBoxTest) {
  SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
                   /*memory_size=*/10, /*rank=*/1);
  svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
                          0.22197971, 0.12416199, 0.27901134, 0.27557442,
                          0.3905206, -0.36137494, -0.06634006, -0.10640851});

  svdf.SetWeightsTime(
      {-0.31930989, 0.37613347,  0.27901134,  -0.36137494, -0.36118156,
       0.22197971,  0.27557442,  -0.06634006, 0.0079667,   0.12416199,

       0.3905206,   -0.10640851, -0.0976817,  0.15294972,  0.39635518,
       -0.02702999, 0.39296314,  0.15785322,  0.21931258,  0.31053296,

       -0.36916667, 0.38031587,  -0.21580373, 0.27072677,  0.23622236,
       0.34936687,  0.18174365,  0.35907319,  -0.17493086, 0.324846,

       -0.10781813, 0.27201805,  0.14324132,  -0.23681851, -0.27115166,
       -0.01580888, -0.14943552, 0.15465137,  0.09784451,  -0.0337657});

  svdf.SetBias({});

  svdf.ResetState();
  const int svdf_num_batches = svdf.num_batches();
  const int svdf_input_size = svdf.input_size();
  const int svdf_num_units = svdf.num_units();
  const int input_sequence_size =
      sizeof(svdf_input) / sizeof(float) / (svdf_input_size * svdf_num_batches);
  // For each step of the input sequence, set the input tensor, invoke the
  // SVDF op, and check the output against the expected golden values.
  for (int i = 0; i < input_sequence_size; i++) {
    float* batch_start = svdf_input + i * svdf_input_size * svdf_num_batches;
    float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
    svdf.SetInput(0, batch_start, batch_end);

    svdf.Invoke();

    float* golden_start =
        svdf_golden_output + i * svdf_num_units * svdf_num_batches;
    float* golden_end = golden_start + svdf_num_units * svdf_num_batches;
    std::vector<float> expected;
    expected.insert(expected.end(), golden_start, golden_end);

    EXPECT_THAT(svdf.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
  }
}

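// Same sequence-driven check as above, but with rank 2: the feature and time
// weights have units * rank rows, and the two batches use distinct data.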
TEST(SVDFOpTest, BlackBoxTestRank2) {
  SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
                   /*memory_size=*/10, /*rank=*/2);
  svdf.SetWeightsFeature({-0.31930989, 0.0079667,   0.39296314,  0.37613347,
                          0.12416199,  0.15785322,  0.27901134,  0.3905206,
                          0.21931258,  -0.36137494, -0.10640851, 0.31053296,
                          -0.36118156, -0.0976817,  -0.36916667, 0.22197971,
                          0.15294972,  0.38031587,  0.27557442,  0.39635518,
                          -0.21580373, -0.06634006, -0.02702999, 0.27072677});

  svdf.SetWeightsTime(
      {-0.31930989, 0.37613347,  0.27901134,  -0.36137494, -0.36118156,
       0.22197971,  0.27557442,  -0.06634006, 0.0079667,   0.12416199,

       0.3905206,   -0.10640851, -0.0976817,  0.15294972,  0.39635518,
       -0.02702999, 0.39296314,  0.15785322,  0.21931258,  0.31053296,

       -0.36916667, 0.38031587,  -0.21580373, 0.27072677,  0.23622236,
       0.34936687,  0.18174365,  0.35907319,  -0.17493086, 0.324846,

       -0.10781813, 0.27201805,  0.14324132,  -0.23681851, -0.27115166,
       -0.01580888, -0.14943552, 0.15465137,  0.09784451,  -0.0337657,

       -0.14884081, 0.19931212,  -0.36002168, 0.34663299,  -0.11405486,
       0.12672701,  0.39463779,  -0.07886535, -0.06384811, 0.08249187,

       -0.26816407, -0.19905911, 0.29211238,  0.31264046,  -0.28664589,
       0.05698794,  0.11613581,  0.14078894,  0.02187902,  -0.21781836,

       -0.15567942, 0.08693647,  -0.38256618, 0.36580828,  -0.22922277,
       -0.0226903,  0.12878349,  -0.28122205, -0.10850525, -0.11955214,

       0.27179423,  -0.04710215, 0.31069002,  0.22672787,  0.09580326,
       0.08682203,  0.1258215,   0.1851041,   0.29228821,  0.12366763});

  svdf.SetBias({});

  svdf.ResetState();
  const int svdf_num_batches = svdf.num_batches();
  const int svdf_input_size = svdf.input_size();
  const int svdf_num_units = svdf.num_units();
  const int input_sequence_size = sizeof(svdf_input_rank2) / sizeof(float) /
                                  (svdf_input_size * svdf_num_batches);
  // For each step of the input sequence, set the input tensor, invoke the
  // SVDF op, and check the output against the expected golden values.
  for (int i = 0; i < input_sequence_size; i++) {
    float* batch_start =
        svdf_input_rank2 + i * svdf_input_size * svdf_num_batches;
    float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
    svdf.SetInput(0, batch_start, batch_end);

    svdf.Invoke();

    float* golden_start =
        svdf_golden_output_rank_2 + i * svdf_num_units * svdf_num_batches;
    float* golden_end = golden_start + svdf_num_units * svdf_num_batches;
    std::vector<float> expected;
    expected.insert(expected.end(), golden_start, golden_end);

    EXPECT_THAT(svdf.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
  }
}

}  // namespace wrapper
}  // namespace nn
}  // namespace android