// Home | History | Annotate | Download | only in tools
      1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 #include <string>
     16 #include <vector>
     17 
     18 #include "flatbuffers/flatbuffers.h"
     19 #include "flatbuffers/util.h"
     20 #include <gtest/gtest.h>
     21 #include "tensorflow/contrib/lite/allocation.h"
     22 #include "tensorflow/contrib/lite/error_reporter.h"
     23 #include "tensorflow/contrib/lite/schema/schema_generated.h"
     24 #include "tensorflow/contrib/lite/testing/util.h"
     25 #include "tensorflow/contrib/lite/tools/mutable_op_resolver.h"
     26 #include "tensorflow/contrib/lite/tools/verifier.h"
     27 #include "tensorflow/contrib/lite/version.h"
     28 #include "tensorflow/core/framework/numeric_types.h"
     29 
     30 namespace tflite {
     31 
     32 using flatbuffers::FlatBufferBuilder;
     33 using flatbuffers::Offset;
     34 using flatbuffers::Vector;
     35 
     36 // Build single subgraph model.
     37 class TfLiteFlatbufferModelBuilder {
     38  public:
     39   TfLiteFlatbufferModelBuilder() {
     40     buffers_.push_back(
     41         CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
     42   }
     43 
     44   TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
     45                                const std::vector<string>& custom_ops) {
     46     buffers_.push_back(
     47         CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
     48 
     49     for (const auto& iter : builtin_ops) {
     50       resolver_.AddBuiltin(iter, &fake_op_);
     51     }
     52     for (const auto& iter : custom_ops) {
     53       resolver_.AddCustom(iter.data(), &fake_op_);
     54     }
     55   }
     56 
     57   void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
     58                  const std::vector<uint8_t>& buffer, const char* name) {
     59     int buffer_index = 0;
     60     if (!buffer.empty()) {
     61       buffer_index = buffers_.size();
     62       buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
     63     }
     64     tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
     65                                           name, /*quantization=*/0));
     66   }
     67 
     68   void AddOperator(const std::vector<int32_t>& inputs,
     69                    const std::vector<int32_t>& outputs,
     70                    tflite::BuiltinOperator builtin_op, const char* custom_op) {
     71     operator_codes_.push_back(
     72         CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
     73     operators_.push_back(CreateOperator(
     74         builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
     75         builder_.CreateVector(outputs), BuiltinOptions_NONE,
     76         /*builtin_options=*/0,
     77         /*custom_options=*/0, tflite::CustomOptionsFormat_FLEXBUFFERS));
     78   }
     79 
     80   void FinishModel(const std::vector<int32_t>& inputs,
     81                    const std::vector<int32_t>& outputs) {
     82     auto subgraph = std::vector<Offset<SubGraph>>({CreateSubGraph(
     83         builder_, builder_.CreateVector(tensors_),
     84         builder_.CreateVector(inputs), builder_.CreateVector(outputs),
     85         builder_.CreateVector(operators_),
     86         builder_.CreateString("test_subgraph"))});
     87     auto result = CreateModel(
     88         builder_, TFLITE_SCHEMA_VERSION, builder_.CreateVector(operator_codes_),
     89         builder_.CreateVector(subgraph), builder_.CreateString("test_model"),
     90         builder_.CreateVector(buffers_));
     91     tflite::FinishModelBuffer(builder_, result);
     92   }
     93 
     94   bool Verify() {
     95     return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
     96                           resolver_, DefaultErrorReporter());
     97   }
     98 
     99  private:
    100   FlatBufferBuilder builder_;
    101   MutableOpResolver resolver_;
    102   TfLiteRegistration fake_op_;
    103   std::vector<Offset<Operator>> operators_;
    104   std::vector<Offset<OperatorCode>> operator_codes_;
    105   std::vector<Offset<Tensor>> tensors_;
    106   std::vector<Offset<Buffer>> buffers_;
    107 };
    108 
    109 TEST(VerifyModel, TestEmptyModel) {
    110   FlatBufferBuilder builder;
    111   auto model = CreateModel(builder, /*version=*/TFLITE_SCHEMA_VERSION,
    112                            /*operator_codes=*/0, /*subgraphs=*/0,
    113                            /*description=*/0, /*buffers=*/0);
    114   ::tflite::FinishModelBuffer(builder, model);
    115 
    116   ASSERT_TRUE(Verify(builder.GetBufferPointer(), builder.GetSize(),
    117                      MutableOpResolver{}, DefaultErrorReporter()));
    118 }
    119 
// A complete, well-formed model (one custom op; uint8/string/int32 tensors)
// passes verification.
TEST(VerifyModel, TestSimpleModel) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
  // String tensor byte layout (little-endian int32s): count=2, string start
  // offsets 16 and 17, end offset 19, then the payload "ABC" — i.e. the two
  // strings are "A" (bytes 16..17) and "BC" (bytes 17..19).
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  builder.FinishModel({0, 1}, {2});
  ASSERT_TRUE(builder.Verify());
}
    132 
    133 TEST(VerifyModel, TestCorruptedData) {
    134   std::string model = "123";
    135   ASSERT_FALSE(Verify(model.data(), model.size(), MutableOpResolver{},
    136                       /*error_reporter=*/nullptr));
    137 }
    138 
    139 TEST(VerifyModel, TestUnsupportedVersion) {
    140   FlatBufferBuilder builder;
    141   auto model = CreateModel(builder, /*version=*/1, /*operator_codes=*/0,
    142                            /*subgraphs=*/0, /*description=*/0, /*buffers=*/0);
    143   ::tflite::FinishModelBuffer(builder, model);
    144   ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
    145                       MutableOpResolver{}, DefaultErrorReporter()));
    146 }
    147 
    148 TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
    149   FlatBufferBuilder builder;
    150   auto model = CreateModel(builder, /*version=*/TFLITE_SCHEMA_VERSION,
    151                            /*operator_codes=*/0,
    152                            /*subgraphs=*/0, /*description=*/0, /*buffers=*/0);
    153   ::tflite::FinishModelBuffer(builder, model);
    154 
    155   std::string model_content(reinterpret_cast<char*>(builder.GetBufferPointer()),
    156                             builder.GetSize());
    157   for (int i = 0; i < model_content.size(); i++) {
    158     model_content[i] = (model_content[i] + 137) % 255;
    159     EXPECT_FALSE(Verify(model_content.data(), model_content.size(),
    160                         MutableOpResolver{}, DefaultErrorReporter()))
    161         << "Fail at position: " << i;
    162   }
    163 }
    164 
    165 TEST(VerifyModel, TestIntTensorShapeIsGreaterThanBuffer) {
    166   TfLiteFlatbufferModelBuilder builder;
    167   builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input");
    168   builder.FinishModel({}, {});
    169   ASSERT_FALSE(builder.Verify());
    170 }
    171 
    172 TEST(VerifyModel, TestIntTensorShapeIsSmallerThanBuffer) {
    173   TfLiteFlatbufferModelBuilder builder;
    174   builder.AddTensor({2, 1}, TensorType_UINT8, {1, 2, 3, 4}, "input");
    175   builder.FinishModel({}, {});
    176   ASSERT_FALSE(builder.Verify());
    177 }
    178 
    179 TEST(VerifyModel, TestIntTensorShapeOverflow) {
    180   TfLiteFlatbufferModelBuilder builder;
    181   builder.AddTensor({1024, 2048, 4096}, TensorType_UINT8, {1, 2, 3, 4},
    182                     "input");
    183   builder.FinishModel({}, {});
    184   ASSERT_FALSE(builder.Verify());
    185 }
    186 
// A tensor whose buffer index points past the end of the model's buffer
// vector must fail verification.
TEST(VerifyModel, TensorBufferIsNotValid) {
  FlatBufferBuilder builder;
  std::vector<int> shape = {2, 3};
  // The tensor claims buffer index 2, but the model below defines only one
  // buffer (index 0), so the reference is out of range.
  auto tensors = builder.CreateVector(std::vector<Offset<Tensor>>{
      CreateTensorDirect(builder, &shape, TensorType_INT32, /*buffer=*/2,
                         "input", /*quantization=*/0)});
  auto subgraph = std::vector<Offset<SubGraph>>(
      {CreateSubGraph(builder, tensors, /*inputs=*/0, /*outputs=*/0,
                      /*operators=*/0, builder.CreateString("Main"))});

  auto buffers = builder.CreateVector(std::vector<Offset<Buffer>>{
      CreateBuffer(builder,
                   builder.CreateVector(std::vector<uint8>{1, 2, 3, 4, 5, 6})),
  });

  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, /*operator_codes=*/0,
                           builder.CreateVector(subgraph),
                           builder.CreateString("SmartReply"), buffers);

  ::tflite::FinishModelBuffer(builder, model);
  ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
                      MutableOpResolver{}, DefaultErrorReporter()));
}
    210 
// The leading int32 of a string tensor is the string count. Here the
// little-endian bytes {0x00, 0x00, 0x00, 0x20} encode 0x20000000 strings,
// which cannot possibly fit in an 18-byte buffer.
TEST(VerifyModel, StringTensorHasInvalidNumString) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {0x00, 0x00, 0x00, 0x20, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'},
      "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
}
    220 
// With count=2 the offset table occupies bytes 0..15, so the first string
// offset of 12 points inside the header itself — an invalid layout.
TEST(VerifyModel, StringTensorOffsetTooSmall) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 12, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'}, "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
}
    229 
// The final offset (22) points past the end of the 18-byte buffer, so the
// last string would read out of bounds — an invalid layout.
TEST(VerifyModel, StringTensorOffsetOutOfRange) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 22, 0, 0, 0, 'A', 'B'}, "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
}
    238 
// The final offset (18) says the string data ends at byte 18, but the
// buffer is 19 bytes long — the trailing 'C' makes the buffer larger than
// the offsets account for, which must be rejected.
TEST(VerifyModel, StringTensorIsLargerThanRequired) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B', 'C'},
      "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
}
    248 
TEST(VerifyModel, AllOpsAreSupported) {
  // The resolver registers both BuiltinOperator_ADD and custom op
  // "CustomOp" — exactly the two operators the model uses below.
  // NOTE(review): despite the test name, this asserts that Verify() FAILS.
  // Confirm against the verifier implementation whether the model is
  // rejected for some other reason (e.g. the empty subgraph inputs/outputs
  // passed to FinishModel) or whether the assertion polarity is a bug.
  TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"CustomOp"});
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  builder.AddTensor({2, 3}, TensorType_UINT8, {}, "output");
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "CustomOp");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
}
    259 
    260 TEST(VerifyModel, UseUnsupportedBuiltinOps) {
    261   TfLiteFlatbufferModelBuilder builder({BuiltinOperator_SUB}, {"CustomOp"});
    262   builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
    263   builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
    264   builder.AddTensor({2, 3}, TensorType_UINT8, {}, "output");
    265   builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
    266   builder.FinishModel({}, {});
    267   ASSERT_FALSE(builder.Verify());
    268 }
    269 
    270 TEST(VerifyModel, UseUnsupportedCustomOps) {
    271   TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"NewOp"});
    272   builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
    273   builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
    274   builder.AddTensor({2, 3}, TensorType_UINT8, {}, "output");
    275   builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "Not supported");
    276   builder.FinishModel({}, {});
    277   ASSERT_FALSE(builder.Verify());
    278 }
    279 
    280 // TODO(yichengfan): make up malicious files to test with.
    281 
    282 }  // namespace tflite
    283 
// Test entry point: routes TFLite logging to stderr so verifier error
// output shows up in test logs, then initializes gtest and runs all cases.
int main(int argc, char** argv) {
  ::tflite::LogToStderr();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
    289