// frameworks/ml/nn/runtime/test/fuzzing -- TestRandomGraph
      1 /*
      2  * Copyright (C) 2019 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include <gtest/gtest.h>
     18 
     19 #include <algorithm>
     20 #include <string>
     21 
     22 #include "TestNeuralNetworksWrapper.h"
     23 #include "fuzzing/OperationManager.h"
     24 #include "fuzzing/RandomGraphGenerator.h"
     25 #include "fuzzing/RandomGraphGeneratorUtils.h"
     26 
     27 #ifndef NNTEST_CTS
     28 #include <android-base/properties.h>
     29 #include <vector>
     30 #include "Manager.h"
     31 #include "SampleDriverFull.h"
     32 
     33 using android::nn::sample_driver::SampleDriverFull;
     34 
     35 #endif
     36 
     37 namespace android {
     38 namespace nn {
     39 namespace fuzzing_test {
     40 
using test_wrapper::Result;
// Name of the NNAPI reference device; the tests below treat it specially
// (it is required to support, compile, and execute every generated graph).
constexpr char kRefDeviceName[] = "nnapi-reference";
     43 
     44 #ifndef NNTEST_CTS
     45 class TestDriverV1_2 : public SampleDriverFull {
     46    public:
     47     TestDriverV1_2() : SampleDriverFull(name, {.execTime = 0.9f, .powerUsage = 0.9f}) {}
     48     static constexpr char name[] = "TestDriverV1_2";
     49 };
     50 
     51 // Like SampleDriverFull, but implementing 1.1
     52 class TestDriverV1_1 : public V1_1::IDevice {
     53    public:
     54     TestDriverV1_1()
     55         : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.8f, .powerUsage = 0.8f})) {}
     56     static constexpr char name[] = "TestDriverV1_1";
     57     Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
     58         return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
     59     }
     60     Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
     61                                             getSupportedOperations_1_1_cb _hidl_cb) override {
     62         return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
     63     }
     64     Return<ErrorStatus> prepareModel_1_1(
     65             const V1_1::Model& model, ExecutionPreference preference,
     66             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
     67         return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
     68     }
     69     Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
     70     Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
     71         return mDriverV1_2->getCapabilities(_hidl_cb);
     72     }
     73     Return<void> getSupportedOperations(const V1_0::Model& model,
     74                                         getSupportedOperations_cb _hidl_cb) override {
     75         return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
     76     }
     77     Return<ErrorStatus> prepareModel(
     78             const V1_0::Model& model,
     79             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
     80         return mDriverV1_2->prepareModel(model, actualCallback);
     81     }
     82 
     83    private:
     84     const sp<V1_2::IDevice> mDriverV1_2;
     85 };
     86 
     87 // Like SampleDriverFull, but implementing 1.0
     88 class TestDriverV1_0 : public V1_0::IDevice {
     89    public:
     90     TestDriverV1_0()
     91         : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.7f, .powerUsage = 0.7f})) {}
     92     static constexpr char name[] = "TestDriverV1_0";
     93     Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
     94         return mDriverV1_2->getCapabilities(_hidl_cb);
     95     }
     96     Return<void> getSupportedOperations(const V1_0::Model& model,
     97                                         getSupportedOperations_cb _hidl_cb) override {
     98         return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
     99     }
    100     Return<ErrorStatus> prepareModel(
    101             const V1_0::Model& model,
    102             const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
    103         return mDriverV1_2->prepareModel(model, actualCallback);
    104     }
    105     Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
    106 
    107    private:
    108     const sp<V1_2::IDevice> mDriverV1_2;
    109 };
    110 
    111 template <class T_TestDriver>
    112 std::shared_ptr<Device> makeTestDevice() {
    113     return DeviceManager::forTest_makeDriverDevice(T_TestDriver::name, new T_TestDriver);
    114 }
    115 
    116 #endif
    117 
    118 // Manages compilation on one single device.
    119 class CompilationForDevice : public test_wrapper::Compilation {
    120    public:
    121     CompilationForDevice() = default;
    122     CompilationForDevice(const CompilationForDevice&) = delete;
    123     CompilationForDevice& operator=(const CompilationForDevice&) = delete;
    124 
    125     bool initialize(const test_wrapper::Model* model, const ANeuralNetworksDevice* device) {
    126         int ret = ANeuralNetworksCompilation_createForDevices(model->getHandle(), &device, 1,
    127                                                               &mCompilation);
    128         return ret == ANEURALNETWORKS_NO_ERROR;
    129     }
    130 };
    131 
    132 // NN API fuzzer logging setting comes from system property debug.nn.fuzzer.log and
    133 // debug.nn.fuzzer.dumpspec.
    134 // * setprop debug.nn.fuzzer.log 1 : enable logging.
    135 // * setprop debug.nn.fuzzer.log 0 : silence logging.
    136 // * setprop debug.nn.fuzzer.dumpspec 1 : dump the randomly generated graph to a spec file.
    137 // * setprop debug.nn.fuzzer.dumpspec 0 : do not dump the graph.
    138 //
    139 // Logs and spec files are dumped to /data/local/tmp/${testname}.{log,mod.py},
    140 // e.g. for test case TestRandomGraph/RandomGraphTest/Large/0,
    141 //      log : /data/local/tmp/TestRandomGraph_RandomGraphTest_Large_0.log
    142 //      spec: /data/local/tmp/TestRandomGraph_RandomGraphTest_Large_0.mod.py
    143 //
class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
   public:
    // Runs once before the first test of the suite: reads the logging/dumpspec
    // system properties, registers the three synthetic drivers (non-CTS builds
    // only), then enumerates all NNAPI devices and records the minimum feature
    // level across them.
    static void SetUpTestCase() {
#ifndef NNTEST_CTS
        mEnableLog = ::android::base::GetProperty("debug.nn.fuzzer.log", "") == "1";
        mDumpSpec = ::android::base::GetProperty("debug.nn.fuzzer.dumpspec", "") == "1";

        // Save the standard device list so it can be restored after tests that
        // swap in the synthetic devices.
        mStandardDevices = DeviceManager::get()->forTest_getDevices();
        mSyntheticDevices.push_back(makeTestDevice<TestDriverV1_2>());
        mSyntheticDevices.push_back(makeTestDevice<TestDriverV1_1>());
        mSyntheticDevices.push_back(makeTestDevice<TestDriverV1_0>());
#endif

        // Get all the devices and device names.
        mStandardDevicesFeatureLevel = __ANDROID_API_FUTURE__;
        uint32_t numDevices = 0;
        ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
        for (uint32_t i = 0; i < numDevices; i++) {
            ANeuralNetworksDevice* device = nullptr;
            const char* name = nullptr;
            int64_t featureLevel;
            ASSERT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
                      ANEURALNETWORKS_NO_ERROR);
            mDevices.emplace(name, device);
            // Track the minimum feature level across all standard devices.
            mStandardDevicesFeatureLevel = std::min(mStandardDevicesFeatureLevel, featureLevel);
        }
    }

   protected:
    // Per-test setup: builds mTestName from "<test_case>_<test name>" with '/'
    // replaced by '_' (so it is usable as a file name), and opens the log file
    // if logging was enabled via the system property.
    virtual void SetUp() override {
        // Initialize logging.
        const ::testing::TestInfo* const testInfo =
                ::testing::UnitTest::GetInstance()->current_test_info();
        mTestName = mTestName + testInfo->test_case_name() + "_" + testInfo->name();
        std::replace(mTestName.begin(), mTestName.end(), '/', '_');
        if (mEnableLog) NN_FUZZER_LOG_INIT("/data/local/tmp/" + mTestName + ".log");
    }

    // Per-test teardown: dumps the generated graph as a spec file on failure
    // (or unconditionally when dumpspec is enabled), then closes the log.
    virtual void TearDown() override {
        if (::testing::Test::HasFailure() || mDumpSpec) {
            mGraph.dumpSpecFile("/data/local/tmp/" + mTestName + ".mod.py", mTestName);
        }
        NN_FUZZER_LOG_CLOSE;
    }

    // Returns true if the current test should be skipped: either it is on the
    // explicit disabled list, or the device is pre-Q and the graph contains a
    // known-problematic pattern.
    bool shouldSkipTest(int64_t featureLevel) {
        static const std::set<std::string> kDisabledTests = {
                // In this test, the RGG produces a non-sensible graph with extreme large output
                // gain and highly clamped output range.
                // TODO: Currently quantized buffer values are uniformly distributed within
                //       [0, 255]. We should investigate on a better buffer value generation
                //       algorithm that represents the real-world cases.
                "TestRandomGraph_SingleOperationTest_CONV_2D_V1_2_12",
        };
        if (kDisabledTests.find(mTestName) != kDisabledTests.end()) return true;
        // Devices at feature level Q or later are never skipped on pattern grounds.
        if (featureLevel >= __ANDROID_API_Q__) return false;
        const auto& operations = mGraph.getOperations();
        for (const auto& op : operations) {
            // Skip if testing BATCH_TO_SPACE_ND with batch dimension == 1.
            if (op.opType == ANEURALNETWORKS_BATCH_TO_SPACE_ND &&
                op.inputs[0]->dimensions[0].getValue() == 1)
                return true;
        }
        return false;
    }

    // Compile and execute the generated graph on a device selected by name.
    void computeAndVerifyResultsForDevice(const test_wrapper::Model* model, uint32_t numOps,
                                          const std::string& name) {
        SCOPED_TRACE("Device: " + name);
        ASSERT_TRUE(mDevices.find(name) != mDevices.end());
        const auto device = mDevices[name];
        bool isRef = name.compare(kRefDeviceName) == 0;

        // Check if the device fully supports the graph.
        constexpr int kMaxNumberOperations = 1000;
        ASSERT_TRUE(numOps <= kMaxNumberOperations);
        bool supported[kMaxNumberOperations] = {false};
        ASSERT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(model->getHandle(), &device,
                                                                        1, supported),
                  ANEURALNETWORKS_NO_ERROR);
        if (!std::all_of(supported, supported + numOps, [](bool v) { return v; })) {
            // The reference device should always support all operations.
            ASSERT_FALSE(isRef);
            std::cout << "[          ]   SKIP: " << name << " does not support the graph.\n";
            return;
        }

        // Since this test is introduced in Android Q, we only assert no compilation or execution
        // failure if the device has feature level >= Q (API level 29). For pre-Q devices, we allow
        // them to fail with OP_FAILED, but must not hang or crash.
        int64_t featureLevel;
        ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
                  ANEURALNETWORKS_NO_ERROR);
        if (shouldSkipTest(featureLevel)) return;

        // Create compilation for device.
        CompilationForDevice compilation;
        ASSERT_TRUE(compilation.initialize(model, device));
        Result compileReturn = compilation.finish();
        // Even if the model is fully supported, the compilation may still fail, e.g. each operation
        // is supported, but model is too big (too many operations and/or too-large constants) for
        // device.
        if (compileReturn == Result::OP_FAILED) {
            ASSERT_FALSE(isRef);
            std::cout << "[          ]   SKIP: " << name << " failed at compilation step.\n";
            return;
        }
        ASSERT_EQ(compileReturn, Result::NO_ERROR);

        // Create request.
        // For the reference device, only inputs are set (its outputs become the
        // golden reference); for other devices, output buffers are also bound
        // so the results can be compared below.
        test_wrapper::Execution execution(&compilation);
        std::vector<OperandBuffer> outputs;
        if (isRef) {
            mGraph.createRequest(&execution);
        } else {
            mGraph.createRequest(&execution, &outputs);
        }

        // Compute result.
        Result executeReturn = execution.compute();
        // Even if the model is fully supported and the compilation succeeds, the execution may
        // still fail, e.g. there may be operand shapes that are unknown until execution time, and
        // at execution time turn out to be too big.
        if (executeReturn == Result::OP_FAILED) {
            ASSERT_FALSE(isRef);
            std::cout << "[          ]   SKIP: " << name << " failed at execution step.\n";
            return;
        }
        ASSERT_EQ(executeReturn, Result::NO_ERROR);
        // Only check accuracy against the reference for Q+ devices.
        if (featureLevel >= __ANDROID_API_Q__ && !isRef) {
            mGraph.checkResults(outputs, mCriteria);
        }
    }

    // Compile and execute the generated graph normally (i.e., allow runtime to
    // distribute across devices).
    void computeAndVerifyResults(const test_wrapper::Model* model, bool checkResults) {
        // Because we're not using the introspection/control API, the CpuDevice
        // is available as a fallback, and hence we assume that compilation and
        // execution will succeed.

        // Create compilation.
        test_wrapper::Compilation compilation(model);
        ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

        // Create request.
        test_wrapper::Execution execution(&compilation);
        std::vector<OperandBuffer> outputs;
        mGraph.createRequest(&execution, &outputs);

        // Compute and verify result.
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
        if (checkResults) {
            mGraph.checkResults(outputs, mCriteria);
        }
    }

    // Main test entrance.
    // Generates a random graph from the parameterized seed, runs it first on
    // the reference device (establishing golden outputs), then on each other
    // device, and finally — for multi-op graphs — through the normal runtime
    // path and (non-CTS) across the synthetic devices to stress the partitioner.
    void testRandomGraph(uint32_t numOperations, uint32_t dimensionRange) {
        // Generate a random graph.
        ASSERT_TRUE(mGraph.generate(kSeed, numOperations, dimensionRange));

        // Create a model from the random graph.
        test_wrapper::Model model;
        mGraph.createModel(&model);
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);

        // Compute reference result.
        computeAndVerifyResultsForDevice(&model, numOperations, kRefDeviceName);

        // Compute on each available device.
        for (auto& pair : mDevices) {
            // Skip the nnapi reference device.
            if (pair.first.compare(kRefDeviceName) == 0) continue;
            computeAndVerifyResultsForDevice(&model, numOperations, pair.first);
        }

        if (numOperations > 1) {
            if (!shouldSkipTest(mStandardDevicesFeatureLevel)) {
                // Compute normally (i.e., allow runtime to distribute across
                // devices).
                SCOPED_TRACE("Compute normally");
                computeAndVerifyResults(&model, mStandardDevicesFeatureLevel >= __ANDROID_API_Q__);
            }

#ifndef NNTEST_CTS
            {
                // Stress partitioner by allowing runtime to distribute across
                // three synthetic devices.  The synthetic devices use the
                // CpuExecutor for execution, so we always check results, even
                // though some are of feature level < __ANDROID_API_Q__: In this
                // case, we don't take feature level as an indication of
                // reliability, as we do with real devices.
                SCOPED_TRACE("Compute across synthetic devices");
                DeviceManager::get()->forTest_setDevices(mSyntheticDevices);
                computeAndVerifyResults(&model, true);
                DeviceManager::get()->forTest_setDevices(mStandardDevices);
            }
#endif
        }
    }

    // Graph-size and dimension-range presets used by the TEST_* macros below.
    enum GraphSize : uint32_t { SINGLE = 1, SMALL = 5, LARGE = 40 };
    enum DimensionRange : uint32_t { NARROW = 10, WIDE = 1000 };

    static bool mEnableLog;  // from debug.nn.fuzzer.log
    static bool mDumpSpec;   // from debug.nn.fuzzer.dumpspec
    static std::map<std::string, ANeuralNetworksDevice*> mDevices;  // name -> device

    const uint32_t kSeed = GetParam();  // RNG seed for graph generation
    std::string mTestName;
    RandomGraph mGraph;
    AccuracyCriteria mCriteria;  // set by each test before testRandomGraph

    static int64_t mStandardDevicesFeatureLevel;  // minimum across all devices
#ifndef NNTEST_CTS
    static std::vector<std::shared_ptr<Device>> mStandardDevices;
    static std::vector<std::shared_ptr<Device>> mSyntheticDevices;
#endif
};
    368 
// Out-of-class definitions for the static members of RandomGraphTest.
bool RandomGraphTest::mEnableLog = false;
bool RandomGraphTest::mDumpSpec = false;
std::map<std::string, ANeuralNetworksDevice*> RandomGraphTest::mDevices;

int64_t RandomGraphTest::mStandardDevicesFeatureLevel;
#ifndef NNTEST_CTS
std::vector<std::shared_ptr<Device>> RandomGraphTest::mStandardDevices;
std::vector<std::shared_ptr<Device>> RandomGraphTest::mSyntheticDevices;
#endif
    378 
    379 // Single-op graph with dimensions in range [1, 1000].
class SingleOperationTest : public RandomGraphTest {};
// Declares a parameterized test named <operation>_<halVersion> that restricts
// the generator to the single given operation at the given HAL version,
// installs the accuracy criteria, and runs a one-op graph with wide dimensions.
#define TEST_SINGLE_OPERATION(operation, halVersion, criteria)              \
    TEST_P(SingleOperationTest, operation##_##halVersion) {                 \
        OperationFilter filter = {.opcodes = {ANEURALNETWORKS_##operation}, \
                                  .versions = {HalVersion::halVersion}};    \
        OperationManager::get()->applyFilter(filter);                       \
        mCriteria = (criteria);                                             \
        testRandomGraph(GraphSize::SINGLE, DimensionRange::WIDE);           \
    }
    389 
    390 // TODO: Adjust the accuracy criteria based on testing.
    391 // We define three sets of accuracy criteria for single-operation tests.
    392 
    393 // This is for operations that only copy buffers around without any computation on buffer values.
    394 // Most of these operations fall into categories of reshape or selection, e.g. RESHAPE, GATHER.
    395 // Additionally, operations with only logical or comparison arithmetic also use this criteria, e.g.
    396 // EQUAL, ARGMAX, TOPK_V2.
// Per-type bounds: atol/rtol are per-element tolerances; bias and mse bound
// the mean error and mean squared error over the whole output buffer.
const AccuracyCriteria kStrictCriteria = {
        .float32 = {.atol = 1e-6f, .rtol = 1e-6f, .bias = 1e-7f, .mse = 1e-10f},
        .float16 = {.atol = 1e-3f, .rtol = 1e-3f, .bias = 1e-4f, .mse = 1e-8f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.atol = 1, .bias = 0.1f, .mse = 0.1f},
        .quant8Symm = {.atol = 1, .bias = 0.1f, .mse = 0.1f},
        .quant16Asymm = {.atol = 1, .bias = 0.1f, .mse = 0.1f},
        .quant16Symm = {.atol = 1, .bias = 0.1f, .mse = 0.1f}};
    405 
    406 // This is for operations that only do simple and single computation on buffer values, such as
    407 // addition, multiplication, or requantization. Most of these operations fall into categories of
    408 // broadcast or elementwise, e.g ADD, FLOOR.
// Looser than kStrictCriteria: allows one extra decade of float error and
// an atol of 2 quantization steps for quantized types.
const AccuracyCriteria kMediumCriteria = {
        .float32 = {.atol = 1e-5f, .rtol = 1e-5f, .bias = 1e-6f, .mse = 1e-8f},
        .float16 = {.atol = 1e-2f, .rtol = 1e-2f, .bias = 1e-3f, .mse = 1e-6f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.atol = 2, .bias = 0.5f, .mse = 0.5f},
        .quant8Symm = {.atol = 2, .bias = 0.5f, .mse = 0.5f},
        .quant16Asymm = {.atol = 2, .bias = 0.5f, .mse = 0.5f},
        .quant16Symm = {.atol = 2, .bias = 0.5f, .mse = 0.5f}};
    417 
    418 // This is for operations that involve sophisticated computations on buffer values, either a single
    419 // but complex transformation, e.g. LOGISTIC, or multiple transformations with accumulated errors,
    420 // e.g. CONV_2D, REDUCE_*.
    421 const AccuracyCriteria kRelaxedCriteria = {
    422         .float32 = {.atol = 1e-3f, .rtol = 1e-3f, .bias = 2e-5f, .mse = 1e-7f},
    423         .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
    424         .int32 = {.atol = 1},
    425         .quant8Asymm = {.atol = 10, .bias = 1.5, .mse = 1.5},
    426         .quant8Symm = {.atol = 10, .bias = 1.5, .mse = 1.5},
    427         .quant16Asymm = {.atol = 10, .bias = 1.5, .mse = 1.5},
    428         .quant16Symm = {.atol = 10, .bias = 1.5, .mse = 1.5}};
    429 
    430 /*-- NNAPI 1.0 Operations ---------------------------------------------------*/
    431 
    432 // TODO: The following 1.0 operation signatures are currently not defined:
    433 // - ANEURALNETWORKS_LSH_PROJECTION
    434 // - ANEURALNETWORKS_LSTM
    435 // - ANEURALNETWORKS_RNN
    436 // - ANEURALNETWORKS_SVDF
    437 
// Criteria assignment: data-movement/selection ops get kStrictCriteria,
// simple elementwise arithmetic gets kMediumCriteria, and multi-step numeric
// ops (convolutions, normalizations, poolings) get kRelaxedCriteria.
TEST_SINGLE_OPERATION(ADD, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(MUL, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(FLOOR, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RELU, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(TANH, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(HASHTABLE_LOOKUP, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_0, kMediumCriteria);
    463 
    464 /*-- NNAPI 1.1 Operations ---------------------------------------------------*/
    465 
// Operations introduced in NNAPI 1.1, tested at HAL version V1_1.
TEST_SINGLE_OPERATION(SUB, V1_1, kMediumCriteria);
TEST_SINGLE_OPERATION(DIV, V1_1, kRelaxedCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_1, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_1, kStrictCriteria);
    475 
    476 /*-- NNAPI 1.0 and 1.1 Operations with Extended Behavior in 1.2 -------------*/
    477 
// Re-test 1.0/1.1 operations at HAL version V1_2 to cover their extended
// behavior (e.g. new data types) introduced in 1.2.
TEST_SINGLE_OPERATION(ADD, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(MUL, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(SUB, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(DIV, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(FLOOR, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RELU, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(TANH, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_2, kStrictCriteria);
    510 
    511 /*-- NNAPI 1.2 Operations ---------------------------------------------------*/
    512 
    513 // TODO: The following 1.2 operation signatures are currently not defined:
    514 // - ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM
    515 // - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM
    516 // - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN
    517 // - ANEURALNETWORKS_BOX_WITH_NMS_LIMIT
    518 // - ANEURALNETWORKS_DETECTION_POSTPROCESSING
    519 // - ANEURALNETWORKS_GENERATE_PROPOSALS
    520 // - ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
    521 // - ANEURALNETWORKS_RANDOM_MULTINOMIAL
    522 // - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
    523 // - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN
    524 
// Operations newly introduced in NNAPI 1.2 (see the TODO above for the
// signatures not yet covered).
TEST_SINGLE_OPERATION(ABS, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(EXP, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOG, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(NEG, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RSQRT, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SIN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SQRT, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ARGMAX, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(ARGMIN, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_AND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_NOT, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_OR, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(NOT_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(MAXIMUM, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(MINIMUM, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(POW, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PRELU, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(REDUCE_ALL, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_ANY, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MIN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_PROD, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_SUM, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(INSTANCE_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOG_SOFTMAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD_V2, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(QUANTIZE, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(CAST, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TILE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GATHER, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SELECT, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TOPK_V2, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SLICE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPLIT, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(ROI_ALIGN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_POOLING, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_2, kRelaxedCriteria);
    572 
    573 const AccuracyCriteria kSmallGraphCriteria = {
    574         .float32 = {.atol = 1e-2f, .rtol = 1e-2f, .bias = 2e-5f, .mse = 1e-7f},
    575         .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
    576         .int32 = {.atol = 1},
    577         .quant8Asymm = {.atol = 12, .bias = 2, .mse = 2},
    578         .quant8Symm = {.atol = 12, .bias = 2, .mse = 2},
    579         .quant16Asymm = {.atol = 12, .bias = 2, .mse = 2},
    580         .quant16Symm = {.atol = 12, .bias = 2, .mse = 2}};
    581 
// Accuracy criteria for the "LargeGraph" tests (40-op graphs, dimensions in
// [1, 10] — see the TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK comment below).
// Float tolerances are looser than kSmallGraphCriteria because error compounds
// across many more chained operations; the quantized/int criteria are identical
// to the small-graph ones.
const AccuracyCriteria kLargeGraphCriteria = {
        .float32 = {.atol = 1e-1f, .rtol = 1e-1f, .bias = 1e-2f, .mse = 1e-4f},
        .float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 1e-1f, .mse = 5e-2f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.atol = 12, .bias = 2, .mse = 2},
        .quant8Symm = {.atol = 12, .bias = 2, .mse = 2},
        .quant16Asymm = {.atol = 12, .bias = 2, .mse = 2},
        .quant16Symm = {.atol = 12, .bias = 2, .mse = 2}};
    590 
// Due to the limitation of the random graph generator, graphs generated with mixed-type or
// mixed-rank operations are likely to result in a disconnected network. Thus, we filter the
// operation signatures by primary data type and rank first, then generate random graph tests for
// each combination.
//
// Two parameterized tests are created for each (dataType, rank) filter:
// * SmallGraph: 5-op graph with dimensions in range [1, 1000] (WIDE range, kSmallGraphCriteria).
// * LargeGraph: 40-op graph with dimensions in range [1, 10] (NARROW range, kLargeGraphCriteria).
//
// NOTE: keep comments outside the macro body — line splicing happens before comment
// removal, so a "//" comment on a backslash-continued line would absorb the next line.
#define TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(dataType, rank)                  \
    TEST_P(RandomGraphTest, SmallGraph_##dataType##_Rank##rank) {                  \
        OperationFilter filter = {.dataTypes = {Type::dataType}, .ranks = {rank}}; \
        OperationManager::get()->applyFilter(filter);                              \
        mCriteria = kSmallGraphCriteria;                                           \
        testRandomGraph(GraphSize::SMALL, DimensionRange::WIDE);                   \
    }                                                                              \
    TEST_P(RandomGraphTest, LargeGraph_##dataType##_Rank##rank) {                  \
        OperationFilter filter = {.dataTypes = {Type::dataType}, .ranks = {rank}}; \
        OperationManager::get()->applyFilter(filter);                              \
        mCriteria = kLargeGraphCriteria;                                           \
        testRandomGraph(GraphSize::LARGE, DimensionRange::NARROW);                 \
    }
    613 
// Random graph test with TENSOR_QUANT8_ASYMM as the primary data type is currently not defined.
// The generated graph with TENSOR_QUANT8_ASYMM as the primary data type would likely result in
// disconnected graphs due to the mismatch between quantized parameters.

// Instantiate SmallGraph/LargeGraph tests for every supported primary data
// type at tensor ranks 4 down to 1.
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 1);
    637 
// Each parameterized test above runs once per value in the range; the parameter
// presumably seeds the random graph generator — confirm in the test fixture.
// CTS builds run 50 iterations, in-tree builds 100 — presumably to bound CTS
// suite runtime.
// NOTE(review): INSTANTIATE_TEST_CASE_P is deprecated in newer gtest in favor of
// INSTANTIATE_TEST_SUITE_P — migrate once the bundled gtest supports it.
#ifdef NNTEST_CTS
INSTANTIATE_TEST_CASE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 50u));
INSTANTIATE_TEST_CASE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 50u));
#else
INSTANTIATE_TEST_CASE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 100u));
INSTANTIATE_TEST_CASE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 100u));
#endif
    645 
    646 }  // namespace fuzzing_test
    647 }  // namespace nn
    648 }  // namespace android
    649