Home | History | Annotate | Download | only in test
      1 /*
      2  * Copyright (C) 2018 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "NeuralNetworksWrapper.h"
     18 #include "TestHarness.h"
     19 
     20 #include <gtest/gtest.h>
     21 
     22 #include <tuple>
     23 #include <vector>
     24 
     25 using namespace android::nn::wrapper;
     26 using namespace test_helper;
     27 
     28 namespace {
     29 
     30 const uint32_t INTENDED_SIZE = 3;
     31 const uint32_t OTHER_SIZE    = 2;
     32 const uint32_t UNKNOWN_SIZE  = 0;
     33 typedef uint8_t IntendedMatrix[INTENDED_SIZE][INTENDED_SIZE];
     34 
     35 // TODO: add a float version of this test for use against drivers that don't
     36 // support quantized add. b/72448000
     37 
     38 // We test three basic scenarios for each tensor dimension:
     39 //     INTENDED_AT_COMPILE_AND_EXECUTE: set the dimension at compile
     40 //     (addOperand) time to INTENDED_SIZE, use same size at execution
     41 //     (setInput/setOutput) time. This should always work.
     42 //
     43 //     INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE: set the dimension at compile
     44 //     (addOperand) time to INTENDED_SIZE, give no size at execution time.
     45 //     This should always work.
     46 //
     47 //     UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE: don't set the dimension at
     48 //     compile (addOperand) time, use INTENDED_SIZE at execute
     49 //     (setInput/setOutput) time. Note for constants, this just means using an
     50 //     unknown dimension at addOperand as there is no type parameter to
     51 //     setOperandValue. This should work for inputs and outputs and give an
     52 //     error for constants at compile time.
     53 //
     54 //     UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE: don't set the dimension at compile
     55 //     (addOperand) time, use OTHER_SIZE at execute (setInput/setOutput) time.
     56 //     This should give an error at execute time (as the constant value will
     57 //     have a different size).
     58 enum class DimensionKind { INTENDED_AT_COMPILE_AND_EXECUTE,
     59                            INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
     60                            UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
     61                            UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE };
     62 typedef std::tuple<DimensionKind, DimensionKind> OperandParams;
     63 typedef std::tuple<OperandParams,  // first input
     64                    OperandParams,  // second input
     65                    OperandParams,  // constant
     66                    OperandParams   // output
     67                   > TestParams;
     68 // All relevant combinations of the basic scenarios are then created with TEST_P
     69 auto ioDimensionValues = testing::Values(DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE,
     70                                          DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
     71                                          DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
     72                                          DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE);
     73 auto constantDimensionValues = testing::Values(
     74         DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
     75         DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE);
     76 auto ioValues = testing::Combine(ioDimensionValues, ioDimensionValues);
     77 auto constantValues = testing::Combine(constantDimensionValues, constantDimensionValues);
     78 auto combinedValues = testing::Combine(ioValues, ioValues, constantValues, ioValues);
     79 
     80 class UnknownDimensionsTest : public ::testing::TestWithParam<TestParams> {
     81 protected:
     82     const IntendedMatrix ones = { { 1, 1, 1 }, { 1, 1, 1 }, { 1, 1, 1 } };
     83     const IntendedMatrix twos = { { 2, 2, 2 }, { 2, 2, 2 }, { 2, 2, 2 } };
     84     const IntendedMatrix fives = { { 5, 5, 5 }, { 5, 5, 5 }, { 5, 5, 5 } };
     85 };
     86 
// Builds a two-operation model, (input0 + input1) + constant, where the
// dimensions of each operand are chosen by the test parameter, then checks
// that model validation, compilation, and execution succeed or fail exactly
// as laid out in the scenario table above.
TEST_P(UnknownDimensionsTest, UnknownDimensions) {
    TestParams params = GetParam();
    auto paramsForInput0 = std::get<0>(params),
         paramsForInput1 = std::get<1>(params),
         paramsForConst  = std::get<2>(params),
         paramsForOutput = std::get<3>(params);

    Model model;
    std::string input0Scope("Input 0:"), input1Scope("Input 1:"),
                constantScope("Constant:"), outputScope("Output:");

    // Maps a DimensionKind to the dimension value passed to addOperand
    // (INTENDED_SIZE or UNKNOWN_SIZE), and appends the kind's name to *scope
    // so a later SCOPED_TRACE identifies the failing parameter combination.
    auto getDimForCompile = [](DimensionKind kind, std::string* scope) {
        switch (kind) {
            case DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_AND_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE");
                return UNKNOWN_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE");
                return UNKNOWN_SIZE;
        }
    };
    // Adds a 2-D TENSOR_QUANT8_ASYMM operand whose per-dimension sizes are
    // derived from the OperandParams via getDimForCompile.
    auto addOperand = [&model, &getDimForCompile](OperandParams params,
                                                  std::string* scope = nullptr) {
        OperandType matrixTypeWithPotentiallyUnknownDims(
                Type::TENSOR_QUANT8_ASYMM,
                { getDimForCompile(std::get<0>(params), scope),
                  getDimForCompile(std::get<1>(params), scope) },
                1.0f);
        return model.addOperand(&matrixTypeWithPotentiallyUnknownDims);
    };
    auto inputOpd0 = addOperand(paramsForInput0, &input0Scope);
    auto inputOpd1 = addOperand(paramsForInput1, &input1Scope);
    auto intermediateOpd0 = addOperand(OperandParams{
            // Dimensions for intermediate operand actually deduced at execution time
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE});
    auto constantOpd0 = addOperand(paramsForConst, &constantScope);
    auto outputOpd0 = addOperand(paramsForOutput, &outputScope);

    // Make the gtest failure easier to read, TEST_P just outputs a list of
    // numbers
    SCOPED_TRACE(input0Scope);
    SCOPED_TRACE(input1Scope);
    SCOPED_TRACE(constantScope);
    SCOPED_TRACE(outputScope);

    OperandType scalarType(Type::INT32, {});
    int32_t activation(ANEURALNETWORKS_FUSED_NONE);
    auto activationOpd0 = model.addOperand(&scalarType);

    // Wire up: intermediate = input0 + input1; output = intermediate + twos.
    model.setOperandValue(activationOpd0, &activation, sizeof(activation));
    model.setOperandValue(constantOpd0, twos, sizeof(twos));
    model.addOperation(ANEURALNETWORKS_ADD,
                       {inputOpd0, inputOpd1, activationOpd0},
                       {intermediateOpd0});
    model.addOperation(ANEURALNETWORKS_ADD,
                       {intermediateOpd0, constantOpd0, activationOpd0},
                       {outputOpd0});
    model.identifyInputsAndOutputs({inputOpd0, inputOpd1}, {outputOpd0});
    // A constant with any unknown dimension is invalid at compile time (there
    // is no type parameter to setOperandValue to resolve it later), so the
    // model is only valid when both constant dimensions were fully specified.
    if (std::get<0>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
        std::get<1>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);
    } else {
        ASSERT_FALSE(model.isValid());
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(model.finish(), Result::NO_ERROR);
        return;
    }

    Compilation compilation(&model);
    ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

    // Pre-fill the output with a value the computation never produces, so a
    // silently-skipped execution cannot pass the final comparison.
    IntendedMatrix actual = { { 10, 10, 10 }, { 10, 10, 10 }, { 10, 10, 10 } };
    Execution execution(&compilation);

    // The candidate "type" arguments for setInput/setOutput, covering each
    // combination of intended/other sizes for the two dimensions.
    OperandType matrixTypeIntended(Type::TENSOR_QUANT8_ASYMM, {INTENDED_SIZE, INTENDED_SIZE}, 1.0f);
    OperandType matrixTypeFirstOther(Type::TENSOR_QUANT8_ASYMM, {OTHER_SIZE, INTENDED_SIZE}, 1.0f);
    OperandType matrixTypeSecondOther(Type::TENSOR_QUANT8_ASYMM, {INTENDED_SIZE, OTHER_SIZE}, 1.0f);
    OperandType matrixTypeBothOther(Type::TENSOR_QUANT8_ASYMM, {OTHER_SIZE, OTHER_SIZE}, 1.0f);
    bool allAreIntendedSizeAtExecution = true;

    // Helper to return appropriate "type" parameter to setInput/setOutput based
    // on OperandParams
    auto typeAtSet = [&](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE &&
            second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeBothOther.operandType;
        } else if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeFirstOther.operandType;
        } else if (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeSecondOther.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE) {
            return &matrixTypeIntended.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
            // Both dimensions fully known at compile time: pass no type.
            return static_cast<ANeuralNetworksOperandType*>(nullptr);
        } else {
            return &matrixTypeIntended.operandType;
        }
    };
    // Helper to return appropriate "size" parameter to setInput/setOutput based
    // on OperandParams
    auto sizeAtSet = [](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        size_t firstDim = (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) ?
            OTHER_SIZE : INTENDED_SIZE;
        size_t secondDim = (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) ?
            OTHER_SIZE : INTENDED_SIZE;
        return firstDim * secondDim * sizeof(fives[0][0]);
    };
    ASSERT_EQ(execution.setInput(0, ones, sizeAtSet(paramsForInput0), typeAtSet(paramsForInput0)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, twos, sizeAtSet(paramsForInput1), typeAtSet(paramsForInput1)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setOutput(0, actual, sizeAtSet(paramsForOutput),
                                  typeAtSet(paramsForOutput)),
              Result::NO_ERROR);

    // Any OTHER_SIZE dimension conflicts with the constant's 3x3 shape, so
    // the computation must be rejected; otherwise it must succeed.
    if (allAreIntendedSizeAtExecution) {
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
    } else {
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(execution.compute(), Result::NO_ERROR);
        return;
    }

    // Compare the computed output against fives ((1 + 2) + 2 elementwise)
    // using the TestHarness quantized-operand comparison.
    using qvec = std::vector<uint8_t>;
    constexpr size_t count = sizeof(fives) / sizeof(fives[0][0]);
    Quant8Operands expected_opds{{0, qvec{&fives[0][0], &fives[0][0] + count}}};
    Quant8Operands actual_opds{{0, qvec{&actual[0][0], &actual[0][0] + count}}};
    compare(MixedTyped{ {}, {}, expected_opds }, MixedTyped{ {}, {}, actual_opds });
}
    233 
// Instantiate the test for every parameter combination produced above.
// NOTE(review): newer googletest renames this macro INSTANTIATE_TEST_SUITE_P;
// keep the old spelling for the gtest version bundled with this tree.
INSTANTIATE_TEST_CASE_P(UnknownCombinationsTest, UnknownDimensionsTest,
                        combinedValues);
    236 
    237 }  // end namespace
    238