Home | History | Annotate | Download | only in models
      1 // clang-format off
      2 // Generated file (from: dequantize_v1_2.mod.py). Do not edit
      3 void CreateModel(Model *model) {
      4   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {10}, 0.5f, 127);
      5   OperandType type1(Type::TENSOR_FLOAT32, {10});
      6   // Phase 1, operands
      7   auto input0 = model->addOperand(&type0);
      8   auto output0 = model->addOperand(&type1);
      9   // Phase 2, operations
     10   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input0}, {output0});
     11   // Phase 3, inputs and outputs
     12   model->identifyInputsAndOutputs(
     13     {input0},
     14     {output0});
     15   assert(model->isValid());
     16 }
     17 
     18 inline bool is_ignored(int i) {
     19   static std::set<int> ignore = {};
     20   return ignore.find(i) != ignore.end();
     21 }
     22 
     23 void CreateModel_relaxed(Model *model) {
     24   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {10}, 0.5f, 127);
     25   OperandType type1(Type::TENSOR_FLOAT32, {10});
     26   // Phase 1, operands
     27   auto input0 = model->addOperand(&type0);
     28   auto output0 = model->addOperand(&type1);
     29   // Phase 2, operations
     30   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input0}, {output0});
     31   // Phase 3, inputs and outputs
     32   model->identifyInputsAndOutputs(
     33     {input0},
     34     {output0});
     35   // Phase 4: set relaxed execution
     36   model->relaxComputationFloat32toFloat16(true);
     37   assert(model->isValid());
     38 }
     39 
     40 inline bool is_ignored_relaxed(int i) {
     41   static std::set<int> ignore = {};
     42   return ignore.find(i) != ignore.end();
     43 }
     44 
     45 void CreateModel_float16(Model *model) {
     46   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {10}, 0.5f, 127);
     47   OperandType type25(Type::TENSOR_FLOAT16, {10});
     48   // Phase 1, operands
     49   auto input0 = model->addOperand(&type0);
     50   auto output0 = model->addOperand(&type25);
     51   // Phase 2, operations
     52   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input0}, {output0});
     53   // Phase 3, inputs and outputs
     54   model->identifyInputsAndOutputs(
     55     {input0},
     56     {output0});
     57   assert(model->isValid());
     58 }
     59 
     60 inline bool is_ignored_float16(int i) {
     61   static std::set<int> ignore = {};
     62   return ignore.find(i) != ignore.end();
     63 }
     64 
     65 void CreateModel_dynamic_output_shape(Model *model) {
     66   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {10}, 0.5f, 127);
     67   OperandType type26(Type::TENSOR_FLOAT32, {0});
     68   // Phase 1, operands
     69   auto input0 = model->addOperand(&type0);
     70   auto output0 = model->addOperand(&type26);
     71   // Phase 2, operations
     72   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input0}, {output0});
     73   // Phase 3, inputs and outputs
     74   model->identifyInputsAndOutputs(
     75     {input0},
     76     {output0});
     77   assert(model->isValid());
     78 }
     79 
     80 inline bool is_ignored_dynamic_output_shape(int i) {
     81   static std::set<int> ignore = {};
     82   return ignore.find(i) != ignore.end();
     83 }
     84 
     85 void CreateModel_dynamic_output_shape_relaxed(Model *model) {
     86   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {10}, 0.5f, 127);
     87   OperandType type26(Type::TENSOR_FLOAT32, {0});
     88   // Phase 1, operands
     89   auto input0 = model->addOperand(&type0);
     90   auto output0 = model->addOperand(&type26);
     91   // Phase 2, operations
     92   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input0}, {output0});
     93   // Phase 3, inputs and outputs
     94   model->identifyInputsAndOutputs(
     95     {input0},
     96     {output0});
     97   // Phase 4: set relaxed execution
     98   model->relaxComputationFloat32toFloat16(true);
     99   assert(model->isValid());
    100 }
    101 
    102 inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
    103   static std::set<int> ignore = {};
    104   return ignore.find(i) != ignore.end();
    105 }
    106 
    107 void CreateModel_dynamic_output_shape_float16(Model *model) {
    108   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {10}, 0.5f, 127);
    109   OperandType type27(Type::TENSOR_FLOAT16, {0});
    110   // Phase 1, operands
    111   auto input0 = model->addOperand(&type0);
    112   auto output0 = model->addOperand(&type27);
    113   // Phase 2, operations
    114   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input0}, {output0});
    115   // Phase 3, inputs and outputs
    116   model->identifyInputsAndOutputs(
    117     {input0},
    118     {output0});
    119   assert(model->isValid());
    120 }
    121 
    122 inline bool is_ignored_dynamic_output_shape_float16(int i) {
    123   static std::set<int> ignore = {};
    124   return ignore.find(i) != ignore.end();
    125 }
    126 
    127 void CreateModel_2(Model *model) {
    128   OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2, 5}, 0.5f, 127);
    129   OperandType type3(Type::TENSOR_FLOAT32, {2, 5});
    130   // Phase 1, operands
    131   auto input01 = model->addOperand(&type2);
    132   auto output01 = model->addOperand(&type3);
    133   // Phase 2, operations
    134   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input01}, {output01});
    135   // Phase 3, inputs and outputs
    136   model->identifyInputsAndOutputs(
    137     {input01},
    138     {output01});
    139   assert(model->isValid());
    140 }
    141 
    142 inline bool is_ignored_2(int i) {
    143   static std::set<int> ignore = {};
    144   return ignore.find(i) != ignore.end();
    145 }
    146 
    147 void CreateModel_relaxed_2(Model *model) {
    148   OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2, 5}, 0.5f, 127);
    149   OperandType type3(Type::TENSOR_FLOAT32, {2, 5});
    150   // Phase 1, operands
    151   auto input01 = model->addOperand(&type2);
    152   auto output01 = model->addOperand(&type3);
    153   // Phase 2, operations
    154   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input01}, {output01});
    155   // Phase 3, inputs and outputs
    156   model->identifyInputsAndOutputs(
    157     {input01},
    158     {output01});
    159   // Phase 4: set relaxed execution
    160   model->relaxComputationFloat32toFloat16(true);
    161   assert(model->isValid());
    162 }
    163 
    164 inline bool is_ignored_relaxed_2(int i) {
    165   static std::set<int> ignore = {};
    166   return ignore.find(i) != ignore.end();
    167 }
    168 
    169 void CreateModel_float16_2(Model *model) {
    170   OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2, 5}, 0.5f, 127);
    171   OperandType type28(Type::TENSOR_FLOAT16, {2, 5});
    172   // Phase 1, operands
    173   auto input01 = model->addOperand(&type2);
    174   auto output01 = model->addOperand(&type28);
    175   // Phase 2, operations
    176   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input01}, {output01});
    177   // Phase 3, inputs and outputs
    178   model->identifyInputsAndOutputs(
    179     {input01},
    180     {output01});
    181   assert(model->isValid());
    182 }
    183 
    184 inline bool is_ignored_float16_2(int i) {
    185   static std::set<int> ignore = {};
    186   return ignore.find(i) != ignore.end();
    187 }
    188 
    189 void CreateModel_dynamic_output_shape_2(Model *model) {
    190   OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2, 5}, 0.5f, 127);
    191   OperandType type29(Type::TENSOR_FLOAT32, {0, 0});
    192   // Phase 1, operands
    193   auto input01 = model->addOperand(&type2);
    194   auto output01 = model->addOperand(&type29);
    195   // Phase 2, operations
    196   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input01}, {output01});
    197   // Phase 3, inputs and outputs
    198   model->identifyInputsAndOutputs(
    199     {input01},
    200     {output01});
    201   assert(model->isValid());
    202 }
    203 
    204 inline bool is_ignored_dynamic_output_shape_2(int i) {
    205   static std::set<int> ignore = {};
    206   return ignore.find(i) != ignore.end();
    207 }
    208 
    209 void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
    210   OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2, 5}, 0.5f, 127);
    211   OperandType type29(Type::TENSOR_FLOAT32, {0, 0});
    212   // Phase 1, operands
    213   auto input01 = model->addOperand(&type2);
    214   auto output01 = model->addOperand(&type29);
    215   // Phase 2, operations
    216   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input01}, {output01});
    217   // Phase 3, inputs and outputs
    218   model->identifyInputsAndOutputs(
    219     {input01},
    220     {output01});
    221   // Phase 4: set relaxed execution
    222   model->relaxComputationFloat32toFloat16(true);
    223   assert(model->isValid());
    224 }
    225 
    226 inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
    227   static std::set<int> ignore = {};
    228   return ignore.find(i) != ignore.end();
    229 }
    230 
    231 void CreateModel_dynamic_output_shape_float16_2(Model *model) {
    232   OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2, 5}, 0.5f, 127);
    233   OperandType type30(Type::TENSOR_FLOAT16, {0, 0});
    234   // Phase 1, operands
    235   auto input01 = model->addOperand(&type2);
    236   auto output01 = model->addOperand(&type30);
    237   // Phase 2, operations
    238   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input01}, {output01});
    239   // Phase 3, inputs and outputs
    240   model->identifyInputsAndOutputs(
    241     {input01},
    242     {output01});
    243   assert(model->isValid());
    244 }
    245 
    246 inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
    247   static std::set<int> ignore = {};
    248   return ignore.find(i) != ignore.end();
    249 }
    250 
    251 void CreateModel_3(Model *model) {
    252   OperandType type4(Type::TENSOR_QUANT8_SYMM, {2, 2, 2}, 0.5f, 0);
    253   OperandType type5(Type::TENSOR_FLOAT32, {2, 2, 2});
    254   // Phase 1, operands
    255   auto input02 = model->addOperand(&type4);
    256   auto output02 = model->addOperand(&type5);
    257   // Phase 2, operations
    258   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input02}, {output02});
    259   // Phase 3, inputs and outputs
    260   model->identifyInputsAndOutputs(
    261     {input02},
    262     {output02});
    263   assert(model->isValid());
    264 }
    265 
    266 inline bool is_ignored_3(int i) {
    267   static std::set<int> ignore = {};
    268   return ignore.find(i) != ignore.end();
    269 }
    270 
    271 void CreateModel_relaxed_3(Model *model) {
    272   OperandType type4(Type::TENSOR_QUANT8_SYMM, {2, 2, 2}, 0.5f, 0);
    273   OperandType type5(Type::TENSOR_FLOAT32, {2, 2, 2});
    274   // Phase 1, operands
    275   auto input02 = model->addOperand(&type4);
    276   auto output02 = model->addOperand(&type5);
    277   // Phase 2, operations
    278   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input02}, {output02});
    279   // Phase 3, inputs and outputs
    280   model->identifyInputsAndOutputs(
    281     {input02},
    282     {output02});
    283   // Phase 4: set relaxed execution
    284   model->relaxComputationFloat32toFloat16(true);
    285   assert(model->isValid());
    286 }
    287 
    288 inline bool is_ignored_relaxed_3(int i) {
    289   static std::set<int> ignore = {};
    290   return ignore.find(i) != ignore.end();
    291 }
    292 
    293 void CreateModel_float16_3(Model *model) {
    294   OperandType type31(Type::TENSOR_FLOAT16, {2, 2, 2});
    295   OperandType type4(Type::TENSOR_QUANT8_SYMM, {2, 2, 2}, 0.5f, 0);
    296   // Phase 1, operands
    297   auto input02 = model->addOperand(&type4);
    298   auto output02 = model->addOperand(&type31);
    299   // Phase 2, operations
    300   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input02}, {output02});
    301   // Phase 3, inputs and outputs
    302   model->identifyInputsAndOutputs(
    303     {input02},
    304     {output02});
    305   assert(model->isValid());
    306 }
    307 
    308 inline bool is_ignored_float16_3(int i) {
    309   static std::set<int> ignore = {};
    310   return ignore.find(i) != ignore.end();
    311 }
    312 
    313 void CreateModel_dynamic_output_shape_3(Model *model) {
    314   OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0});
    315   OperandType type4(Type::TENSOR_QUANT8_SYMM, {2, 2, 2}, 0.5f, 0);
    316   // Phase 1, operands
    317   auto input02 = model->addOperand(&type4);
    318   auto output02 = model->addOperand(&type32);
    319   // Phase 2, operations
    320   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input02}, {output02});
    321   // Phase 3, inputs and outputs
    322   model->identifyInputsAndOutputs(
    323     {input02},
    324     {output02});
    325   assert(model->isValid());
    326 }
    327 
    328 inline bool is_ignored_dynamic_output_shape_3(int i) {
    329   static std::set<int> ignore = {};
    330   return ignore.find(i) != ignore.end();
    331 }
    332 
    333 void CreateModel_dynamic_output_shape_relaxed_3(Model *model) {
    334   OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0});
    335   OperandType type4(Type::TENSOR_QUANT8_SYMM, {2, 2, 2}, 0.5f, 0);
    336   // Phase 1, operands
    337   auto input02 = model->addOperand(&type4);
    338   auto output02 = model->addOperand(&type32);
    339   // Phase 2, operations
    340   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input02}, {output02});
    341   // Phase 3, inputs and outputs
    342   model->identifyInputsAndOutputs(
    343     {input02},
    344     {output02});
    345   // Phase 4: set relaxed execution
    346   model->relaxComputationFloat32toFloat16(true);
    347   assert(model->isValid());
    348 }
    349 
    350 inline bool is_ignored_dynamic_output_shape_relaxed_3(int i) {
    351   static std::set<int> ignore = {};
    352   return ignore.find(i) != ignore.end();
    353 }
    354 
    355 void CreateModel_dynamic_output_shape_float16_3(Model *model) {
    356   OperandType type33(Type::TENSOR_FLOAT16, {0, 0, 0});
    357   OperandType type4(Type::TENSOR_QUANT8_SYMM, {2, 2, 2}, 0.5f, 0);
    358   // Phase 1, operands
    359   auto input02 = model->addOperand(&type4);
    360   auto output02 = model->addOperand(&type33);
    361   // Phase 2, operations
    362   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input02}, {output02});
    363   // Phase 3, inputs and outputs
    364   model->identifyInputsAndOutputs(
    365     {input02},
    366     {output02});
    367   assert(model->isValid());
    368 }
    369 
    370 inline bool is_ignored_dynamic_output_shape_float16_3(int i) {
    371   static std::set<int> ignore = {};
    372   return ignore.find(i) != ignore.end();
    373 }
    374 
    375 void CreateModel_4(Model *model) {
    376   OperandType type6(Type::TENSOR_QUANT8_SYMM, {2, 1, 2, 2}, 0.5f, 0);
    377   OperandType type7(Type::TENSOR_FLOAT32, {2, 1, 2, 2});
    378   // Phase 1, operands
    379   auto input03 = model->addOperand(&type6);
    380   auto output03 = model->addOperand(&type7);
    381   // Phase 2, operations
    382   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input03}, {output03});
    383   // Phase 3, inputs and outputs
    384   model->identifyInputsAndOutputs(
    385     {input03},
    386     {output03});
    387   assert(model->isValid());
    388 }
    389 
    390 inline bool is_ignored_4(int i) {
    391   static std::set<int> ignore = {};
    392   return ignore.find(i) != ignore.end();
    393 }
    394 
    395 void CreateModel_relaxed_4(Model *model) {
    396   OperandType type6(Type::TENSOR_QUANT8_SYMM, {2, 1, 2, 2}, 0.5f, 0);
    397   OperandType type7(Type::TENSOR_FLOAT32, {2, 1, 2, 2});
    398   // Phase 1, operands
    399   auto input03 = model->addOperand(&type6);
    400   auto output03 = model->addOperand(&type7);
    401   // Phase 2, operations
    402   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input03}, {output03});
    403   // Phase 3, inputs and outputs
    404   model->identifyInputsAndOutputs(
    405     {input03},
    406     {output03});
    407   // Phase 4: set relaxed execution
    408   model->relaxComputationFloat32toFloat16(true);
    409   assert(model->isValid());
    410 }
    411 
    412 inline bool is_ignored_relaxed_4(int i) {
    413   static std::set<int> ignore = {};
    414   return ignore.find(i) != ignore.end();
    415 }
    416 
    417 void CreateModel_float16_4(Model *model) {
    418   OperandType type34(Type::TENSOR_FLOAT16, {2, 1, 2, 2});
    419   OperandType type6(Type::TENSOR_QUANT8_SYMM, {2, 1, 2, 2}, 0.5f, 0);
    420   // Phase 1, operands
    421   auto input03 = model->addOperand(&type6);
    422   auto output03 = model->addOperand(&type34);
    423   // Phase 2, operations
    424   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input03}, {output03});
    425   // Phase 3, inputs and outputs
    426   model->identifyInputsAndOutputs(
    427     {input03},
    428     {output03});
    429   assert(model->isValid());
    430 }
    431 
    432 inline bool is_ignored_float16_4(int i) {
    433   static std::set<int> ignore = {};
    434   return ignore.find(i) != ignore.end();
    435 }
    436 
    437 void CreateModel_dynamic_output_shape_4(Model *model) {
    438   OperandType type35(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
    439   OperandType type6(Type::TENSOR_QUANT8_SYMM, {2, 1, 2, 2}, 0.5f, 0);
    440   // Phase 1, operands
    441   auto input03 = model->addOperand(&type6);
    442   auto output03 = model->addOperand(&type35);
    443   // Phase 2, operations
    444   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input03}, {output03});
    445   // Phase 3, inputs and outputs
    446   model->identifyInputsAndOutputs(
    447     {input03},
    448     {output03});
    449   assert(model->isValid());
    450 }
    451 
    452 inline bool is_ignored_dynamic_output_shape_4(int i) {
    453   static std::set<int> ignore = {};
    454   return ignore.find(i) != ignore.end();
    455 }
    456 
    457 void CreateModel_dynamic_output_shape_relaxed_4(Model *model) {
    458   OperandType type35(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
    459   OperandType type6(Type::TENSOR_QUANT8_SYMM, {2, 1, 2, 2}, 0.5f, 0);
    460   // Phase 1, operands
    461   auto input03 = model->addOperand(&type6);
    462   auto output03 = model->addOperand(&type35);
    463   // Phase 2, operations
    464   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input03}, {output03});
    465   // Phase 3, inputs and outputs
    466   model->identifyInputsAndOutputs(
    467     {input03},
    468     {output03});
    469   // Phase 4: set relaxed execution
    470   model->relaxComputationFloat32toFloat16(true);
    471   assert(model->isValid());
    472 }
    473 
    474 inline bool is_ignored_dynamic_output_shape_relaxed_4(int i) {
    475   static std::set<int> ignore = {};
    476   return ignore.find(i) != ignore.end();
    477 }
    478 
    479 void CreateModel_dynamic_output_shape_float16_4(Model *model) {
    480   OperandType type36(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
    481   OperandType type6(Type::TENSOR_QUANT8_SYMM, {2, 1, 2, 2}, 0.5f, 0);
    482   // Phase 1, operands
    483   auto input03 = model->addOperand(&type6);
    484   auto output03 = model->addOperand(&type36);
    485   // Phase 2, operations
    486   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input03}, {output03});
    487   // Phase 3, inputs and outputs
    488   model->identifyInputsAndOutputs(
    489     {input03},
    490     {output03});
    491   assert(model->isValid());
    492 }
    493 
    494 inline bool is_ignored_dynamic_output_shape_float16_4(int i) {
    495   static std::set<int> ignore = {};
    496   return ignore.find(i) != ignore.end();
    497 }
    498 
    499 void CreateModel_5(Model *model) {
    500   OperandType type8(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 0.5f},0));
    501   OperandType type9(Type::TENSOR_FLOAT32, {2, 3, 4});
    502   // Phase 1, operands
    503   auto input04 = model->addOperand(&type8);
    504   auto output04 = model->addOperand(&type9);
    505   // Phase 2, operations
    506   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input04}, {output04});
    507   // Phase 3, inputs and outputs
    508   model->identifyInputsAndOutputs(
    509     {input04},
    510     {output04});
    511   assert(model->isValid());
    512 }
    513 
    514 inline bool is_ignored_5(int i) {
    515   static std::set<int> ignore = {};
    516   return ignore.find(i) != ignore.end();
    517 }
    518 
    519 void CreateModel_relaxed_5(Model *model) {
    520   OperandType type8(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 0.5f},0));
    521   OperandType type9(Type::TENSOR_FLOAT32, {2, 3, 4});
    522   // Phase 1, operands
    523   auto input04 = model->addOperand(&type8);
    524   auto output04 = model->addOperand(&type9);
    525   // Phase 2, operations
    526   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input04}, {output04});
    527   // Phase 3, inputs and outputs
    528   model->identifyInputsAndOutputs(
    529     {input04},
    530     {output04});
    531   // Phase 4: set relaxed execution
    532   model->relaxComputationFloat32toFloat16(true);
    533   assert(model->isValid());
    534 }
    535 
    536 inline bool is_ignored_relaxed_5(int i) {
    537   static std::set<int> ignore = {};
    538   return ignore.find(i) != ignore.end();
    539 }
    540 
    541 void CreateModel_float16_5(Model *model) {
    542   OperandType type37(Type::TENSOR_FLOAT16, {2, 3, 4});
    543   OperandType type8(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 0.5f},0));
    544   // Phase 1, operands
    545   auto input04 = model->addOperand(&type8);
    546   auto output04 = model->addOperand(&type37);
    547   // Phase 2, operations
    548   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input04}, {output04});
    549   // Phase 3, inputs and outputs
    550   model->identifyInputsAndOutputs(
    551     {input04},
    552     {output04});
    553   assert(model->isValid());
    554 }
    555 
    556 inline bool is_ignored_float16_5(int i) {
    557   static std::set<int> ignore = {};
    558   return ignore.find(i) != ignore.end();
    559 }
    560 
    561 void CreateModel_dynamic_output_shape_5(Model *model) {
    562   OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0});
    563   OperandType type8(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 0.5f},0));
    564   // Phase 1, operands
    565   auto input04 = model->addOperand(&type8);
    566   auto output04 = model->addOperand(&type32);
    567   // Phase 2, operations
    568   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input04}, {output04});
    569   // Phase 3, inputs and outputs
    570   model->identifyInputsAndOutputs(
    571     {input04},
    572     {output04});
    573   assert(model->isValid());
    574 }
    575 
    576 inline bool is_ignored_dynamic_output_shape_5(int i) {
    577   static std::set<int> ignore = {};
    578   return ignore.find(i) != ignore.end();
    579 }
    580 
    581 void CreateModel_dynamic_output_shape_relaxed_5(Model *model) {
    582   OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0});
    583   OperandType type8(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 0.5f},0));
    584   // Phase 1, operands
    585   auto input04 = model->addOperand(&type8);
    586   auto output04 = model->addOperand(&type32);
    587   // Phase 2, operations
    588   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input04}, {output04});
    589   // Phase 3, inputs and outputs
    590   model->identifyInputsAndOutputs(
    591     {input04},
    592     {output04});
    593   // Phase 4: set relaxed execution
    594   model->relaxComputationFloat32toFloat16(true);
    595   assert(model->isValid());
    596 }
    597 
    598 inline bool is_ignored_dynamic_output_shape_relaxed_5(int i) {
    599   static std::set<int> ignore = {};
    600   return ignore.find(i) != ignore.end();
    601 }
    602 
    603 void CreateModel_dynamic_output_shape_float16_5(Model *model) {
    604   OperandType type33(Type::TENSOR_FLOAT16, {0, 0, 0});
    605   OperandType type8(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 0.5f},0));
    606   // Phase 1, operands
    607   auto input04 = model->addOperand(&type8);
    608   auto output04 = model->addOperand(&type33);
    609   // Phase 2, operations
    610   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input04}, {output04});
    611   // Phase 3, inputs and outputs
    612   model->identifyInputsAndOutputs(
    613     {input04},
    614     {output04});
    615   assert(model->isValid());
    616 }
    617 
    618 inline bool is_ignored_dynamic_output_shape_float16_5(int i) {
    619   static std::set<int> ignore = {};
    620   return ignore.find(i) != ignore.end();
    621 }
    622 
    623 void CreateModel_6(Model *model) {
    624   OperandType type10(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 1.0f, 0.5f},1));
    625   OperandType type9(Type::TENSOR_FLOAT32, {2, 3, 4});
    626   // Phase 1, operands
    627   auto input05 = model->addOperand(&type10);
    628   auto output05 = model->addOperand(&type9);
    629   // Phase 2, operations
    630   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input05}, {output05});
    631   // Phase 3, inputs and outputs
    632   model->identifyInputsAndOutputs(
    633     {input05},
    634     {output05});
    635   assert(model->isValid());
    636 }
    637 
    638 inline bool is_ignored_6(int i) {
    639   static std::set<int> ignore = {};
    640   return ignore.find(i) != ignore.end();
    641 }
    642 
    643 void CreateModel_relaxed_6(Model *model) {
    644   OperandType type10(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 1.0f, 0.5f},1));
    645   OperandType type9(Type::TENSOR_FLOAT32, {2, 3, 4});
    646   // Phase 1, operands
    647   auto input05 = model->addOperand(&type10);
    648   auto output05 = model->addOperand(&type9);
    649   // Phase 2, operations
    650   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input05}, {output05});
    651   // Phase 3, inputs and outputs
    652   model->identifyInputsAndOutputs(
    653     {input05},
    654     {output05});
    655   // Phase 4: set relaxed execution
    656   model->relaxComputationFloat32toFloat16(true);
    657   assert(model->isValid());
    658 }
    659 
    660 inline bool is_ignored_relaxed_6(int i) {
    661   static std::set<int> ignore = {};
    662   return ignore.find(i) != ignore.end();
    663 }
    664 
    665 void CreateModel_float16_6(Model *model) {
    666   OperandType type10(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 1.0f, 0.5f},1));
    667   OperandType type37(Type::TENSOR_FLOAT16, {2, 3, 4});
    668   // Phase 1, operands
    669   auto input05 = model->addOperand(&type10);
    670   auto output05 = model->addOperand(&type37);
    671   // Phase 2, operations
    672   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input05}, {output05});
    673   // Phase 3, inputs and outputs
    674   model->identifyInputsAndOutputs(
    675     {input05},
    676     {output05});
    677   assert(model->isValid());
    678 }
    679 
    680 inline bool is_ignored_float16_6(int i) {
    681   static std::set<int> ignore = {};
    682   return ignore.find(i) != ignore.end();
    683 }
    684 
    685 void CreateModel_dynamic_output_shape_6(Model *model) {
    686   OperandType type10(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 1.0f, 0.5f},1));
    687   OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0});
    688   // Phase 1, operands
    689   auto input05 = model->addOperand(&type10);
    690   auto output05 = model->addOperand(&type32);
    691   // Phase 2, operations
    692   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input05}, {output05});
    693   // Phase 3, inputs and outputs
    694   model->identifyInputsAndOutputs(
    695     {input05},
    696     {output05});
    697   assert(model->isValid());
    698 }
    699 
    700 inline bool is_ignored_dynamic_output_shape_6(int i) {
    701   static std::set<int> ignore = {};
    702   return ignore.find(i) != ignore.end();
    703 }
    704 
    705 void CreateModel_dynamic_output_shape_relaxed_6(Model *model) {
    706   OperandType type10(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 1.0f, 0.5f},1));
    707   OperandType type32(Type::TENSOR_FLOAT32, {0, 0, 0});
    708   // Phase 1, operands
    709   auto input05 = model->addOperand(&type10);
    710   auto output05 = model->addOperand(&type32);
    711   // Phase 2, operations
    712   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input05}, {output05});
    713   // Phase 3, inputs and outputs
    714   model->identifyInputsAndOutputs(
    715     {input05},
    716     {output05});
    717   // Phase 4: set relaxed execution
    718   model->relaxComputationFloat32toFloat16(true);
    719   assert(model->isValid());
    720 }
    721 
    722 inline bool is_ignored_dynamic_output_shape_relaxed_6(int i) {
    723   static std::set<int> ignore = {};
    724   return ignore.find(i) != ignore.end();
    725 }
    726 
    727 void CreateModel_dynamic_output_shape_float16_6(Model *model) {
    728   OperandType type10(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {2, 3, 4}, 0.0f, 0, SymmPerChannelQuantParams({2.0f, 1.0f, 0.5f},1));
    729   OperandType type33(Type::TENSOR_FLOAT16, {0, 0, 0});
    730   // Phase 1, operands
    731   auto input05 = model->addOperand(&type10);
    732   auto output05 = model->addOperand(&type33);
    733   // Phase 2, operations
    734   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {input05}, {output05});
    735   // Phase 3, inputs and outputs
    736   model->identifyInputsAndOutputs(
    737     {input05},
    738     {output05});
    739   assert(model->isValid());
    740 }
    741 
    742 inline bool is_ignored_dynamic_output_shape_float16_6(int i) {
    743   static std::set<int> ignore = {};
    744   return ignore.find(i) != ignore.end();
    745 }
    746 
    747 void CreateModel_7(Model *model) {
    748   OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.0f, 0);
    749   OperandType type12(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
    750   // Phase 1, operands
    751   auto op1 = model->addOperand(&type11);
    752   auto op2 = model->addOperand(&type12);
    753   // Phase 2, operations
    754   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {op1}, {op2});
    755   // Phase 3, inputs and outputs
    756   model->identifyInputsAndOutputs(
    757     {op1},
    758     {op2});
    759   assert(model->isValid());
    760 }
    761 
    762 inline bool is_ignored_7(int i) {
    763   static std::set<int> ignore = {};
    764   return ignore.find(i) != ignore.end();
    765 }
    766 
    767 void CreateModel_dynamic_output_shape_7(Model *model) {
    768   OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.0f, 0);
    769   OperandType type36(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
    770   // Phase 1, operands
    771   auto op1 = model->addOperand(&type11);
    772   auto op2 = model->addOperand(&type36);
    773   // Phase 2, operations
    774   model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {op1}, {op2});
    775   // Phase 3, inputs and outputs
    776   model->identifyInputsAndOutputs(
    777     {op1},
    778     {op2});
    779   assert(model->isValid());
    780 }
    781 
    782 inline bool is_ignored_dynamic_output_shape_7(int i) {
    783   static std::set<int> ignore = {};
    784   return ignore.find(i) != ignore.end();
    785 }
    786 
// Builds the "zero-sized" DEQUANTIZE test graph:
//   BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> DEQUANTIZE
// All non-tensor inputs are compile-time constants. Several operands are
// declared with a 0 leading dimension (zero-sized tensors), so DEQUANTIZE is
// exercised on a zero-sized quant8 feature map. The only runtime input is
// `in`; the model outputs are scoresOut, classesOut, and the dequantized
// float32 tensor `out`.
void CreateModel_zero_sized(Model *model) {
  // Operand types. Shapes containing 0 are genuinely zero-sized.
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
  OperandType type16(Type::TENSOR_INT32, {0});
  OperandType type17(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
  OperandType type18(Type::TENSOR_INT32, {1});
  OperandType type19(Type::FLOAT32, {});
  OperandType type20(Type::INT32, {});
  OperandType type21(Type::BOOL, {});
  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1}, 0.1f, 128);
  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
  OperandType type24(Type::TENSOR_FLOAT32, {0, 2, 2, 1});
  // Phase 1, operands
  // NOTE: addOperand order fixes operand indices; do not reorder.
  auto scores = model->addOperand(&type13);
  auto roi = model->addOperand(&type14);
  auto param = model->addOperand(&type18);
  auto param1 = model->addOperand(&type19);
  auto param2 = model->addOperand(&type20);
  auto param3 = model->addOperand(&type20);
  auto param4 = model->addOperand(&type19);
  auto param5 = model->addOperand(&type19);
  auto param6 = model->addOperand(&type19);
  auto scoresOut = model->addOperand(&type15);
  auto roiOut = model->addOperand(&type17);
  auto classesOut = model->addOperand(&type16);
  auto batchSplitOut = model->addOperand(&type16);
  auto in = model->addOperand(&type22);
  auto param7 = model->addOperand(&type20);
  auto param8 = model->addOperand(&type20);
  auto param9 = model->addOperand(&type19);
  auto param10 = model->addOperand(&type19);
  auto param11 = model->addOperand(&type20);
  auto param12 = model->addOperand(&type20);
  auto layout = model->addOperand(&type21);
  auto featureMap = model->addOperand(&type23);
  auto out = model->addOperand(&type24);
  // Phase 2, operations
  // Constant values for the scores/rois and all scalar parameters below.
  static uint8_t scores_init[] = {137, 129};
  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static float param1_init[] = {0.3f};
  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static float param4_init[] = {0.4f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  // Pipeline: NMS produces the (zero-sized) ROIs, ROI_ALIGN aligns the
  // feature map over them, and DEQUANTIZE converts it to float32.
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  assert(model->isValid());
}
    866 
    867 inline bool is_ignored_zero_sized(int i) {
    868   static std::set<int> ignore = {};
    869   return ignore.find(i) != ignore.end();
    870 }
    871 
// Same graph as CreateModel_zero_sized (BOX_WITH_NMS_LIMIT -> ROI_ALIGN ->
// DEQUANTIZE on zero-sized tensors), but additionally requests relaxed
// float32 computation (may be executed in float16 precision).
void CreateModel_zero_sized_relaxed(Model *model) {
  // Operand types. Shapes containing 0 are genuinely zero-sized.
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
  OperandType type16(Type::TENSOR_INT32, {0});
  OperandType type17(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
  OperandType type18(Type::TENSOR_INT32, {1});
  OperandType type19(Type::FLOAT32, {});
  OperandType type20(Type::INT32, {});
  OperandType type21(Type::BOOL, {});
  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1}, 0.1f, 128);
  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
  OperandType type24(Type::TENSOR_FLOAT32, {0, 2, 2, 1});
  // Phase 1, operands
  // NOTE: addOperand order fixes operand indices; do not reorder.
  auto scores = model->addOperand(&type13);
  auto roi = model->addOperand(&type14);
  auto param = model->addOperand(&type18);
  auto param1 = model->addOperand(&type19);
  auto param2 = model->addOperand(&type20);
  auto param3 = model->addOperand(&type20);
  auto param4 = model->addOperand(&type19);
  auto param5 = model->addOperand(&type19);
  auto param6 = model->addOperand(&type19);
  auto scoresOut = model->addOperand(&type15);
  auto roiOut = model->addOperand(&type17);
  auto classesOut = model->addOperand(&type16);
  auto batchSplitOut = model->addOperand(&type16);
  auto in = model->addOperand(&type22);
  auto param7 = model->addOperand(&type20);
  auto param8 = model->addOperand(&type20);
  auto param9 = model->addOperand(&type19);
  auto param10 = model->addOperand(&type19);
  auto param11 = model->addOperand(&type20);
  auto param12 = model->addOperand(&type20);
  auto layout = model->addOperand(&type21);
  auto featureMap = model->addOperand(&type23);
  auto out = model->addOperand(&type24);
  // Phase 2, operations
  // Constant values for the scores/rois and all scalar parameters below.
  static uint8_t scores_init[] = {137, 129};
  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static float param1_init[] = {0.3f};
  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static float param4_init[] = {0.4f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  // Pipeline: NMS produces the (zero-sized) ROIs, ROI_ALIGN aligns the
  // feature map over them, and DEQUANTIZE converts it to float32.
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
    953 
    954 inline bool is_ignored_zero_sized_relaxed(int i) {
    955   static std::set<int> ignore = {};
    956   return ignore.find(i) != ignore.end();
    957 }
    958 
// Same graph as CreateModel_zero_sized (BOX_WITH_NMS_LIMIT -> ROI_ALIGN ->
// DEQUANTIZE on zero-sized tensors), but the DEQUANTIZE output `out` is a
// float16 tensor (type38) instead of float32.
void CreateModel_zero_sized_float16(Model *model) {
  // Operand types. Shapes containing 0 are genuinely zero-sized.
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
  OperandType type16(Type::TENSOR_INT32, {0});
  OperandType type17(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
  OperandType type18(Type::TENSOR_INT32, {1});
  OperandType type19(Type::FLOAT32, {});
  OperandType type20(Type::INT32, {});
  OperandType type21(Type::BOOL, {});
  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1}, 0.1f, 128);
  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
  OperandType type38(Type::TENSOR_FLOAT16, {0, 2, 2, 1});
  // Phase 1, operands
  // NOTE: addOperand order fixes operand indices; do not reorder.
  auto scores = model->addOperand(&type13);
  auto roi = model->addOperand(&type14);
  auto param = model->addOperand(&type18);
  auto param1 = model->addOperand(&type19);
  auto param2 = model->addOperand(&type20);
  auto param3 = model->addOperand(&type20);
  auto param4 = model->addOperand(&type19);
  auto param5 = model->addOperand(&type19);
  auto param6 = model->addOperand(&type19);
  auto scoresOut = model->addOperand(&type15);
  auto roiOut = model->addOperand(&type17);
  auto classesOut = model->addOperand(&type16);
  auto batchSplitOut = model->addOperand(&type16);
  auto in = model->addOperand(&type22);
  auto param7 = model->addOperand(&type20);
  auto param8 = model->addOperand(&type20);
  auto param9 = model->addOperand(&type19);
  auto param10 = model->addOperand(&type19);
  auto param11 = model->addOperand(&type20);
  auto param12 = model->addOperand(&type20);
  auto layout = model->addOperand(&type21);
  auto featureMap = model->addOperand(&type23);
  auto out = model->addOperand(&type38);
  // Phase 2, operations
  // Constant values for the scores/rois and all scalar parameters below.
  static uint8_t scores_init[] = {137, 129};
  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static float param1_init[] = {0.3f};
  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static float param4_init[] = {0.4f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  // Pipeline: NMS produces the (zero-sized) ROIs, ROI_ALIGN aligns the
  // feature map over them, and DEQUANTIZE converts it to float16.
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  assert(model->isValid());
}
   1038 
   1039 inline bool is_ignored_zero_sized_float16(int i) {
   1040   static std::set<int> ignore = {};
   1041   return ignore.find(i) != ignore.end();
   1042 }
   1043 
// Same graph as CreateModel_zero_sized (BOX_WITH_NMS_LIMIT -> ROI_ALIGN ->
// DEQUANTIZE on zero-sized tensors), but the DEQUANTIZE output `out` is
// declared with a fully dynamic shape ({0, 0, 0, 0}, type35).
void CreateModel_zero_sized_dynamic_output_shape(Model *model) {
  // Operand types. Shapes containing 0 are zero-sized or dynamic dims.
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
  OperandType type16(Type::TENSOR_INT32, {0});
  OperandType type17(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
  OperandType type18(Type::TENSOR_INT32, {1});
  OperandType type19(Type::FLOAT32, {});
  OperandType type20(Type::INT32, {});
  OperandType type21(Type::BOOL, {});
  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1}, 0.1f, 128);
  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
  OperandType type35(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  // NOTE: addOperand order fixes operand indices; do not reorder.
  auto scores = model->addOperand(&type13);
  auto roi = model->addOperand(&type14);
  auto param = model->addOperand(&type18);
  auto param1 = model->addOperand(&type19);
  auto param2 = model->addOperand(&type20);
  auto param3 = model->addOperand(&type20);
  auto param4 = model->addOperand(&type19);
  auto param5 = model->addOperand(&type19);
  auto param6 = model->addOperand(&type19);
  auto scoresOut = model->addOperand(&type15);
  auto roiOut = model->addOperand(&type17);
  auto classesOut = model->addOperand(&type16);
  auto batchSplitOut = model->addOperand(&type16);
  auto in = model->addOperand(&type22);
  auto param7 = model->addOperand(&type20);
  auto param8 = model->addOperand(&type20);
  auto param9 = model->addOperand(&type19);
  auto param10 = model->addOperand(&type19);
  auto param11 = model->addOperand(&type20);
  auto param12 = model->addOperand(&type20);
  auto layout = model->addOperand(&type21);
  auto featureMap = model->addOperand(&type23);
  auto out = model->addOperand(&type35);
  // Phase 2, operations
  // Constant values for the scores/rois and all scalar parameters below.
  static uint8_t scores_init[] = {137, 129};
  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static float param1_init[] = {0.3f};
  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static float param4_init[] = {0.4f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  // Pipeline: NMS produces the (zero-sized) ROIs, ROI_ALIGN aligns the
  // feature map over them, and DEQUANTIZE converts it to float32.
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  assert(model->isValid());
}
   1123 
   1124 inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
   1125   static std::set<int> ignore = {};
   1126   return ignore.find(i) != ignore.end();
   1127 }
   1128 
// Same graph as CreateModel_zero_sized (BOX_WITH_NMS_LIMIT -> ROI_ALIGN ->
// DEQUANTIZE on zero-sized tensors), with the DEQUANTIZE output `out`
// declared fully dynamic ({0, 0, 0, 0}) AND relaxed fp32 execution requested.
void CreateModel_zero_sized_dynamic_output_shape_relaxed(Model *model) {
  // Operand types. Shapes containing 0 are zero-sized or dynamic dims.
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
  OperandType type16(Type::TENSOR_INT32, {0});
  OperandType type17(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
  OperandType type18(Type::TENSOR_INT32, {1});
  OperandType type19(Type::FLOAT32, {});
  OperandType type20(Type::INT32, {});
  OperandType type21(Type::BOOL, {});
  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1}, 0.1f, 128);
  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
  OperandType type35(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  // NOTE: addOperand order fixes operand indices; do not reorder.
  auto scores = model->addOperand(&type13);
  auto roi = model->addOperand(&type14);
  auto param = model->addOperand(&type18);
  auto param1 = model->addOperand(&type19);
  auto param2 = model->addOperand(&type20);
  auto param3 = model->addOperand(&type20);
  auto param4 = model->addOperand(&type19);
  auto param5 = model->addOperand(&type19);
  auto param6 = model->addOperand(&type19);
  auto scoresOut = model->addOperand(&type15);
  auto roiOut = model->addOperand(&type17);
  auto classesOut = model->addOperand(&type16);
  auto batchSplitOut = model->addOperand(&type16);
  auto in = model->addOperand(&type22);
  auto param7 = model->addOperand(&type20);
  auto param8 = model->addOperand(&type20);
  auto param9 = model->addOperand(&type19);
  auto param10 = model->addOperand(&type19);
  auto param11 = model->addOperand(&type20);
  auto param12 = model->addOperand(&type20);
  auto layout = model->addOperand(&type21);
  auto featureMap = model->addOperand(&type23);
  auto out = model->addOperand(&type35);
  // Phase 2, operations
  // Constant values for the scores/rois and all scalar parameters below.
  static uint8_t scores_init[] = {137, 129};
  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static float param1_init[] = {0.3f};
  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static float param4_init[] = {0.4f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  // Pipeline: NMS produces the (zero-sized) ROIs, ROI_ALIGN aligns the
  // feature map over them, and DEQUANTIZE converts it to float32.
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
   1210 
   1211 inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
   1212   static std::set<int> ignore = {};
   1213   return ignore.find(i) != ignore.end();
   1214 }
   1215 
// Same graph as CreateModel_zero_sized (BOX_WITH_NMS_LIMIT -> ROI_ALIGN ->
// DEQUANTIZE on zero-sized tensors), but the DEQUANTIZE output `out` is a
// float16 tensor with fully dynamic shape ({0, 0, 0, 0}, type36).
void CreateModel_zero_sized_dynamic_output_shape_float16(Model *model) {
  // Operand types. Shapes containing 0 are zero-sized or dynamic dims.
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
  OperandType type16(Type::TENSOR_INT32, {0});
  OperandType type17(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
  OperandType type18(Type::TENSOR_INT32, {1});
  OperandType type19(Type::FLOAT32, {});
  OperandType type20(Type::INT32, {});
  OperandType type21(Type::BOOL, {});
  OperandType type22(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1}, 0.1f, 128);
  OperandType type23(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
  OperandType type36(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  // Phase 1, operands
  // NOTE: addOperand order fixes operand indices; do not reorder.
  auto scores = model->addOperand(&type13);
  auto roi = model->addOperand(&type14);
  auto param = model->addOperand(&type18);
  auto param1 = model->addOperand(&type19);
  auto param2 = model->addOperand(&type20);
  auto param3 = model->addOperand(&type20);
  auto param4 = model->addOperand(&type19);
  auto param5 = model->addOperand(&type19);
  auto param6 = model->addOperand(&type19);
  auto scoresOut = model->addOperand(&type15);
  auto roiOut = model->addOperand(&type17);
  auto classesOut = model->addOperand(&type16);
  auto batchSplitOut = model->addOperand(&type16);
  auto in = model->addOperand(&type22);
  auto param7 = model->addOperand(&type20);
  auto param8 = model->addOperand(&type20);
  auto param9 = model->addOperand(&type19);
  auto param10 = model->addOperand(&type19);
  auto param11 = model->addOperand(&type20);
  auto param12 = model->addOperand(&type20);
  auto layout = model->addOperand(&type21);
  auto featureMap = model->addOperand(&type23);
  auto out = model->addOperand(&type36);
  // Phase 2, operations
  // Constant values for the scores/rois and all scalar parameters below.
  static uint8_t scores_init[] = {137, 129};
  model->setOperandValue(scores, scores_init, sizeof(uint8_t) * 2);
  static uint16_t roi_init[] = {8, 8, 80, 80, 0, 0, 80, 80};
  model->setOperandValue(roi, roi_init, sizeof(uint16_t) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static float param1_init[] = {0.3f};
  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static float param4_init[] = {0.4f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  // Pipeline: NMS produces the (zero-sized) ROIs, ROI_ALIGN aligns the
  // feature map over them, and DEQUANTIZE converts it to float16.
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  assert(model->isValid());
}
   1295 
   1296 inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
   1297   static std::set<int> ignore = {};
   1298   return ignore.find(i) != ignore.end();
   1299 }
   1300 
   1301