// Extraction artifact (code-viewer navigation header), commented out so the
// file remains valid C++: Home | History | Annotate | Download | only in models
      1 // clang-format off
      2 // Generated file (from: prelu.mod.py). Do not edit
// Generated PRELU test model: float32 input {1,2,2,3}, constant per-channel
// alpha {1,1,3} (broadcast against the input), output typed like the input.
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type0);
  // Phase 2, operations
  // Constant alpha values; static so the buffer's lifetime spans model use.
  static float alpha_init[] = {0.0f, 1.0f, 2.0f};
  model->setOperandValue(alpha, alpha_init, sizeof(float) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
     20 
     21 inline bool is_ignored(int i) {
     22   static std::set<int> ignore = {};
     23   return ignore.find(i) != ignore.end();
     24 }
     25 
// Same float32 PRELU model as CreateModel, but with relaxed execution:
// the driver may compute in float16 precision.
void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type0);
  // Phase 2, operations
  // Constant alpha values; static so the buffer's lifetime spans model use.
  static float alpha_init[] = {0.0f, 1.0f, 2.0f};
  model->setOperandValue(alpha, alpha_init, sizeof(float) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
     45 
     46 inline bool is_ignored_relaxed(int i) {
     47   static std::set<int> ignore = {};
     48   return ignore.find(i) != ignore.end();
     49 }
     50 
// Quantized PRELU variant: input quant8 scale=0.25 zp=128, constant alpha
// scale=0.25 zp=50, output scale=0.5 zp=120.
void CreateModel_quant8(Model *model) {
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.5f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type4);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type2's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 54, 58};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
     69 
     70 inline bool is_ignored_quant8(int i) {
     71   static std::set<int> ignore = {};
     72   return ignore.find(i) != ignore.end();
     73 }
     74 
// Quantized PRELU variant 2: same input/alpha quantization as quant8, but the
// output uses scale=0.25 zp=120.
void CreateModel_quant8_2(Model *model) {
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type5);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type2's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 54, 58};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
     93 
     94 inline bool is_ignored_quant8_2(int i) {
     95   static std::set<int> ignore = {};
     96   return ignore.find(i) != ignore.end();
     97 }
     98 
// Quantized PRELU variant 3: alpha uses scale=0.5 zp=50 and the output uses
// scale=0.125 zp=120.
void CreateModel_quant8_3(Model *model) {
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.125f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type7);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type6's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 52, 54};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    117 
    118 inline bool is_ignored_quant8_3(int i) {
    119   static std::set<int> ignore = {};
    120   return ignore.find(i) != ignore.end();
    121 }
    122 
// Quantized PRELU variant 4: alpha uses scale=0.5 zp=50 and the output uses
// scale=0.1 zp=120.
void CreateModel_quant8_4(Model *model) {
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.1f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type8);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type6's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 52, 54};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    141 
    142 inline bool is_ignored_quant8_4(int i) {
    143   static std::set<int> ignore = {};
    144   return ignore.find(i) != ignore.end();
    145 }
    146 
// Float16 PRELU variant: same shapes as the float32 model, with _Float16
// storage for the constant alpha.
void CreateModel_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {1, 2, 2, 3});
  OperandType type9(Type::TENSOR_FLOAT16, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type10);
  auto alpha = model->addOperand(&type9);
  auto output = model->addOperand(&type10);
  // Phase 2, operations
  // Constant alpha values; static so the buffer's lifetime spans model use.
  static _Float16 alpha_init[] = {0.0f, 1.0f, 2.0f};
  model->setOperandValue(alpha, alpha_init, sizeof(_Float16) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    164 
    165 inline bool is_ignored_float16(int i) {
    166   static std::set<int> ignore = {};
    167   return ignore.find(i) != ignore.end();
    168 }
    169 
// Float32 PRELU variant where alpha is a runtime model input rather than a
// constant: no setOperandValue; {input, alpha} are both model inputs.
void CreateModel_weight_as_input(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type0);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    185 
    186 inline bool is_ignored_weight_as_input(int i) {
    187   static std::set<int> ignore = {};
    188   return ignore.find(i) != ignore.end();
    189 }
    190 
// weight-as-input PRELU variant with relaxed (float16-precision) execution.
void CreateModel_weight_as_input_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type0);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
    208 
    209 inline bool is_ignored_weight_as_input_relaxed(int i) {
    210   static std::set<int> ignore = {};
    211   return ignore.find(i) != ignore.end();
    212 }
    213 
// Quantized weight-as-input variant: same quantization as CreateModel_quant8
// (output scale=0.5 zp=120), alpha supplied at execution time.
void CreateModel_weight_as_input_quant8(Model *model) {
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.5f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type4);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    230 
    231 inline bool is_ignored_weight_as_input_quant8(int i) {
    232   static std::set<int> ignore = {};
    233   return ignore.find(i) != ignore.end();
    234 }
    235 
// Quantized weight-as-input variant 2: output scale=0.25 zp=120, alpha
// supplied at execution time.
void CreateModel_weight_as_input_quant8_2(Model *model) {
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type5);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    252 
    253 inline bool is_ignored_weight_as_input_quant8_2(int i) {
    254   static std::set<int> ignore = {};
    255   return ignore.find(i) != ignore.end();
    256 }
    257 
// Quantized weight-as-input variant 3: alpha scale=0.5 zp=50, output
// scale=0.125 zp=120, alpha supplied at execution time.
void CreateModel_weight_as_input_quant8_3(Model *model) {
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.125f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type7);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    274 
    275 inline bool is_ignored_weight_as_input_quant8_3(int i) {
    276   static std::set<int> ignore = {};
    277   return ignore.find(i) != ignore.end();
    278 }
    279 
// Quantized weight-as-input variant 4: alpha scale=0.5 zp=50, output
// scale=0.1 zp=120, alpha supplied at execution time.
void CreateModel_weight_as_input_quant8_4(Model *model) {
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.1f, 120);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type8);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    296 
    297 inline bool is_ignored_weight_as_input_quant8_4(int i) {
    298   static std::set<int> ignore = {};
    299   return ignore.find(i) != ignore.end();
    300 }
    301 
// Float16 weight-as-input variant: alpha is a runtime input; no constant data.
void CreateModel_weight_as_input_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {1, 2, 2, 3});
  OperandType type9(Type::TENSOR_FLOAT16, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type10);
  auto alpha = model->addOperand(&type9);
  auto output = model->addOperand(&type10);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    317 
    318 inline bool is_ignored_weight_as_input_float16(int i) {
    319   static std::set<int> ignore = {};
    320   return ignore.find(i) != ignore.end();
    321 }
    322 
// Dynamic-output-shape variant: the output's dimensions are all 0 (unknown),
// exercising shape inference at execution time.
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type11);
  // Phase 2, operations
  // Constant alpha values; static so the buffer's lifetime spans model use.
  static float alpha_init[] = {0.0f, 1.0f, 2.0f};
  model->setOperandValue(alpha, alpha_init, sizeof(float) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    341 
    342 inline bool is_ignored_dynamic_output_shape(int i) {
    343   static std::set<int> ignore = {};
    344   return ignore.find(i) != ignore.end();
    345 }
    346 
// Dynamic-output-shape variant with relaxed (float16-precision) execution.
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type11);
  // Phase 2, operations
  // Constant alpha values; static so the buffer's lifetime spans model use.
  static float alpha_init[] = {0.0f, 1.0f, 2.0f};
  model->setOperandValue(alpha, alpha_init, sizeof(float) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
    367 
    368 inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
    369   static std::set<int> ignore = {};
    370   return ignore.find(i) != ignore.end();
    371 }
    372 
// Quantized dynamic-output-shape variant: output scale=0.5 zp=120 with
// unknown (all-zero) dimensions.
void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 120);
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type12);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type2's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 54, 58};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    391 
    392 inline bool is_ignored_dynamic_output_shape_quant8(int i) {
    393   static std::set<int> ignore = {};
    394   return ignore.find(i) != ignore.end();
    395 }
    396 
// Quantized dynamic-output-shape variant 2: output scale=0.25 zp=120 with
// unknown (all-zero) dimensions.
void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.25f, 120);
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type13);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type2's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 54, 58};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    415 
    416 inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
    417   static std::set<int> ignore = {};
    418   return ignore.find(i) != ignore.end();
    419 }
    420 
// Quantized dynamic-output-shape variant 3: alpha scale=0.5 zp=50, output
// scale=0.125 zp=120 with unknown (all-zero) dimensions.
void CreateModel_dynamic_output_shape_quant8_3(Model *model) {
  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.125f, 120);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type14);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type6's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 52, 54};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    439 
    440 inline bool is_ignored_dynamic_output_shape_quant8_3(int i) {
    441   static std::set<int> ignore = {};
    442   return ignore.find(i) != ignore.end();
    443 }
    444 
// Quantized dynamic-output-shape variant 4: alpha scale=0.5 zp=50, output
// scale=0.1 zp=120 with unknown (all-zero) dimensions.
void CreateModel_dynamic_output_shape_quant8_4(Model *model) {
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 120);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type15);
  // Phase 2, operations
  // Quantized alpha values (raw uint8, interpreted via type6's scale/zeroPoint).
  static uint8_t alpha_init[] = {50, 52, 54};
  model->setOperandValue(alpha, alpha_init, sizeof(uint8_t) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    463 
    464 inline bool is_ignored_dynamic_output_shape_quant8_4(int i) {
    465   static std::set<int> ignore = {};
    466   return ignore.find(i) != ignore.end();
    467 }
    468 
// Float16 dynamic-output-shape variant: unknown (all-zero) output dimensions.
void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {1, 2, 2, 3});
  OperandType type16(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  OperandType type9(Type::TENSOR_FLOAT16, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type10);
  auto alpha = model->addOperand(&type9);
  auto output = model->addOperand(&type16);
  // Phase 2, operations
  // Constant alpha values; static so the buffer's lifetime spans model use.
  static _Float16 alpha_init[] = {0.0f, 1.0f, 2.0f};
  model->setOperandValue(alpha, alpha_init, sizeof(_Float16) * 3);
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}
    487 
    488 inline bool is_ignored_dynamic_output_shape_float16(int i) {
    489   static std::set<int> ignore = {};
    490   return ignore.find(i) != ignore.end();
    491 }
    492 
// Combines dynamic output shape with alpha as a runtime input (no constants).
void CreateModel_dynamic_output_shape_weight_as_input(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type11);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    509 
    510 inline bool is_ignored_dynamic_output_shape_weight_as_input(int i) {
    511   static std::set<int> ignore = {};
    512   return ignore.find(i) != ignore.end();
    513 }
    514 
// Dynamic output shape + runtime alpha input + relaxed (float16) execution.
void CreateModel_dynamic_output_shape_weight_as_input_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 1, 3});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto alpha = model->addOperand(&type1);
  auto output = model->addOperand(&type11);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
    533 
    534 inline bool is_ignored_dynamic_output_shape_weight_as_input_relaxed(int i) {
    535   static std::set<int> ignore = {};
    536   return ignore.find(i) != ignore.end();
    537 }
    538 
// Quantized dynamic-output-shape + runtime alpha: output scale=0.5 zp=120.
void CreateModel_dynamic_output_shape_weight_as_input_quant8(Model *model) {
  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 120);
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type12);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    555 
    556 inline bool is_ignored_dynamic_output_shape_weight_as_input_quant8(int i) {
    557   static std::set<int> ignore = {};
    558   return ignore.find(i) != ignore.end();
    559 }
    560 
// Quantized dynamic-output-shape + runtime alpha, variant 2: output
// scale=0.25 zp=120.
void CreateModel_dynamic_output_shape_weight_as_input_quant8_2(Model *model) {
  OperandType type13(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.25f, 120);
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.25f, 50);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type2);
  auto output = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    577 
    578 inline bool is_ignored_dynamic_output_shape_weight_as_input_quant8_2(int i) {
    579   static std::set<int> ignore = {};
    580   return ignore.find(i) != ignore.end();
    581 }
    582 
// Quantized dynamic-output-shape + runtime alpha, variant 3: alpha scale=0.5
// zp=50, output scale=0.125 zp=120.
void CreateModel_dynamic_output_shape_weight_as_input_quant8_3(Model *model) {
  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.125f, 120);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type14);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    599 
    600 inline bool is_ignored_dynamic_output_shape_weight_as_input_quant8_3(int i) {
    601   static std::set<int> ignore = {};
    602   return ignore.find(i) != ignore.end();
    603 }
    604 
// Quantized dynamic-output-shape + runtime alpha, variant 4: alpha scale=0.5
// zp=50, output scale=0.1 zp=120.
void CreateModel_dynamic_output_shape_weight_as_input_quant8_4(Model *model) {
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 120);
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 3}, 0.25f, 128);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 1, 3}, 0.5f, 50);
  // Phase 1, operands
  auto input = model->addOperand(&type3);
  auto alpha = model->addOperand(&type6);
  auto output = model->addOperand(&type15);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    621 
    622 inline bool is_ignored_dynamic_output_shape_weight_as_input_quant8_4(int i) {
    623   static std::set<int> ignore = {};
    624   return ignore.find(i) != ignore.end();
    625 }
    626 
// Float16 dynamic-output-shape + runtime alpha variant (no constant data).
void CreateModel_dynamic_output_shape_weight_as_input_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {1, 2, 2, 3});
  OperandType type16(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  OperandType type9(Type::TENSOR_FLOAT16, {1, 1, 3});
  // Phase 1, operands
  auto input = model->addOperand(&type10);
  auto alpha = model->addOperand(&type9);
  auto output = model->addOperand(&type16);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_PRELU, {input, alpha}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, alpha},
    {output});
  assert(model->isValid());
}
    643 
    644 inline bool is_ignored_dynamic_output_shape_weight_as_input_float16(int i) {
    645   static std::set<int> ignore = {};
    646   return ignore.find(i) != ignore.end();
    647 }
    648 
    649