// clang-format off
// Generated file (from: log_softmax.mod.py). Do not edit
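//
// Each CreateModel* function below builds one test graph for the NNAPI
// ANEURALNETWORKS_LOG_SOFTMAX operation, which takes three inputs in order:
// the logits tensor, a positive scalar beta, and an int32 axis. Per the NNAPI
// definition, the output is input * beta - log(reduce_sum(exp(input * beta), axis)).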
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type1);
  auto param1 = model->addOperand(&type2);
  auto output0 = model->addOperand(&type0);
  // Phase 2, operations
  static float param_init[] = {1.0f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {4};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

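// The *_relaxed variants below rebuild the same graph but allow FP32 arithmetic
// to run at FP16 range and precision (relaxComputationFloat32toFloat16), the
// *_float16 variants use TENSOR_FLOAT16/FLOAT16 operands directly, and the
// *_dynamic_output_shape variants declare the output with dimensions
// {0, 0, 0, 0, 0} so the output shape must be deduced at execution time.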
void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type1);
  auto param1 = model->addOperand(&type2);
  auto output0 = model->addOperand(&type0);
  // Phase 2, operations
  static float param_init[] = {1.0f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {4};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type5(Type::TENSOR_FLOAT16, {1, 1, 1, 2, 4});
  OperandType type6(Type::FLOAT16, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type2);
  auto output0 = model->addOperand(&type5);
  // Phase 2, operations
  static _Float16 param_init[] = {1.0f};
  model->setOperandValue(param, param_init, sizeof(_Float16) * 1);
  static int32_t param1_init[] = {4};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type1);
  auto param1 = model->addOperand(&type2);
  auto output0 = model->addOperand(&type7);
  // Phase 2, operations
  static float param_init[] = {1.0f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {4};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type1);
  auto param1 = model->addOperand(&type2);
  auto output0 = model->addOperand(&type7);
  // Phase 2, operations
  static float param_init[] = {1.0f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {4};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type5(Type::TENSOR_FLOAT16, {1, 1, 1, 2, 4});
  OperandType type6(Type::FLOAT16, {});
  OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type2);
  auto output0 = model->addOperand(&type8);
  // Phase 2, operations
  static _Float16 param_init[] = {1.0f};
  model->setOperandValue(param, param_init, sizeof(_Float16) * 1);
  static int32_t param1_init[] = {4};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

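// The *_2 model group repeats the test with axis = -1, i.e. the last dimension
// of the {1, 1, 1, 4, 2} input.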
void CreateModel_2(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 4, 2});
  // Phase 1, operands
  auto input01 = model->addOperand(&type3);
  auto param2 = model->addOperand(&type1);
  auto param3 = model->addOperand(&type2);
  auto output01 = model->addOperand(&type3);
  // Phase 2, operations
  static float param2_init[] = {1.0f};
  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_2(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 4, 2});
  // Phase 1, operands
  auto input01 = model->addOperand(&type3);
  auto param2 = model->addOperand(&type1);
  auto param3 = model->addOperand(&type2);
  auto output01 = model->addOperand(&type3);
  // Phase 2, operations
  static float param2_init[] = {1.0f};
  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_2(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type6(Type::FLOAT16, {});
  OperandType type9(Type::TENSOR_FLOAT16, {1, 1, 1, 4, 2});
  // Phase 1, operands
  auto input01 = model->addOperand(&type9);
  auto param2 = model->addOperand(&type6);
  auto param3 = model->addOperand(&type2);
  auto output01 = model->addOperand(&type9);
  // Phase 2, operations
  static _Float16 param2_init[] = {1.0f};
  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_2(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 4, 2});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input01 = model->addOperand(&type3);
  auto param2 = model->addOperand(&type1);
  auto param3 = model->addOperand(&type2);
  auto output01 = model->addOperand(&type7);
  // Phase 2, operations
  static float param2_init[] = {1.0f};
  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 4, 2});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input01 = model->addOperand(&type3);
  auto param2 = model->addOperand(&type1);
  auto param3 = model->addOperand(&type2);
  auto output01 = model->addOperand(&type7);
  // Phase 2, operations
  static float param2_init[] = {1.0f};
  model->setOperandValue(param2, param2_init, sizeof(float) * 1);
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_2(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type6(Type::FLOAT16, {});
  OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0, 0, 0});
  OperandType type9(Type::TENSOR_FLOAT16, {1, 1, 1, 4, 2});
  // Phase 1, operands
  auto input01 = model->addOperand(&type9);
  auto param2 = model->addOperand(&type6);
  auto param3 = model->addOperand(&type2);
  auto output01 = model->addOperand(&type8);
  // Phase 2, operations
  static _Float16 param2_init[] = {1.0f};
  model->setOperandValue(param2, param2_init, sizeof(_Float16) * 1);
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

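// The *_3 model group uses axis = -3 on a {1, 1, 2, 4, 1} input, exercising a
// negative axis that does not resolve to the last dimension.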
void CreateModel_3(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 2, 4, 1});
  // Phase 1, operands
  auto input02 = model->addOperand(&type4);
  auto param4 = model->addOperand(&type1);
  auto param5 = model->addOperand(&type2);
  auto output02 = model->addOperand(&type4);
  // Phase 2, operations
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static int32_t param5_init[] = {-3};
  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_3(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 2, 4, 1});
  // Phase 1, operands
  auto input02 = model->addOperand(&type4);
  auto param4 = model->addOperand(&type1);
  auto param5 = model->addOperand(&type2);
  auto output02 = model->addOperand(&type4);
  // Phase 2, operations
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static int32_t param5_init[] = {-3};
  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_3(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {1, 1, 2, 4, 1});
  OperandType type2(Type::INT32, {});
  OperandType type6(Type::FLOAT16, {});
  // Phase 1, operands
  auto input02 = model->addOperand(&type10);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type2);
  auto output02 = model->addOperand(&type10);
  // Phase 2, operations
  static _Float16 param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
  static int32_t param5_init[] = {-3};
  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_float16_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_3(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 2, 4, 1});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input02 = model->addOperand(&type4);
  auto param4 = model->addOperand(&type1);
  auto param5 = model->addOperand(&type2);
  auto output02 = model->addOperand(&type7);
  // Phase 2, operations
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static int32_t param5_init[] = {-3};
  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_3(Model *model) {
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 2, 4, 1});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input02 = model->addOperand(&type4);
  auto param4 = model->addOperand(&type1);
  auto param5 = model->addOperand(&type2);
  auto output02 = model->addOperand(&type7);
  // Phase 2, operations
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static int32_t param5_init[] = {-3};
  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_3(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {1, 1, 2, 4, 1});
  OperandType type2(Type::INT32, {});
  OperandType type6(Type::FLOAT16, {});
  OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input02 = model->addOperand(&type10);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type2);
  auto output02 = model->addOperand(&type8);
  // Phase 2, operations
  static _Float16 param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
  static int32_t param5_init[] = {-3};
  model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

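// The *_4 model group repeats the first shape ({1, 1, 1, 2, 4}, axis = 4) with
// beta = 10 instead of 1.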
void CreateModel_4(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  // Phase 1, operands
  auto input03 = model->addOperand(&type0);
  auto param6 = model->addOperand(&type1);
  auto param7 = model->addOperand(&type2);
  auto output03 = model->addOperand(&type0);
  // Phase 2, operations
  static float param6_init[] = {10.0f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {4};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_4(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  // Phase 1, operands
  auto input03 = model->addOperand(&type0);
  auto param6 = model->addOperand(&type1);
  auto param7 = model->addOperand(&type2);
  auto output03 = model->addOperand(&type0);
  // Phase 2, operations
  static float param6_init[] = {10.0f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {4};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_4(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type5(Type::TENSOR_FLOAT16, {1, 1, 1, 2, 4});
  OperandType type6(Type::FLOAT16, {});
  // Phase 1, operands
  auto input03 = model->addOperand(&type5);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type2);
  auto output03 = model->addOperand(&type5);
  // Phase 2, operations
  static _Float16 param6_init[] = {10.0f};
  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
  static int32_t param7_init[] = {4};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_float16_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_4(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input03 = model->addOperand(&type0);
  auto param6 = model->addOperand(&type1);
  auto param7 = model->addOperand(&type2);
  auto output03 = model->addOperand(&type7);
  // Phase 2, operations
  static float param6_init[] = {10.0f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {4};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_4(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 1, 2, 4});
  OperandType type1(Type::FLOAT32, {});
  OperandType type2(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input03 = model->addOperand(&type0);
  auto param6 = model->addOperand(&type1);
  auto param7 = model->addOperand(&type2);
  auto output03 = model->addOperand(&type7);
  // Phase 2, operations
  static float param6_init[] = {10.0f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {4};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_4(Model *model) {
  OperandType type2(Type::INT32, {});
  OperandType type5(Type::TENSOR_FLOAT16, {1, 1, 1, 2, 4});
  OperandType type6(Type::FLOAT16, {});
  OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input03 = model->addOperand(&type5);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type2);
  auto output03 = model->addOperand(&type8);
  // Phase 2, operations
  static _Float16 param6_init[] = {10.0f};
  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
  static int32_t param7_init[] = {4};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_LOG_SOFTMAX, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
