// clang-format off
// Generated file (from: abs.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 4, 5});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto output0 = model->addOperand(&type0);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
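
// A minimal usage sketch for the generated CreateModel* entry points in this
// file (illustrative only; it assumes the NNAPI test wrapper's Model,
// Compilation and Execution classes -- the driver method names and signatures
// shown here are assumptions, not part of this generated file):
//
//   Model model;
//   CreateModel(&model);
//   model.finish();
//   Compilation compilation(&model);
//   compilation.finish();
//   Execution execution(&compilation);
//   std::vector<float> in(1 * 2 * 3 * 4 * 5, -1.0f), out(in.size());
//   execution.setInput(0, in.data(), in.size() * sizeof(float));
//   execution.setOutput(0, out.data(), out.size() * sizeof(float));
//   execution.compute();   // ABS: expects out[i] == std::abs(in[i])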

void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 4, 5});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto output0 = model->addOperand(&type0);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
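
// Note on the *_relaxed variants (here and further below): calling
// relaxComputationFloat32toFloat16(true) permits drivers to compute
// TENSOR_FLOAT32 operands with the range and precision of IEEE 754 16-bit
// floating point instead of requiring full 32-bit precision.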

void CreateModel_float16(Model *model) {
  OperandType type1(Type::TENSOR_FLOAT16, {1, 2, 3, 4, 5});
  // Phase 1, operands
  auto input0 = model->addOperand(&type1);
  auto output0 = model->addOperand(&type1);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 4, 5});
  OperandType type2(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
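
// In the *_dynamic_output_shape variants (here and below) the output operand
// is declared with every dimension set to 0, i.e. its shape is left
// unspecified in the model and is resolved by the runtime when the model is
// executed. A test driver could check the resolved shape after compute(),
// e.g. (illustrative; it assumes the test wrapper exposes a
// getOutputOperandDimensions call):
//
//   std::vector<uint32_t> dims;
//   execution.getOutputOperandDimensions(0, &dims);
//   // expects dims == {1, 2, 3, 4, 5}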

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 4, 5});
  OperandType type2(Type::TENSOR_FLOAT32, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type1(Type::TENSOR_FLOAT16, {1, 2, 3, 4, 5});
  OperandType type3(Type::TENSOR_FLOAT16, {0, 0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type1);
  auto output0 = model->addOperand(&type3);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_ABS, {input0}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}