// clang-format off
// Generated file (from: gather_higher_rank.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 2});
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 3, 2, 2});
  OperandType type3(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 2});
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 3, 2, 2});
  OperandType type3(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type3(Type::INT32, {});
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2}, 0.5f, 127);
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type4);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_int32(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type3(Type::INT32, {});
  OperandType type6(Type::TENSOR_INT32, {1, 3, 2});
  OperandType type7(Type::TENSOR_INT32, {1, 3, 2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type6);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type7);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 2});
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type3(Type::INT32, {});
  OperandType type8(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 2});
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type3(Type::INT32, {});
  OperandType type8(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type3(Type::INT32, {});
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2}, 0.5f, 127);
  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type4);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type9);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_int32(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3, 2});
  OperandType type10(Type::TENSOR_INT32, {0, 0, 0, 0});
  OperandType type3(Type::INT32, {});
  OperandType type6(Type::TENSOR_INT32, {1, 3, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type6);
  auto param = model->addOperand(&type3);
  auto indices = model->addOperand(&type1);
  auto output0 = model->addOperand(&type10);
  // Phase 2, operations
  static int32_t param_init[] = {1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, indices}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, indices},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
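
// Illustrative sketch only (not part of the generated output above): a minimal
// reference implementation of what GATHER with axis = 1 computes for the shapes
// used in these models, i.e. input {1, 3, 2} gathered with indices {3, 2} yields
// output {1, 3, 2, 2}. The helper name ReferenceGatherAxis1 and its signature are
// hypothetical and are not part of the NNAPI test harness; they only demonstrate
// the expected semantics, assuming row-major (flattened) storage.
#include <cstdint>
#include <vector>

inline std::vector<float> ReferenceGatherAxis1(
    const std::vector<float>& input,      // flattened {1, 3, 2}
    const std::vector<int32_t>& indices)  // flattened {3, 2}
{
  const int batch = 1, axisDim = 3, inner = 2;  // input dims {batch, axisDim, inner}
  const int idxRows = 3, idxCols = 2;           // indices dims {idxRows, idxCols}
  // The axis-1 dimension is replaced by the indices shape:
  // output shape is {batch, idxRows, idxCols, inner}.
  std::vector<float> output(batch * idxRows * idxCols * inner);
  for (int b = 0; b < batch; ++b) {
    for (int r = 0; r < idxRows; ++r) {
      for (int c = 0; c < idxCols; ++c) {
        const int32_t gathered = indices[r * idxCols + c];  // index along axis 1
        for (int i = 0; i < inner; ++i) {
          output[((b * idxRows + r) * idxCols + c) * inner + i] =
              input[(b * axisDim + gathered) * inner + i];
        }
      }
    }
  }
  return output;
}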