// clang-format off
// Generated file (from: pad_float16.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type2(Type::TENSOR_FLOAT16, {1, 4, 4, 1});
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t op2_init[] = {0, 0, 1, 1, 1, 1, 0, 0};
  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {op1, op2}, {op3});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op3});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT16, {1, 2, 2, 1});
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type3(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t op2_init[] = {0, 0, 1, 1, 1, 1, 0, 0};
  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {op1, op2}, {op3});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op3});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}