// clang-format off
// Generated file (from: squeeze_quant8_1.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 24, 1}, 1.0f, 0);
  OperandType type1(Type::TENSOR_INT32, {1});
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 24}, 1.0f, 0);
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto squeezeDims = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t squeezeDims_init[] = {2};
  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 24, 1}, 1.0f, 0);
  OperandType type1(Type::TENSOR_INT32, {1});
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 1.0f, 0);
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto squeezeDims = model->addOperand(&type1);
  auto output = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t squeezeDims_init[] = {2};
  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
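
// Usage note (not part of the generated output): these functions are invoked
// by an external NNAPI test harness that supplies the wrapper Model type.
// A minimal sketch, assuming that harness; the finish/compile/execute calls
// below are assumptions about the test wrapper, not defined in this file:
//
//   Model model;
//   CreateModel(&model);   // builds SQUEEZE on a {1, 24, 1} quant8 input,
//                          // squeezeDims = {2}, yielding a {1, 24} output
//                          // with the same scale (1.0f) and zeroPoint (0)
//   model.finish();        // assumption: the wrapper finalizes the model here
//   // ...the harness then compiles the model, feeds the quant8 input buffer,
//   // executes, and compares the output against the reference values
//   // generated from squeeze_quant8_1.mod.py; is_ignored() marks outputs
//   // to skip during comparison (none for this test).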