// clang-format off
// Generated file (from: lstm_state2_float16.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT16, {1, 2});
  OperandType type1(Type::TENSOR_FLOAT16, {4, 2});
  OperandType type2(Type::TENSOR_FLOAT16, {4, 4});
  OperandType type3(Type::TENSOR_FLOAT16, {0});
  OperandType type4(Type::TENSOR_FLOAT16, {4});
  OperandType type5(Type::TENSOR_FLOAT16, {0, 0});
  OperandType type6(Type::TENSOR_FLOAT16, {1, 4});
  OperandType type7(Type::INT32, {});
  OperandType type8(Type::FLOAT16, {});
  OperandType type9(Type::TENSOR_FLOAT16, {1, 16});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto input_to_input_weights = model->addOperand(&type1);
  auto input_to_forget_weights = model->addOperand(&type1);
  auto input_to_cell_weights = model->addOperand(&type1);
  auto input_to_output_weights = model->addOperand(&type1);
  auto recurrent_to_intput_weights = model->addOperand(&type2);
  auto recurrent_to_forget_weights = model->addOperand(&type2);
  auto recurrent_to_cell_weights = model->addOperand(&type2);
  auto recurrent_to_output_weights = model->addOperand(&type2);
  auto cell_to_input_weights = model->addOperand(&type3);
  auto cell_to_forget_weights = model->addOperand(&type3);
  auto cell_to_output_weights = model->addOperand(&type3);
  auto input_gate_bias = model->addOperand(&type4);
  auto forget_gate_bias = model->addOperand(&type4);
  auto cell_gate_bias = model->addOperand(&type4);
  auto output_gate_bias = model->addOperand(&type4);
  auto projection_weights = model->addOperand(&type5);
  auto projection_bias = model->addOperand(&type3);
  auto output_state_in = model->addOperand(&type6);
  auto cell_state_in = model->addOperand(&type6);
  auto activation_param = model->addOperand(&type7);
  auto cell_clip_param = model->addOperand(&type8);
  auto proj_clip_param = model->addOperand(&type8);
  auto scratch_buffer = model->addOperand(&type9);
  auto output_state_out = model->addOperand(&type6);
  auto cell_state_out = model->addOperand(&type6);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t activation_param_init[] = {4};
  model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
  static _Float16 cell_clip_param_init[] = {0.0f};
  model->setOperandValue(cell_clip_param, cell_clip_param_init, sizeof(_Float16) * 1);
  static _Float16 proj_clip_param_init[] = {0.0f};
  model->setOperandValue(proj_clip_param, proj_clip_param_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in},
    {scratch_buffer, output_state_out, cell_state_out, output});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {0, 1, 2};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT16, {1, 2});
  OperandType type1(Type::TENSOR_FLOAT16, {4, 2});
  OperandType type2(Type::TENSOR_FLOAT16, {4, 4});
  OperandType type3(Type::TENSOR_FLOAT16, {0});
  OperandType type4(Type::TENSOR_FLOAT16, {4});
  OperandType type5(Type::TENSOR_FLOAT16, {0, 0});
  OperandType type6(Type::TENSOR_FLOAT16, {1, 4});
  OperandType type7(Type::INT32, {});
  OperandType type8(Type::FLOAT16, {});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto input_to_input_weights = model->addOperand(&type1);
  auto input_to_forget_weights = model->addOperand(&type1);
  auto input_to_cell_weights = model->addOperand(&type1);
  auto input_to_output_weights = model->addOperand(&type1);
  auto recurrent_to_intput_weights = model->addOperand(&type2);
  auto recurrent_to_forget_weights = model->addOperand(&type2);
  auto recurrent_to_cell_weights = model->addOperand(&type2);
  auto recurrent_to_output_weights = model->addOperand(&type2);
  auto cell_to_input_weights = model->addOperand(&type3);
  auto cell_to_forget_weights = model->addOperand(&type3);
  auto cell_to_output_weights = model->addOperand(&type3);
  auto input_gate_bias = model->addOperand(&type4);
  auto forget_gate_bias = model->addOperand(&type4);
  auto cell_gate_bias = model->addOperand(&type4);
  auto output_gate_bias = model->addOperand(&type4);
  auto projection_weights = model->addOperand(&type5);
  auto projection_bias = model->addOperand(&type3);
  auto output_state_in = model->addOperand(&type6);
  auto cell_state_in = model->addOperand(&type6);
  auto activation_param = model->addOperand(&type7);
  auto cell_clip_param = model->addOperand(&type8);
  auto proj_clip_param = model->addOperand(&type8);
  auto scratch_buffer = model->addOperand(&type5);
  auto output_state_out = model->addOperand(&type5);
  auto cell_state_out = model->addOperand(&type5);
  auto output = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t activation_param_init[] = {4};
  model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
  static _Float16 cell_clip_param_init[] = {0.0f};
  model->setOperandValue(cell_clip_param, cell_clip_param_init, sizeof(_Float16) * 1);
  static _Float16 proj_clip_param_init[] = {0.0f};
  model->setOperandValue(proj_clip_param, proj_clip_param_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in},
    {scratch_buffer, output_state_out, cell_state_out, output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {0, 1, 2};
  return ignore.find(i) != ignore.end();
}