// DO NOT EDIT;
// Generated by ml/nn/runtime/test/specs/generate_vts_test.sh

      4 namespace add_broadcast_quant8 {
      5 std::vector<MixedTypedExample> examples = {
      6 // Generated add_broadcast_quant8 test
      7 #include "examples/add_broadcast_quant8.example.cpp"
      8 };
      9 // Generated model constructor
     10 #include "vts_models/add_broadcast_quant8.model.cpp"
     11 } // namespace add_broadcast_quant8
     12 TEST_F(NeuralnetworksHidlTest, add_broadcast_quant8) {
     13     generated_tests::Execute(device,
     14                              add_broadcast_quant8::createTestModel,
     15                              add_broadcast_quant8::is_ignored,
     16                              add_broadcast_quant8::examples);
     17 }
     18 
     19 namespace add {
     20 std::vector<MixedTypedExample> examples = {
     21 // Generated add test
     22 #include "examples/add.example.cpp"
     23 };
     24 // Generated model constructor
     25 #include "vts_models/add.model.cpp"
     26 } // namespace add
     27 TEST_F(NeuralnetworksHidlTest, add) {
     28     generated_tests::Execute(device,
     29                              add::createTestModel,
     30                              add::is_ignored,
     31                              add::examples);
     32 }
     33 
     34 namespace add_quant8 {
     35 std::vector<MixedTypedExample> examples = {
     36 // Generated add_quant8 test
     37 #include "examples/add_quant8.example.cpp"
     38 };
     39 // Generated model constructor
     40 #include "vts_models/add_quant8.model.cpp"
     41 } // namespace add_quant8
     42 TEST_F(NeuralnetworksHidlTest, add_quant8) {
     43     generated_tests::Execute(device,
     44                              add_quant8::createTestModel,
     45                              add_quant8::is_ignored,
     46                              add_quant8::examples);
     47 }
     48 
     49 namespace avg_pool_float_1 {
     50 std::vector<MixedTypedExample> examples = {
     51 // Generated avg_pool_float_1 test
     52 #include "examples/avg_pool_float_1.example.cpp"
     53 };
     54 // Generated model constructor
     55 #include "vts_models/avg_pool_float_1.model.cpp"
     56 } // namespace avg_pool_float_1
     57 TEST_F(NeuralnetworksHidlTest, avg_pool_float_1) {
     58     generated_tests::Execute(device,
     59                              avg_pool_float_1::createTestModel,
     60                              avg_pool_float_1::is_ignored,
     61                              avg_pool_float_1::examples);
     62 }
     63 
     64 namespace avg_pool_float_2 {
     65 std::vector<MixedTypedExample> examples = {
     66 // Generated avg_pool_float_2 test
     67 #include "examples/avg_pool_float_2.example.cpp"
     68 };
     69 // Generated model constructor
     70 #include "vts_models/avg_pool_float_2.model.cpp"
     71 } // namespace avg_pool_float_2
     72 TEST_F(NeuralnetworksHidlTest, avg_pool_float_2) {
     73     generated_tests::Execute(device,
     74                              avg_pool_float_2::createTestModel,
     75                              avg_pool_float_2::is_ignored,
     76                              avg_pool_float_2::examples);
     77 }
     78 
     79 namespace avg_pool_float_3 {
     80 std::vector<MixedTypedExample> examples = {
     81 // Generated avg_pool_float_3 test
     82 #include "examples/avg_pool_float_3.example.cpp"
     83 };
     84 // Generated model constructor
     85 #include "vts_models/avg_pool_float_3.model.cpp"
     86 } // namespace avg_pool_float_3
     87 TEST_F(NeuralnetworksHidlTest, avg_pool_float_3) {
     88     generated_tests::Execute(device,
     89                              avg_pool_float_3::createTestModel,
     90                              avg_pool_float_3::is_ignored,
     91                              avg_pool_float_3::examples);
     92 }
     93 
     94 namespace avg_pool_float_4 {
     95 std::vector<MixedTypedExample> examples = {
     96 // Generated avg_pool_float_4 test
     97 #include "examples/avg_pool_float_4.example.cpp"
     98 };
     99 // Generated model constructor
    100 #include "vts_models/avg_pool_float_4.model.cpp"
    101 } // namespace avg_pool_float_4
    102 TEST_F(NeuralnetworksHidlTest, avg_pool_float_4) {
    103     generated_tests::Execute(device,
    104                              avg_pool_float_4::createTestModel,
    105                              avg_pool_float_4::is_ignored,
    106                              avg_pool_float_4::examples);
    107 }
    108 
    109 namespace avg_pool_quant8_1 {
    110 std::vector<MixedTypedExample> examples = {
    111 // Generated avg_pool_quant8_1 test
    112 #include "examples/avg_pool_quant8_1.example.cpp"
    113 };
    114 // Generated model constructor
    115 #include "vts_models/avg_pool_quant8_1.model.cpp"
    116 } // namespace avg_pool_quant8_1
    117 TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_1) {
    118     generated_tests::Execute(device,
    119                              avg_pool_quant8_1::createTestModel,
    120                              avg_pool_quant8_1::is_ignored,
    121                              avg_pool_quant8_1::examples);
    122 }
    123 
    124 namespace avg_pool_quant8_2 {
    125 std::vector<MixedTypedExample> examples = {
    126 // Generated avg_pool_quant8_2 test
    127 #include "examples/avg_pool_quant8_2.example.cpp"
    128 };
    129 // Generated model constructor
    130 #include "vts_models/avg_pool_quant8_2.model.cpp"
    131 } // namespace avg_pool_quant8_2
    132 TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_2) {
    133     generated_tests::Execute(device,
    134                              avg_pool_quant8_2::createTestModel,
    135                              avg_pool_quant8_2::is_ignored,
    136                              avg_pool_quant8_2::examples);
    137 }
    138 
    139 namespace avg_pool_quant8_3 {
    140 std::vector<MixedTypedExample> examples = {
    141 // Generated avg_pool_quant8_3 test
    142 #include "examples/avg_pool_quant8_3.example.cpp"
    143 };
    144 // Generated model constructor
    145 #include "vts_models/avg_pool_quant8_3.model.cpp"
    146 } // namespace avg_pool_quant8_3
    147 TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_3) {
    148     generated_tests::Execute(device,
    149                              avg_pool_quant8_3::createTestModel,
    150                              avg_pool_quant8_3::is_ignored,
    151                              avg_pool_quant8_3::examples);
    152 }
    153 
    154 namespace avg_pool_quant8_4 {
    155 std::vector<MixedTypedExample> examples = {
    156 // Generated avg_pool_quant8_4 test
    157 #include "examples/avg_pool_quant8_4.example.cpp"
    158 };
    159 // Generated model constructor
    160 #include "vts_models/avg_pool_quant8_4.model.cpp"
    161 } // namespace avg_pool_quant8_4
    162 TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_4) {
    163     generated_tests::Execute(device,
    164                              avg_pool_quant8_4::createTestModel,
    165                              avg_pool_quant8_4::is_ignored,
    166                              avg_pool_quant8_4::examples);
    167 }
    168 
    169 namespace concat_float_1 {
    170 std::vector<MixedTypedExample> examples = {
    171 // Generated concat_float_1 test
    172 #include "examples/concat_float_1.example.cpp"
    173 };
    174 // Generated model constructor
    175 #include "vts_models/concat_float_1.model.cpp"
    176 } // namespace concat_float_1
    177 TEST_F(NeuralnetworksHidlTest, concat_float_1) {
    178     generated_tests::Execute(device,
    179                              concat_float_1::createTestModel,
    180                              concat_float_1::is_ignored,
    181                              concat_float_1::examples);
    182 }
    183 
    184 namespace concat_float_2 {
    185 std::vector<MixedTypedExample> examples = {
    186 // Generated concat_float_2 test
    187 #include "examples/concat_float_2.example.cpp"
    188 };
    189 // Generated model constructor
    190 #include "vts_models/concat_float_2.model.cpp"
    191 } // namespace concat_float_2
    192 TEST_F(NeuralnetworksHidlTest, concat_float_2) {
    193     generated_tests::Execute(device,
    194                              concat_float_2::createTestModel,
    195                              concat_float_2::is_ignored,
    196                              concat_float_2::examples);
    197 }
    198 
    199 namespace concat_float_3 {
    200 std::vector<MixedTypedExample> examples = {
    201 // Generated concat_float_3 test
    202 #include "examples/concat_float_3.example.cpp"
    203 };
    204 // Generated model constructor
    205 #include "vts_models/concat_float_3.model.cpp"
    206 } // namespace concat_float_3
    207 TEST_F(NeuralnetworksHidlTest, concat_float_3) {
    208     generated_tests::Execute(device,
    209                              concat_float_3::createTestModel,
    210                              concat_float_3::is_ignored,
    211                              concat_float_3::examples);
    212 }
    213 
    214 namespace concat_quant8_1 {
    215 std::vector<MixedTypedExample> examples = {
    216 // Generated concat_quant8_1 test
    217 #include "examples/concat_quant8_1.example.cpp"
    218 };
    219 // Generated model constructor
    220 #include "vts_models/concat_quant8_1.model.cpp"
    221 } // namespace concat_quant8_1
    222 TEST_F(NeuralnetworksHidlTest, concat_quant8_1) {
    223     generated_tests::Execute(device,
    224                              concat_quant8_1::createTestModel,
    225                              concat_quant8_1::is_ignored,
    226                              concat_quant8_1::examples);
    227 }
    228 
    229 namespace concat_quant8_2 {
    230 std::vector<MixedTypedExample> examples = {
    231 // Generated concat_quant8_2 test
    232 #include "examples/concat_quant8_2.example.cpp"
    233 };
    234 // Generated model constructor
    235 #include "vts_models/concat_quant8_2.model.cpp"
    236 } // namespace concat_quant8_2
    237 TEST_F(NeuralnetworksHidlTest, concat_quant8_2) {
    238     generated_tests::Execute(device,
    239                              concat_quant8_2::createTestModel,
    240                              concat_quant8_2::is_ignored,
    241                              concat_quant8_2::examples);
    242 }
    243 
    244 namespace concat_quant8_3 {
    245 std::vector<MixedTypedExample> examples = {
    246 // Generated concat_quant8_3 test
    247 #include "examples/concat_quant8_3.example.cpp"
    248 };
    249 // Generated model constructor
    250 #include "vts_models/concat_quant8_3.model.cpp"
    251 } // namespace concat_quant8_3
    252 TEST_F(NeuralnetworksHidlTest, concat_quant8_3) {
    253     generated_tests::Execute(device,
    254                              concat_quant8_3::createTestModel,
    255                              concat_quant8_3::is_ignored,
    256                              concat_quant8_3::examples);
    257 }
    258 
    259 namespace conv_float_channels {
    260 std::vector<MixedTypedExample> examples = {
    261 // Generated conv_float_channels test
    262 #include "examples/conv_float_channels.example.cpp"
    263 };
    264 // Generated model constructor
    265 #include "vts_models/conv_float_channels.model.cpp"
    266 } // namespace conv_float_channels
    267 TEST_F(NeuralnetworksHidlTest, conv_float_channels) {
    268     generated_tests::Execute(device,
    269                              conv_float_channels::createTestModel,
    270                              conv_float_channels::is_ignored,
    271                              conv_float_channels::examples);
    272 }
    273 
    274 namespace conv_float_channels_weights_as_inputs {
    275 std::vector<MixedTypedExample> examples = {
    276 // Generated conv_float_channels_weights_as_inputs test
    277 #include "examples/conv_float_channels_weights_as_inputs.example.cpp"
    278 };
    279 // Generated model constructor
    280 #include "vts_models/conv_float_channels_weights_as_inputs.model.cpp"
    281 } // namespace conv_float_channels_weights_as_inputs
    282 TEST_F(NeuralnetworksHidlTest, conv_float_channels_weights_as_inputs) {
    283     generated_tests::Execute(device,
    284                              conv_float_channels_weights_as_inputs::createTestModel,
    285                              conv_float_channels_weights_as_inputs::is_ignored,
    286                              conv_float_channels_weights_as_inputs::examples);
    287 }
    288 
    289 namespace conv_float_large {
    290 std::vector<MixedTypedExample> examples = {
    291 // Generated conv_float_large test
    292 #include "examples/conv_float_large.example.cpp"
    293 };
    294 // Generated model constructor
    295 #include "vts_models/conv_float_large.model.cpp"
    296 } // namespace conv_float_large
    297 TEST_F(NeuralnetworksHidlTest, conv_float_large) {
    298     generated_tests::Execute(device,
    299                              conv_float_large::createTestModel,
    300                              conv_float_large::is_ignored,
    301                              conv_float_large::examples);
    302 }
    303 
    304 namespace conv_float_large_weights_as_inputs {
    305 std::vector<MixedTypedExample> examples = {
    306 // Generated conv_float_large_weights_as_inputs test
    307 #include "examples/conv_float_large_weights_as_inputs.example.cpp"
    308 };
    309 // Generated model constructor
    310 #include "vts_models/conv_float_large_weights_as_inputs.model.cpp"
    311 } // namespace conv_float_large_weights_as_inputs
    312 TEST_F(NeuralnetworksHidlTest, conv_float_large_weights_as_inputs) {
    313     generated_tests::Execute(device,
    314                              conv_float_large_weights_as_inputs::createTestModel,
    315                              conv_float_large_weights_as_inputs::is_ignored,
    316                              conv_float_large_weights_as_inputs::examples);
    317 }
    318 
    319 namespace conv_float {
    320 std::vector<MixedTypedExample> examples = {
    321 // Generated conv_float test
    322 #include "examples/conv_float.example.cpp"
    323 };
    324 // Generated model constructor
    325 #include "vts_models/conv_float.model.cpp"
    326 } // namespace conv_float
    327 TEST_F(NeuralnetworksHidlTest, conv_float) {
    328     generated_tests::Execute(device,
    329                              conv_float::createTestModel,
    330                              conv_float::is_ignored,
    331                              conv_float::examples);
    332 }
    333 
    334 namespace conv_float_weights_as_inputs {
    335 std::vector<MixedTypedExample> examples = {
    336 // Generated conv_float_weights_as_inputs test
    337 #include "examples/conv_float_weights_as_inputs.example.cpp"
    338 };
    339 // Generated model constructor
    340 #include "vts_models/conv_float_weights_as_inputs.model.cpp"
    341 } // namespace conv_float_weights_as_inputs
    342 TEST_F(NeuralnetworksHidlTest, conv_float_weights_as_inputs) {
    343     generated_tests::Execute(device,
    344                              conv_float_weights_as_inputs::createTestModel,
    345                              conv_float_weights_as_inputs::is_ignored,
    346                              conv_float_weights_as_inputs::examples);
    347 }
    348 
    349 namespace conv_quant8_channels {
    350 std::vector<MixedTypedExample> examples = {
    351 // Generated conv_quant8_channels test
    352 #include "examples/conv_quant8_channels.example.cpp"
    353 };
    354 // Generated model constructor
    355 #include "vts_models/conv_quant8_channels.model.cpp"
    356 } // namespace conv_quant8_channels
    357 TEST_F(NeuralnetworksHidlTest, conv_quant8_channels) {
    358     generated_tests::Execute(device,
    359                              conv_quant8_channels::createTestModel,
    360                              conv_quant8_channels::is_ignored,
    361                              conv_quant8_channels::examples);
    362 }
    363 
    364 namespace conv_quant8_channels_weights_as_inputs {
    365 std::vector<MixedTypedExample> examples = {
    366 // Generated conv_quant8_channels_weights_as_inputs test
    367 #include "examples/conv_quant8_channels_weights_as_inputs.example.cpp"
    368 };
    369 // Generated model constructor
    370 #include "vts_models/conv_quant8_channels_weights_as_inputs.model.cpp"
    371 } // namespace conv_quant8_channels_weights_as_inputs
    372 TEST_F(NeuralnetworksHidlTest, conv_quant8_channels_weights_as_inputs) {
    373     generated_tests::Execute(device,
    374                              conv_quant8_channels_weights_as_inputs::createTestModel,
    375                              conv_quant8_channels_weights_as_inputs::is_ignored,
    376                              conv_quant8_channels_weights_as_inputs::examples);
    377 }
    378 
    379 namespace conv_quant8_large {
    380 std::vector<MixedTypedExample> examples = {
    381 // Generated conv_quant8_large test
    382 #include "examples/conv_quant8_large.example.cpp"
    383 };
    384 // Generated model constructor
    385 #include "vts_models/conv_quant8_large.model.cpp"
    386 } // namespace conv_quant8_large
    387 TEST_F(NeuralnetworksHidlTest, conv_quant8_large) {
    388     generated_tests::Execute(device,
    389                              conv_quant8_large::createTestModel,
    390                              conv_quant8_large::is_ignored,
    391                              conv_quant8_large::examples);
    392 }
    393 
    394 namespace conv_quant8_large_weights_as_inputs {
    395 std::vector<MixedTypedExample> examples = {
    396 // Generated conv_quant8_large_weights_as_inputs test
    397 #include "examples/conv_quant8_large_weights_as_inputs.example.cpp"
    398 };
    399 // Generated model constructor
    400 #include "vts_models/conv_quant8_large_weights_as_inputs.model.cpp"
    401 } // namespace conv_quant8_large_weights_as_inputs
    402 TEST_F(NeuralnetworksHidlTest, conv_quant8_large_weights_as_inputs) {
    403     generated_tests::Execute(device,
    404                              conv_quant8_large_weights_as_inputs::createTestModel,
    405                              conv_quant8_large_weights_as_inputs::is_ignored,
    406                              conv_quant8_large_weights_as_inputs::examples);
    407 }
    408 
    409 namespace conv_quant8 {
    410 std::vector<MixedTypedExample> examples = {
    411 // Generated conv_quant8 test
    412 #include "examples/conv_quant8.example.cpp"
    413 };
    414 // Generated model constructor
    415 #include "vts_models/conv_quant8.model.cpp"
    416 } // namespace conv_quant8
    417 TEST_F(NeuralnetworksHidlTest, conv_quant8) {
    418     generated_tests::Execute(device,
    419                              conv_quant8::createTestModel,
    420                              conv_quant8::is_ignored,
    421                              conv_quant8::examples);
    422 }
    423 
    424 namespace conv_quant8_overflow {
    425 std::vector<MixedTypedExample> examples = {
    426 // Generated conv_quant8_overflow test
    427 #include "examples/conv_quant8_overflow.example.cpp"
    428 };
    429 // Generated model constructor
    430 #include "vts_models/conv_quant8_overflow.model.cpp"
    431 } // namespace conv_quant8_overflow
    432 TEST_F(NeuralnetworksHidlTest, conv_quant8_overflow) {
    433     generated_tests::Execute(device,
    434                              conv_quant8_overflow::createTestModel,
    435                              conv_quant8_overflow::is_ignored,
    436                              conv_quant8_overflow::examples);
    437 }
    438 
    439 namespace conv_quant8_overflow_weights_as_inputs {
    440 std::vector<MixedTypedExample> examples = {
    441 // Generated conv_quant8_overflow_weights_as_inputs test
    442 #include "examples/conv_quant8_overflow_weights_as_inputs.example.cpp"
    443 };
    444 // Generated model constructor
    445 #include "vts_models/conv_quant8_overflow_weights_as_inputs.model.cpp"
    446 } // namespace conv_quant8_overflow_weights_as_inputs
    447 TEST_F(NeuralnetworksHidlTest, conv_quant8_overflow_weights_as_inputs) {
    448     generated_tests::Execute(device,
    449                              conv_quant8_overflow_weights_as_inputs::createTestModel,
    450                              conv_quant8_overflow_weights_as_inputs::is_ignored,
    451                              conv_quant8_overflow_weights_as_inputs::examples);
    452 }
    453 
    454 namespace conv_quant8_weights_as_inputs {
    455 std::vector<MixedTypedExample> examples = {
    456 // Generated conv_quant8_weights_as_inputs test
    457 #include "examples/conv_quant8_weights_as_inputs.example.cpp"
    458 };
    459 // Generated model constructor
    460 #include "vts_models/conv_quant8_weights_as_inputs.model.cpp"
    461 } // namespace conv_quant8_weights_as_inputs
    462 TEST_F(NeuralnetworksHidlTest, conv_quant8_weights_as_inputs) {
    463     generated_tests::Execute(device,
    464                              conv_quant8_weights_as_inputs::createTestModel,
    465                              conv_quant8_weights_as_inputs::is_ignored,
    466                              conv_quant8_weights_as_inputs::examples);
    467 }
    468 
    469 namespace depth_to_space_float_1 {
    470 std::vector<MixedTypedExample> examples = {
    471 // Generated depth_to_space_float_1 test
    472 #include "examples/depth_to_space_float_1.example.cpp"
    473 };
    474 // Generated model constructor
    475 #include "vts_models/depth_to_space_float_1.model.cpp"
    476 } // namespace depth_to_space_float_1
    477 TEST_F(NeuralnetworksHidlTest, depth_to_space_float_1) {
    478     generated_tests::Execute(device,
    479                              depth_to_space_float_1::createTestModel,
    480                              depth_to_space_float_1::is_ignored,
    481                              depth_to_space_float_1::examples);
    482 }
    483 
    484 namespace depth_to_space_float_2 {
    485 std::vector<MixedTypedExample> examples = {
    486 // Generated depth_to_space_float_2 test
    487 #include "examples/depth_to_space_float_2.example.cpp"
    488 };
    489 // Generated model constructor
    490 #include "vts_models/depth_to_space_float_2.model.cpp"
    491 } // namespace depth_to_space_float_2
    492 TEST_F(NeuralnetworksHidlTest, depth_to_space_float_2) {
    493     generated_tests::Execute(device,
    494                              depth_to_space_float_2::createTestModel,
    495                              depth_to_space_float_2::is_ignored,
    496                              depth_to_space_float_2::examples);
    497 }
    498 
    499 namespace depth_to_space_float_3 {
    500 std::vector<MixedTypedExample> examples = {
    501 // Generated depth_to_space_float_3 test
    502 #include "examples/depth_to_space_float_3.example.cpp"
    503 };
    504 // Generated model constructor
    505 #include "vts_models/depth_to_space_float_3.model.cpp"
    506 } // namespace depth_to_space_float_3
    507 TEST_F(NeuralnetworksHidlTest, depth_to_space_float_3) {
    508     generated_tests::Execute(device,
    509                              depth_to_space_float_3::createTestModel,
    510                              depth_to_space_float_3::is_ignored,
    511                              depth_to_space_float_3::examples);
    512 }
    513 
    514 namespace depth_to_space_quant8_1 {
    515 std::vector<MixedTypedExample> examples = {
    516 // Generated depth_to_space_quant8_1 test
    517 #include "examples/depth_to_space_quant8_1.example.cpp"
    518 };
    519 // Generated model constructor
    520 #include "vts_models/depth_to_space_quant8_1.model.cpp"
    521 } // namespace depth_to_space_quant8_1
    522 TEST_F(NeuralnetworksHidlTest, depth_to_space_quant8_1) {
    523     generated_tests::Execute(device,
    524                              depth_to_space_quant8_1::createTestModel,
    525                              depth_to_space_quant8_1::is_ignored,
    526                              depth_to_space_quant8_1::examples);
    527 }
    528 
    529 namespace depth_to_space_quant8_2 {
    530 std::vector<MixedTypedExample> examples = {
    531 // Generated depth_to_space_quant8_2 test
    532 #include "examples/depth_to_space_quant8_2.example.cpp"
    533 };
    534 // Generated model constructor
    535 #include "vts_models/depth_to_space_quant8_2.model.cpp"
    536 } // namespace depth_to_space_quant8_2
    537 TEST_F(NeuralnetworksHidlTest, depth_to_space_quant8_2) {
    538     generated_tests::Execute(device,
    539                              depth_to_space_quant8_2::createTestModel,
    540                              depth_to_space_quant8_2::is_ignored,
    541                              depth_to_space_quant8_2::examples);
    542 }
    543 
    544 namespace depthwise_conv2d_float_large_2 {
    545 std::vector<MixedTypedExample> examples = {
    546 // Generated depthwise_conv2d_float_large_2 test
    547 #include "examples/depthwise_conv2d_float_large_2.example.cpp"
    548 };
    549 // Generated model constructor
    550 #include "vts_models/depthwise_conv2d_float_large_2.model.cpp"
    551 } // namespace depthwise_conv2d_float_large_2
    552 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large_2) {
    553     generated_tests::Execute(device,
    554                              depthwise_conv2d_float_large_2::createTestModel,
    555                              depthwise_conv2d_float_large_2::is_ignored,
    556                              depthwise_conv2d_float_large_2::examples);
    557 }
    558 
    559 namespace depthwise_conv2d_float_large_2_weights_as_inputs {
    560 std::vector<MixedTypedExample> examples = {
    561 // Generated depthwise_conv2d_float_large_2_weights_as_inputs test
    562 #include "examples/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp"
    563 };
    564 // Generated model constructor
    565 #include "vts_models/depthwise_conv2d_float_large_2_weights_as_inputs.model.cpp"
    566 } // namespace depthwise_conv2d_float_large_2_weights_as_inputs
    567 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large_2_weights_as_inputs) {
    568     generated_tests::Execute(device,
    569                              depthwise_conv2d_float_large_2_weights_as_inputs::createTestModel,
    570                              depthwise_conv2d_float_large_2_weights_as_inputs::is_ignored,
    571                              depthwise_conv2d_float_large_2_weights_as_inputs::examples);
    572 }
    573 
    574 namespace depthwise_conv2d_float_large {
    575 std::vector<MixedTypedExample> examples = {
    576 // Generated depthwise_conv2d_float_large test
    577 #include "examples/depthwise_conv2d_float_large.example.cpp"
    578 };
    579 // Generated model constructor
    580 #include "vts_models/depthwise_conv2d_float_large.model.cpp"
    581 } // namespace depthwise_conv2d_float_large
    582 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large) {
    583     generated_tests::Execute(device,
    584                              depthwise_conv2d_float_large::createTestModel,
    585                              depthwise_conv2d_float_large::is_ignored,
    586                              depthwise_conv2d_float_large::examples);
    587 }
    588 
    589 namespace depthwise_conv2d_float_large_weights_as_inputs {
    590 std::vector<MixedTypedExample> examples = {
    591 // Generated depthwise_conv2d_float_large_weights_as_inputs test
    592 #include "examples/depthwise_conv2d_float_large_weights_as_inputs.example.cpp"
    593 };
    594 // Generated model constructor
    595 #include "vts_models/depthwise_conv2d_float_large_weights_as_inputs.model.cpp"
    596 } // namespace depthwise_conv2d_float_large_weights_as_inputs
    597 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large_weights_as_inputs) {
    598     generated_tests::Execute(device,
    599                              depthwise_conv2d_float_large_weights_as_inputs::createTestModel,
    600                              depthwise_conv2d_float_large_weights_as_inputs::is_ignored,
    601                              depthwise_conv2d_float_large_weights_as_inputs::examples);
    602 }
    603 
    604 namespace depthwise_conv2d_float {
    605 std::vector<MixedTypedExample> examples = {
    606 // Generated depthwise_conv2d_float test
    607 #include "examples/depthwise_conv2d_float.example.cpp"
    608 };
    609 // Generated model constructor
    610 #include "vts_models/depthwise_conv2d_float.model.cpp"
    611 } // namespace depthwise_conv2d_float
    612 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float) {
    613     generated_tests::Execute(device,
    614                              depthwise_conv2d_float::createTestModel,
    615                              depthwise_conv2d_float::is_ignored,
    616                              depthwise_conv2d_float::examples);
    617 }
    618 
    619 namespace depthwise_conv2d_float_weights_as_inputs {
    620 std::vector<MixedTypedExample> examples = {
    621 // Generated depthwise_conv2d_float_weights_as_inputs test
    622 #include "examples/depthwise_conv2d_float_weights_as_inputs.example.cpp"
    623 };
    624 // Generated model constructor
    625 #include "vts_models/depthwise_conv2d_float_weights_as_inputs.model.cpp"
    626 } // namespace depthwise_conv2d_float_weights_as_inputs
    627 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_weights_as_inputs) {
    628     generated_tests::Execute(device,
    629                              depthwise_conv2d_float_weights_as_inputs::createTestModel,
    630                              depthwise_conv2d_float_weights_as_inputs::is_ignored,
    631                              depthwise_conv2d_float_weights_as_inputs::examples);
    632 }
    633 
// NOTE(review): This file is generated (see the "DO NOT EDIT" header) by
// ml/nn/runtime/test/specs/generate_vts_test.sh. Do not hand-edit these
// stanzas; change the test specs / generator and regenerate instead.
//
// Every stanza below follows the same generated pattern:
//   * a per-test namespace whose `examples` vector is populated by textually
//     including examples/<name>.example.cpp as initializer contents;
//   * an include of vts_models/<name>.model.cpp inside the same namespace,
//     which is expected to define the createTestModel and is_ignored symbols
//     referenced by the test body (confirm against the generator);
//   * a TEST_F on the NeuralnetworksHidlTest fixture that drives
//     generated_tests::Execute with the device under test, the model factory,
//     the ignored-output predicate, and the example data.

// Depthwise conv2d quant8 variants ("weights_as_inputs" = weights supplied as
// runtime inputs rather than constant operands).
namespace depthwise_conv2d_quant8_large {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_quant8_large test
#include "examples/depthwise_conv2d_quant8_large.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_quant8_large.model.cpp"
} // namespace depthwise_conv2d_quant8_large
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8_large) {
    generated_tests::Execute(device,
                             depthwise_conv2d_quant8_large::createTestModel,
                             depthwise_conv2d_quant8_large::is_ignored,
                             depthwise_conv2d_quant8_large::examples);
}

namespace depthwise_conv2d_quant8_large_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_quant8_large_weights_as_inputs test
#include "examples/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_quant8_large_weights_as_inputs.model.cpp"
} // namespace depthwise_conv2d_quant8_large_weights_as_inputs
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8_large_weights_as_inputs) {
    generated_tests::Execute(device,
                             depthwise_conv2d_quant8_large_weights_as_inputs::createTestModel,
                             depthwise_conv2d_quant8_large_weights_as_inputs::is_ignored,
                             depthwise_conv2d_quant8_large_weights_as_inputs::examples);
}

namespace depthwise_conv2d_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_quant8 test
#include "examples/depthwise_conv2d_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_quant8.model.cpp"
} // namespace depthwise_conv2d_quant8
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8) {
    generated_tests::Execute(device,
                             depthwise_conv2d_quant8::createTestModel,
                             depthwise_conv2d_quant8::is_ignored,
                             depthwise_conv2d_quant8::examples);
}

namespace depthwise_conv2d_quant8_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_quant8_weights_as_inputs test
#include "examples/depthwise_conv2d_quant8_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_quant8_weights_as_inputs.model.cpp"
} // namespace depthwise_conv2d_quant8_weights_as_inputs
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8_weights_as_inputs) {
    generated_tests::Execute(device,
                             depthwise_conv2d_quant8_weights_as_inputs::createTestModel,
                             depthwise_conv2d_quant8_weights_as_inputs::is_ignored,
                             depthwise_conv2d_quant8_weights_as_inputs::examples);
}
    693 
// Generated VTS stanzas for the dequantize / embedding_lookup / floor specs.
// DO NOT EDIT by hand; regenerate with
// ml/nn/runtime/test/specs/generate_vts_test.sh. Each stanza: a namespace with
// example data and the included model constructor, plus a TEST_F driving
// generated_tests::Execute.
namespace dequantize {
std::vector<MixedTypedExample> examples = {
// Generated dequantize test
#include "examples/dequantize.example.cpp"
};
// Generated model constructor
#include "vts_models/dequantize.model.cpp"
} // namespace dequantize
TEST_F(NeuralnetworksHidlTest, dequantize) {
    generated_tests::Execute(device,
                             dequantize::createTestModel,
                             dequantize::is_ignored,
                             dequantize::examples);
}

namespace embedding_lookup {
std::vector<MixedTypedExample> examples = {
// Generated embedding_lookup test
#include "examples/embedding_lookup.example.cpp"
};
// Generated model constructor
#include "vts_models/embedding_lookup.model.cpp"
} // namespace embedding_lookup
TEST_F(NeuralnetworksHidlTest, embedding_lookup) {
    generated_tests::Execute(device,
                             embedding_lookup::createTestModel,
                             embedding_lookup::is_ignored,
                             embedding_lookup::examples);
}

namespace floor {
std::vector<MixedTypedExample> examples = {
// Generated floor test
#include "examples/floor.example.cpp"
};
// Generated model constructor
#include "vts_models/floor.model.cpp"
} // namespace floor
TEST_F(NeuralnetworksHidlTest, floor) {
    generated_tests::Execute(device,
                             floor::createTestModel,
                             floor::is_ignored,
                             floor::examples);
}
    738 
// Generated VTS stanzas for the fully_connected_* specs (float/quant8,
// normal/large, weights as constants vs. runtime inputs). DO NOT EDIT by
// hand; regenerate with ml/nn/runtime/test/specs/generate_vts_test.sh.
namespace fully_connected_float_large {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float_large test
#include "examples/fully_connected_float_large.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_float_large.model.cpp"
} // namespace fully_connected_float_large
TEST_F(NeuralnetworksHidlTest, fully_connected_float_large) {
    generated_tests::Execute(device,
                             fully_connected_float_large::createTestModel,
                             fully_connected_float_large::is_ignored,
                             fully_connected_float_large::examples);
}

namespace fully_connected_float_large_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float_large_weights_as_inputs test
#include "examples/fully_connected_float_large_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_float_large_weights_as_inputs.model.cpp"
} // namespace fully_connected_float_large_weights_as_inputs
TEST_F(NeuralnetworksHidlTest, fully_connected_float_large_weights_as_inputs) {
    generated_tests::Execute(device,
                             fully_connected_float_large_weights_as_inputs::createTestModel,
                             fully_connected_float_large_weights_as_inputs::is_ignored,
                             fully_connected_float_large_weights_as_inputs::examples);
}

namespace fully_connected_float {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float test
#include "examples/fully_connected_float.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_float.model.cpp"
} // namespace fully_connected_float
TEST_F(NeuralnetworksHidlTest, fully_connected_float) {
    generated_tests::Execute(device,
                             fully_connected_float::createTestModel,
                             fully_connected_float::is_ignored,
                             fully_connected_float::examples);
}

namespace fully_connected_float_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float_weights_as_inputs test
#include "examples/fully_connected_float_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_float_weights_as_inputs.model.cpp"
} // namespace fully_connected_float_weights_as_inputs
TEST_F(NeuralnetworksHidlTest, fully_connected_float_weights_as_inputs) {
    generated_tests::Execute(device,
                             fully_connected_float_weights_as_inputs::createTestModel,
                             fully_connected_float_weights_as_inputs::is_ignored,
                             fully_connected_float_weights_as_inputs::examples);
}

namespace fully_connected_quant8_large {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_quant8_large test
#include "examples/fully_connected_quant8_large.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_quant8_large.model.cpp"
} // namespace fully_connected_quant8_large
TEST_F(NeuralnetworksHidlTest, fully_connected_quant8_large) {
    generated_tests::Execute(device,
                             fully_connected_quant8_large::createTestModel,
                             fully_connected_quant8_large::is_ignored,
                             fully_connected_quant8_large::examples);
}

namespace fully_connected_quant8_large_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_quant8_large_weights_as_inputs test
#include "examples/fully_connected_quant8_large_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_quant8_large_weights_as_inputs.model.cpp"
} // namespace fully_connected_quant8_large_weights_as_inputs
TEST_F(NeuralnetworksHidlTest, fully_connected_quant8_large_weights_as_inputs) {
    generated_tests::Execute(device,
                             fully_connected_quant8_large_weights_as_inputs::createTestModel,
                             fully_connected_quant8_large_weights_as_inputs::is_ignored,
                             fully_connected_quant8_large_weights_as_inputs::examples);
}

namespace fully_connected_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_quant8 test
#include "examples/fully_connected_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_quant8.model.cpp"
} // namespace fully_connected_quant8
TEST_F(NeuralnetworksHidlTest, fully_connected_quant8) {
    generated_tests::Execute(device,
                             fully_connected_quant8::createTestModel,
                             fully_connected_quant8::is_ignored,
                             fully_connected_quant8::examples);
}

namespace fully_connected_quant8_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_quant8_weights_as_inputs test
#include "examples/fully_connected_quant8_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_quant8_weights_as_inputs.model.cpp"
} // namespace fully_connected_quant8_weights_as_inputs
TEST_F(NeuralnetworksHidlTest, fully_connected_quant8_weights_as_inputs) {
    generated_tests::Execute(device,
                             fully_connected_quant8_weights_as_inputs::createTestModel,
                             fully_connected_quant8_weights_as_inputs::is_ignored,
                             fully_connected_quant8_weights_as_inputs::examples);
}
    858 
// Generated VTS stanzas for the hashtable_lookup_*, l2_normalization*, and
// l2_pool_* specs. DO NOT EDIT by hand; regenerate with
// ml/nn/runtime/test/specs/generate_vts_test.sh.
namespace hashtable_lookup_float {
std::vector<MixedTypedExample> examples = {
// Generated hashtable_lookup_float test
#include "examples/hashtable_lookup_float.example.cpp"
};
// Generated model constructor
#include "vts_models/hashtable_lookup_float.model.cpp"
} // namespace hashtable_lookup_float
TEST_F(NeuralnetworksHidlTest, hashtable_lookup_float) {
    generated_tests::Execute(device,
                             hashtable_lookup_float::createTestModel,
                             hashtable_lookup_float::is_ignored,
                             hashtable_lookup_float::examples);
}

namespace hashtable_lookup_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated hashtable_lookup_quant8 test
#include "examples/hashtable_lookup_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/hashtable_lookup_quant8.model.cpp"
} // namespace hashtable_lookup_quant8
TEST_F(NeuralnetworksHidlTest, hashtable_lookup_quant8) {
    generated_tests::Execute(device,
                             hashtable_lookup_quant8::createTestModel,
                             hashtable_lookup_quant8::is_ignored,
                             hashtable_lookup_quant8::examples);
}

namespace l2_normalization_large {
std::vector<MixedTypedExample> examples = {
// Generated l2_normalization_large test
#include "examples/l2_normalization_large.example.cpp"
};
// Generated model constructor
#include "vts_models/l2_normalization_large.model.cpp"
} // namespace l2_normalization_large
TEST_F(NeuralnetworksHidlTest, l2_normalization_large) {
    generated_tests::Execute(device,
                             l2_normalization_large::createTestModel,
                             l2_normalization_large::is_ignored,
                             l2_normalization_large::examples);
}

namespace l2_normalization {
std::vector<MixedTypedExample> examples = {
// Generated l2_normalization test
#include "examples/l2_normalization.example.cpp"
};
// Generated model constructor
#include "vts_models/l2_normalization.model.cpp"
} // namespace l2_normalization
TEST_F(NeuralnetworksHidlTest, l2_normalization) {
    generated_tests::Execute(device,
                             l2_normalization::createTestModel,
                             l2_normalization::is_ignored,
                             l2_normalization::examples);
}

namespace l2_pool_float_large {
std::vector<MixedTypedExample> examples = {
// Generated l2_pool_float_large test
#include "examples/l2_pool_float_large.example.cpp"
};
// Generated model constructor
#include "vts_models/l2_pool_float_large.model.cpp"
} // namespace l2_pool_float_large
TEST_F(NeuralnetworksHidlTest, l2_pool_float_large) {
    generated_tests::Execute(device,
                             l2_pool_float_large::createTestModel,
                             l2_pool_float_large::is_ignored,
                             l2_pool_float_large::examples);
}

namespace l2_pool_float {
std::vector<MixedTypedExample> examples = {
// Generated l2_pool_float test
#include "examples/l2_pool_float.example.cpp"
};
// Generated model constructor
#include "vts_models/l2_pool_float.model.cpp"
} // namespace l2_pool_float
TEST_F(NeuralnetworksHidlTest, l2_pool_float) {
    generated_tests::Execute(device,
                             l2_pool_float::createTestModel,
                             l2_pool_float::is_ignored,
                             l2_pool_float::examples);
}
    948 
// Generated VTS stanzas for the local_response_norm_float_* and logistic_*
// specs. DO NOT EDIT by hand; regenerate with
// ml/nn/runtime/test/specs/generate_vts_test.sh.
namespace local_response_norm_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_1 test
#include "examples/local_response_norm_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_1.model.cpp"
} // namespace local_response_norm_float_1
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_1) {
    generated_tests::Execute(device,
                             local_response_norm_float_1::createTestModel,
                             local_response_norm_float_1::is_ignored,
                             local_response_norm_float_1::examples);
}

namespace local_response_norm_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_2 test
#include "examples/local_response_norm_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_2.model.cpp"
} // namespace local_response_norm_float_2
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_2) {
    generated_tests::Execute(device,
                             local_response_norm_float_2::createTestModel,
                             local_response_norm_float_2::is_ignored,
                             local_response_norm_float_2::examples);
}

namespace local_response_norm_float_3 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_3 test
#include "examples/local_response_norm_float_3.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_3.model.cpp"
} // namespace local_response_norm_float_3
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_3) {
    generated_tests::Execute(device,
                             local_response_norm_float_3::createTestModel,
                             local_response_norm_float_3::is_ignored,
                             local_response_norm_float_3::examples);
}

namespace local_response_norm_float_4 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_4 test
#include "examples/local_response_norm_float_4.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_4.model.cpp"
} // namespace local_response_norm_float_4
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_4) {
    generated_tests::Execute(device,
                             local_response_norm_float_4::createTestModel,
                             local_response_norm_float_4::is_ignored,
                             local_response_norm_float_4::examples);
}

namespace logistic_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_float_1 test
#include "examples/logistic_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_float_1.model.cpp"
} // namespace logistic_float_1
TEST_F(NeuralnetworksHidlTest, logistic_float_1) {
    generated_tests::Execute(device,
                             logistic_float_1::createTestModel,
                             logistic_float_1::is_ignored,
                             logistic_float_1::examples);
}

namespace logistic_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_float_2 test
#include "examples/logistic_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_float_2.model.cpp"
} // namespace logistic_float_2
TEST_F(NeuralnetworksHidlTest, logistic_float_2) {
    generated_tests::Execute(device,
                             logistic_float_2::createTestModel,
                             logistic_float_2::is_ignored,
                             logistic_float_2::examples);
}

namespace logistic_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_quant8_1 test
#include "examples/logistic_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_quant8_1.model.cpp"
} // namespace logistic_quant8_1
TEST_F(NeuralnetworksHidlTest, logistic_quant8_1) {
    generated_tests::Execute(device,
                             logistic_quant8_1::createTestModel,
                             logistic_quant8_1::is_ignored,
                             logistic_quant8_1::examples);
}

namespace logistic_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_quant8_2 test
#include "examples/logistic_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_quant8_2.model.cpp"
} // namespace logistic_quant8_2
TEST_F(NeuralnetworksHidlTest, logistic_quant8_2) {
    generated_tests::Execute(device,
                             logistic_quant8_2::createTestModel,
                             logistic_quant8_2::is_ignored,
                             logistic_quant8_2::examples);
}
   1068 
// Generated VTS stanzas for the lsh_projection* and lstm* specs. DO NOT EDIT
// by hand; regenerate with ml/nn/runtime/test/specs/generate_vts_test.sh.
namespace lsh_projection_2 {
std::vector<MixedTypedExample> examples = {
// Generated lsh_projection_2 test
#include "examples/lsh_projection_2.example.cpp"
};
// Generated model constructor
#include "vts_models/lsh_projection_2.model.cpp"
} // namespace lsh_projection_2
TEST_F(NeuralnetworksHidlTest, lsh_projection_2) {
    generated_tests::Execute(device,
                             lsh_projection_2::createTestModel,
                             lsh_projection_2::is_ignored,
                             lsh_projection_2::examples);
}

namespace lsh_projection {
std::vector<MixedTypedExample> examples = {
// Generated lsh_projection test
#include "examples/lsh_projection.example.cpp"
};
// Generated model constructor
#include "vts_models/lsh_projection.model.cpp"
} // namespace lsh_projection
TEST_F(NeuralnetworksHidlTest, lsh_projection) {
    generated_tests::Execute(device,
                             lsh_projection::createTestModel,
                             lsh_projection::is_ignored,
                             lsh_projection::examples);
}

namespace lsh_projection_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated lsh_projection_weights_as_inputs test
#include "examples/lsh_projection_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/lsh_projection_weights_as_inputs.model.cpp"
} // namespace lsh_projection_weights_as_inputs
TEST_F(NeuralnetworksHidlTest, lsh_projection_weights_as_inputs) {
    generated_tests::Execute(device,
                             lsh_projection_weights_as_inputs::createTestModel,
                             lsh_projection_weights_as_inputs::is_ignored,
                             lsh_projection_weights_as_inputs::examples);
}

namespace lstm2 {
std::vector<MixedTypedExample> examples = {
// Generated lstm2 test
#include "examples/lstm2.example.cpp"
};
// Generated model constructor
#include "vts_models/lstm2.model.cpp"
} // namespace lstm2
TEST_F(NeuralnetworksHidlTest, lstm2) {
    generated_tests::Execute(device,
                             lstm2::createTestModel,
                             lstm2::is_ignored,
                             lstm2::examples);
}

namespace lstm3 {
std::vector<MixedTypedExample> examples = {
// Generated lstm3 test
#include "examples/lstm3.example.cpp"
};
// Generated model constructor
#include "vts_models/lstm3.model.cpp"
} // namespace lstm3
TEST_F(NeuralnetworksHidlTest, lstm3) {
    generated_tests::Execute(device,
                             lstm3::createTestModel,
                             lstm3::is_ignored,
                             lstm3::examples);
}

namespace lstm {
std::vector<MixedTypedExample> examples = {
// Generated lstm test
#include "examples/lstm.example.cpp"
};
// Generated model constructor
#include "vts_models/lstm.model.cpp"
} // namespace lstm
TEST_F(NeuralnetworksHidlTest, lstm) {
    generated_tests::Execute(device,
                             lstm::createTestModel,
                             lstm::is_ignored,
                             lstm::examples);
}
   1158 
// Generated VTS stanzas for the max_pool_float_* and max_pool_quant8_* specs.
// DO NOT EDIT by hand; regenerate with
// ml/nn/runtime/test/specs/generate_vts_test.sh.
namespace max_pool_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_float_1 test
#include "examples/max_pool_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_float_1.model.cpp"
} // namespace max_pool_float_1
TEST_F(NeuralnetworksHidlTest, max_pool_float_1) {
    generated_tests::Execute(device,
                             max_pool_float_1::createTestModel,
                             max_pool_float_1::is_ignored,
                             max_pool_float_1::examples);
}

namespace max_pool_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_float_2 test
#include "examples/max_pool_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_float_2.model.cpp"
} // namespace max_pool_float_2
TEST_F(NeuralnetworksHidlTest, max_pool_float_2) {
    generated_tests::Execute(device,
                             max_pool_float_2::createTestModel,
                             max_pool_float_2::is_ignored,
                             max_pool_float_2::examples);
}

namespace max_pool_float_3 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_float_3 test
#include "examples/max_pool_float_3.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_float_3.model.cpp"
} // namespace max_pool_float_3
TEST_F(NeuralnetworksHidlTest, max_pool_float_3) {
    generated_tests::Execute(device,
                             max_pool_float_3::createTestModel,
                             max_pool_float_3::is_ignored,
                             max_pool_float_3::examples);
}

namespace max_pool_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_quant8_1 test
#include "examples/max_pool_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_quant8_1.model.cpp"
} // namespace max_pool_quant8_1
TEST_F(NeuralnetworksHidlTest, max_pool_quant8_1) {
    generated_tests::Execute(device,
                             max_pool_quant8_1::createTestModel,
                             max_pool_quant8_1::is_ignored,
                             max_pool_quant8_1::examples);
}

namespace max_pool_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_quant8_2 test
#include "examples/max_pool_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_quant8_2.model.cpp"
} // namespace max_pool_quant8_2
TEST_F(NeuralnetworksHidlTest, max_pool_quant8_2) {
    generated_tests::Execute(device,
                             max_pool_quant8_2::createTestModel,
                             max_pool_quant8_2::is_ignored,
                             max_pool_quant8_2::examples);
}

namespace max_pool_quant8_3 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_quant8_3 test
#include "examples/max_pool_quant8_3.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_quant8_3.model.cpp"
} // namespace max_pool_quant8_3
TEST_F(NeuralnetworksHidlTest, max_pool_quant8_3) {
    generated_tests::Execute(device,
                             max_pool_quant8_3::createTestModel,
                             max_pool_quant8_3::is_ignored,
                             max_pool_quant8_3::examples);
}
   1248 
// Generated VTS stanzas for the mul* specs (plain, broadcast, quant8, and
// fused-ReLU variants). DO NOT EDIT by hand; regenerate with
// ml/nn/runtime/test/specs/generate_vts_test.sh.
namespace mul_broadcast_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated mul_broadcast_quant8 test
#include "examples/mul_broadcast_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/mul_broadcast_quant8.model.cpp"
} // namespace mul_broadcast_quant8
TEST_F(NeuralnetworksHidlTest, mul_broadcast_quant8) {
    generated_tests::Execute(device,
                             mul_broadcast_quant8::createTestModel,
                             mul_broadcast_quant8::is_ignored,
                             mul_broadcast_quant8::examples);
}

namespace mul {
std::vector<MixedTypedExample> examples = {
// Generated mul test
#include "examples/mul.example.cpp"
};
// Generated model constructor
#include "vts_models/mul.model.cpp"
} // namespace mul
TEST_F(NeuralnetworksHidlTest, mul) {
    generated_tests::Execute(device,
                             mul::createTestModel,
                             mul::is_ignored,
                             mul::examples);
}

namespace mul_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated mul_quant8 test
#include "examples/mul_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/mul_quant8.model.cpp"
} // namespace mul_quant8
TEST_F(NeuralnetworksHidlTest, mul_quant8) {
    generated_tests::Execute(device,
                             mul_quant8::createTestModel,
                             mul_quant8::is_ignored,
                             mul_quant8::examples);
}

namespace mul_relu {
std::vector<MixedTypedExample> examples = {
// Generated mul_relu test
#include "examples/mul_relu.example.cpp"
};
// Generated model constructor
#include "vts_models/mul_relu.model.cpp"
} // namespace mul_relu
TEST_F(NeuralnetworksHidlTest, mul_relu) {
    generated_tests::Execute(device,
                             mul_relu::createTestModel,
                             mul_relu::is_ignored,
                             mul_relu::examples);
}
   1308 
   1309 namespace relu1_float_1 {
   1310 std::vector<MixedTypedExample> examples = {
   1311 // Generated relu1_float_1 test
   1312 #include "examples/relu1_float_1.example.cpp"
   1313 };
   1314 // Generated model constructor
   1315 #include "vts_models/relu1_float_1.model.cpp"
   1316 } // namespace relu1_float_1
   1317 TEST_F(NeuralnetworksHidlTest, relu1_float_1) {
   1318     generated_tests::Execute(device,
   1319                              relu1_float_1::createTestModel,
   1320                              relu1_float_1::is_ignored,
   1321                              relu1_float_1::examples);
   1322 }
   1323 
   1324 namespace relu1_float_2 {
   1325 std::vector<MixedTypedExample> examples = {
   1326 // Generated relu1_float_2 test
   1327 #include "examples/relu1_float_2.example.cpp"
   1328 };
   1329 // Generated model constructor
   1330 #include "vts_models/relu1_float_2.model.cpp"
   1331 } // namespace relu1_float_2
   1332 TEST_F(NeuralnetworksHidlTest, relu1_float_2) {
   1333     generated_tests::Execute(device,
   1334                              relu1_float_2::createTestModel,
   1335                              relu1_float_2::is_ignored,
   1336                              relu1_float_2::examples);
   1337 }
   1338 
// Generated test case "relu1_quant8_1": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu1_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu1_quant8_1 test
#include "examples/relu1_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu1_quant8_1.model.cpp"
} // namespace relu1_quant8_1
// Runs the relu1_quant8_1 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu1_quant8_1) {
    generated_tests::Execute(device,
                             relu1_quant8_1::createTestModel,
                             relu1_quant8_1::is_ignored,
                             relu1_quant8_1::examples);
}
   1353 
// Generated test case "relu1_quant8_2": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu1_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu1_quant8_2 test
#include "examples/relu1_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu1_quant8_2.model.cpp"
} // namespace relu1_quant8_2
// Runs the relu1_quant8_2 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu1_quant8_2) {
    generated_tests::Execute(device,
                             relu1_quant8_2::createTestModel,
                             relu1_quant8_2::is_ignored,
                             relu1_quant8_2::examples);
}
   1368 
// Generated test case "relu6_float_1": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu6_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_float_1 test
#include "examples/relu6_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_float_1.model.cpp"
} // namespace relu6_float_1
// Runs the relu6_float_1 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu6_float_1) {
    generated_tests::Execute(device,
                             relu6_float_1::createTestModel,
                             relu6_float_1::is_ignored,
                             relu6_float_1::examples);
}
   1383 
// Generated test case "relu6_float_2": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu6_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_float_2 test
#include "examples/relu6_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_float_2.model.cpp"
} // namespace relu6_float_2
// Runs the relu6_float_2 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu6_float_2) {
    generated_tests::Execute(device,
                             relu6_float_2::createTestModel,
                             relu6_float_2::is_ignored,
                             relu6_float_2::examples);
}
   1398 
// Generated test case "relu6_quant8_1": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu6_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_quant8_1 test
#include "examples/relu6_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_quant8_1.model.cpp"
} // namespace relu6_quant8_1
// Runs the relu6_quant8_1 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu6_quant8_1) {
    generated_tests::Execute(device,
                             relu6_quant8_1::createTestModel,
                             relu6_quant8_1::is_ignored,
                             relu6_quant8_1::examples);
}
   1413 
// Generated test case "relu6_quant8_2": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu6_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_quant8_2 test
#include "examples/relu6_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_quant8_2.model.cpp"
} // namespace relu6_quant8_2
// Runs the relu6_quant8_2 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu6_quant8_2) {
    generated_tests::Execute(device,
                             relu6_quant8_2::createTestModel,
                             relu6_quant8_2::is_ignored,
                             relu6_quant8_2::examples);
}
   1428 
// Generated test case "relu_float_1": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu_float_1 test
#include "examples/relu_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_float_1.model.cpp"
} // namespace relu_float_1
// Runs the relu_float_1 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu_float_1) {
    generated_tests::Execute(device,
                             relu_float_1::createTestModel,
                             relu_float_1::is_ignored,
                             relu_float_1::examples);
}
   1443 
// Generated test case "relu_float_2": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu_float_2 test
#include "examples/relu_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_float_2.model.cpp"
} // namespace relu_float_2
// Runs the relu_float_2 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu_float_2) {
    generated_tests::Execute(device,
                             relu_float_2::createTestModel,
                             relu_float_2::is_ignored,
                             relu_float_2::examples);
}
   1458 
// Generated test case "relu_quant8_1": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu_quant8_1 test
#include "examples/relu_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_quant8_1.model.cpp"
} // namespace relu_quant8_1
// Runs the relu_quant8_1 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu_quant8_1) {
    generated_tests::Execute(device,
                             relu_quant8_1::createTestModel,
                             relu_quant8_1::is_ignored,
                             relu_quant8_1::examples);
}
   1473 
// Generated test case "relu_quant8_2": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace relu_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu_quant8_2 test
#include "examples/relu_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_quant8_2.model.cpp"
} // namespace relu_quant8_2
// Runs the relu_quant8_2 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, relu_quant8_2) {
    generated_tests::Execute(device,
                             relu_quant8_2::createTestModel,
                             relu_quant8_2::is_ignored,
                             relu_quant8_2::examples);
}
   1488 
// Generated test case "reshape": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace reshape {
std::vector<MixedTypedExample> examples = {
// Generated reshape test
#include "examples/reshape.example.cpp"
};
// Generated model constructor
#include "vts_models/reshape.model.cpp"
} // namespace reshape
// Runs the reshape model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, reshape) {
    generated_tests::Execute(device,
                             reshape::createTestModel,
                             reshape::is_ignored,
                             reshape::examples);
}
   1503 
// Generated test case "reshape_quant8": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace reshape_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated reshape_quant8 test
#include "examples/reshape_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/reshape_quant8.model.cpp"
} // namespace reshape_quant8
// Runs the reshape_quant8 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, reshape_quant8) {
    generated_tests::Execute(device,
                             reshape_quant8::createTestModel,
                             reshape_quant8::is_ignored,
                             reshape_quant8::examples);
}
   1518 
// Generated test case "reshape_quant8_weights_as_inputs": example data
// and the model constructor are pulled in from generate_vts_test.sh
// output (this file is generated; do not edit by hand).
namespace reshape_quant8_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated reshape_quant8_weights_as_inputs test
#include "examples/reshape_quant8_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/reshape_quant8_weights_as_inputs.model.cpp"
} // namespace reshape_quant8_weights_as_inputs
// Runs the reshape_quant8_weights_as_inputs model on `device` with the
// bundled examples.
TEST_F(NeuralnetworksHidlTest, reshape_quant8_weights_as_inputs) {
    generated_tests::Execute(device,
                             reshape_quant8_weights_as_inputs::createTestModel,
                             reshape_quant8_weights_as_inputs::is_ignored,
                             reshape_quant8_weights_as_inputs::examples);
}
   1533 
// Generated test case "reshape_weights_as_inputs": example data and the
// model constructor are pulled in from generate_vts_test.sh output
// (this file is generated; do not edit by hand).
namespace reshape_weights_as_inputs {
std::vector<MixedTypedExample> examples = {
// Generated reshape_weights_as_inputs test
#include "examples/reshape_weights_as_inputs.example.cpp"
};
// Generated model constructor
#include "vts_models/reshape_weights_as_inputs.model.cpp"
} // namespace reshape_weights_as_inputs
// Runs the reshape_weights_as_inputs model on `device` with the bundled
// examples.
TEST_F(NeuralnetworksHidlTest, reshape_weights_as_inputs) {
    generated_tests::Execute(device,
                             reshape_weights_as_inputs::createTestModel,
                             reshape_weights_as_inputs::is_ignored,
                             reshape_weights_as_inputs::examples);
}
   1548 
// Generated test case "resize_bilinear": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace resize_bilinear {
std::vector<MixedTypedExample> examples = {
// Generated resize_bilinear test
#include "examples/resize_bilinear.example.cpp"
};
// Generated model constructor
#include "vts_models/resize_bilinear.model.cpp"
} // namespace resize_bilinear
// Runs the resize_bilinear model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, resize_bilinear) {
    generated_tests::Execute(device,
                             resize_bilinear::createTestModel,
                             resize_bilinear::is_ignored,
                             resize_bilinear::examples);
}
   1563 
// Generated test case "rnn": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace rnn {
std::vector<MixedTypedExample> examples = {
// Generated rnn test
#include "examples/rnn.example.cpp"
};
// Generated model constructor
#include "vts_models/rnn.model.cpp"
} // namespace rnn
// Runs the rnn model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, rnn) {
    generated_tests::Execute(device,
                             rnn::createTestModel,
                             rnn::is_ignored,
                             rnn::examples);
}
   1578 
// Generated test case "softmax_float_1": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace softmax_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_float_1 test
#include "examples/softmax_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_float_1.model.cpp"
} // namespace softmax_float_1
// Runs the softmax_float_1 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, softmax_float_1) {
    generated_tests::Execute(device,
                             softmax_float_1::createTestModel,
                             softmax_float_1::is_ignored,
                             softmax_float_1::examples);
}
   1593 
// Generated test case "softmax_float_2": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace softmax_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_float_2 test
#include "examples/softmax_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_float_2.model.cpp"
} // namespace softmax_float_2
// Runs the softmax_float_2 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, softmax_float_2) {
    generated_tests::Execute(device,
                             softmax_float_2::createTestModel,
                             softmax_float_2::is_ignored,
                             softmax_float_2::examples);
}
   1608 
// Generated test case "softmax_quant8_1": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace softmax_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_quant8_1 test
#include "examples/softmax_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_quant8_1.model.cpp"
} // namespace softmax_quant8_1
// Runs the softmax_quant8_1 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, softmax_quant8_1) {
    generated_tests::Execute(device,
                             softmax_quant8_1::createTestModel,
                             softmax_quant8_1::is_ignored,
                             softmax_quant8_1::examples);
}
   1623 
// Generated test case "softmax_quant8_2": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace softmax_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_quant8_2 test
#include "examples/softmax_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_quant8_2.model.cpp"
} // namespace softmax_quant8_2
// Runs the softmax_quant8_2 model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, softmax_quant8_2) {
    generated_tests::Execute(device,
                             softmax_quant8_2::createTestModel,
                             softmax_quant8_2::is_ignored,
                             softmax_quant8_2::examples);
}
   1638 
// Generated test case "space_to_depth_float_1": example data and the
// model constructor are pulled in from generate_vts_test.sh output
// (this file is generated; do not edit by hand).
namespace space_to_depth_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_float_1 test
#include "examples/space_to_depth_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_float_1.model.cpp"
} // namespace space_to_depth_float_1
// Runs the space_to_depth_float_1 model on `device` with the bundled
// examples.
TEST_F(NeuralnetworksHidlTest, space_to_depth_float_1) {
    generated_tests::Execute(device,
                             space_to_depth_float_1::createTestModel,
                             space_to_depth_float_1::is_ignored,
                             space_to_depth_float_1::examples);
}
   1653 
// Generated test case "space_to_depth_float_2": example data and the
// model constructor are pulled in from generate_vts_test.sh output
// (this file is generated; do not edit by hand).
namespace space_to_depth_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_float_2 test
#include "examples/space_to_depth_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_float_2.model.cpp"
} // namespace space_to_depth_float_2
// Runs the space_to_depth_float_2 model on `device` with the bundled
// examples.
TEST_F(NeuralnetworksHidlTest, space_to_depth_float_2) {
    generated_tests::Execute(device,
                             space_to_depth_float_2::createTestModel,
                             space_to_depth_float_2::is_ignored,
                             space_to_depth_float_2::examples);
}
   1668 
// Generated test case "space_to_depth_float_3": example data and the
// model constructor are pulled in from generate_vts_test.sh output
// (this file is generated; do not edit by hand).
namespace space_to_depth_float_3 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_float_3 test
#include "examples/space_to_depth_float_3.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_float_3.model.cpp"
} // namespace space_to_depth_float_3
// Runs the space_to_depth_float_3 model on `device` with the bundled
// examples.
TEST_F(NeuralnetworksHidlTest, space_to_depth_float_3) {
    generated_tests::Execute(device,
                             space_to_depth_float_3::createTestModel,
                             space_to_depth_float_3::is_ignored,
                             space_to_depth_float_3::examples);
}
   1683 
// Generated test case "space_to_depth_quant8_1": example data and the
// model constructor are pulled in from generate_vts_test.sh output
// (this file is generated; do not edit by hand).
namespace space_to_depth_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_quant8_1 test
#include "examples/space_to_depth_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_quant8_1.model.cpp"
} // namespace space_to_depth_quant8_1
// Runs the space_to_depth_quant8_1 model on `device` with the bundled
// examples.
TEST_F(NeuralnetworksHidlTest, space_to_depth_quant8_1) {
    generated_tests::Execute(device,
                             space_to_depth_quant8_1::createTestModel,
                             space_to_depth_quant8_1::is_ignored,
                             space_to_depth_quant8_1::examples);
}
   1698 
// Generated test case "space_to_depth_quant8_2": example data and the
// model constructor are pulled in from generate_vts_test.sh output
// (this file is generated; do not edit by hand).
namespace space_to_depth_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_quant8_2 test
#include "examples/space_to_depth_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_quant8_2.model.cpp"
} // namespace space_to_depth_quant8_2
// Runs the space_to_depth_quant8_2 model on `device` with the bundled
// examples.
TEST_F(NeuralnetworksHidlTest, space_to_depth_quant8_2) {
    generated_tests::Execute(device,
                             space_to_depth_quant8_2::createTestModel,
                             space_to_depth_quant8_2::is_ignored,
                             space_to_depth_quant8_2::examples);
}
   1713 
// Generated test case "svdf": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace svdf {
std::vector<MixedTypedExample> examples = {
// Generated svdf test
#include "examples/svdf.example.cpp"
};
// Generated model constructor
#include "vts_models/svdf.model.cpp"
} // namespace svdf
// Runs the svdf model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, svdf) {
    generated_tests::Execute(device,
                             svdf::createTestModel,
                             svdf::is_ignored,
                             svdf::examples);
}
   1728 
// Generated test case "tanh": example data and the model
// constructor are pulled in from generate_vts_test.sh output (this file
// is generated; do not edit by hand).
namespace tanh {
std::vector<MixedTypedExample> examples = {
// Generated tanh test
#include "examples/tanh.example.cpp"
};
// Generated model constructor
#include "vts_models/tanh.model.cpp"
} // namespace tanh
// Runs the tanh model on `device` with the bundled examples.
TEST_F(NeuralnetworksHidlTest, tanh) {
    generated_tests::Execute(device,
                             tanh::createTestModel,
                             tanh::is_ignored,
                             tanh::examples);
}
   1743