/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <functional>
#include <memory>

#include "base/macros.h"
#include "base/utils.h"
#include "builder.h"
#include "codegen_test_utils.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "register_allocator_linear_scan.h"
#include "utils/arm/assembler_arm_vixl.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"

#include "gtest/gtest.h"

namespace art {

// Return all combinations of ISA and code generator that are executable on
// hardware, or on simulator, and that we'd like to test.
static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
  ::std::vector<CodegenTargetConfig> v;
  // One candidate per compiled-in backend; each is kept only if the current
  // environment (native hardware or simulator) can actually run its code.
  ::std::vector<CodegenTargetConfig> test_config_candidates = {
#ifdef ART_ENABLE_CODEGEN_arm
    // TODO: Shouldn't this be `kThumb2` instead of `kArm` here?
    CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
    CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
#endif
  };

  // Filter the candidates down to the ISAs that can execute here.
  for (const CodegenTargetConfig& test_config : test_config_candidates) {
    if (CanExecute(test_config.GetInstructionSet())) {
      v.push_back(test_config);
    }
  }

  return v;
}

// Test fixture: builds an HGraph from a raw dex code item, runs the full
// codegen pipeline for every executable target and, optionally, executes the
// generated code and checks its return value.
class CodegenTest : public OptimizingUnitTest {
 protected:
  // Compile and run `data` (a dex code item built by the *_CODE_ITEM macros).
  // When `has_result` is true, assert the generated code returns `expected`.
  void TestCode(const std::vector<uint16_t>& data, bool has_result = false, int32_t expected = 0);
  // Same as TestCode, but for methods returning a 64-bit value.
  void TestCodeLong(const std::vector<uint16_t>& data, bool has_result, int64_t expected);
  // Build a two-constant comparison of `type` using `condition` and check the
  // materialized result against the C++ evaluation of `i <op> j`.
  void TestComparison(IfCondition condition,
                      int64_t i,
                      int64_t j,
                      DataType::Type type,
                      const CodegenTargetConfig target_config);
};

void CodegenTest::TestCode(const std::vector<uint16_t>& data, bool has_result, int32_t expected) {
  for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
    // Fresh arena per target so graphs from one backend cannot leak into the next.
    ResetPoolAndAllocator();
    HGraph* graph = CreateCFG(data);
    // Remove suspend checks, they cannot be executed in this context.
    RemoveSuspendChecks(graph);
    RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
  }
}

void CodegenTest::TestCodeLong(const std::vector<uint16_t>& data,
                               bool has_result, int64_t expected) {
  for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
    ResetPoolAndAllocator();
    // kInt64 return type: the method under test returns a wide value.
    HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
    // Remove suspend checks, they cannot be executed in this context.
    RemoveSuspendChecks(graph);
    RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
  }
}

// Trivial method: a bare return-void.
TEST_F(CodegenTest, ReturnVoid) {
  const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
  TestCode(data);
}

// Single forward goto into the return.
TEST_F(CodegenTest, CFG1) {
  const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
    Instruction::GOTO | 0x100,
    Instruction::RETURN_VOID);

  TestCode(data);
}

// Chain of two forward gotos.
TEST_F(CodegenTest, CFG2) {
  const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
    Instruction::GOTO | 0x100,
    Instruction::GOTO | 0x100,
    Instruction::RETURN_VOID);

  TestCode(data);
}

// Forward goto over the return into a backward goto, exercised with all three
// goto encodings (8-bit, 16-bit and 32-bit offsets).
TEST_F(CodegenTest, CFG3) {
  const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
    Instruction::GOTO | 0x200,
    Instruction::RETURN_VOID,
    Instruction::GOTO | 0xFF00);

  TestCode(data1);

  const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
    Instruction::GOTO_16, 3,
    Instruction::RETURN_VOID,
    Instruction::GOTO_16, 0xFFFF);

  TestCode(data2);

  const std::vector<uint16_t> data3 = ZERO_REGISTER_CODE_ITEM(
    Instruction::GOTO_32, 4, 0,
    Instruction::RETURN_VOID,
    Instruction::GOTO_32, 0xFFFF, 0xFFFF);

  TestCode(data3);
}

// Unreachable goto pair after the return (dead code after exit).
TEST_F(CodegenTest, CFG4) {
  const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
    Instruction::RETURN_VOID,
    Instruction::GOTO | 0x100,
    Instruction::GOTO | 0xFE00);

  TestCode(data);
}

// Diamond: conditional branch where both arms reach the same return.
TEST_F(CodegenTest, CFG5) {
  const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
    Instruction::CONST_4 | 0 | 0,
    Instruction::IF_EQ, 3,
    Instruction::GOTO | 0x100,
    Instruction::RETURN_VOID);

  TestCode(data);
}

// A constant that is defined but never used as the return value.
TEST_F(CodegenTest, IntConstant) {
  const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
    Instruction::CONST_4 | 0 | 0,
    Instruction::RETURN_VOID);

  TestCode(data);
}

// Return the constant 0 held in v0.
TEST_F(CodegenTest, Return1) {
  const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
    Instruction::CONST_4 | 0 | 0,
    Instruction::RETURN | 0);

  TestCode(data, true, 0);
}

// Two registers both holding 0; return the second one.
TEST_F(CodegenTest, Return2) {
  const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
    Instruction::CONST_4 | 0 | 0,
    Instruction::CONST_4 | 0 | 1 << 8,
    Instruction::RETURN | 1 << 8);

  TestCode(data, true, 0);
}

// v0 = 0, v1 = 1; return v1.
TEST_F(CodegenTest, Return3) {
  const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
    Instruction::CONST_4 | 0 | 0,
    Instruction::CONST_4 | 1 << 8 | 1 << 12,
    Instruction::RETURN | 1 << 8);

  TestCode(data, true, 1);
}

// if (v0 == v0) return v1 (taken branch: v0 compared against itself).
TEST_F(CodegenTest, ReturnIf1) {
  const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
    Instruction::CONST_4 | 0 | 0,
    Instruction::CONST_4 | 1 << 8 | 1 << 12,
    Instruction::IF_EQ, 3,
    Instruction::RETURN | 0 << 8,
    Instruction::RETURN | 1 << 8);

  TestCode(data, true, 1);
}

// if (v0 == v1) ... : 0 != 1, so the fall-through returns v0 (= 0).
TEST_F(CodegenTest, ReturnIf2) {
  const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
    Instruction::CONST_4 | 0 | 0,
    Instruction::CONST_4 | 1 << 8 | 1 << 12,
    Instruction::IF_EQ | 0 << 4 | 1 << 8, 3,
    Instruction::RETURN | 0 << 8,
    Instruction::RETURN | 1 << 8);

  TestCode(data, true, 0);
}

// Exercise bit-wise (one's complement) not-int instruction.
228 #define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \ 229 TEST_F(CodegenTest, TEST_NAME) { \ 230 const int32_t input = INPUT; \ 231 const uint16_t input_lo = Low16Bits(input); \ 232 const uint16_t input_hi = High16Bits(input); \ 233 const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM( \ 234 Instruction::CONST | 0 << 8, input_lo, input_hi, \ 235 Instruction::NOT_INT | 1 << 8 | 0 << 12 , \ 236 Instruction::RETURN | 1 << 8); \ 237 \ 238 TestCode(data, true, EXPECTED_OUTPUT); \ 239 } 240 241 NOT_INT_TEST(ReturnNotIntMinus2, -2, 1) 242 NOT_INT_TEST(ReturnNotIntMinus1, -1, 0) 243 NOT_INT_TEST(ReturnNotInt0, 0, -1) 244 NOT_INT_TEST(ReturnNotInt1, 1, -2) 245 NOT_INT_TEST(ReturnNotIntINT32_MIN, -2147483648, 2147483647) // (2^31) - 1 246 NOT_INT_TEST(ReturnNotIntINT32_MINPlus1, -2147483647, 2147483646) // (2^31) - 2 247 NOT_INT_TEST(ReturnNotIntINT32_MAXMinus1, 2147483646, -2147483647) // -(2^31) - 1 248 NOT_INT_TEST(ReturnNotIntINT32_MAX, 2147483647, -2147483648) // -(2^31) 249 250 #undef NOT_INT_TEST 251 252 // Exercise bit-wise (one's complement) not-long instruction. 253 #define NOT_LONG_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \ 254 TEST_F(CodegenTest, TEST_NAME) { \ 255 const int64_t input = INPUT; \ 256 const uint16_t word0 = Low16Bits(Low32Bits(input)); /* LSW. */ \ 257 const uint16_t word1 = High16Bits(Low32Bits(input)); \ 258 const uint16_t word2 = Low16Bits(High32Bits(input)); \ 259 const uint16_t word3 = High16Bits(High32Bits(input)); /* MSW. 
*/ \ 260 const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM( \ 261 Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3, \ 262 Instruction::NOT_LONG | 2 << 8 | 0 << 12, \ 263 Instruction::RETURN_WIDE | 2 << 8); \ 264 \ 265 TestCodeLong(data, true, EXPECTED_OUTPUT); \ 266 } 267 268 NOT_LONG_TEST(ReturnNotLongMinus2, INT64_C(-2), INT64_C(1)) 269 NOT_LONG_TEST(ReturnNotLongMinus1, INT64_C(-1), INT64_C(0)) 270 NOT_LONG_TEST(ReturnNotLong0, INT64_C(0), INT64_C(-1)) 271 NOT_LONG_TEST(ReturnNotLong1, INT64_C(1), INT64_C(-2)) 272 273 NOT_LONG_TEST(ReturnNotLongINT32_MIN, 274 INT64_C(-2147483648), 275 INT64_C(2147483647)) // (2^31) - 1 276 NOT_LONG_TEST(ReturnNotLongINT32_MINPlus1, 277 INT64_C(-2147483647), 278 INT64_C(2147483646)) // (2^31) - 2 279 NOT_LONG_TEST(ReturnNotLongINT32_MAXMinus1, 280 INT64_C(2147483646), 281 INT64_C(-2147483647)) // -(2^31) - 1 282 NOT_LONG_TEST(ReturnNotLongINT32_MAX, 283 INT64_C(2147483647), 284 INT64_C(-2147483648)) // -(2^31) 285 286 // Note that the C++ compiler won't accept 287 // INT64_C(-9223372036854775808) (that is, INT64_MIN) as a valid 288 // int64_t literal, so we use INT64_C(-9223372036854775807)-1 instead. 289 NOT_LONG_TEST(ReturnNotINT64_MIN, 290 INT64_C(-9223372036854775807)-1, 291 INT64_C(9223372036854775807)); // (2^63) - 1 292 NOT_LONG_TEST(ReturnNotINT64_MINPlus1, 293 INT64_C(-9223372036854775807), 294 INT64_C(9223372036854775806)); // (2^63) - 2 295 NOT_LONG_TEST(ReturnNotLongINT64_MAXMinus1, 296 INT64_C(9223372036854775806), 297 INT64_C(-9223372036854775807)); // -(2^63) - 1 298 NOT_LONG_TEST(ReturnNotLongINT64_MAX, 299 INT64_C(9223372036854775807), 300 INT64_C(-9223372036854775807)-1); // -(2^63) 301 302 #undef NOT_LONG_TEST 303 304 TEST_F(CodegenTest, IntToLongOfLongToInt) { 305 const int64_t input = INT64_C(4294967296); // 2^32 306 const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW. 
307 const uint16_t word1 = High16Bits(Low32Bits(input)); 308 const uint16_t word2 = Low16Bits(High32Bits(input)); 309 const uint16_t word3 = High16Bits(High32Bits(input)); // MSW. 310 const std::vector<uint16_t> data = FIVE_REGISTERS_CODE_ITEM( 311 Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3, 312 Instruction::CONST_WIDE | 2 << 8, 1, 0, 0, 0, 313 Instruction::ADD_LONG | 0, 0 << 8 | 2, // v0 <- 2^32 + 1 314 Instruction::LONG_TO_INT | 4 << 8 | 0 << 12, 315 Instruction::INT_TO_LONG | 2 << 8 | 4 << 12, 316 Instruction::RETURN_WIDE | 2 << 8); 317 318 TestCodeLong(data, true, 1); 319 } 320 321 TEST_F(CodegenTest, ReturnAdd1) { 322 const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM( 323 Instruction::CONST_4 | 3 << 12 | 0, 324 Instruction::CONST_4 | 4 << 12 | 1 << 8, 325 Instruction::ADD_INT, 1 << 8 | 0, 326 Instruction::RETURN); 327 328 TestCode(data, true, 7); 329 } 330 331 TEST_F(CodegenTest, ReturnAdd2) { 332 const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM( 333 Instruction::CONST_4 | 3 << 12 | 0, 334 Instruction::CONST_4 | 4 << 12 | 1 << 8, 335 Instruction::ADD_INT_2ADDR | 1 << 12, 336 Instruction::RETURN); 337 338 TestCode(data, true, 7); 339 } 340 341 TEST_F(CodegenTest, ReturnAdd3) { 342 const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM( 343 Instruction::CONST_4 | 4 << 12 | 0 << 8, 344 Instruction::ADD_INT_LIT8, 3 << 8 | 0, 345 Instruction::RETURN); 346 347 TestCode(data, true, 7); 348 } 349 350 TEST_F(CodegenTest, ReturnAdd4) { 351 const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM( 352 Instruction::CONST_4 | 4 << 12 | 0 << 8, 353 Instruction::ADD_INT_LIT16, 3, 354 Instruction::RETURN); 355 356 TestCode(data, true, 7); 357 } 358 359 TEST_F(CodegenTest, ReturnMulInt) { 360 const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM( 361 Instruction::CONST_4 | 3 << 12 | 0, 362 Instruction::CONST_4 | 4 << 12 | 1 << 8, 363 Instruction::MUL_INT, 1 << 8 | 0, 364 Instruction::RETURN); 365 366 TestCode(data, true, 12); 367 } 

// 3 * 4 via mul-int/2addr.
TEST_F(CodegenTest, ReturnMulInt2addr) {
  const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
    Instruction::CONST_4 | 3 << 12 | 0,
    Instruction::CONST_4 | 4 << 12 | 1 << 8,
    Instruction::MUL_INT_2ADDR | 1 << 12,
    Instruction::RETURN);

  TestCode(data, true, 12);
}

// 3L * 4L via the three-register mul-long encoding.
TEST_F(CodegenTest, ReturnMulLong) {
  const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
    Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
    Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
    Instruction::MUL_LONG, 2 << 8 | 0,
    Instruction::RETURN_WIDE);

  TestCodeLong(data, true, 12);
}

// 3L * 4L via mul-long/2addr.
TEST_F(CodegenTest, ReturnMulLong2addr) {
  const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
    Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
    Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
    Instruction::MUL_LONG_2ADDR | 2 << 12,
    Instruction::RETURN_WIDE);

  TestCodeLong(data, true, 12);
}

// 4 * literal 3 via mul-int/lit8.
TEST_F(CodegenTest, ReturnMulIntLit8) {
  const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
    Instruction::CONST_4 | 4 << 12 | 0 << 8,
    Instruction::MUL_INT_LIT8, 3 << 8 | 0,
    Instruction::RETURN);

  TestCode(data, true, 12);
}

// 4 * literal 3 via mul-int/lit16.
TEST_F(CodegenTest, ReturnMulIntLit16) {
  const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
    Instruction::CONST_4 | 4 << 12 | 0 << 8,
    Instruction::MUL_INT_LIT16, 3,
    Instruction::RETURN);

  TestCode(data, true, 12);
}

// Check that a condition whose only user is the HIf is emitted at its use
// site (i.e. not materialized into a register).
TEST_F(CodegenTest, NonMaterializedCondition) {
  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
    HGraph* graph = CreateGraph();

    HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
    graph->AddBlock(entry);
    graph->SetEntryBlock(entry);
    entry->AddInstruction(new (GetAllocator()) HGoto());

    HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph);
    graph->AddBlock(first_block);
    entry->AddSuccessor(first_block);
    // `0 == 0` is always true, so the then-branch (returning constant0) runs.
    HIntConstant* constant0 = graph->GetIntConstant(0);
    HIntConstant* constant1 = graph->GetIntConstant(1);
    HEqual* equal = new (GetAllocator()) HEqual(constant0, constant0);
    first_block->AddInstruction(equal);
    first_block->AddInstruction(new (GetAllocator()) HIf(equal));

    HBasicBlock* then_block = new (GetAllocator()) HBasicBlock(graph);
    HBasicBlock* else_block = new (GetAllocator()) HBasicBlock(graph);
    HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
    graph->SetExitBlock(exit_block);

    graph->AddBlock(then_block);
    graph->AddBlock(else_block);
    graph->AddBlock(exit_block);
    first_block->AddSuccessor(then_block);
    first_block->AddSuccessor(else_block);
    then_block->AddSuccessor(exit_block);
    else_block->AddSuccessor(exit_block);

    exit_block->AddInstruction(new (GetAllocator()) HExit());
    then_block->AddInstruction(new (GetAllocator()) HReturn(constant0));
    else_block->AddInstruction(new (GetAllocator()) HReturn(constant1));

    // PrepareForRegisterAllocation is the pass expected to mark the condition
    // as emitted-at-use-site; verify the flag flips across the pass.
    ASSERT_FALSE(equal->IsEmittedAtUseSite());
    graph->BuildDominatorTree();
    PrepareForRegisterAllocation(graph).Run();
    ASSERT_TRUE(equal->IsEmittedAtUseSite());

    auto hook_before_codegen = [](HGraph* graph_in) {
      HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
      HParallelMove* move = new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
      block->InsertInstructionBefore(move, block->GetLastInstruction());
    };

    RunCode(target_config, graph, hook_before_codegen, true, 0);
  }
}

TEST_F(CodegenTest, MaterializedCondition1) {
  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
    // Check that condition are materialized correctly. A materialized condition
    // should yield `1` if it evaluated to true, and `0` otherwise.
    // We force the materialization of comparisons for different combinations of
    // inputs and check the results.

    int lhs[] = {1, 2, -1, 2, 0xabc};
    int rhs[] = {2, 1, 2, -1, 0xabc};

    for (size_t i = 0; i < arraysize(lhs); i++) {
      HGraph* graph = CreateGraph();

      HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(entry_block);
      graph->SetEntryBlock(entry_block);
      entry_block->AddInstruction(new (GetAllocator()) HGoto());
      HBasicBlock* code_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(code_block);
      HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(exit_block);
      exit_block->AddInstruction(new (GetAllocator()) HExit());

      entry_block->AddSuccessor(code_block);
      code_block->AddSuccessor(exit_block);
      graph->SetExitBlock(exit_block);

      HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
      HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
      // Returning the condition directly forces it to be materialized as 0/1.
      // NOTE: cmp_lt/ret are stack-allocated; codegen runs within this scope.
      HLessThan cmp_lt(cst_lhs, cst_rhs);
      code_block->AddInstruction(&cmp_lt);
      HReturn ret(&cmp_lt);
      code_block->AddInstruction(&ret);

      graph->BuildDominatorTree();
      auto hook_before_codegen = [](HGraph* graph_in) {
        HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
        HParallelMove* move =
            new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
        block->InsertInstructionBefore(move, block->GetLastInstruction());
      };
      RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
    }
  }
}

TEST_F(CodegenTest, MaterializedCondition2) {
  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
    // Check that HIf correctly interprets a materialized condition.
    // We force the materialization of comparisons for different combinations of
    // inputs. An HIf takes the materialized combination as input and returns a
    // value that we verify.

    int lhs[] = {1, 2, -1, 2, 0xabc};
    int rhs[] = {2, 1, 2, -1, 0xabc};


    for (size_t i = 0; i < arraysize(lhs); i++) {
      HGraph* graph = CreateGraph();

      HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(entry_block);
      graph->SetEntryBlock(entry_block);
      entry_block->AddInstruction(new (GetAllocator()) HGoto());

      HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(if_block);
      HBasicBlock* if_true_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(if_true_block);
      HBasicBlock* if_false_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(if_false_block);
      HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
      graph->AddBlock(exit_block);
      exit_block->AddInstruction(new (GetAllocator()) HExit());

      graph->SetEntryBlock(entry_block);
      entry_block->AddSuccessor(if_block);
      if_block->AddSuccessor(if_true_block);
      if_block->AddSuccessor(if_false_block);
      if_true_block->AddSuccessor(exit_block);
      if_false_block->AddSuccessor(exit_block);
      graph->SetExitBlock(exit_block);

      HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
      HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
      HLessThan cmp_lt(cst_lhs, cst_rhs);
      if_block->AddInstruction(&cmp_lt);
      // We insert a dummy instruction to separate the HIf from the HLessThan
      // and force the materialization of the condition.
      HMemoryBarrier force_materialization(MemBarrierKind::kAnyAny, 0);
      if_block->AddInstruction(&force_materialization);
      HIf if_lt(&cmp_lt);
      if_block->AddInstruction(&if_lt);

      // True arm returns 1, false arm returns 0 — mirroring `lhs < rhs`.
      HIntConstant* cst_lt = graph->GetIntConstant(1);
      HReturn ret_lt(cst_lt);
      if_true_block->AddInstruction(&ret_lt);
      HIntConstant* cst_ge = graph->GetIntConstant(0);
      HReturn ret_ge(cst_ge);
      if_false_block->AddInstruction(&ret_ge);

      graph->BuildDominatorTree();
      auto hook_before_codegen = [](HGraph* graph_in) {
        HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
        HParallelMove* move =
            new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
        block->InsertInstructionBefore(move, block->GetLastInstruction());
      };
      RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
    }
  }
}

// 4 / literal 3 via div-int/lit8 (integer division truncates to 1).
TEST_F(CodegenTest, ReturnDivIntLit8) {
  const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
    Instruction::CONST_4 | 4 << 12 | 0 << 8,
    Instruction::DIV_INT_LIT8, 3 << 8 | 0,
    Instruction::RETURN);

  TestCode(data, true, 1);
}

// 4 / 2 via div-int/2addr.
TEST_F(CodegenTest, ReturnDivInt2Addr) {
  const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
    Instruction::CONST_4 | 4 << 12 | 0,
    Instruction::CONST_4 | 2 << 12 | 1 << 8,
    Instruction::DIV_INT_2ADDR | 1 << 12,
    Instruction::RETURN);

  TestCode(data, true, 2);
}

// Helper method.
// Build a single-block graph that returns `op1 <condition> op2` for two
// constants of `type` (kInt32 or kInt64), then run it and compare against
// the host-side evaluation. Unsigned conditions (B/BE/A/AE) are checked
// against the operands reinterpreted as uint64_t.
void CodegenTest::TestComparison(IfCondition condition,
                                 int64_t i,
                                 int64_t j,
                                 DataType::Type type,
                                 const CodegenTargetConfig target_config) {
  HGraph* graph = CreateGraph();

  HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
  graph->AddBlock(entry_block);
  graph->SetEntryBlock(entry_block);
  entry_block->AddInstruction(new (GetAllocator()) HGoto());

  HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
  graph->AddBlock(block);

  HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
  graph->AddBlock(exit_block);
  graph->SetExitBlock(exit_block);
  exit_block->AddInstruction(new (GetAllocator()) HExit());

  entry_block->AddSuccessor(block);
  block->AddSuccessor(exit_block);

  HInstruction* op1;
  HInstruction* op2;
  if (type == DataType::Type::kInt32) {
    op1 = graph->GetIntConstant(i);
    op2 = graph->GetIntConstant(j);
  } else {
    DCHECK_EQ(type, DataType::Type::kInt64);
    op1 = graph->GetLongConstant(i);
    op2 = graph->GetLongConstant(j);
  }

  HInstruction* comparison = nullptr;
  bool expected_result = false;
  // Unsigned views of the operands, used by the below/above conditions.
  const uint64_t x = i;
  const uint64_t y = j;
  switch (condition) {
    case kCondEQ:
      comparison = new (GetAllocator()) HEqual(op1, op2);
      expected_result = (i == j);
      break;
    case kCondNE:
      comparison = new (GetAllocator()) HNotEqual(op1, op2);
      expected_result = (i != j);
      break;
    case kCondLT:
      comparison = new (GetAllocator()) HLessThan(op1, op2);
      expected_result = (i < j);
      break;
    case kCondLE:
      comparison = new (GetAllocator()) HLessThanOrEqual(op1, op2);
      expected_result = (i <= j);
      break;
    case kCondGT:
      comparison = new (GetAllocator()) HGreaterThan(op1, op2);
      expected_result = (i > j);
      break;
    case kCondGE:
      comparison = new (GetAllocator()) HGreaterThanOrEqual(op1, op2);
      expected_result = (i >= j);
      break;
    case kCondB:
      comparison = new (GetAllocator()) HBelow(op1, op2);
      expected_result = (x < y);
      break;
    case kCondBE:
      comparison = new (GetAllocator()) HBelowOrEqual(op1, op2);
      expected_result = (x <= y);
      break;
    case kCondA:
      comparison = new (GetAllocator()) HAbove(op1, op2);
      expected_result = (x > y);
      break;
    case kCondAE:
      comparison = new (GetAllocator()) HAboveOrEqual(op1, op2);
      expected_result = (x >= y);
      break;
  }
  block->AddInstruction(comparison);
  block->AddInstruction(new (GetAllocator()) HReturn(comparison));

  graph->BuildDominatorTree();
  RunCode(target_config, graph, [](HGraph*) {}, true, expected_result);
}

// Every condition kind over all pairs drawn from {-1, 0, 1}, 32-bit.
TEST_F(CodegenTest, ComparisonsInt) {
  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
    for (int64_t i = -1; i <= 1; i++) {
      for (int64_t j = -1; j <= 1; j++) {
        for (int cond = kCondFirst; cond <= kCondLast; cond++) {
          TestComparison(
              static_cast<IfCondition>(cond), i, j, DataType::Type::kInt32, target_config);
        }
      }
    }
  }
}

// Same sweep as ComparisonsInt, but with 64-bit operands.
TEST_F(CodegenTest, ComparisonsLong) {
  for (CodegenTargetConfig target_config : GetTargetConfigs()) {
    for (int64_t i = -1; i <= 1; i++) {
      for (int64_t j = -1; j <= 1; j++) {
        for (int cond = kCondFirst; cond <= kCondLast; cond++) {
          TestComparison(
              static_cast<IfCondition>(cond), i, j, DataType::Type::kInt64, target_config);
        }
      }
    }
  }
}

#ifdef ART_ENABLE_CODEGEN_arm
TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
  std::unique_ptr<const ArmInstructionSetFeatures> features(
      ArmInstructionSetFeatures::FromCppDefines());
  HGraph* graph = CreateGraph();
  arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions());

  codegen.Initialize();

  // This will result in calling EmitSwap -> void ParallelMoveResolverARMVIXL::Exchange(int mem1,
  // int mem2) which was faulty (before the fix). So previously GPR and FP scratch registers were
  // used as temps; however GPR scratch register is required for big stack offsets which don't fit
  // LDR encoding. So the following code is a regression test for that situation.
  HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
  move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), DataType::Type::kInt32, nullptr);
  move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), DataType::Type::kInt32, nullptr);
  codegen.GetMoveResolver()->EmitNativeCode(move);

  InternalCodeAllocator code_allocator;
  codegen.Finalize(&code_allocator);
}
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
// Regression test for b/34760542.
TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
  std::unique_ptr<const Arm64InstructionSetFeatures> features(
      Arm64InstructionSetFeatures::FromCppDefines());
  HGraph* graph = CreateGraph();
  arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());

  codegen.Initialize();

  // The following ParallelMove used to fail this assertion:
  //
  //   Assertion failed (!available->IsEmpty())
  //
  // in vixl::aarch64::UseScratchRegisterScope::AcquireNextAvailable,
  // because of the following situation:
  //
  //   1. a temp register (IP0) is allocated as a scratch register by
  //      the parallel move resolver to solve a cycle (swap):
  //
  //        [ source=DS0 destination=DS257 type=PrimDouble instruction=null ]
  //        [ source=DS257 destination=DS0 type=PrimDouble instruction=null ]
  //
  //   2. within CodeGeneratorARM64::MoveLocation, another temp
  //      register (IP1) is allocated to generate the swap between two
  //      double stack slots;
  //
  //   3. VIXL requires a third temp register to emit the `Ldr` or
  //      `Str` operation from CodeGeneratorARM64::MoveLocation (as
  //      one of the stack slots' offsets cannot be encoded as an
  //      immediate), but the pool of (core) temp registers is now
  //      empty.
  //
  // The solution used so far is to use a floating-point temp register
  // (D31) in step #2, so that IP1 is available for step #3.

  HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
  move->AddMove(Location::DoubleStackSlot(0),
                Location::DoubleStackSlot(257),
                DataType::Type::kFloat64,
                nullptr);
  move->AddMove(Location::DoubleStackSlot(257),
                Location::DoubleStackSlot(0),
                DataType::Type::kFloat64,
                nullptr);
  codegen.GetMoveResolver()->EmitNativeCode(move);

  InternalCodeAllocator code_allocator;
  codegen.Finalize(&code_allocator);
}

// Check that ParallelMoveResolver works fine for ARM64 for both cases when SIMD is on and off.
TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
  std::unique_ptr<const Arm64InstructionSetFeatures> features(
      Arm64InstructionSetFeatures::FromCppDefines());
  HGraph* graph = CreateGraph();
  arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());

  codegen.Initialize();

  // First iteration: SIMD enabled; second iteration: SIMD disabled
  // (SetHasSIMD(false) at the end of the loop body).
  graph->SetHasSIMD(true);
  for (int i = 0; i < 2; i++) {
    HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
    move->AddMove(Location::SIMDStackSlot(0),
                  Location::SIMDStackSlot(257),
                  DataType::Type::kFloat64,
                  nullptr);
    move->AddMove(Location::SIMDStackSlot(257),
                  Location::SIMDStackSlot(0),
                  DataType::Type::kFloat64,
                  nullptr);
    move->AddMove(Location::FpuRegisterLocation(0),
                  Location::FpuRegisterLocation(1),
                  DataType::Type::kFloat64,
                  nullptr);
    move->AddMove(Location::FpuRegisterLocation(1),
                  Location::FpuRegisterLocation(0),
                  DataType::Type::kFloat64,
                  nullptr);
    codegen.GetMoveResolver()->EmitNativeCode(move);
    graph->SetHasSIMD(false);
  }

  InternalCodeAllocator code_allocator;
  codegen.Finalize(&code_allocator);
}
#endif

#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(CodegenTest, MipsClobberRA) {
  std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
      MipsInstructionSetFeatures::FromCppDefines());
  if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) {
    // HMipsComputeBaseMethodAddress and the NAL instruction behind it
    // should only be generated on non-R6.
    return;
  }

  HGraph* graph = CreateGraph();

  HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
  graph->AddBlock(entry_block);
  graph->SetEntryBlock(entry_block);
  entry_block->AddInstruction(new (GetAllocator()) HGoto());

  HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
  graph->AddBlock(block);

  HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
  graph->AddBlock(exit_block);
  graph->SetExitBlock(exit_block);
  exit_block->AddInstruction(new (GetAllocator()) HExit());

  entry_block->AddSuccessor(block);
  block->AddSuccessor(exit_block);

  // To simplify matters, don't create PC-relative HLoadClass or HLoadString.
  // Instead, generate HMipsComputeBaseMethodAddress directly.
  HMipsComputeBaseMethodAddress* base = new (GetAllocator()) HMipsComputeBaseMethodAddress();
  block->AddInstruction(base);
  // HMipsComputeBaseMethodAddress is defined as int, so just make the
  // compiled method return it.
  block->AddInstruction(new (GetAllocator()) HReturn(base));

  graph->BuildDominatorTree();

  mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), CompilerOptions());
  // Since there isn't HLoadClass or HLoadString, we need to manually indicate
  // that RA is clobbered and the method entry code should generate a stack frame
  // and preserve RA in it. And this is what we're testing here.
  codegenMIPS.ClobberRA();
  // Without ClobberRA() the code would be:
  //   nal              # Sets RA to point to the jr instruction below
  //   move v0, ra      # and the CPU falls into an infinite loop.
  //   jr   ra
  //   nop
  // The expected code is:
  //   addiu sp, sp, -16
  //   sw    ra, 12(sp)
  //   sw    a0, 0(sp)
  //   nal              # Sets RA to point to the lw instruction below.
  //   move  v0, ra
  //   lw    ra, 12(sp)
  //   jr    ra
  //   addiu sp, sp, 16
  RunCode(&codegenMIPS, graph, [](HGraph*) {}, false, 0);
}
#endif

}  // namespace art