/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_optimization.h"

#include "arch/instruction_set.h"
#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "driver/compiler_driver.h"
#include "linear_order.h"

namespace art {

// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;

// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
  instruction->RemoveAsUserOfAllInputs();
  instruction->RemoveEnvironmentUsers();
  instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
}

// Detect a goto block and set succ to its single successor.
static bool IsGotoBlock(HBasicBlock* block, /*out*/ HBasicBlock** succ) {
  if (block->GetPredecessors().size() == 1 &&
      block->GetSuccessors().size() == 1 &&
      block->IsSingleGoto()) {
    *succ = block->GetSingleSuccessor();
    return true;
  }
  return false;
}

// Detect an early exit loop.
static bool IsEarlyExit(HLoopInformation* loop_info) {
  HBlocksInLoopReversePostOrderIterator it_loop(*loop_info);
  for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
    for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        return true;
      }
    }
  }
  return false;
}

// Detect a sign extension from the given type. Returns the promoted operand on success.
static bool IsSignExtensionAndGet(HInstruction* instruction,
                                  Primitive::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by sign
  // extension when represented in the *width* of the given narrower data type
  // (the fact that char normally zero extends does not matter here).
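  // Illustrative example (not part of the original comment): for type kPrimByte, the
  // constant -1 lies in [-128, 127] and sign-extends back to the same value, so it is
  // accepted below, whereas a constant such as 200 falls outside that range and is rejected.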
  int64_t value = 0;
  if (IsInt64AndGet(instruction, &value)) {
    switch (type) {
      case Primitive::kPrimByte:
        if (std::numeric_limits<int8_t>::min() <= value &&
            std::numeric_limits<int8_t>::max() >= value) {
          *operand = instruction;
          return true;
        }
        return false;
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
        if (std::numeric_limits<int16_t>::min() <= value &&
            std::numeric_limits<int16_t>::max() >= value) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of a signed integer to an integral type sign-extends
  // the two's-complement representation of the integer value to fill the wider format.
  if (instruction->GetType() == type && (instruction->IsArrayGet() ||
                                         instruction->IsStaticFieldGet() ||
                                         instruction->IsInstanceFieldGet())) {
    switch (type) {
      case Primitive::kPrimByte:
      case Primitive::kPrimShort:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // TODO: perhaps explicit conversions later too?
  //       (this may return something different from instruction)
  return false;
}

// Detect a zero extension from the given type. Returns the promoted operand on success.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  Primitive::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by zero
  // extension when represented in the *width* of the given narrower data type
  // (the fact that byte/short normally sign extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, &value)) {
    switch (type) {
      case Primitive::kPrimByte:
        if (std::numeric_limits<uint8_t>::min() <= value &&
            std::numeric_limits<uint8_t>::max() >= value) {
          *operand = instruction;
          return true;
        }
        return false;
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
        if (std::numeric_limits<uint16_t>::min() <= value &&
            std::numeric_limits<uint16_t>::max() >= value) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of a char to an integral type zero-extends
  // the representation of the char value to fill the wider format.
  if (instruction->GetType() == type && (instruction->IsArrayGet() ||
                                         instruction->IsStaticFieldGet() ||
                                         instruction->IsInstanceFieldGet())) {
    if (type == Primitive::kPrimChar) {
      *operand = instruction;
      return true;
    }
  }
  // A sign (or zero) extension followed by an explicit removal of just the
  // higher sign bits is equivalent to a zero extension of the underlying operand.
  if (instruction->IsAnd()) {
    int64_t mask = 0;
    HInstruction* a = instruction->InputAt(0);
    HInstruction* b = instruction->InputAt(1);
    // In (a & b) find (mask & b) or (a & mask) with sign or zero extension on the non-mask.
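    // For example (illustrative only), (b & 0xff) over a sign- or zero-extended byte
    // load behaves exactly like a zero extension of that byte, and (b & 0xffff) plays
    // the same role for char/short.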
    if ((IsInt64AndGet(a, /*out*/ &mask) && (IsSignExtensionAndGet(b, type, /*out*/ operand) ||
                                             IsZeroExtensionAndGet(b, type, /*out*/ operand))) ||
        (IsInt64AndGet(b, /*out*/ &mask) && (IsSignExtensionAndGet(a, type, /*out*/ operand) ||
                                             IsZeroExtensionAndGet(a, type, /*out*/ operand)))) {
      switch ((*operand)->GetType()) {
        case Primitive::kPrimByte:  return mask == std::numeric_limits<uint8_t>::max();
        case Primitive::kPrimChar:
        case Primitive::kPrimShort: return mask == std::numeric_limits<uint16_t>::max();
        default: return false;
      }
    }
  }
  // TODO: perhaps explicit conversions later too?
  return false;
}

// Test vector restrictions.
static bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
  return (restrictions & tested) != 0;
}

// Insert an instruction.
static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
  DCHECK(block != nullptr);
  DCHECK(instruction != nullptr);
  block->InsertInstructionBefore(instruction, block->GetLastInstruction());
  return instruction;
}

//
// Class methods.
//

HLoopOptimization::HLoopOptimization(HGraph* graph,
                                     CompilerDriver* compiler_driver,
                                     HInductionVarAnalysis* induction_analysis)
    : HOptimization(graph, kLoopOptimizationPassName),
      compiler_driver_(compiler_driver),
      induction_range_(induction_analysis),
      loop_allocator_(nullptr),
      global_allocator_(graph_->GetArena()),
      top_loop_(nullptr),
      last_loop_(nullptr),
      iset_(nullptr),
      induction_simplication_count_(0),
      simplified_(false),
      vector_length_(0),
      vector_refs_(nullptr),
      vector_map_(nullptr) {
}

void HLoopOptimization::Run() {
  // Skip if there is no loop or the graph has try-catch/irreducible loops.
  // TODO: make this less of a sledgehammer.
  if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
    return;
  }

  // Phase-local allocator that draws from the global pool. Since the allocator
  // itself resides on the stack, it is destructed on exiting Run(), which
  // implies its underlying memory is released immediately.
  ArenaAllocator allocator(global_allocator_->GetArenaPool());
  loop_allocator_ = &allocator;

  // Perform loop optimizations.
  LocalRun();
  if (top_loop_ == nullptr) {
    graph_->SetHasLoops(false);  // no more loops
  }

  // Detach.
  loop_allocator_ = nullptr;
  last_loop_ = top_loop_ = nullptr;
}

void HLoopOptimization::LocalRun() {
  // Build the linear order using the phase-local allocator. This step enables building
  // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
  ArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
  LinearizeGraph(graph_, loop_allocator_, &linear_order);

  // Build the loop hierarchy.
  for (HBasicBlock* block : linear_order) {
    if (block->IsLoopHeader()) {
      AddLoop(block->GetLoopInformation());
    }
  }

  // Traverse the loop hierarchy inner-to-outer and optimize. Traversal can use
  // temporary data structures using the phase-local allocator. All new HIR
  // should use the global allocator.
  if (top_loop_ != nullptr) {
    ArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ArenaSafeMap<HInstruction*, HInstruction*> map(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    // Attach.
    iset_ = &iset;
    vector_refs_ = &refs;
    vector_map_ = &map;
    // Traverse.
    TraverseLoopsInnerToOuter(top_loop_);
    // Detach.
    iset_ = nullptr;
    vector_refs_ = nullptr;
    vector_map_ = nullptr;
  }
}

void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
  DCHECK(loop_info != nullptr);
  LoopNode* node = new (loop_allocator_) LoopNode(loop_info);
  if (last_loop_ == nullptr) {
    // First loop.
    DCHECK(top_loop_ == nullptr);
    last_loop_ = top_loop_ = node;
  } else if (loop_info->IsIn(*last_loop_->loop_info)) {
    // Inner loop.
    node->outer = last_loop_;
    DCHECK(last_loop_->inner == nullptr);
    last_loop_ = last_loop_->inner = node;
  } else {
    // Subsequent loop.
    while (last_loop_->outer != nullptr && !loop_info->IsIn(*last_loop_->outer->loop_info)) {
      last_loop_ = last_loop_->outer;
    }
    node->outer = last_loop_->outer;
    node->previous = last_loop_;
    DCHECK(last_loop_->next == nullptr);
    last_loop_ = last_loop_->next = node;
  }
}

void HLoopOptimization::RemoveLoop(LoopNode* node) {
  DCHECK(node != nullptr);
  DCHECK(node->inner == nullptr);
  if (node->previous != nullptr) {
    // Within sequence.
    node->previous->next = node->next;
    if (node->next != nullptr) {
      node->next->previous = node->previous;
    }
  } else {
    // First of sequence.
    if (node->outer != nullptr) {
      node->outer->inner = node->next;
    } else {
      top_loop_ = node->next;
    }
    if (node->next != nullptr) {
      node->next->outer = node->outer;
      node->next->previous = nullptr;
    }
  }
}

void HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
  for ( ; node != nullptr; node = node->next) {
    // Visit inner loops first.
    uint32_t current_induction_simplification_count = induction_simplication_count_;
    if (node->inner != nullptr) {
      TraverseLoopsInnerToOuter(node->inner);
    }
    // Recompute induction information of this loop if the induction
    // of any inner loop has been simplified.
    if (current_induction_simplification_count != induction_simplication_count_) {
      induction_range_.ReVisit(node->loop_info);
    }
    // Repeat simplifications in the loop-body until no more changes occur.
    // Note that since each simplification consists of eliminating code (without
    // introducing new code), this process is always finite.
    do {
      simplified_ = false;
      SimplifyInduction(node);
      SimplifyBlocks(node);
    } while (simplified_);
    // Optimize inner loop.
    if (node->inner == nullptr) {
      OptimizeInnerLoop(node);
    }
  }
}

//
// Optimization.
//

bool HLoopOptimization::CanRemoveCycle() {
  for (HInstruction* i : *iset_) {
    // We can never remove instructions that have environment
    // uses when we compile 'debuggable'.
    if (i->HasEnvironmentUses() && graph_->IsDebuggable()) {
      return false;
    }
    // A deoptimization should never have an environment input removed.
    for (const HUseListNode<HEnvironment*>& use : i->GetEnvUses()) {
      if (use.GetUser()->GetHolder()->IsDeoptimize()) {
        return false;
      }
    }
  }
  return true;
}

void HLoopOptimization::SimplifyInduction(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Scan the phis in the header to find opportunities to simplify an induction
  // cycle that is only used outside the loop. Replace these uses, if any, with
  // the last value and remove the induction cycle.
  // Examples: for (int i = 0; x != null; i++) { .... no i .... }
  //           for (int i = 0; i < 10; i++, k++) { .... no k .... } return k;
  for (HInstructionIterator it(header->GetPhis()); !it.Done(); it.Advance()) {
    HPhi* phi = it.Current()->AsPhi();
    iset_->clear();  // prepare phi induction
    if (TrySetPhiInduction(phi, /*restrict_uses*/ true) &&
        TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ false)) {
      // Note that it's ok to have replaced uses after the loop with the last value, without
      // being able to remove the cycle. Environment uses (which are the reason we may not be
      // able to remove the cycle) within the loop will still hold the right value.
      if (CanRemoveCycle()) {
        for (HInstruction* i : *iset_) {
          RemoveFromCycle(i);
        }
        simplified_ = true;
      }
    }
  }
}

void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
  // Iterate over all basic blocks in the loop-body.
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    // Remove dead instructions from the loop-body.
    RemoveDeadInstructions(block->GetPhis());
    RemoveDeadInstructions(block->GetInstructions());
    // Remove trivial control flow blocks from the loop-body.
    if (block->GetPredecessors().size() == 1 &&
        block->GetSuccessors().size() == 1 &&
        block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
      simplified_ = true;
      block->MergeWith(block->GetSingleSuccessor());
    } else if (block->GetSuccessors().size() == 2) {
      // Trivial if block can be bypassed to either branch.
      HBasicBlock* succ0 = block->GetSuccessors()[0];
      HBasicBlock* succ1 = block->GetSuccessors()[1];
      HBasicBlock* meet0 = nullptr;
      HBasicBlock* meet1 = nullptr;
      if (succ0 != succ1 &&
          IsGotoBlock(succ0, &meet0) &&
          IsGotoBlock(succ1, &meet1) &&
          meet0 == meet1 &&  // meets again
          meet0 != block &&  // no self-loop
          meet0->GetPhis().IsEmpty()) {  // not used for merging
        simplified_ = true;
        succ0->DisconnectAndDelete();
        if (block->Dominates(meet0)) {
          block->RemoveDominatedBlock(meet0);
          succ1->AddDominatedBlock(meet0);
          meet0->SetDominator(succ1);
        }
      }
    }
  }
}

void HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Ensure loop header logic is finite.
  int64_t trip_count = 0;
  if (!induction_range_.IsFinite(node->loop_info, &trip_count)) {
    return;
  }

  // Ensure there is only a single loop-body (besides the header).
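  // In other words (illustrative note), the loop must consist of exactly two blocks:
  // the header that evaluates the condition and one body block that branches back.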
  HBasicBlock* body = nullptr;
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    if (it.Current() != header) {
      if (body != nullptr) {
        return;
      }
      body = it.Current();
    }
  }
  // Ensure there is only a single exit point.
  if (header->GetSuccessors().size() != 2) {
    return;
  }
  HBasicBlock* exit = (header->GetSuccessors()[0] == body)
      ? header->GetSuccessors()[1]
      : header->GetSuccessors()[0];
  // Ensure exit can only be reached by exiting loop.
  if (exit->GetPredecessors().size() != 1) {
    return;
  }
  // Detect either an empty loop (no side effects other than plain iteration) or
  // a trivial loop (just iterating once). Replace subsequent index uses, if any,
  // with the last value and remove the loop, possibly after unrolling its body.
  HInstruction* phi = header->GetFirstPhi();
  iset_->clear();  // prepare phi induction
  if (TrySetSimpleLoopHeader(header)) {
    bool is_empty = IsEmptyBody(body);
    if ((is_empty || trip_count == 1) &&
        TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ true)) {
      if (!is_empty) {
        // Unroll the loop-body, which sees initial value of the index.
        phi->ReplaceWith(phi->InputAt(0));
        preheader->MergeInstructionsWith(body);
      }
      body->DisconnectAndDelete();
      exit->RemovePredecessor(header);
      header->RemoveSuccessor(exit);
      header->RemoveDominatedBlock(exit);
      header->DisconnectAndDelete();
      preheader->AddSuccessor(exit);
      preheader->AddInstruction(new (global_allocator_) HGoto());
      preheader->AddDominatedBlock(exit);
      exit->SetDominator(preheader);
      RemoveLoop(node);  // update hierarchy
      return;
    }
  }

  // Vectorize loop, if possible and valid.
  if (kEnableVectorization) {
    iset_->clear();  // prepare phi induction
    if (TrySetSimpleLoopHeader(header) &&
        CanVectorize(node, body, trip_count) &&
        TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ true)) {
      Vectorize(node, body, exit, trip_count);
      graph_->SetHasSIMD(true);  // flag SIMD usage
      return;
    }
  }
}

//
// Loop vectorization. The implementation is based on the book by Aart J.C. Bik:
// "The Software Vectorization Handbook. Applying Multimedia Extensions for Maximum Performance."
// Intel Press, June, 2004 (http://www.aartbik.com/).
//

bool HLoopOptimization::CanVectorize(LoopNode* node, HBasicBlock* block, int64_t trip_count) {
  // Reset vector bookkeeping.
  vector_length_ = 0;
  vector_refs_->clear();
  vector_runtime_test_a_ =
  vector_runtime_test_b_ = nullptr;

  // Phis in the loop-body prevent vectorization.
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }

  // Scan the loop-body, starting a right-hand-side tree traversal at each left-hand-side
  // occurrence, which allows passing attributes down the use tree.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!VectorizeDef(node, it.Current(), /*generate_code*/ false)) {
      return false;  // failure to vectorize a left-hand-side
    }
  }

  // Heuristics. Does vectorization seem profitable?
  // TODO: refine
  if (vector_length_ == 0) {
    return false;  // nothing found
  } else if (0 < trip_count && trip_count < vector_length_) {
    return false;  // insufficient iterations
  }

  // Data dependence analysis.
  // Find each pair of references with the same type, where at least one is a write.
  // Each such pair denotes a possible data dependence. This analysis exploits the
  // property that differently typed arrays cannot be aliased, as well as the property
  // that references either point to the same array or to two completely disjoint
  // arrays, i.e., no partial aliasing. Other than a few simple heuristics, no
  // detailed subscript analysis is done.
  for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
    for (auto j = i; ++j != vector_refs_->end(); ) {
      if (i->type == j->type && (i->lhs || j->lhs)) {
        // Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
        HInstruction* a = i->base;
        HInstruction* b = j->base;
        HInstruction* x = i->offset;
        HInstruction* y = j->offset;
        if (a == b) {
          // Found a[i+x] vs. a[i+y]. Accept if x == y (loop-independent data dependence).
          // Conservatively assume a loop-carried data dependence otherwise, and reject.
          if (x != y) {
            return false;
          }
        } else {
          // Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
          // Conservatively assume a potential loop-carried data dependence otherwise, avoided by
          // generating an explicit a != b disambiguation runtime test on the two references.
          if (x != y) {
            // For now, we reject after one test to avoid excessive overhead.
            if (vector_runtime_test_a_ != nullptr) {
              return false;
            }
            vector_runtime_test_a_ = a;
            vector_runtime_test_b_ = b;
          }
        }
      }
    }
  }

  // Success!
  return true;
}

void HLoopOptimization::Vectorize(LoopNode* node,
                                  HBasicBlock* block,
                                  HBasicBlock* exit,
                                  int64_t trip_count) {
  Primitive::Type induc_type = Primitive::kPrimInt;
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();

  // A cleanup is needed for any unknown trip count or for a known trip count
  // with remainder iterations after vectorization.
  bool needs_cleanup = trip_count == 0 || (trip_count % vector_length_) != 0;

  // Adjust vector bookkeeping.
  iset_->clear();  // prepare phi induction
  bool is_simple_loop_header = TrySetSimpleLoopHeader(header);  // fills iset_
  DCHECK(is_simple_loop_header);

  // Generate preheader:
  // stc = <trip-count>;
  // vtc = stc - stc % VL;
  HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
  HInstruction* vtc = stc;
  if (needs_cleanup) {
    DCHECK(IsPowerOfTwo(vector_length_));
    HInstruction* rem = Insert(
        preheader, new (global_allocator_) HAnd(induc_type,
                                                stc,
                                                graph_->GetIntConstant(vector_length_ - 1)));
    vtc = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, rem));
  }

  // Generate runtime disambiguation test:
  // vtc = a != b ? vtc : 0;
  if (vector_runtime_test_a_ != nullptr) {
    HInstruction* rt = Insert(
        preheader,
        new (global_allocator_) HNotEqual(vector_runtime_test_a_, vector_runtime_test_b_));
    vtc = Insert(preheader,
                 new (global_allocator_) HSelect(rt, vtc, graph_->GetIntConstant(0), kNoDexPc));
    needs_cleanup = true;
  }

  // Generate vector loop:
  // for (i = 0; i < vtc; i += VL)
  //    <vectorized-loop-body>
  vector_mode_ = kVector;
  GenerateNewLoop(node,
                  block,
                  graph_->TransformLoopForVectorization(header, block, exit),
                  graph_->GetIntConstant(0),
                  vtc,
                  graph_->GetIntConstant(vector_length_));
  HLoopInformation* vloop = vector_header_->GetLoopInformation();

  // Generate cleanup loop, if needed:
  // for ( ; i < stc; i += 1)
  //    <loop-body>
  if (needs_cleanup) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_phi_,
                    stc,
                    graph_->GetIntConstant(1));
  }

  // Remove the original loop by disconnecting the body block
  // and removing all instructions from the header.
  block->DisconnectAndDelete();
  while (!header->GetFirstInstruction()->IsGoto()) {
    header->RemoveInstruction(header->GetFirstInstruction());
  }
  // Update loop hierarchy: the old header now resides in the
  // same outer loop as the old preheader.
  header->SetLoopInformation(preheader->GetLoopInformation());  // outward
  node->loop_info = vloop;
}

void HLoopOptimization::GenerateNewLoop(LoopNode* node,
                                        HBasicBlock* block,
                                        HBasicBlock* new_preheader,
                                        HInstruction* lo,
                                        HInstruction* hi,
                                        HInstruction* step) {
  Primitive::Type induc_type = Primitive::kPrimInt;
  // Prepare new loop.
  vector_map_->clear();
  vector_preheader_ = new_preheader;
  vector_header_ = vector_preheader_->GetSingleSuccessor();
  vector_body_ = vector_header_->GetSuccessors()[1];
  vector_phi_ = new (global_allocator_) HPhi(global_allocator_,
                                             kNoRegNumber,
                                             0,
                                             HPhi::ToPhiType(induc_type));
  // Generate header and prepare body.
  // for (i = lo; i < hi; i += step)
  //    <loop-body>
  HInstruction* cond = new (global_allocator_) HAboveOrEqual(vector_phi_, hi);
  vector_header_->AddPhi(vector_phi_);
  vector_header_->AddInstruction(cond);
  vector_header_->AddInstruction(new (global_allocator_) HIf(cond));
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
    DCHECK(vectorized_def);
  }
  // Generate body from the instruction map, but in original program order.
  HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    auto i = vector_map_->find(it.Current());
    if (i != vector_map_->end() && !i->second->IsInBlock()) {
      Insert(vector_body_, i->second);
      // Deal with instructions that need an environment, such as the scalar intrinsics.
      if (i->second->NeedsEnvironment()) {
        i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
      }
    }
  }
  // Finalize increment and phi.
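  // The resulting induction (illustrative): i = Phi(lo, i + step), with the increment
  // appended at the end of the new loop body below.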
  HInstruction* inc = new (global_allocator_) HAdd(induc_type, vector_phi_, step);
  vector_phi_->AddInput(lo);
  vector_phi_->AddInput(Insert(vector_body_, inc));
}

// TODO: accept reductions at left-hand-side, mixed-type store idioms, etc.
bool HLoopOptimization::VectorizeDef(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code) {
  // Accept a left-hand-side array base[index] for
  // (1) supported vector type,
  // (2) loop-invariant base,
  // (3) unit stride index,
  // (4) vectorizable right-hand-side value.
  uint64_t restrictions = kNone;
  if (instruction->IsArraySet()) {
    Primitive::Type type = instruction->AsArraySet()->GetComponentType();
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* value = instruction->InputAt(2);
    HInstruction* offset = nullptr;
    if (TrySetVectorType(type, &restrictions) &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, &offset) &&
        VectorizeUse(node, value, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), vector_map_->Get(value), type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ true));
      }
      return true;
    }
    return false;
  }
  // Branch back okay.
  if (instruction->IsGoto()) {
    return true;
  }
  // Otherwise accept only expressions with no effects outside the immediate loop-body.
  // Note that actual uses are inspected during right-hand-side tree traversal.
  return !IsUsedOutsideLoop(node->loop_info, instruction) && !instruction->DoesAnyWrite();
}

// TODO: more operations and intrinsics, detect saturation arithmetic, etc.
bool HLoopOptimization::VectorizeUse(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code,
                                     Primitive::Type type,
                                     uint64_t restrictions) {
  // Accept anything for which code has already been generated.
  if (generate_code) {
    if (vector_map_->find(instruction) != vector_map_->end()) {
      return true;
    }
  }
  // Continue the right-hand-side tree traversal, passing in proper
  // types and vector restrictions along the way. During code generation,
  // all new nodes are drawn from the global allocator.
  if (node->loop_info->IsDefinedOutOfTheLoop(instruction)) {
    // Accept invariant use, using scalar expansion.
    if (generate_code) {
      GenerateVecInv(instruction, type);
    }
    return true;
  } else if (instruction->IsArrayGet()) {
    // Strings are different, with a different offset to the actual data
    // and some compressed to save memory. For now, all cases are rejected
    // to avoid the complexity.
    if (instruction->AsArrayGet()->IsStringCharAt()) {
      return false;
    }
    // Accept a right-hand-side array base[index] for
    // (1) exact matching vector type,
    // (2) loop-invariant base,
    // (3) unit stride index.
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* offset = nullptr;
    if (type == instruction->GetType() &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, &offset)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), nullptr, type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false));
      }
      return true;
    }
  } else if (instruction->IsTypeConversion()) {
    // Accept particular type conversions.
    HTypeConversion* conversion = instruction->AsTypeConversion();
    HInstruction* opa = conversion->InputAt(0);
    Primitive::Type from = conversion->GetInputType();
    Primitive::Type to = conversion->GetResultType();
    if ((to == Primitive::kPrimByte ||
         to == Primitive::kPrimChar ||
         to == Primitive::kPrimShort) && from == Primitive::kPrimInt) {
      // Accept a "narrowing" type conversion from a "wider" computation for
      // (1) conversion into final required type,
      // (2) vectorizable operand,
      // (3) "wider" operations cannot bring in higher order bits.
      if (to == type && VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) {
        if (generate_code) {
          if (vector_mode_ == kVector) {
            vector_map_->Put(instruction, vector_map_->Get(opa));  // operand pass-through
          } else {
            GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
          }
        }
        return true;
      }
    } else if (to == Primitive::kPrimFloat && from == Primitive::kPrimInt) {
      DCHECK_EQ(to, type);
      // Accept int to float conversion for
      // (1) supported int,
      // (2) vectorizable operand.
      if (TrySetVectorType(from, &restrictions) &&
          VectorizeUse(node, opa, generate_code, from, restrictions)) {
        if (generate_code) {
          GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
        }
        return true;
      }
    }
    return false;
  } else if (instruction->IsNeg() || instruction->IsNot() || instruction->IsBooleanNot()) {
    // Accept unary operator for vectorizable operand.
    HInstruction* opa = instruction->InputAt(0);
    if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
      }
      return true;
    }
  } else if (instruction->IsAdd() || instruction->IsSub() ||
             instruction->IsMul() || instruction->IsDiv() ||
             instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
    // Deal with vector restrictions.
    if ((instruction->IsMul() && HasVectorRestrictions(restrictions, kNoMul)) ||
        (instruction->IsDiv() && HasVectorRestrictions(restrictions, kNoDiv))) {
      return false;
    }
    // Accept binary operator for vectorizable operands.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
        VectorizeUse(node, opb, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
      }
      return true;
    }
  } else if (instruction->IsShl() || instruction->IsShr() || instruction->IsUShr()) {
    // Recognize vectorization idioms.
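    // For instance (illustrative), a halving add such as (a + b + 1) >> 1 over promoted
    // byte operands is matched here so that it can be mapped onto a single SIMD
    // halving-add rather than separate widening operations.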
    if (VectorizeHalvingAddIdiom(node, instruction, generate_code, type, restrictions)) {
      return true;
    }
    // Deal with vector restrictions.
    if ((HasVectorRestrictions(restrictions, kNoShift)) ||
        (instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
      return false;  // unsupported instruction
    } else if ((instruction->IsShr() || instruction->IsUShr()) &&
               HasVectorRestrictions(restrictions, kNoHiBits)) {
      return false;  // hibits may impact lobits; TODO: we can do better!
    }
    // Accept shift operator for vectorizable/invariant operands.
    // TODO: accept symbolic, albeit loop invariant shift factors.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    int64_t value = 0;
    if (VectorizeUse(node, opa, generate_code, type, restrictions) && IsInt64AndGet(opb, &value)) {
      // Make sure shift distance only looks at lower bits, as defined for sequential shifts.
      int64_t mask = (instruction->GetType() == Primitive::kPrimLong)
          ? kMaxLongShiftDistance
          : kMaxIntShiftDistance;
      int64_t distance = value & mask;
      // Restrict shift distance to packed data type width.
      int64_t max_distance = Primitive::ComponentSize(type) * 8;
      if (0 <= distance && distance < max_distance) {
        if (generate_code) {
          HInstruction* s = graph_->GetIntConstant(distance);
          GenerateVecOp(instruction, vector_map_->Get(opa), s, type);
        }
        return true;
      }
    }
  } else if (instruction->IsInvokeStaticOrDirect()) {
    // Accept particular intrinsics.
    HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
    switch (invoke->GetIntrinsic()) {
      case Intrinsics::kMathAbsInt:
      case Intrinsics::kMathAbsLong:
      case Intrinsics::kMathAbsFloat:
      case Intrinsics::kMathAbsDouble: {
        // Deal with vector restrictions.
        if (HasVectorRestrictions(restrictions, kNoAbs) ||
            HasVectorRestrictions(restrictions, kNoHiBits)) {
          // TODO: we can do better for some hibits cases.
          return false;
        }
        // Accept ABS(x) for vectorizable operand.
        HInstruction* opa = instruction->InputAt(0);
        if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
          if (generate_code) {
            GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
          }
          return true;
        }
        return false;
      }
      default:
        return false;
    }  // switch
  }
  return false;
}

bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restrictions) {
  const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
  switch (compiler_driver_->GetInstructionSet()) {
    case kArm:
    case kThumb2:
      return false;
    case kArm64:
      // Allow vectorization for all ARM devices, because Android assumes that
      // ARMv8 AArch64 always supports advanced SIMD.
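      // The lengths below assume 128-bit SIMD registers, e.g. 16 packed bytes or
      // 4 packed ints per vector (illustrative note, not from the original source).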
      switch (type) {
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
          *restrictions |= kNoDiv | kNoAbs;
          return TrySetVectorLength(16);
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
          *restrictions |= kNoDiv | kNoAbs;
          return TrySetVectorLength(8);
        case Primitive::kPrimInt:
          *restrictions |= kNoDiv;
          return TrySetVectorLength(4);
        case Primitive::kPrimLong:
          *restrictions |= kNoDiv | kNoMul;
          return TrySetVectorLength(2);
        case Primitive::kPrimFloat:
          return TrySetVectorLength(4);
        case Primitive::kPrimDouble:
          return TrySetVectorLength(2);
        default:
          return false;
      }
    case kX86:
    case kX86_64:
      // Allow vectorization for SSE4-enabled X86 devices only (128-bit vectors).
      if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
        switch (type) {
          case Primitive::kPrimBoolean:
          case Primitive::kPrimByte:
            *restrictions |= kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd;
            return TrySetVectorLength(16);
          case Primitive::kPrimChar:
          case Primitive::kPrimShort:
            *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd;
            return TrySetVectorLength(8);
          case Primitive::kPrimInt:
            *restrictions |= kNoDiv;
            return TrySetVectorLength(4);
          case Primitive::kPrimLong:
            *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs;
            return TrySetVectorLength(2);
          case Primitive::kPrimFloat:
            return TrySetVectorLength(4);
          case Primitive::kPrimDouble:
            return TrySetVectorLength(2);
          default:
            break;
        }  // switch type
      }
      return false;
    case kMips:
    case kMips64:
      // TODO: implement MIPS SIMD.
      return false;
    default:
      return false;
  }  // switch instruction set
}

bool HLoopOptimization::TrySetVectorLength(uint32_t length) {
  DCHECK(IsPowerOfTwo(length) && length >= 2u);
  // First time set?
  if (vector_length_ == 0) {
    vector_length_ = length;
  }
  // Different types are acceptable within a loop-body, as long as all the corresponding vector
  // lengths match exactly to obtain a uniform traversal through the vector iteration space
  // (idiomatic exceptions to this rule can be handled by further unrolling sub-expressions).
  return vector_length_ == length;
}

void HLoopOptimization::GenerateVecInv(HInstruction* org, Primitive::Type type) {
  if (vector_map_->find(org) == vector_map_->end()) {
    // In scalar code, just use a self pass-through for scalar invariants
    // (viz. expression remains itself).
    if (vector_mode_ == kSequential) {
      vector_map_->Put(org, org);
      return;
    }
    // In vector code, explicit scalar expansion is needed.
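    // E.g. (illustrative), an invariant scalar x becomes the vector [x, x, ..., x]
    // with vector_length_ lanes, materialized once in the loop preheader.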
    HInstruction* vector = new (global_allocator_) HVecReplicateScalar(
        global_allocator_, org, type, vector_length_);
    vector_map_->Put(org, Insert(vector_preheader_, vector));
  }
}

void HLoopOptimization::GenerateVecSub(HInstruction* org, HInstruction* offset) {
  if (vector_map_->find(org) == vector_map_->end()) {
    HInstruction* subscript = vector_phi_;
    if (offset != nullptr) {
      subscript = new (global_allocator_) HAdd(Primitive::kPrimInt, subscript, offset);
      if (org->IsPhi()) {
        Insert(vector_body_, subscript);  // lacks layout placeholder
      }
    }
    vector_map_->Put(org, subscript);
  }
}

void HLoopOptimization::GenerateVecMem(HInstruction* org,
                                       HInstruction* opa,
                                       HInstruction* opb,
                                       Primitive::Type type) {
  HInstruction* vector = nullptr;
  if (vector_mode_ == kVector) {
    // Vector store or load.
    if (opb != nullptr) {
      vector = new (global_allocator_) HVecStore(
          global_allocator_, org->InputAt(0), opa, opb, type, vector_length_);
    } else {
      bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
      vector = new (global_allocator_) HVecLoad(
          global_allocator_, org->InputAt(0), opa, type, vector_length_, is_string_char_at);
    }
  } else {
    // Scalar store or load.
    DCHECK(vector_mode_ == kSequential);
    if (opb != nullptr) {
      vector = new (global_allocator_) HArraySet(org->InputAt(0), opa, opb, type, kNoDexPc);
    } else {
      bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
      vector = new (global_allocator_) HArrayGet(
          org->InputAt(0), opa, type, kNoDexPc, is_string_char_at);
    }
  }
  vector_map_->Put(org, vector);
}

#define GENERATE_VEC(x, y) \
  if (vector_mode_ == kVector) { \
    vector = (x); \
  } else { \
    DCHECK(vector_mode_ == kSequential); \
    vector = (y); \
  } \
  break;

void HLoopOptimization::GenerateVecOp(HInstruction* org,
                                      HInstruction* opa,
                                      HInstruction* opb,
                                      Primitive::Type type) {
  if (vector_mode_ == kSequential) {
    // Scalar code follows implicit integral promotion.
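    // For example (illustrative), a byte + byte in the sequential cleanup loop is
    // evaluated as an int addition, mirroring the usual Java arithmetic promotion.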
    if (type == Primitive::kPrimBoolean ||
        type == Primitive::kPrimByte ||
        type == Primitive::kPrimChar ||
        type == Primitive::kPrimShort) {
      type = Primitive::kPrimInt;
    }
  }
  HInstruction* vector = nullptr;
  switch (org->GetKind()) {
    case HInstruction::kNeg:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_),
          new (global_allocator_) HNeg(type, opa));
    case HInstruction::kNot:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_),
          new (global_allocator_) HNot(type, opa));
    case HInstruction::kBooleanNot:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_),
          new (global_allocator_) HBooleanNot(opa));
    case HInstruction::kTypeConversion:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
          new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_),
          new (global_allocator_) HTypeConversion(type, opa, kNoDexPc));
    case HInstruction::kAdd:
      GENERATE_VEC(
          new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HAdd(type, opa, opb));
    case HInstruction::kSub:
      GENERATE_VEC(
          new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HSub(type, opa, opb));
    case HInstruction::kMul:
      GENERATE_VEC(
          new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HMul(type, opa, opb));
    case HInstruction::kDiv:
      GENERATE_VEC(
          new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HDiv(type, opa, opb, kNoDexPc));
    case HInstruction::kAnd:
      GENERATE_VEC(
          new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HAnd(type, opa, opb));
    case HInstruction::kOr:
      GENERATE_VEC(
          new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HOr(type, opa, opb));
    case HInstruction::kXor:
      GENERATE_VEC(
          new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HXor(type, opa, opb));
    case HInstruction::kShl:
      GENERATE_VEC(
          new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HShl(type, opa, opb));
    case HInstruction::kShr:
      GENERATE_VEC(
          new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HShr(type, opa, opb));
    case HInstruction::kUShr:
      GENERATE_VEC(
          new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_),
          new (global_allocator_) HUShr(type, opa, opb));
    case HInstruction::kInvokeStaticOrDirect: {
      HInvokeStaticOrDirect* invoke = org->AsInvokeStaticOrDirect();
      if (vector_mode_ == kVector) {
        switch (invoke->GetIntrinsic()) {
          case Intrinsics::kMathAbsInt:
          case Intrinsics::kMathAbsLong:
          case Intrinsics::kMathAbsFloat:
          case Intrinsics::kMathAbsDouble:
            DCHECK(opb == nullptr);
            vector = new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_);
            break;
          default:
            LOG(FATAL) << "Unsupported SIMD intrinsic";
            UNREACHABLE();
        }  // switch invoke
      } else {
        // In scalar code, simply clone the method invoke, and replace its operands with the
        // corresponding new scalar instructions in the loop. The instruction will get an
        // environment while being inserted from the instruction map in original program order.
        DCHECK(vector_mode_ == kSequential);
        HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
            global_allocator_,
            invoke->GetNumberOfArguments(),
            invoke->GetType(),
            invoke->GetDexPc(),
            invoke->GetDexMethodIndex(),
            invoke->GetResolvedMethod(),
            invoke->GetDispatchInfo(),
            invoke->GetInvokeType(),
            invoke->GetTargetMethod(),
            invoke->GetClinitCheckRequirement());
        HInputsRef inputs = invoke->GetInputs();
        for (size_t index = 0; index < inputs.size(); ++index) {
          new_invoke->SetArgumentAt(index, vector_map_->Get(inputs[index]));
        }
        new_invoke->SetIntrinsic(invoke->GetIntrinsic(),
                                 kNeedsEnvironmentOrCache,
                                 kNoSideEffects,
                                 kNoThrow);
        vector = new_invoke;
      }
      break;
    }
    default:
      break;
  }  // switch
  CHECK(vector != nullptr) << "Unsupported SIMD operator";
  vector_map_->Put(org, vector);
}

#undef GENERATE_VEC

//
// Vectorization idioms.
//

// Method recognizes the following idioms:
//   rounding halving add (a + b + 1) >> 1 for unsigned/signed operands a, b
//   regular halving add  (a + b)     >> 1 for unsigned/signed operands a, b
// Provided that the operands are promoted to a wider form to do the arithmetic and
// then cast back to narrower form, the idioms can be mapped into an efficient SIMD
// implementation that operates directly in narrower form (plus one extra bit).
// TODO: current version recognizes implicit byte/short/char widening only;
//       explicit widening from int to long could be added later.
bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
                                                 HInstruction* instruction,
                                                 bool generate_code,
                                                 Primitive::Type type,
                                                 uint64_t restrictions) {
  // Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1
  // (note that whether the sign bit in higher precision is shifted in has no effect
  // on the narrow precision computed by the idiom).
  int64_t value = 0;
  if ((instruction->IsShr() ||
       instruction->IsUShr()) &&
      IsInt64AndGet(instruction->InputAt(1), &value) && value == 1) {
    //
    // TODO: make following code less sensitive to associativity and commutativity differences.
    //
    HInstruction* x = instruction->InputAt(0);
    // Test for an optional rounding part (x + 1) >> 1.
    bool is_rounded = false;
    if (x->IsAdd() && IsInt64AndGet(x->InputAt(1), &value) && value == 1) {
      x = x->InputAt(0);
      is_rounded = true;
    }
    // Test for a core addition (a + b) >> 1 (possibly rounded), either unsigned or signed.
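    // A typical match (illustrative): ((a & 0xff) + (b & 0xff) + 1) >> 1, i.e. a
    // rounded unsigned halving add over byte data promoted to int.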
    if (x->IsAdd()) {
      HInstruction* a = x->InputAt(0);
      HInstruction* b = x->InputAt(1);
      HInstruction* r = nullptr;
      HInstruction* s = nullptr;
      bool is_unsigned = false;
      if (IsZeroExtensionAndGet(a, type, &r) && IsZeroExtensionAndGet(b, type, &s)) {
        is_unsigned = true;
      } else if (IsSignExtensionAndGet(a, type, &r) && IsSignExtensionAndGet(b, type, &s)) {
        is_unsigned = false;
      } else {
        return false;
      }
      // Deal with vector restrictions.
      if ((!is_unsigned && HasVectorRestrictions(restrictions, kNoSignedHAdd)) ||
          (!is_rounded && HasVectorRestrictions(restrictions, kNoUnroundedHAdd))) {
        return false;
      }
      // Accept recognized halving add for vectorizable operands. Vectorized code uses the
      // shorthand idiomatic operation. Sequential code uses the original scalar expressions.
      DCHECK(r != nullptr && s != nullptr);
      if (VectorizeUse(node, r, generate_code, type, restrictions) &&
          VectorizeUse(node, s, generate_code, type, restrictions)) {
        if (generate_code) {
          if (vector_mode_ == kVector) {
            vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
                global_allocator_,
                vector_map_->Get(r),
                vector_map_->Get(s),
                type,
                vector_length_,
                is_unsigned,
                is_rounded));
          } else {
            VectorizeUse(node, instruction->InputAt(0), generate_code, type, restrictions);
            VectorizeUse(node, instruction->InputAt(1), generate_code, type, restrictions);
            GenerateVecOp(instruction,
                          vector_map_->Get(instruction->InputAt(0)),
                          vector_map_->Get(instruction->InputAt(1)),
                          type);
          }
        }
        return true;
      }
    }
  }
  return false;
}

//
// Helpers.
//

bool HLoopOptimization::TrySetPhiInduction(HPhi* phi, bool restrict_uses) {
  DCHECK(iset_->empty());
  ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
  if (set != nullptr) {
    for (HInstruction* i : *set) {
      // Check that, other than instructions that are no longer in the graph (removed earlier),
      // each instruction is removable and, when restrict uses are requested, other than for phi,
      // all uses are contained within the cycle.
      if (!i->IsInBlock()) {
        continue;
      } else if (!i->IsRemovable()) {
        return false;
      } else if (i != phi && restrict_uses) {
        for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
          if (set->find(use.GetUser()) == set->end()) {
            return false;
          }
        }
      }
      iset_->insert(i);  // copy
    }
    return true;
  }
  return false;
}

// Find: phi: Phi(init, addsub)
//       s:   SuspendCheck
//       c:   Condition(phi, bound)
//       i:   If(c)
// TODO: Find a less pattern matching approach?
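// For example (illustrative), the header generated for a plain counted loop such as
// for (int i = 0; i < n; i++) { ... } consists of exactly this phi/suspend-check/
// condition/if sequence and is accepted below.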
bool HLoopOptimization::TrySetSimpleLoopHeader(HBasicBlock* block) {
  DCHECK(iset_->empty());
  HInstruction* phi = block->GetFirstPhi();
  if (phi != nullptr &&
      phi->GetNext() == nullptr &&
      TrySetPhiInduction(phi->AsPhi(), /*restrict_uses*/ false)) {
    HInstruction* s = block->GetFirstInstruction();
    if (s != nullptr && s->IsSuspendCheck()) {
      HInstruction* c = s->GetNext();
      if (c != nullptr &&
          c->IsCondition() &&
          c->GetUses().HasExactlyOneElement() &&  // only used for termination
          !c->HasEnvironmentUses()) {  // unlikely, but not impossible
        HInstruction* i = c->GetNext();
        if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
          iset_->insert(c);
          iset_->insert(s);
          return true;
        }
      }
    }
  }
  return false;
}

bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    HInstruction* instruction = it.Current();
    if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
      return false;
    }
  }
  return true;
}

bool HLoopOptimization::IsUsedOutsideLoop(HLoopInformation* loop_info,
                                          HInstruction* instruction) {
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    if (use.GetUser()->GetBlock()->GetLoopInformation() != loop_info) {
      return true;
    }
  }
  return false;
}

bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
                                            HInstruction* instruction,
                                            bool collect_loop_uses,
                                            /*out*/ int32_t* use_count) {
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    HInstruction* user = use.GetUser();
    if (iset_->find(user) == iset_->end()) {  // not excluded?
      HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
      if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
        // If collect_loop_uses is set, simply keep adding those uses to the set.
        // Otherwise, reject uses inside the loop that were not already in the set.
        if (collect_loop_uses) {
          iset_->insert(user);
          continue;
        }
        return false;
      }
      ++*use_count;
    }
  }
  return true;
}

bool HLoopOptimization::TryReplaceWithLastValue(HLoopInformation* loop_info,
                                                HInstruction* instruction,
                                                HBasicBlock* block) {
  // Try to replace outside uses with the last value.
  if (induction_range_.CanGenerateLastValue(instruction)) {
    HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
    const HUseList<HInstruction*>& uses = instruction->GetUses();
    for (auto it = uses.begin(), end = uses.end(); it != end;) {
      HInstruction* user = it->GetUser();
      size_t index = it->GetIndex();
      ++it;  // increment before replacing
      if (iset_->find(user) == iset_->end()) {  // not excluded?
        if (kIsDebugBuild) {
          // We have checked earlier in 'IsOnlyUsedAfterLoop' that the use is after the loop.
          HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
          CHECK(other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info));
        }
        user->ReplaceInput(replacement, index);
        induction_range_.Replace(user, instruction, replacement);  // update induction
      }
    }
    const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
    for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
      HEnvironment* user = it->GetUser();
      size_t index = it->GetIndex();
      ++it;  // increment before replacing
      if (iset_->find(user->GetHolder()) == iset_->end()) {  // not excluded?
        HLoopInformation* other_loop_info = user->GetHolder()->GetBlock()->GetLoopInformation();
        // Only update environment uses after the loop.
        if (other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info)) {
          user->RemoveAsUserOfInput(index);
          user->SetRawEnvAt(index, replacement);
          replacement->AddEnvUseAt(user, index);
        }
      }
    }
    induction_simplication_count_++;
    return true;
  }
  return false;
}

bool HLoopOptimization::TryAssignLastValue(HLoopInformation* loop_info,
                                           HInstruction* instruction,
                                           HBasicBlock* block,
                                           bool collect_loop_uses) {
  // Assigning the last value is always successful if there are no uses.
  // Otherwise, it succeeds in a loop with no early exit by generating the
  // proper last value assignment.
  int32_t use_count = 0;
  return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
         (use_count == 0 ||
          (!IsEarlyExit(loop_info) && TryReplaceWithLastValue(loop_info, instruction, block)));
}

void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
  for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
    HInstruction* instruction = i.Current();
    if (instruction->IsDeadAndRemovable()) {
      simplified_ = true;
      instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
    }
  }
}

}  // namespace art