//===-- PeepholeOptimizer.cpp - Peephole Optimizations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended
//     to handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This
//     optimization will replace some uses of the pre-extension value with
//     uses of the sub-register of the result.
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we
//     can eliminate the "cmp" instruction.
//
//     Another instance, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use the flag from "sub", then we can
//     replace "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Bitcast pairs:
//
//       v1 = bitcast v0
//       v2 = bitcast v1
//          = v2
//     =>
//       v1 = bitcast v0
//          = v0
//
// - Optimize Loads:
//
//     Loads that can be folded into a later instruction. A load is foldable
//     if it loads to a virtual register and that virtual register has a
//     single use.
//
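//     For instance (illustrative only; the opcode names below are x86-style,
//     and which forms actually fold is target-specific):
//
//       %reg1 = MOV32rm <mem>
//       %reg2 = ADD32rr %reg0, %reg1<kill>
//     =>
//       %reg2 = ADD32rm %reg0, <mem>
//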
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));

STATISTIC(NumReuse, "Number of extension results reused");
STATISTIC(NumBitcasts, "Number of bitcasts eliminated");
STATISTIC(NumCmps, "Number of compares eliminated");
STATISTIC(NumImmFold, "Number of move immediates folded");
STATISTIC(NumLoadFold, "Number of loads folded");
STATISTIC(NumSelects, "Number of selects optimized");

namespace {
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineRegisterInfo   *MRI;
    MachineDominatorTree  *DT;  // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    bool optimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    bool optimizeSelect(MachineInstr *MI);
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
  };
}

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                      "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                    "Peephole Optimizations", false, false)

/// optimizeExtInstr - If the instruction is a copy-like instruction, i.e. it
/// reads a single register and writes a single register and it does not
/// modify the source, and if the source value is preserved as a sub-register
/// of the result, then replace all reachable uses of the source with the
/// subreg of the result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code.
/// Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with uses
  // of the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend the live
      // range of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}

/// optimizeBitcastInstr - If the instruction is a bitcast instruction A that
/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcasts a
/// value across register classes), the source is defined by another bitcast
/// instruction B, and the register class of the source of B matches the
/// register class of the def of A, then it is legal to replace all uses of
/// the def of A with the source of B, e.g.
///   %vreg0<def> = VMOVSR %vreg1
///   %vreg3<def> = VMOVRS %vreg0
/// Replace all uses of vreg3 with vreg1.
bool PeepholeOptimizer::optimizeBitcastInstr(MachineInstr *MI,
                                             MachineBasicBlock *MBB) {
  unsigned NumDefs = MI->getDesc().getNumDefs();
  unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;

  unsigned Def = 0;
  unsigned Src = 0;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef())
      Def = Reg;
    else if (Src)
      // Multiple sources?
      return false;
    else
      Src = Reg;
  }

  assert(Def && Src && "Malformed bitcast instruction!");

  MachineInstr *DefMI = MRI->getVRegDef(Src);
  if (!DefMI || !DefMI->isBitcast())
    return false;

  unsigned SrcSrc = 0;
  NumDefs = DefMI->getDesc().getNumDefs();
  NumSrcs = DefMI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (SrcSrc)
      // Multiple sources?
      return false;
    SrcSrc = Reg;
  }

  if (MRI->getRegClass(SrcSrc) != MRI->getRegClass(Def))
    return false;

  MRI->replaceRegWith(Def, SrcSrc);
  MRI->clearKillFlags(SrcSrc);
  MI->eraseFromParent();
  ++NumBitcasts;
  return true;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and
/// use the flag from the previous instruction.
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg, SrcReg2;
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
      (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}

/// Optimize a select instruction.
bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) {
  unsigned TrueOp = 0;
  unsigned FalseOp = 0;
  bool Optimizable = false;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
    return false;
  if (!Optimizable)
    return false;
  if (!TII->optimizeSelect(MI))
    return false;
  MI->eraseFromParent();
  ++NumSelects;
  return true;
}

/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads that define a virtual register, and only
/// when that register has a single use.
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                       unsigned &FoldAsLoadDefReg) {
  if (!MI->canFoldAsLoad() || !MI->mayLoad())
    return false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getNumDefs() != 1)
    return false;

  unsigned Reg = MI->getOperand(0).getReg();
  // To reduce compilation time, we check MRI->hasOneUse when inserting
  // loads. Strictly, it should be re-checked when processing the uses of the
  // load, since uses can be removed during peephole optimization.
  if (!MI->getOperand(0).getSubReg() &&
      TargetRegisterInfo::isVirtualRegister(Reg) &&
      MRI->hasOneUse(Reg)) {
    FoldAsLoadDefReg = Reg;
    return true;
  }
  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}

/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end());
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
  DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');

  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  unsigned FoldAsLoadDefReg;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();
    FoldAsLoadDefReg = 0;

    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      // We may be erasing MI below, increment MII now.
      ++MII;
      LocalMIs.insert(MI);

      // If MI belongs to one of the following categories, discard the
      // current load-folding candidate.
      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefReg = 0;
        continue;
      }
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefReg = 0;

      if ((MI->isBitcast() && optimizeBitcastInstr(MI, MBB)) ||
          (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
          (MI->isSelect() && optimizeSelect(MI))) {
        // MI is deleted.
        LocalMIs.erase(MI);
        Changed = true;
        continue;
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // optimizeExtInstr might have created new instructions after MI
        // and before the already incremented MII. Adjust MII so that the
        // next iteration sees the new instructions.
        MII = MI;
        ++MII;
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
        // We need to fold the load after optimizeCmpInstr, since
        // optimizeCmpInstr can enable folding by converting SUB to CMP.
        MachineInstr *DefMI = 0;
        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                      FoldAsLoadDefReg, DefMI);
        if (FoldMI) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
          DEBUG(dbgs() << "Replacing: " << *MI);
          DEBUG(dbgs() << "     With: " << *FoldMI);
          LocalMIs.erase(MI);
          LocalMIs.erase(DefMI);
          LocalMIs.insert(FoldMI);
          MI->eraseFromParent();
          DefMI->eraseFromParent();
          ++NumLoadFold;

          // MI is replaced with FoldMI.
          Changed = true;
          continue;
        }
      }
    }
  }

  return Changed;
}