1 //===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains a pass that splits the constant pool up into 'islands' 11 // which are scattered through-out the function. This is required due to the 12 // limited pc-relative displacements that ARM has. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #define DEBUG_TYPE "arm-cp-islands" 17 #include "ARM.h" 18 #include "ARMMachineFunctionInfo.h" 19 #include "Thumb2InstrInfo.h" 20 #include "MCTargetDesc/ARMAddressingModes.h" 21 #include "llvm/CodeGen/MachineConstantPool.h" 22 #include "llvm/CodeGen/MachineFunctionPass.h" 23 #include "llvm/CodeGen/MachineJumpTableInfo.h" 24 #include "llvm/CodeGen/MachineRegisterInfo.h" 25 #include "llvm/Target/TargetData.h" 26 #include "llvm/Target/TargetMachine.h" 27 #include "llvm/Support/Debug.h" 28 #include "llvm/Support/ErrorHandling.h" 29 #include "llvm/Support/Format.h" 30 #include "llvm/Support/raw_ostream.h" 31 #include "llvm/ADT/SmallSet.h" 32 #include "llvm/ADT/SmallVector.h" 33 #include "llvm/ADT/STLExtras.h" 34 #include "llvm/ADT/Statistic.h" 35 #include "llvm/Support/CommandLine.h" 36 #include <algorithm> 37 using namespace llvm; 38 39 STATISTIC(NumCPEs, "Number of constpool entries"); 40 STATISTIC(NumSplit, "Number of uncond branches inserted"); 41 STATISTIC(NumCBrFixed, "Number of cond branches fixed"); 42 STATISTIC(NumUBrFixed, "Number of uncond branches fixed"); 43 STATISTIC(NumTBs, "Number of table branches generated"); 44 STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk"); 45 STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk"); 46 STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed"); 47 STATISTIC(NumJTMoved, "Number of jump table destination blocks moved"); 48 STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted"); 49 50 51 static cl::opt<bool> 52 AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true), 53 cl::desc("Adjust basic block layout to better use TB[BH]")); 54 55 // FIXME: This option should be removed once it has received sufficient testing. 56 static cl::opt<bool> 57 AlignConstantIslands("arm-align-constant-islands", cl::Hidden, cl::init(true), 58 cl::desc("Align constant islands in code")); 59 60 /// UnknownPadding - Return the worst case padding that could result from 61 /// unknown offset bits. This does not include alignment padding caused by 62 /// known offset bits. 63 /// 64 /// @param LogAlign log2(alignment) 65 /// @param KnownBits Number of known low offset bits. 66 static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) { 67 if (KnownBits < LogAlign) 68 return (1u << LogAlign) - (1u << KnownBits); 69 return 0; 70 } 71 72 /// WorstCaseAlign - Assuming only the low KnownBits bits in Offset are exact, 73 /// add padding such that: 74 /// 75 /// 1. The result is aligned to 1 << LogAlign. 76 /// 77 /// 2. No other value of the unknown bits would require more padding. 78 /// 79 /// This may add more padding than is required to satisfy just one of the 80 /// constraints. It is necessary to compute alignment this way to guarantee 81 /// that we don't underestimate the padding before an aligned block. 
If the 82 /// real padding before a block is larger than we think, constant pool entries 83 /// may go out of range. 84 static inline unsigned WorstCaseAlign(unsigned Offset, unsigned LogAlign, 85 unsigned KnownBits) { 86 // Add the worst possible padding that the unknown bits could cause. 87 Offset += UnknownPadding(LogAlign, KnownBits); 88 89 // Then align the result. 90 return RoundUpToAlignment(Offset, 1u << LogAlign); 91 } 92 93 namespace { 94 /// ARMConstantIslands - Due to limited PC-relative displacements, ARM 95 /// requires constant pool entries to be scattered among the instructions 96 /// inside a function. To do this, it completely ignores the normal LLVM 97 /// constant pool; instead, it places constants wherever it feels like with 98 /// special instructions. 99 /// 100 /// The terminology used in this pass includes: 101 /// Islands - Clumps of constants placed in the function. 102 /// Water - Potential places where an island could be formed. 103 /// CPE - A constant pool entry that has been placed somewhere, which 104 /// tracks a list of users. 105 class ARMConstantIslands : public MachineFunctionPass { 106 /// BasicBlockInfo - Information about the offset and size of a single 107 /// basic block. 108 struct BasicBlockInfo { 109 /// Offset - Distance from the beginning of the function to the beginning 110 /// of this basic block. 111 /// 112 /// The offset is always aligned as required by the basic block. 113 unsigned Offset; 114 115 /// Size - Size of the basic block in bytes. If the block contains 116 /// inline assembly, this is a worst case estimate. 117 /// 118 /// The size does not include any alignment padding whether from the 119 /// beginning of the block, or from an aligned jump table at the end. 120 unsigned Size; 121 122 /// KnownBits - The number of low bits in Offset that are known to be 123 /// exact. The remaining bits of Offset are an upper bound. 124 uint8_t KnownBits; 125 126 /// Unalign - When non-zero, the block contains instructions (inline asm) 127 /// of unknown size. The real size may be smaller than Size bytes by a 128 /// multiple of 1 << Unalign. 129 uint8_t Unalign; 130 131 /// PostAlign - When non-zero, the block terminator contains a .align 132 /// directive, so the end of the block is aligned to 1 << PostAlign 133 /// bytes. 134 uint8_t PostAlign; 135 136 BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0), 137 PostAlign(0) {} 138 139 /// Compute the number of known offset bits internally to this block. 140 /// This number should be used to predict worst case padding when 141 /// splitting the block. 142 unsigned internalKnownBits() const { 143 return Unalign ? Unalign : KnownBits; 144 } 145 146 /// Compute the offset immediately following this block. If LogAlign is 147 /// specified, return the offset the successor block will get if it has 148 /// this alignment. 149 unsigned postOffset(unsigned LogAlign = 0) const { 150 unsigned PO = Offset + Size; 151 unsigned LA = std::max(unsigned(PostAlign), LogAlign); 152 if (!LA) 153 return PO; 154 // Add alignment padding from the terminator. 155 return WorstCaseAlign(PO, LA, internalKnownBits()); 156 } 157 158 /// Compute the number of known low bits of postOffset. If this block 159 /// contains inline asm, the number of known bits drops to the 160 /// instruction alignment. An aligned terminator may increase the number 161 /// of know bits. 162 /// If LogAlign is given, also consider the alignment of the next block. 
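    /// Illustrative example (added commentary, not from the original source):
    /// a block with KnownBits = 4 but Unalign = 1 (it contains inline asm) and
    /// PostAlign = 2 yields postKnownBits() = 2; the inline asm drops the
    /// known bits to 1, but the aligned terminator guarantees the two low
    /// bits of postOffset are zero.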
163 unsigned postKnownBits(unsigned LogAlign = 0) const { 164 return std::max(std::max(unsigned(PostAlign), LogAlign), 165 internalKnownBits()); 166 } 167 }; 168 169 std::vector<BasicBlockInfo> BBInfo; 170 171 /// WaterList - A sorted list of basic blocks where islands could be placed 172 /// (i.e. blocks that don't fall through to the following block, due 173 /// to a return, unreachable, or unconditional branch). 174 std::vector<MachineBasicBlock*> WaterList; 175 176 /// NewWaterList - The subset of WaterList that was created since the 177 /// previous iteration by inserting unconditional branches. 178 SmallSet<MachineBasicBlock*, 4> NewWaterList; 179 180 typedef std::vector<MachineBasicBlock*>::iterator water_iterator; 181 182 /// CPUser - One user of a constant pool, keeping the machine instruction 183 /// pointer, the constant pool being referenced, and the max displacement 184 /// allowed from the instruction to the CP. The HighWaterMark records the 185 /// highest basic block where a new CPEntry can be placed. To ensure this 186 /// pass terminates, the CP entries are initially placed at the end of the 187 /// function and then move monotonically to lower addresses. The 188 /// exception to this rule is when the current CP entry for a particular 189 /// CPUser is out of range, but there is another CP entry for the same 190 /// constant value in range. We want to use the existing in-range CP 191 /// entry, but if it later moves out of range, the search for new water 192 /// should resume where it left off. The HighWaterMark is used to record 193 /// that point. 194 struct CPUser { 195 MachineInstr *MI; 196 MachineInstr *CPEMI; 197 MachineBasicBlock *HighWaterMark; 198 private: 199 unsigned MaxDisp; 200 public: 201 bool NegOk; 202 bool IsSoImm; 203 bool KnownAlignment; 204 CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp, 205 bool neg, bool soimm) 206 : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm), 207 KnownAlignment(false) { 208 HighWaterMark = CPEMI->getParent(); 209 } 210 /// getMaxDisp - Returns the maximum displacement supported by MI. 211 /// Correct for unknown alignment. 212 /// Conservatively subtract 2 bytes to handle weird alignment effects. 213 unsigned getMaxDisp() const { 214 return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2; 215 } 216 }; 217 218 /// CPUsers - Keep track of all of the machine instructions that use various 219 /// constant pools and their max displacement. 220 std::vector<CPUser> CPUsers; 221 222 /// CPEntry - One per constant pool entry, keeping the machine instruction 223 /// pointer, the constpool index, and the number of CPUser's which 224 /// reference this entry. 225 struct CPEntry { 226 MachineInstr *CPEMI; 227 unsigned CPI; 228 unsigned RefCount; 229 CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0) 230 : CPEMI(cpemi), CPI(cpi), RefCount(rc) {} 231 }; 232 233 /// CPEntries - Keep track of all of the constant pool entry machine 234 /// instructions. For each original constpool index (i.e. those that 235 /// existed upon entry to this pass), it keeps a vector of entries. 236 /// Original elements are cloned as we go along; the clones are 237 /// put in the vector of the original element, but have distinct CPIs. 238 std::vector<std::vector<CPEntry> > CPEntries; 239 240 /// ImmBranch - One per immediate branch, keeping the machine instruction 241 /// pointer, conditional or unconditional, the max displacement, 242 /// and (if isCond is true) the corresponding unconditional branch 243 /// opcode. 
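  /// Illustrative example (added commentary): a conditional t2Bcc is recorded
  /// with UncondBr = ARM::t2B so that fixupConditionalBr can later rewrite it
  /// as an inverted conditional branch followed by an unconditional branch to
  /// the original destination.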
244 struct ImmBranch { 245 MachineInstr *MI; 246 unsigned MaxDisp : 31; 247 bool isCond : 1; 248 int UncondBr; 249 ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr) 250 : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {} 251 }; 252 253 /// ImmBranches - Keep track of all the immediate branch instructions. 254 /// 255 std::vector<ImmBranch> ImmBranches; 256 257 /// PushPopMIs - Keep track of all the Thumb push / pop instructions. 258 /// 259 SmallVector<MachineInstr*, 4> PushPopMIs; 260 261 /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions. 262 SmallVector<MachineInstr*, 4> T2JumpTables; 263 264 /// HasFarJump - True if any far jump instruction has been emitted during 265 /// the branch fix up pass. 266 bool HasFarJump; 267 268 MachineFunction *MF; 269 MachineConstantPool *MCP; 270 const ARMBaseInstrInfo *TII; 271 const ARMSubtarget *STI; 272 ARMFunctionInfo *AFI; 273 bool isThumb; 274 bool isThumb1; 275 bool isThumb2; 276 public: 277 static char ID; 278 ARMConstantIslands() : MachineFunctionPass(ID) {} 279 280 virtual bool runOnMachineFunction(MachineFunction &MF); 281 282 virtual const char *getPassName() const { 283 return "ARM constant island placement and branch shortening pass"; 284 } 285 286 private: 287 void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs); 288 CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI); 289 unsigned getCPELogAlign(const MachineInstr *CPEMI); 290 void scanFunctionJumpTables(); 291 void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs); 292 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI); 293 void updateForInsertedWaterBlock(MachineBasicBlock *NewBB); 294 void adjustBBOffsetsAfter(MachineBasicBlock *BB); 295 bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI); 296 int findInRangeCPEntry(CPUser& U, unsigned UserOffset); 297 bool findAvailableWater(CPUser&U, unsigned UserOffset, 298 water_iterator &WaterIter); 299 void createNewWater(unsigned CPUserIndex, unsigned UserOffset, 300 MachineBasicBlock *&NewMBB); 301 bool handleConstantPoolUser(unsigned CPUserIndex); 302 void removeDeadCPEMI(MachineInstr *CPEMI); 303 bool removeUnusedCPEntries(); 304 bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset, 305 MachineInstr *CPEMI, unsigned Disp, bool NegOk, 306 bool DoDump = false); 307 bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water, 308 CPUser &U, unsigned &Growth); 309 bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp); 310 bool fixupImmediateBr(ImmBranch &Br); 311 bool fixupConditionalBr(ImmBranch &Br); 312 bool fixupUnconditionalBr(ImmBranch &Br); 313 bool undoLRSpillRestore(); 314 bool mayOptimizeThumb2Instruction(const MachineInstr *MI) const; 315 bool optimizeThumb2Instructions(); 316 bool optimizeThumb2Branches(); 317 bool reorderThumb2JumpTables(); 318 bool optimizeThumb2JumpTables(); 319 MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB, 320 MachineBasicBlock *JTBB); 321 322 void computeBlockSize(MachineBasicBlock *MBB); 323 unsigned getOffsetOf(MachineInstr *MI) const; 324 unsigned getUserOffset(CPUser&) const; 325 void dumpBBs(); 326 void verify(); 327 328 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset, 329 unsigned Disp, bool NegativeOK, bool IsSoImm = false); 330 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset, 331 const CPUser &U) { 332 return isOffsetInRange(UserOffset, TrialOffset, 333 U.getMaxDisp(), U.NegOk, U.IsSoImm); 334 } 335 }; 336 char 
ARMConstantIslands::ID = 0; 337 } 338 339 /// verify - check BBOffsets, BBSizes, alignment of islands 340 void ARMConstantIslands::verify() { 341 #ifndef NDEBUG 342 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end(); 343 MBBI != E; ++MBBI) { 344 MachineBasicBlock *MBB = MBBI; 345 unsigned Align = MBB->getAlignment(); 346 unsigned MBBId = MBB->getNumber(); 347 assert(BBInfo[MBBId].Offset % (1u << Align) == 0); 348 assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset); 349 } 350 DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n"); 351 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) { 352 CPUser &U = CPUsers[i]; 353 unsigned UserOffset = getUserOffset(U); 354 // Verify offset using the real max displacement without the safety 355 // adjustment. 356 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk, 357 /* DoDump = */ true)) { 358 DEBUG(dbgs() << "OK\n"); 359 continue; 360 } 361 DEBUG(dbgs() << "Out of range.\n"); 362 dumpBBs(); 363 DEBUG(MF->dump()); 364 llvm_unreachable("Constant pool entry out of range!"); 365 } 366 #endif 367 } 368 369 /// print block size and offset information - debugging 370 void ARMConstantIslands::dumpBBs() { 371 DEBUG({ 372 for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) { 373 const BasicBlockInfo &BBI = BBInfo[J]; 374 dbgs() << format("%08x BB#%u\t", BBI.Offset, J) 375 << " kb=" << unsigned(BBI.KnownBits) 376 << " ua=" << unsigned(BBI.Unalign) 377 << " pa=" << unsigned(BBI.PostAlign) 378 << format(" size=%#x\n", BBInfo[J].Size); 379 } 380 }); 381 } 382 383 /// createARMConstantIslandPass - returns an instance of the constpool 384 /// island pass. 385 FunctionPass *llvm::createARMConstantIslandPass() { 386 return new ARMConstantIslands(); 387 } 388 389 bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) { 390 MF = &mf; 391 MCP = mf.getConstantPool(); 392 393 DEBUG(dbgs() << "***** ARMConstantIslands: " 394 << MCP->getConstants().size() << " CP entries, aligned to " 395 << MCP->getConstantPoolAlignment() << " bytes *****\n"); 396 397 TII = (const ARMBaseInstrInfo*)MF->getTarget().getInstrInfo(); 398 AFI = MF->getInfo<ARMFunctionInfo>(); 399 STI = &MF->getTarget().getSubtarget<ARMSubtarget>(); 400 401 isThumb = AFI->isThumbFunction(); 402 isThumb1 = AFI->isThumb1OnlyFunction(); 403 isThumb2 = AFI->isThumb2Function(); 404 405 HasFarJump = false; 406 407 // This pass invalidates liveness information when it splits basic blocks. 408 MF->getRegInfo().invalidateLiveness(); 409 410 // Renumber all of the machine basic blocks in the function, guaranteeing that 411 // the numbers agree with the position of the block in the function. 412 MF->RenumberBlocks(); 413 414 // Try to reorder and otherwise adjust the block layout to make good use 415 // of the TB[BH] instructions. 416 bool MadeChange = false; 417 if (isThumb2 && AdjustJumpTableBlocks) { 418 scanFunctionJumpTables(); 419 MadeChange |= reorderThumb2JumpTables(); 420 // Data is out of date, so clear it. It'll be re-computed later. 421 T2JumpTables.clear(); 422 // Blocks may have shifted around. Keep the numbering up to date. 423 MF->RenumberBlocks(); 424 } 425 426 // Thumb1 functions containing constant pools get 4-byte alignment. 427 // This is so we can keep exact track of where the alignment padding goes. 428 429 // ARM and Thumb2 functions need to be 4-byte aligned. 430 if (!isThumb1) 431 MF->EnsureAlignment(2); // 2 = log2(4) 432 433 // Perform the initial placement of the constant pool entries. 
To start with, 434 // we put them all at the end of the function. 435 std::vector<MachineInstr*> CPEMIs; 436 if (!MCP->isEmpty()) 437 doInitialPlacement(CPEMIs); 438 439 /// The next UID to take is the first unused one. 440 AFI->initPICLabelUId(CPEMIs.size()); 441 442 // Do the initial scan of the function, building up information about the 443 // sizes of each block, the location of all the water, and finding all of the 444 // constant pool users. 445 initializeFunctionInfo(CPEMIs); 446 CPEMIs.clear(); 447 DEBUG(dumpBBs()); 448 449 450 /// Remove dead constant pool entries. 451 MadeChange |= removeUnusedCPEntries(); 452 453 // Iteratively place constant pool entries and fix up branches until there 454 // is no change. 455 unsigned NoCPIters = 0, NoBRIters = 0; 456 while (true) { 457 DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n'); 458 bool CPChange = false; 459 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) 460 CPChange |= handleConstantPoolUser(i); 461 if (CPChange && ++NoCPIters > 30) 462 report_fatal_error("Constant Island pass failed to converge!"); 463 DEBUG(dumpBBs()); 464 465 // Clear NewWaterList now. If we split a block for branches, it should 466 // appear as "new water" for the next iteration of constant pool placement. 467 NewWaterList.clear(); 468 469 DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n'); 470 bool BRChange = false; 471 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) 472 BRChange |= fixupImmediateBr(ImmBranches[i]); 473 if (BRChange && ++NoBRIters > 30) 474 report_fatal_error("Branch Fix Up pass failed to converge!"); 475 DEBUG(dumpBBs()); 476 477 if (!CPChange && !BRChange) 478 break; 479 MadeChange = true; 480 } 481 482 // Shrink 32-bit Thumb2 branch, load, and store instructions. 483 if (isThumb2 && !STI->prefers32BitThumb()) 484 MadeChange |= optimizeThumb2Instructions(); 485 486 // After a while, this might be made debug-only, but it is not expensive. 487 verify(); 488 489 // If LR has been forced spilled and no far jump (i.e. BL) has been issued, 490 // undo the spill / restore of LR if possible. 491 if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump()) 492 MadeChange |= undoLRSpillRestore(); 493 494 // Save the mapping between original and cloned constpool entries. 495 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) { 496 for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) { 497 const CPEntry & CPE = CPEntries[i][j]; 498 AFI->recordCPEClone(i, CPE.CPI); 499 } 500 } 501 502 DEBUG(dbgs() << '\n'; dumpBBs()); 503 504 BBInfo.clear(); 505 WaterList.clear(); 506 CPUsers.clear(); 507 CPEntries.clear(); 508 ImmBranches.clear(); 509 PushPopMIs.clear(); 510 T2JumpTables.clear(); 511 512 return MadeChange; 513 } 514 515 /// doInitialPlacement - Perform the initial placement of the constant pool 516 /// entries. To start with, we put them all at the end of the function. 517 void 518 ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) { 519 // Create the basic block to hold the CPE's. 520 MachineBasicBlock *BB = MF->CreateMachineBasicBlock(); 521 MF->push_back(BB); 522 523 // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). 524 unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment()); 525 526 // Mark the basic block as required by the const-pool. 527 // If AlignConstantIslands isn't set, use 4-byte alignment for everything. 528 BB->setAlignment(AlignConstantIslands ? 
MaxAlign : 2); 529 530 // The function needs to be as aligned as the basic blocks. The linker may 531 // move functions around based on their alignment. 532 MF->EnsureAlignment(BB->getAlignment()); 533 534 // Order the entries in BB by descending alignment. That ensures correct 535 // alignment of all entries as long as BB is sufficiently aligned. Keep 536 // track of the insertion point for each alignment. We are going to bucket 537 // sort the entries as they are created. 538 SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end()); 539 540 // Add all of the constants from the constant pool to the end block, use an 541 // identity mapping of CPI's to CPE's. 542 const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants(); 543 544 const TargetData &TD = *MF->getTarget().getTargetData(); 545 for (unsigned i = 0, e = CPs.size(); i != e; ++i) { 546 unsigned Size = TD.getTypeAllocSize(CPs[i].getType()); 547 assert(Size >= 4 && "Too small constant pool entry"); 548 unsigned Align = CPs[i].getAlignment(); 549 assert(isPowerOf2_32(Align) && "Invalid alignment"); 550 // Verify that all constant pool entries are a multiple of their alignment. 551 // If not, we would have to pad them out so that instructions stay aligned. 552 assert((Size % Align) == 0 && "CP Entry not multiple of 4 bytes!"); 553 554 // Insert CONSTPOOL_ENTRY before entries with a smaller alignment. 555 unsigned LogAlign = Log2_32(Align); 556 MachineBasicBlock::iterator InsAt = InsPoint[LogAlign]; 557 MachineInstr *CPEMI = 558 BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY)) 559 .addImm(i).addConstantPoolIndex(i).addImm(Size); 560 CPEMIs.push_back(CPEMI); 561 562 // Ensure that future entries with higher alignment get inserted before 563 // CPEMI. This is bucket sort with iterators. 564 for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a) 565 if (InsPoint[a] == InsAt) 566 InsPoint[a] = CPEMI; 567 568 // Add a new CPEntry, but no corresponding CPUser yet. 569 std::vector<CPEntry> CPEs; 570 CPEs.push_back(CPEntry(CPEMI, i)); 571 CPEntries.push_back(CPEs); 572 ++NumCPEs; 573 DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = " 574 << Size << ", align = " << Align <<'\n'); 575 } 576 DEBUG(BB->dump()); 577 } 578 579 /// BBHasFallthrough - Return true if the specified basic block can fallthrough 580 /// into the block immediately after it. 581 static bool BBHasFallthrough(MachineBasicBlock *MBB) { 582 // Get the next machine basic block in the function. 583 MachineFunction::iterator MBBI = MBB; 584 // Can't fall off end of function. 585 if (llvm::next(MBBI) == MBB->getParent()->end()) 586 return false; 587 588 MachineBasicBlock *NextBB = llvm::next(MBBI); 589 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 590 E = MBB->succ_end(); I != E; ++I) 591 if (*I == NextBB) 592 return true; 593 594 return false; 595 } 596 597 /// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI, 598 /// look up the corresponding CPEntry. 599 ARMConstantIslands::CPEntry 600 *ARMConstantIslands::findConstPoolEntry(unsigned CPI, 601 const MachineInstr *CPEMI) { 602 std::vector<CPEntry> &CPEs = CPEntries[CPI]; 603 // Number of entries per constpool index should be small, just do a 604 // linear search. 605 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) { 606 if (CPEs[i].CPEMI == CPEMI) 607 return &CPEs[i]; 608 } 609 return NULL; 610 } 611 612 /// getCPELogAlign - Returns the required alignment of the constant pool entry 613 /// represented by CPEMI. 
Alignment is measured in log2(bytes) units. 614 unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) { 615 assert(CPEMI && CPEMI->getOpcode() == ARM::CONSTPOOL_ENTRY); 616 617 // Everything is 4-byte aligned unless AlignConstantIslands is set. 618 if (!AlignConstantIslands) 619 return 2; 620 621 unsigned CPI = CPEMI->getOperand(1).getIndex(); 622 assert(CPI < MCP->getConstants().size() && "Invalid constant pool index."); 623 unsigned Align = MCP->getConstants()[CPI].getAlignment(); 624 assert(isPowerOf2_32(Align) && "Invalid CPE alignment"); 625 return Log2_32(Align); 626 } 627 628 /// scanFunctionJumpTables - Do a scan of the function, building up 629 /// information about the sizes of each block and the locations of all 630 /// the jump tables. 631 void ARMConstantIslands::scanFunctionJumpTables() { 632 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end(); 633 MBBI != E; ++MBBI) { 634 MachineBasicBlock &MBB = *MBBI; 635 636 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); 637 I != E; ++I) 638 if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT) 639 T2JumpTables.push_back(I); 640 } 641 } 642 643 /// initializeFunctionInfo - Do the initial scan of the function, building up 644 /// information about the sizes of each block, the location of all the water, 645 /// and finding all of the constant pool users. 646 void ARMConstantIslands:: 647 initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) { 648 BBInfo.clear(); 649 BBInfo.resize(MF->getNumBlockIDs()); 650 651 // First thing, compute the size of all basic blocks, and see if the function 652 // has any inline assembly in it. If so, we have to be conservative about 653 // alignment assumptions, as we don't know for sure the size of any 654 // instructions in the inline assembly. 655 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) 656 computeBlockSize(I); 657 658 // The known bits of the entry block offset are determined by the function 659 // alignment. 660 BBInfo.front().KnownBits = MF->getAlignment(); 661 662 // Compute block offsets and known bits. 663 adjustBBOffsetsAfter(MF->begin()); 664 665 // Now go back through the instructions and build up our data structures. 666 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end(); 667 MBBI != E; ++MBBI) { 668 MachineBasicBlock &MBB = *MBBI; 669 670 // If this block doesn't fall through into the next MBB, then this is 671 // 'water' that a constant pool island could be placed. 672 if (!BBHasFallthrough(&MBB)) 673 WaterList.push_back(&MBB); 674 675 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); 676 I != E; ++I) { 677 if (I->isDebugValue()) 678 continue; 679 680 int Opc = I->getOpcode(); 681 if (I->isBranch()) { 682 bool isCond = false; 683 unsigned Bits = 0; 684 unsigned Scale = 1; 685 int UOpc = Opc; 686 switch (Opc) { 687 default: 688 continue; // Ignore other JT branches 689 case ARM::t2BR_JT: 690 T2JumpTables.push_back(I); 691 continue; // Does not get an entry in ImmBranches 692 case ARM::Bcc: 693 isCond = true; 694 UOpc = ARM::B; 695 // Fallthrough 696 case ARM::B: 697 Bits = 24; 698 Scale = 4; 699 break; 700 case ARM::tBcc: 701 isCond = true; 702 UOpc = ARM::tB; 703 Bits = 8; 704 Scale = 2; 705 break; 706 case ARM::tB: 707 Bits = 11; 708 Scale = 2; 709 break; 710 case ARM::t2Bcc: 711 isCond = true; 712 UOpc = ARM::t2B; 713 Bits = 20; 714 Scale = 2; 715 break; 716 case ARM::t2B: 717 Bits = 24; 718 Scale = 2; 719 break; 720 } 721 722 // Record this immediate branch. 
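        // (Illustrative note, added commentary: for ARM::B, Bits = 24 and
        // Scale = 4, so MaxOffs below is ((1 << 23) - 1) * 4, i.e. roughly
        // +-32MB of reach.)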
        unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
        ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
      }

      if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
        PushPopMIs.push_back(I);

      if (Opc == ARM::CONSTPOOL_ENTRY)
        continue;

      // Scan the instructions for constant pool operands.
      for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
        if (I->getOperand(op).isCPI()) {
          // We found one. The addressing mode tells us the max displacement
          // from the PC that this instruction permits.

          // Basic size info comes from the TSFlags field.
          unsigned Bits = 0;
          unsigned Scale = 1;
          bool NegOk = false;
          bool IsSoImm = false;

          switch (Opc) {
          default:
            llvm_unreachable("Unknown addressing mode for CP reference!");

          // Taking the address of a CP entry.
          case ARM::LEApcrel:
            // This takes a SoImm, which is an 8-bit immediate rotated. We'll
            // pretend the maximum offset is 255 * 4. Since each instruction
            // is 4 bytes wide, this is always correct. We'll check for other
            // displacements that fit in a SoImm as well.
            Bits = 8;
            Scale = 4;
            NegOk = true;
            IsSoImm = true;
            break;
          case ARM::t2LEApcrel:
            Bits = 12;
            NegOk = true;
            break;
          case ARM::tLEApcrel:
            Bits = 8;
            Scale = 4;
            break;

          case ARM::LDRi12:
          case ARM::LDRcp:
          case ARM::t2LDRpci:
            Bits = 12;  // +-offset_12
            NegOk = true;
            break;

          case ARM::tLDRpci:
            Bits = 8;
            Scale = 4;  // +(offset_8*4)
            break;

          case ARM::VLDRD:
          case ARM::VLDRS:
            Bits = 8;
            Scale = 4;  // +-(offset_8*4)
            NegOk = true;
            break;
          }

          // Remember that this is a user of a CP entry.
          unsigned CPI = I->getOperand(op).getIndex();
          MachineInstr *CPEMI = CPEMIs[CPI];
          unsigned MaxOffs = ((1 << Bits)-1) * Scale;
          CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));

          // Increment corresponding CPEntry reference count.
          CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
          assert(CPE && "Cannot find a corresponding CPEntry!");
          CPE->RefCount++;

          // Instructions can only use one CP entry, don't bother scanning the
          // rest of the operands.
          break;
        }
    }
  }
}

/// computeBlockSize - Compute the size and some alignment information for MBB.
/// This function updates BBInfo directly.
void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
  BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
  BBI.Size = 0;
  BBI.Unalign = 0;
  BBI.PostAlign = 0;

  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
       ++I) {
    BBI.Size += TII->GetInstSizeInBytes(I);
    // For inline asm, GetInstSizeInBytes returns a conservative estimate.
    // The actual size may be smaller, but still a multiple of the instr size.
    if (I->isInlineAsm())
      BBI.Unalign = isThumb ? 1 : 2;
    // Also consider instructions that may be shrunk later.
    else if (isThumb && mayOptimizeThumb2Instruction(I))
      BBI.Unalign = 1;
  }

  // tBR_JTr contains a .align 2 directive.
  if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
    BBI.PostAlign = 2;
    MBB->getParent()->EnsureAlignment(2);
  }
}

/// getOffsetOf - Return the current offset of the specified machine
/// instruction from the start of the function. This offset changes as stuff
/// is moved around inside the function.
838 unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const { 839 MachineBasicBlock *MBB = MI->getParent(); 840 841 // The offset is composed of two things: the sum of the sizes of all MBB's 842 // before this instruction's block, and the offset from the start of the block 843 // it is in. 844 unsigned Offset = BBInfo[MBB->getNumber()].Offset; 845 846 // Sum instructions before MI in MBB. 847 for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) { 848 assert(I != MBB->end() && "Didn't find MI in its own basic block?"); 849 Offset += TII->GetInstSizeInBytes(I); 850 } 851 return Offset; 852 } 853 854 /// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB 855 /// ID. 856 static bool CompareMBBNumbers(const MachineBasicBlock *LHS, 857 const MachineBasicBlock *RHS) { 858 return LHS->getNumber() < RHS->getNumber(); 859 } 860 861 /// updateForInsertedWaterBlock - When a block is newly inserted into the 862 /// machine function, it upsets all of the block numbers. Renumber the blocks 863 /// and update the arrays that parallel this numbering. 864 void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) { 865 // Renumber the MBB's to keep them consecutive. 866 NewBB->getParent()->RenumberBlocks(NewBB); 867 868 // Insert an entry into BBInfo to align it properly with the (newly 869 // renumbered) block numbers. 870 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo()); 871 872 // Next, update WaterList. Specifically, we need to add NewMBB as having 873 // available water after it. 874 water_iterator IP = 875 std::lower_bound(WaterList.begin(), WaterList.end(), NewBB, 876 CompareMBBNumbers); 877 WaterList.insert(IP, NewBB); 878 } 879 880 881 /// Split the basic block containing MI into two blocks, which are joined by 882 /// an unconditional branch. Update data structures and renumber blocks to 883 /// account for this change and returns the newly created block. 884 MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) { 885 MachineBasicBlock *OrigBB = MI->getParent(); 886 887 // Create a new MBB for the code after the OrigBB. 888 MachineBasicBlock *NewBB = 889 MF->CreateMachineBasicBlock(OrigBB->getBasicBlock()); 890 MachineFunction::iterator MBBI = OrigBB; ++MBBI; 891 MF->insert(MBBI, NewBB); 892 893 // Splice the instructions starting with MI over to NewBB. 894 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end()); 895 896 // Add an unconditional branch from OrigBB to NewBB. 897 // Note the new unconditional branch is not being recorded. 898 // There doesn't seem to be meaningful DebugInfo available; this doesn't 899 // correspond to anything in the source. 900 unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B; 901 if (!isThumb) 902 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB); 903 else 904 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB) 905 .addImm(ARMCC::AL).addReg(0); 906 ++NumSplit; 907 908 // Update the CFG. All succs of OrigBB are now succs of NewBB. 909 NewBB->transferSuccessors(OrigBB); 910 911 // OrigBB branches to NewBB. 912 OrigBB->addSuccessor(NewBB); 913 914 // Update internal data structures to account for the newly inserted MBB. 915 // This is almost the same as updateForInsertedWaterBlock, except that 916 // the Water goes after OrigBB, not NewBB. 917 MF->RenumberBlocks(NewBB); 918 919 // Insert an entry into BBInfo to align it properly with the (newly 920 // renumbered) block numbers. 
921 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo()); 922 923 // Next, update WaterList. Specifically, we need to add OrigMBB as having 924 // available water after it (but not if it's already there, which happens 925 // when splitting before a conditional branch that is followed by an 926 // unconditional branch - in that case we want to insert NewBB). 927 water_iterator IP = 928 std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB, 929 CompareMBBNumbers); 930 MachineBasicBlock* WaterBB = *IP; 931 if (WaterBB == OrigBB) 932 WaterList.insert(llvm::next(IP), NewBB); 933 else 934 WaterList.insert(IP, OrigBB); 935 NewWaterList.insert(OrigBB); 936 937 // Figure out how large the OrigBB is. As the first half of the original 938 // block, it cannot contain a tablejump. The size includes 939 // the new jump we added. (It should be possible to do this without 940 // recounting everything, but it's very confusing, and this is rarely 941 // executed.) 942 computeBlockSize(OrigBB); 943 944 // Figure out how large the NewMBB is. As the second half of the original 945 // block, it may contain a tablejump. 946 computeBlockSize(NewBB); 947 948 // All BBOffsets following these blocks must be modified. 949 adjustBBOffsetsAfter(OrigBB); 950 951 return NewBB; 952 } 953 954 /// getUserOffset - Compute the offset of U.MI as seen by the hardware 955 /// displacement computation. Update U.KnownAlignment to match its current 956 /// basic block location. 957 unsigned ARMConstantIslands::getUserOffset(CPUser &U) const { 958 unsigned UserOffset = getOffsetOf(U.MI); 959 const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()]; 960 unsigned KnownBits = BBI.internalKnownBits(); 961 962 // The value read from PC is offset from the actual instruction address. 963 UserOffset += (isThumb ? 4 : 8); 964 965 // Because of inline assembly, we may not know the alignment (mod 4) of U.MI. 966 // Make sure U.getMaxDisp() returns a constrained range. 967 U.KnownAlignment = (KnownBits >= 2); 968 969 // On Thumb, offsets==2 mod 4 are rounded down by the hardware for 970 // purposes of the displacement computation; compensate for that here. 971 // For unknown alignments, getMaxDisp() constrains the range instead. 972 if (isThumb && U.KnownAlignment) 973 UserOffset &= ~3u; 974 975 return UserOffset; 976 } 977 978 /// isOffsetInRange - Checks whether UserOffset (the location of a constant pool 979 /// reference) is within MaxDisp of TrialOffset (a proposed location of a 980 /// constant pool entry). 981 /// UserOffset is computed by getUserOffset above to include PC adjustments. If 982 /// the mod 4 alignment of UserOffset is not known, the uncertainty must be 983 /// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that. 984 bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset, 985 unsigned TrialOffset, unsigned MaxDisp, 986 bool NegativeOK, bool IsSoImm) { 987 if (UserOffset <= TrialOffset) { 988 // User before the Trial. 989 if (TrialOffset - UserOffset <= MaxDisp) 990 return true; 991 // FIXME: Make use full range of soimm values. 992 } else if (NegativeOK) { 993 if (UserOffset - TrialOffset <= MaxDisp) 994 return true; 995 // FIXME: Make use full range of soimm values. 996 } 997 return false; 998 } 999 1000 /// isWaterInRange - Returns true if a CPE placed after the specified 1001 /// Water (a basic block) will be in range for the specific MI. 1002 /// 1003 /// Compute how much the function will grow by inserting a CPE after Water. 
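/// Growth is returned in bytes; it is zero when the CPE fits entirely within
/// the alignment padding that already precedes the next block (added
/// commentary restating the logic below).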
bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
                                        MachineBasicBlock* Water, CPUser &U,
                                        unsigned &Growth) {
  unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
  unsigned NextBlockOffset, NextBlockAlignment;
  MachineFunction::const_iterator NextBlock = Water;
  if (++NextBlock == MF->end()) {
    NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
    NextBlockAlignment = 0;
  } else {
    NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
    NextBlockAlignment = NextBlock->getAlignment();
  }
  unsigned Size = U.CPEMI->getOperand(2).getImm();
  unsigned CPEEnd = CPEOffset + Size;

  // The CPE may be able to hide in the alignment padding before the next
  // block. It may also cause more padding to be required if it is more
  // aligned than the next block.
  if (CPEEnd > NextBlockOffset) {
    Growth = CPEEnd - NextBlockOffset;
    // Compute the padding that would go at the end of the CPE to align the
    // next block.
    Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);

    // If the CPE is to be inserted before the instruction, that will raise
    // the offset of the instruction. Also account for unknown alignment
    // padding in blocks between the CPE and the user.
    if (CPEOffset < UserOffset)
      UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
  } else
    // CPE fits in existing padding.
    Growth = 0;

  return isOffsetInRange(UserOffset, CPEOffset, U);
}

/// isCPEntryInRange - Returns true if the distance between the specified MI
/// and the specified ConstPool entry instruction can fit in MI's displacement
/// field.
bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                                          MachineInstr *CPEMI, unsigned MaxDisp,
                                          bool NegOk, bool DoDump) {
  unsigned CPEOffset = getOffsetOf(CPEMI);
  assert(CPEOffset % 4 == 0 && "Misaligned CPE");

  if (DoDump) {
    DEBUG({
      unsigned Block = MI->getParent()->getNumber();
      const BasicBlockInfo &BBI = BBInfo[Block];
      dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
             << " max delta=" << MaxDisp
             << format(" insn address=%#x", UserOffset)
             << " in BB#" << Block << ": "
             << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
             << format("CPE address=%#x offset=%+d: ", CPEOffset,
                       int(CPEOffset-UserOffset));
    });
  }

  return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
}

#ifndef NDEBUG
/// BBIsJumpedOver - Return true if the specified basic block's only
/// predecessor unconditionally branches to its only successor.
static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
  if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
    return false;

  MachineBasicBlock *Succ = *MBB->succ_begin();
  MachineBasicBlock *Pred = *MBB->pred_begin();
  MachineInstr *PredMI = &Pred->back();
  if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
      || PredMI->getOpcode() == ARM::t2B)
    return PredMI->getOperand(0).getMBB() == Succ;
  return false;
}
#endif // NDEBUG

void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
  unsigned BBNum = BB->getNumber();
  for (unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
    // Get the offset and known bits at the end of the layout predecessor.
1088 // Include the alignment of the current block. 1089 unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment(); 1090 unsigned Offset = BBInfo[i - 1].postOffset(LogAlign); 1091 unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign); 1092 1093 // This is where block i begins. Stop if the offset is already correct, 1094 // and we have updated 2 blocks. This is the maximum number of blocks 1095 // changed before calling this function. 1096 if (i > BBNum + 2 && 1097 BBInfo[i].Offset == Offset && 1098 BBInfo[i].KnownBits == KnownBits) 1099 break; 1100 1101 BBInfo[i].Offset = Offset; 1102 BBInfo[i].KnownBits = KnownBits; 1103 } 1104 } 1105 1106 /// decrementCPEReferenceCount - find the constant pool entry with index CPI 1107 /// and instruction CPEMI, and decrement its refcount. If the refcount 1108 /// becomes 0 remove the entry and instruction. Returns true if we removed 1109 /// the entry, false if we didn't. 1110 1111 bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI, 1112 MachineInstr *CPEMI) { 1113 // Find the old entry. Eliminate it if it is no longer used. 1114 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI); 1115 assert(CPE && "Unexpected!"); 1116 if (--CPE->RefCount == 0) { 1117 removeDeadCPEMI(CPEMI); 1118 CPE->CPEMI = NULL; 1119 --NumCPEs; 1120 return true; 1121 } 1122 return false; 1123 } 1124 1125 /// LookForCPEntryInRange - see if the currently referenced CPE is in range; 1126 /// if not, see if an in-range clone of the CPE is in range, and if so, 1127 /// change the data structures so the user references the clone. Returns: 1128 /// 0 = no existing entry found 1129 /// 1 = entry found, and there were no code insertions or deletions 1130 /// 2 = entry found, and there were code insertions or deletions 1131 int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) 1132 { 1133 MachineInstr *UserMI = U.MI; 1134 MachineInstr *CPEMI = U.CPEMI; 1135 1136 // Check to see if the CPE is already in-range. 1137 if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk, 1138 true)) { 1139 DEBUG(dbgs() << "In range\n"); 1140 return 1; 1141 } 1142 1143 // No. Look for previously created clones of the CPE that are in range. 1144 unsigned CPI = CPEMI->getOperand(1).getIndex(); 1145 std::vector<CPEntry> &CPEs = CPEntries[CPI]; 1146 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) { 1147 // We already tried this one 1148 if (CPEs[i].CPEMI == CPEMI) 1149 continue; 1150 // Removing CPEs can leave empty entries, skip 1151 if (CPEs[i].CPEMI == NULL) 1152 continue; 1153 if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(), 1154 U.NegOk)) { 1155 DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#" 1156 << CPEs[i].CPI << "\n"); 1157 // Point the CPUser node to the replacement 1158 U.CPEMI = CPEs[i].CPEMI; 1159 // Change the CPI in the instruction operand to refer to the clone. 1160 for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j) 1161 if (UserMI->getOperand(j).isCPI()) { 1162 UserMI->getOperand(j).setIndex(CPEs[i].CPI); 1163 break; 1164 } 1165 // Adjust the refcount of the clone... 1166 CPEs[i].RefCount++; 1167 // ...and the original. If we didn't remove the old entry, none of the 1168 // addresses changed, so we don't need another pass. 1169 return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1; 1170 } 1171 } 1172 return 0; 1173 } 1174 1175 /// getUnconditionalBrDisp - Returns the maximum displacement that can fit in 1176 /// the specific unconditional branch instruction. 
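/// Illustrative example (added commentary): ARM::tB encodes an 11-bit signed
/// offset scaled by 2, so the positive half-range returned below is
/// ((1 << 10) - 1) * 2 bytes.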
1177 static inline unsigned getUnconditionalBrDisp(int Opc) { 1178 switch (Opc) { 1179 case ARM::tB: 1180 return ((1<<10)-1)*2; 1181 case ARM::t2B: 1182 return ((1<<23)-1)*2; 1183 default: 1184 break; 1185 } 1186 1187 return ((1<<23)-1)*4; 1188 } 1189 1190 /// findAvailableWater - Look for an existing entry in the WaterList in which 1191 /// we can place the CPE referenced from U so it's within range of U's MI. 1192 /// Returns true if found, false if not. If it returns true, WaterIter 1193 /// is set to the WaterList entry. For Thumb, prefer water that will not 1194 /// introduce padding to water that will. To ensure that this pass 1195 /// terminates, the CPE location for a particular CPUser is only allowed to 1196 /// move to a lower address, so search backward from the end of the list and 1197 /// prefer the first water that is in range. 1198 bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset, 1199 water_iterator &WaterIter) { 1200 if (WaterList.empty()) 1201 return false; 1202 1203 unsigned BestGrowth = ~0u; 1204 for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();; 1205 --IP) { 1206 MachineBasicBlock* WaterBB = *IP; 1207 // Check if water is in range and is either at a lower address than the 1208 // current "high water mark" or a new water block that was created since 1209 // the previous iteration by inserting an unconditional branch. In the 1210 // latter case, we want to allow resetting the high water mark back to 1211 // this new water since we haven't seen it before. Inserting branches 1212 // should be relatively uncommon and when it does happen, we want to be 1213 // sure to take advantage of it for all the CPEs near that block, so that 1214 // we don't insert more branches than necessary. 1215 unsigned Growth; 1216 if (isWaterInRange(UserOffset, WaterBB, U, Growth) && 1217 (WaterBB->getNumber() < U.HighWaterMark->getNumber() || 1218 NewWaterList.count(WaterBB)) && Growth < BestGrowth) { 1219 // This is the least amount of required padding seen so far. 1220 BestGrowth = Growth; 1221 WaterIter = IP; 1222 DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber() 1223 << " Growth=" << Growth << '\n'); 1224 1225 // Keep looking unless it is perfect. 1226 if (BestGrowth == 0) 1227 return true; 1228 } 1229 if (IP == B) 1230 break; 1231 } 1232 return BestGrowth != ~0u; 1233 } 1234 1235 /// createNewWater - No existing WaterList entry will work for 1236 /// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the 1237 /// block is used if in range, and the conditional branch munged so control 1238 /// flow is correct. Otherwise the block is split to create a hole with an 1239 /// unconditional branch around it. In either case NewMBB is set to a 1240 /// block following which the new island can be inserted (the WaterList 1241 /// is not adjusted). 1242 void ARMConstantIslands::createNewWater(unsigned CPUserIndex, 1243 unsigned UserOffset, 1244 MachineBasicBlock *&NewMBB) { 1245 CPUser &U = CPUsers[CPUserIndex]; 1246 MachineInstr *UserMI = U.MI; 1247 MachineInstr *CPEMI = U.CPEMI; 1248 unsigned CPELogAlign = getCPELogAlign(CPEMI); 1249 MachineBasicBlock *UserMBB = UserMI->getParent(); 1250 const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()]; 1251 1252 // If the block does not end in an unconditional branch already, and if the 1253 // end of the block is within range, make new water there. (The addition 1254 // below is for the unconditional branch we will be adding: 4 bytes on ARM + 1255 // Thumb2, 2 on Thumb1. 
1256 if (BBHasFallthrough(UserMBB)) { 1257 // Size of branch to insert. 1258 unsigned Delta = isThumb1 ? 2 : 4; 1259 // End of UserBlock after adding a branch. 1260 unsigned UserBlockEnd = UserBBI.postOffset() + Delta; 1261 // Compute the offset where the CPE will begin. 1262 unsigned CPEOffset = WorstCaseAlign(UserBlockEnd, CPELogAlign, 1263 UserBBI.postKnownBits()); 1264 1265 if (isOffsetInRange(UserOffset, CPEOffset, U)) { 1266 DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber() 1267 << format(", expected CPE offset %#x\n", CPEOffset)); 1268 NewMBB = llvm::next(MachineFunction::iterator(UserMBB)); 1269 // Add an unconditional branch from UserMBB to fallthrough block. Record 1270 // it for branch lengthening; this new branch will not get out of range, 1271 // but if the preceding conditional branch is out of range, the targets 1272 // will be exchanged, and the altered branch may be out of range, so the 1273 // machinery has to know about it. 1274 int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B; 1275 if (!isThumb) 1276 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB); 1277 else 1278 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB) 1279 .addImm(ARMCC::AL).addReg(0); 1280 unsigned MaxDisp = getUnconditionalBrDisp(UncondBr); 1281 ImmBranches.push_back(ImmBranch(&UserMBB->back(), 1282 MaxDisp, false, UncondBr)); 1283 BBInfo[UserMBB->getNumber()].Size += Delta; 1284 adjustBBOffsetsAfter(UserMBB); 1285 return; 1286 } 1287 } 1288 1289 // What a big block. Find a place within the block to split it. This is a 1290 // little tricky on Thumb1 since instructions are 2 bytes and constant pool 1291 // entries are 4 bytes: if instruction I references island CPE, and 1292 // instruction I+1 references CPE', it will not work well to put CPE as far 1293 // forward as possible, since then CPE' cannot immediately follow it (that 1294 // location is 2 bytes farther away from I+1 than CPE was from I) and we'd 1295 // need to create a new island. So, we make a first guess, then walk through 1296 // the instructions between the one currently being looked at and the 1297 // possible insertion point, and make sure any other instructions that 1298 // reference CPEs will be able to use the same island area; if not, we back 1299 // up the insertion point. 1300 1301 // Try to split the block so it's fully aligned. Compute the latest split 1302 // point where we can add a 4-byte branch instruction, and then 1303 // WorstCaseAlign to LogAlign. 1304 unsigned LogAlign = MF->getAlignment(); 1305 assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry"); 1306 unsigned KnownBits = UserBBI.internalKnownBits(); 1307 unsigned UPad = UnknownPadding(LogAlign, KnownBits); 1308 unsigned BaseInsertOffset = UserOffset + U.getMaxDisp(); 1309 DEBUG(dbgs() << format("Split in middle of big block before %#x", 1310 BaseInsertOffset)); 1311 1312 // Account for alignment and unknown padding. 1313 BaseInsertOffset &= ~((1u << LogAlign) - 1); 1314 BaseInsertOffset -= UPad; 1315 1316 // The 4 in the following is for the unconditional branch we'll be inserting 1317 // (allows for long branch on Thumb1). Alignment of the island is handled 1318 // inside isOffsetInRange. 
  BaseInsertOffset -= 4;

  DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
               << " la=" << LogAlign
               << " kb=" << KnownBits
               << " up=" << UPad << '\n');

  // This could point off the end of the block if we've already got constant
  // pool entries following this block; only the last one is in the water list.
  // Back past any possible branches (allow for a conditional and a maximally
  // long unconditional).
  if (BaseInsertOffset >= BBInfo[UserMBB->getNumber()+1].Offset)
    BaseInsertOffset = BBInfo[UserMBB->getNumber()+1].Offset -
      (isThumb1 ? 6 : 8);
  unsigned EndInsertOffset =
    WorstCaseAlign(BaseInsertOffset + 4, LogAlign, KnownBits) +
    CPEMI->getOperand(2).getImm();
  MachineBasicBlock::iterator MI = UserMI;
  ++MI;
  unsigned CPUIndex = CPUserIndex+1;
  unsigned NumCPUsers = CPUsers.size();
  MachineInstr *LastIT = 0;
  for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
       Offset < BaseInsertOffset;
       Offset += TII->GetInstSizeInBytes(MI), MI = llvm::next(MI)) {
    if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
      CPUser &U = CPUsers[CPUIndex];
      if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
        // Shift insertion point by one unit of alignment so it is within
        // reach.
        BaseInsertOffset -= 1u << LogAlign;
        EndInsertOffset  -= 1u << LogAlign;
      }
      // This is overly conservative, as we don't account for CPEMIs being
      // reused within the block, but it doesn't matter much. Also assume CPEs
      // are added in order with alignment padding. We may eventually be able
      // to pack the aligned CPEs better.
      EndInsertOffset = RoundUpToAlignment(EndInsertOffset,
                                           1u << getCPELogAlign(U.CPEMI)) +
                        U.CPEMI->getOperand(2).getImm();
      CPUIndex++;
    }

    // Remember the last IT instruction.
    if (MI->getOpcode() == ARM::t2IT)
      LastIT = MI;
  }

  --MI;

  // Avoid splitting an IT block.
  if (LastIT) {
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
    if (CC != ARMCC::AL)
      MI = LastIT;
  }
  NewMBB = splitBlockBeforeInstr(MI);
}

/// handleConstantPoolUser - Analyze the specified user, checking to see if it
/// is out-of-range. If so, pick up the constant pool value and move it some
/// place in-range. Return true if we changed any addresses (thus must run
/// another pass of branch lengthening), false otherwise.
bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;
  unsigned CPI = CPEMI->getOperand(1).getIndex();
  unsigned Size = CPEMI->getOperand(2).getImm();
  // Compute this only once, it's expensive.
  unsigned UserOffset = getUserOffset(U);

  // See if the current entry is within range, or there is a clone of it
  // in range.
  int result = findInRangeCPEntry(U, UserOffset);
  if (result==1) return false;
  else if (result==2) return true;

  // No existing clone of this CPE is within range.
  // We will be generating a new clone. Get a UID for it.
  unsigned ID = AFI->createPICLabelUId();

  // Look for water where we can place this CPE.
1403 MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock(); 1404 MachineBasicBlock *NewMBB; 1405 water_iterator IP; 1406 if (findAvailableWater(U, UserOffset, IP)) { 1407 DEBUG(dbgs() << "Found water in range\n"); 1408 MachineBasicBlock *WaterBB = *IP; 1409 1410 // If the original WaterList entry was "new water" on this iteration, 1411 // propagate that to the new island. This is just keeping NewWaterList 1412 // updated to match the WaterList, which will be updated below. 1413 if (NewWaterList.count(WaterBB)) { 1414 NewWaterList.erase(WaterBB); 1415 NewWaterList.insert(NewIsland); 1416 } 1417 // The new CPE goes before the following block (NewMBB). 1418 NewMBB = llvm::next(MachineFunction::iterator(WaterBB)); 1419 1420 } else { 1421 // No water found. 1422 DEBUG(dbgs() << "No water found\n"); 1423 createNewWater(CPUserIndex, UserOffset, NewMBB); 1424 1425 // splitBlockBeforeInstr adds to WaterList, which is important when it is 1426 // called while handling branches so that the water will be seen on the 1427 // next iteration for constant pools, but in this context, we don't want 1428 // it. Check for this so it will be removed from the WaterList. 1429 // Also remove any entry from NewWaterList. 1430 MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB)); 1431 IP = std::find(WaterList.begin(), WaterList.end(), WaterBB); 1432 if (IP != WaterList.end()) 1433 NewWaterList.erase(WaterBB); 1434 1435 // We are adding new water. Update NewWaterList. 1436 NewWaterList.insert(NewIsland); 1437 } 1438 1439 // Remove the original WaterList entry; we want subsequent insertions in 1440 // this vicinity to go after the one we're about to insert. This 1441 // considerably reduces the number of times we have to move the same CPE 1442 // more than once and is also important to ensure the algorithm terminates. 1443 if (IP != WaterList.end()) 1444 WaterList.erase(IP); 1445 1446 // Okay, we know we can put an island before NewMBB now, do it! 1447 MF->insert(NewMBB, NewIsland); 1448 1449 // Update internal data structures to account for the newly inserted MBB. 1450 updateForInsertedWaterBlock(NewIsland); 1451 1452 // Decrement the old entry, and remove it if refcount becomes 0. 1453 decrementCPEReferenceCount(CPI, CPEMI); 1454 1455 // Now that we have an island to add the CPE to, clone the original CPE and 1456 // add it to the island. 1457 U.HighWaterMark = NewIsland; 1458 U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY)) 1459 .addImm(ID).addConstantPoolIndex(CPI).addImm(Size); 1460 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1)); 1461 ++NumCPEs; 1462 1463 // Mark the basic block as aligned as required by the const-pool entry. 1464 NewIsland->setAlignment(getCPELogAlign(U.CPEMI)); 1465 1466 // Increase the size of the island block to account for the new entry. 1467 BBInfo[NewIsland->getNumber()].Size += Size; 1468 adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland))); 1469 1470 // Finally, change the CPI in the instruction operand to be ID. 1471 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i) 1472 if (UserMI->getOperand(i).isCPI()) { 1473 UserMI->getOperand(i).setIndex(ID); 1474 break; 1475 } 1476 1477 DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI 1478 << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset)); 1479 1480 return true; 1481 } 1482 1483 /// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update 1484 /// sizes and offsets of impacted basic blocks. 
void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
  MachineBasicBlock *CPEBB = CPEMI->getParent();
  unsigned Size = CPEMI->getOperand(2).getImm();
  CPEMI->eraseFromParent();
  BBInfo[CPEBB->getNumber()].Size -= Size;
  // All succeeding offsets have the current size value added in, fix this.
  if (CPEBB->empty()) {
    BBInfo[CPEBB->getNumber()].Size = 0;

    // This block no longer needs to be aligned. <rdar://problem/10534709>.
    CPEBB->setAlignment(0);
  } else
    // Entries are sorted by descending alignment, so realign from the front.
    CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));

  adjustBBOffsetsAfter(CPEBB);
  // An island has only one predecessor BB and one successor BB. Check if
  // this BB's predecessor jumps directly to this BB's successor. This
  // shouldn't happen currently.
  assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
  // FIXME: remove the empty blocks after all the work is done?
}

/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
/// are zero.
bool ARMConstantIslands::removeUnusedCPEntries() {
  bool MadeChange = false;
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    std::vector<CPEntry> &CPEs = CPEntries[i];
    for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
      if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
        removeDeadCPEMI(CPEs[j].CPEMI);
        CPEs[j].CPEMI = NULL;
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}

/// isBBInRange - Returns true if the distance between the specified MI and
/// the specified BB can fit in MI's displacement field.
bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
                                     unsigned MaxDisp) {
  unsigned PCAdj      = isThumb ? 4 : 8;
  unsigned BrOffset   = getOffsetOf(MI) + PCAdj;
  unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;

  DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
               << " from BB#" << MI->getParent()->getNumber()
               << " max delta=" << MaxDisp
               << " from " << getOffsetOf(MI) << " to " << DestOffset
               << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);

  if (BrOffset <= DestOffset) {
    // Branch before the Dest.
    if (DestOffset-BrOffset <= MaxDisp)
      return true;
  } else {
    if (BrOffset-DestOffset <= MaxDisp)
      return true;
  }
  return false;
}

/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
/// away to fit in its displacement field.
bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Check to see if the DestBB is already in-range.
  if (isBBInRange(MI, DestBB, Br.MaxDisp))
    return false;

  if (!Br.isCond)
    return fixupUnconditionalBr(Br);
  return fixupConditionalBr(Br);
}

/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
/// too far away to fit in its displacement field. If the LR register has been
/// spilled in the epilogue, then we can use BL to implement a far jump.
/// Otherwise, add an intermediate branch instruction to reach the destination.
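/// (Illustrative note, not from the original source: the tBfar pseudo is
/// printed as a 32-bit Thumb BL, whose signed 22-bit, halfword-scaled offset
/// gives roughly +/-4MB of reach; this is where the MaxDisp = (1 << 21) * 2
/// below comes from.)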
bool
ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *MBB = MI->getParent();
  if (!isThumb1)
    llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");

  // Use BL to implement far jump.
  Br.MaxDisp = (1 << 21) * 2;
  MI->setDesc(TII->get(ARM::tBfar));
  BBInfo[MBB->getNumber()].Size += 2;
  adjustBBOffsetsAfter(MBB);
  HasFarJump = true;
  ++NumUBrFixed;

  DEBUG(dbgs() << " Changed B to long jump " << *MI);

  return true;
}

/// fixupConditionalBr - Fix up a conditional branch whose destination is too
/// far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool
ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Add an unconditional branch to the destination and invert the branch
  // condition to jump over it:
  // blt L1
  // =>
  // bge L2
  // b   L1
  // L2:
  ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
  CC = ARMCC::getOppositeCondition(CC);
  unsigned CCReg = MI->getOperand(2).getReg();

  // If the branch is at the end of its MBB and that has a fall-through block,
  // direct the updated conditional branch to the fall-through block.
  // Otherwise, split the MBB before the next instruction.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *BMI = &MBB->back();
  bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);

  ++NumCBrFixed;
  if (BMI != MI) {
    if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
        BMI->getOpcode() == Br.UncondBr) {
      // Last MI in the BB is an unconditional branch. Can we simply invert the
      // condition and swap destinations:
      // beq L1
      // b   L2
      // =>
      // bne L2
      // b   L1
      MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
      if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
        DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
                     << *BMI);
        BMI->getOperand(0).setMBB(DestBB);
        MI->getOperand(0).setMBB(NewDest);
        MI->getOperand(1).setImm(CC);
        return true;
      }
    }
  }

  if (NeedSplit) {
    splitBlockBeforeInstr(MI);
    // No need for the branch to the next block. We're adding an unconditional
    // branch to the destination.
    int delta = TII->GetInstSizeInBytes(&MBB->back());
    BBInfo[MBB->getNumber()].Size -= delta;
    MBB->back().eraseFromParent();
    // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
  }
  MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));

  DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
               << " also invert condition and change dest. to BB#"
               << NextBB->getNumber() << "\n");

  // Insert a new conditional branch and a new unconditional branch.
  // Also update the ImmBranch as well as adding a new entry for the new branch.
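  // (Note, not from the original source: the first BuildMI below appends the
  // inverted conditional branch targeting NextBB, the second appends the
  // long-range unconditional branch to DestBB, and that new unconditional
  // branch is pushed onto ImmBranches so a later iteration can fix it up if
  // it is itself out of range.)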
  BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
    .addMBB(NextBB).addImm(CC).addReg(CCReg);
  Br.MI = &MBB->back();
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
  if (isThumb)
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
            .addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
  unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
  ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));

  // Remove the old conditional branch. It may or may not still be in MBB.
  BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
  MI->eraseFromParent();
  adjustBBOffsetsAfter(MBB);
  return true;
}

/// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
/// LR / restore LR to pc. FIXME: This is done here because it's only possible
/// to do this if tBfar is not used.
bool ARMConstantIslands::undoLRSpillRestore() {
  bool MadeChange = false;
  for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
    MachineInstr *MI = PushPopMIs[i];
    // First two operands are predicates.
    if (MI->getOpcode() == ARM::tPOP_RET &&
        MI->getOperand(2).getReg() == ARM::PC &&
        MI->getNumExplicitOperands() == 3) {
      // Create the new insn and copy the predicate from the old.
      BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
        .addOperand(MI->getOperand(0))
        .addOperand(MI->getOperand(1));
      MI->eraseFromParent();
      MadeChange = true;
    }
  }
  return MadeChange;
}

// mayOptimizeThumb2Instruction - Returns true if optimizeThumb2Instructions
// below may shrink MI.
bool
ARMConstantIslands::mayOptimizeThumb2Instruction(const MachineInstr *MI) const {
  switch(MI->getOpcode()) {
  // optimizeThumb2Instructions.
  case ARM::t2LEApcrel:
  case ARM::t2LDRpci:
  // optimizeThumb2Branches.
  case ARM::t2B:
  case ARM::t2Bcc:
  case ARM::tBcc:
  // optimizeThumb2JumpTables.
  case ARM::t2BR_JT:
    return true;
  }
  return false;
}

bool ARMConstantIslands::optimizeThumb2Instructions() {
  bool MadeChange = false;

  // Shrink ADR and LDR from constantpool.
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned Opcode = U.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2LEApcrel:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLEApcrel;
        Bits = 8;
        Scale = 4;
      }
      break;
    case ARM::t2LDRpci:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLDRpci;
        Bits = 8;
        Scale = 4;
      }
      break;
    }

    if (!NewOpc)
      continue;

    unsigned UserOffset = getUserOffset(U);
    unsigned MaxOffs = ((1 << Bits) - 1) * Scale;

    // Be conservative with inline asm.
    if (!U.KnownAlignment)
      MaxOffs -= 2;

    // FIXME: Check if offset is multiple of scale if scale is not 4.
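    // Worked example (illustrative, not from the original source): tLDRpci
    // and tLEApcrel both take an 8-bit immediate scaled by 4, so with
    // Bits = 8 and Scale = 4 the computation above gives
    // MaxOffs = ((1 << 8) - 1) * 4 = 1020 bytes. When the user's alignment
    // is not known exactly (e.g. inline asm earlier in the function),
    // 2 bytes of slack were subtracted above to stay conservative.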
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
      DEBUG(dbgs() << "Shrink: " << *U.MI);
      U.MI->setDesc(TII->get(NewOpc));
      MachineBasicBlock *MBB = U.MI->getParent();
      BBInfo[MBB->getNumber()].Size -= 2;
      adjustBBOffsetsAfter(MBB);
      ++NumT2CPShrunk;
      MadeChange = true;
    }
  }

  MadeChange |= optimizeThumb2Branches();
  MadeChange |= optimizeThumb2JumpTables();
  return MadeChange;
}

bool ARMConstantIslands::optimizeThumb2Branches() {
  bool MadeChange = false;

  for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) {
    ImmBranch &Br = ImmBranches[i];
    unsigned Opcode = Br.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2B:
      NewOpc = ARM::tB;
      Bits = 11;
      Scale = 2;
      break;
    case ARM::t2Bcc: {
      NewOpc = ARM::tBcc;
      Bits = 8;
      Scale = 2;
      break;
    }
    }
    if (NewOpc) {
      unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
      MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
      if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
        DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
        Br.MI->setDesc(TII->get(NewOpc));
        MachineBasicBlock *MBB = Br.MI->getParent();
        BBInfo[MBB->getNumber()].Size -= 2;
        adjustBBOffsetsAfter(MBB);
        ++NumT2BrShrunk;
        MadeChange = true;
      }
    }

    Opcode = Br.MI->getOpcode();
    if (Opcode != ARM::tBcc)
      continue;

    // If the conditional branch doesn't kill CPSR, then CPSR can be live out,
    // so this transformation is not safe.
    if (!Br.MI->killsRegister(ARM::CPSR))
      continue;

    NewOpc = 0;
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = getInstrPredicate(Br.MI, PredReg);
    if (Pred == ARMCC::EQ)
      NewOpc = ARM::tCBZ;
    else if (Pred == ARMCC::NE)
      NewOpc = ARM::tCBNZ;
    if (!NewOpc)
      continue;
    MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
    // Check if the distance is within 126. Subtract starting offset by 2
    // because the cmp will be eliminated.
    unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
    unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
    if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
      MachineBasicBlock::iterator CmpMI = Br.MI;
      if (CmpMI != Br.MI->getParent()->begin()) {
        --CmpMI;
        if (CmpMI->getOpcode() == ARM::tCMPi8) {
          unsigned Reg = CmpMI->getOperand(0).getReg();
          Pred = getInstrPredicate(CmpMI, PredReg);
          if (Pred == ARMCC::AL &&
              CmpMI->getOperand(1).getImm() == 0 &&
              isARMLowRegister(Reg)) {
            MachineBasicBlock *MBB = Br.MI->getParent();
            DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
            MachineInstr *NewBR =
              BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
              .addReg(Reg).addMBB(DestBB,Br.MI->getOperand(0).getTargetFlags());
            CmpMI->eraseFromParent();
            Br.MI->eraseFromParent();
            Br.MI = NewBR;
            BBInfo[MBB->getNumber()].Size -= 2;
            adjustBBOffsetsAfter(MBB);
            ++NumCBZ;
            MadeChange = true;
          }
        }
      }
    }
  }

  return MadeChange;
}

/// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
/// jumptables when it's possible.
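/// (Illustrative note, not from the original source: TBB entries are byte
/// offsets scaled by 2 and TBH entries are halfword offsets scaled by 2, so
/// the forward reach per table entry is ((1 << 8) - 1) * 2 = 510 bytes and
/// ((1 << 16) - 1) * 2 = 131070 bytes respectively; these are the ByteOk /
/// HalfWordOk limits checked below.)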
bool ARMConstantIslands::optimizeThumb2JumpTables() {
  bool MadeChange = false;

  // FIXME: After the tables are shrunk, can we get rid of some of the
  // constantpool tables?
  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (MJTI == 0) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    bool ByteOk = true;
    bool HalfWordOk = true;
    unsigned JTOffset = getOffsetOf(MI) + 4;
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
      // Negative offset is not ok. FIXME: We should change BB layout to make
      // sure all the branches are forward.
      if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
        ByteOk = false;
      unsigned TBHLimit = ((1<<16)-1)*2;
      if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
        HalfWordOk = false;
      if (!ByteOk && !HalfWordOk)
        break;
    }

    if (ByteOk || HalfWordOk) {
      MachineBasicBlock *MBB = MI->getParent();
      unsigned BaseReg = MI->getOperand(0).getReg();
      bool BaseRegKill = MI->getOperand(0).isKill();
      if (!BaseRegKill)
        continue;
      unsigned IdxReg = MI->getOperand(1).getReg();
      bool IdxRegKill = MI->getOperand(1).isKill();

      // Scan backwards to find the instruction that defines the base
      // register. Due to post-RA scheduling, we can't count on it
      // immediately preceding the branch instruction.
      MachineBasicBlock::iterator PrevI = MI;
      MachineBasicBlock::iterator B = MBB->begin();
      while (PrevI != B && !PrevI->definesRegister(BaseReg))
        --PrevI;

      // If for some reason we didn't find it, we can't do anything, so
      // just skip this one.
      if (!PrevI->definesRegister(BaseReg))
        continue;

      MachineInstr *AddrMI = PrevI;
      bool OptOk = true;
      // Examine the instruction that calculates the jumptable entry address.
      // Make sure it only defines the base register and kills any uses
      // other than the index register.
      for (unsigned k = 0, eee = AddrMI->getNumOperands(); k != eee; ++k) {
        const MachineOperand &MO = AddrMI->getOperand(k);
        if (!MO.isReg() || !MO.getReg())
          continue;
        if (MO.isDef() && MO.getReg() != BaseReg) {
          OptOk = false;
          break;
        }
        if (MO.isUse() && !MO.isKill() && MO.getReg() != IdxReg) {
          OptOk = false;
          break;
        }
      }
      if (!OptOk)
        continue;

      // Now scan back again to find the tLEApcrel or t2LEApcrelJT instruction
      // that gave us the initial base register definition.
      for (--PrevI; PrevI != B && !PrevI->definesRegister(BaseReg); --PrevI)
        ;

      // The instruction should be a tLEApcrel or t2LEApcrelJT; we want
      // to delete it as well.
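      // Illustrative summary (not from the original source) of the matched
      // sequence, which is roughly:
      //   LeaMI:   t(2)LEApcrelJT  rBase, JTI      ; jump table base address
      //   AddrMI:  <some add>      rBase, rBase, rIdx (scaled)
      //   MI:      t2BR_JT         rBase, rIdx, JTI
      // All three are erased below and replaced with a single t2TBB_JT or
      // t2TBH_JT that only needs the index register and the jump table index.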
      MachineInstr *LeaMI = PrevI;
      if ((LeaMI->getOpcode() != ARM::tLEApcrelJT &&
           LeaMI->getOpcode() != ARM::t2LEApcrelJT) ||
          LeaMI->getOperand(0).getReg() != BaseReg)
        OptOk = false;

      if (!OptOk)
        continue;

      DEBUG(dbgs() << "Shrink JT: " << *MI << " addr: " << *AddrMI
                   << " lea: " << *LeaMI);
      unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
      MachineInstr *NewJTMI = BuildMI(MBB, MI->getDebugLoc(), TII->get(Opc))
        .addReg(IdxReg, getKillRegState(IdxRegKill))
        .addJumpTableIndex(JTI, JTOP.getTargetFlags())
        .addImm(MI->getOperand(JTOpIdx+1).getImm());
      DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
      // FIXME: Insert an "ALIGN" instruction to ensure the next instruction
      // is 2-byte aligned. For now, asm printer will fix it up.
      unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
      unsigned OrigSize = TII->GetInstSizeInBytes(AddrMI);
      OrigSize += TII->GetInstSizeInBytes(LeaMI);
      OrigSize += TII->GetInstSizeInBytes(MI);

      AddrMI->eraseFromParent();
      LeaMI->eraseFromParent();
      MI->eraseFromParent();

      int delta = OrigSize - NewSize;
      BBInfo[MBB->getNumber()].Size -= delta;
      adjustBBOffsetsAfter(MBB);

      ++NumTBs;
      MadeChange = true;
    }
  }

  return MadeChange;
}

/// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
/// jump tables always branch forwards, since that's what tbb and tbh need.
bool ARMConstantIslands::reorderThumb2JumpTables() {
  bool MadeChange = false;

  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (MJTI == 0) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    // We prefer that the target blocks for the jump table come after the jump
    // instruction so we can use TB[BH]. Loop through the target blocks and
    // try to adjust them so that is true.
    int JTNumber = MI->getParent()->getNumber();
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      int DTNumber = MBB->getNumber();

      if (DTNumber < JTNumber) {
        // The destination precedes the switch. Try to move the block forward
        // so we have a positive offset.
        MachineBasicBlock *NewBB =
          adjustJTTargetBlockForward(MBB, MI->getParent());
        if (NewBB)
          MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

MachineBasicBlock *ARMConstantIslands::
adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
  // If the destination block is terminated by an unconditional branch,
  // try to move it; otherwise, create a new block following the jump
  // table that branches back to the actual target. This is a very simple
  // heuristic. FIXME: We can definitely improve it.
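  // Illustrative outcomes (not from the original source):
  //   1) BB ends in an analyzable unconditional branch and is not the entry
  //      block: BB is physically moved to just after JTBB, the jump table
  //      offset becomes forward, and NULL is returned (no table update
  //      needed).
  //   2) Otherwise a new block is created right after JTBB holding a single
  //      "t2B BB"; that trampoline block is returned so the caller can
  //      retarget the jump table entry to it.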
  MachineBasicBlock *TBB = 0, *FBB = 0;
  SmallVector<MachineOperand, 4> Cond;
  SmallVector<MachineOperand, 4> CondPrior;
  MachineFunction::iterator BBi = BB;
  MachineFunction::iterator OldPrior = prior(BBi);

  // If the block terminator isn't analyzable, don't try to move the block.
  bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);

  // If the block ends in an unconditional branch, move it. The prior block
  // has to have an analyzable terminator for us to move this one. Be paranoid
  // and make sure we're not trying to move the entry block of the function.
  if (!B && Cond.empty() && BB != MF->begin() &&
      !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
    BB->moveAfter(JTBB);
    OldPrior->updateTerminator();
    BB->updateTerminator();
    // Update numbering to account for the block being moved.
    MF->RenumberBlocks();
    ++NumJTMoved;
    return NULL;
  }

  // Create a new MBB for the code after the jump BB.
  MachineBasicBlock *NewBB =
    MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
  MachineFunction::iterator MBBI = JTBB; ++MBBI;
  MF->insert(MBBI, NewBB);

  // Add an unconditional branch from NewBB to BB.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond directly to anything in the source.
  assert(isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
  BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B)).addMBB(BB)
          .addImm(ARMCC::AL).addReg(0);

  // Update internal data structures to account for the newly inserted MBB.
  MF->RenumberBlocks(NewBB);

  // Update the CFG.
  NewBB->addSuccessor(BB);
  JTBB->removeSuccessor(BB);
  JTBB->addSuccessor(NewBB);

  ++NumJTInserted;
  return NewBB;
}