//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), BasePtr(ARM::R6) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  if (STI.isTargetMachO()) {
    if (STI.isTargetDarwin() || STI.isThumb1Only())
      return ARM::R7;
    else
      return ARM::R11;
  } else if (STI.isTargetWindows())
    return ARM::R11;
  else // ARM EABI
    return STI.isThumb() ? ARM::R7 : ARM::R11;
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  const MCPhysReg *RegList =
      STI.isTargetDarwin() ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;

  const Function *F = MF->getFunction();
  if (F->getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved registers is empty, as all of those
    // registers are used for passing STG registers around.
    return CSR_NoRegs_SaveList;
  } else if (F->hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return CSR_AAPCS_SaveList;
    } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
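      // (The generic-interrupt save list is therefore expected to also cover
      // the usual scratch registers R0-R3 and R12, which an asynchronously
      // invoked handler must preserve for the interrupted code.)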
      return CSR_GenericInt_SaveList;
    }
  }

  return RegList;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return NULL.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  Reserved.set(ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    Reserved.set(getFramePointerReg(STI));
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
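  // (E.g. a VFPv2 or VFPv3-D16 subtarget only implements D0-D15, so the upper
  // sixteen D registers must never be allocated.)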
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15);
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }
  // A GPRPair is reserved whenever either of its sub-registers is reserved.
  const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I)
    for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        Reserved.set(*I);

  return Reserved;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass; // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
void
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
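  // (E.g. a RegPairEven vreg whose partner was assigned R5 would get R4, the
  // even half of the R4_R5 pair, as its first hint.)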
  unsigned Paired = Hint.second;
  if (Paired == 0)
    return;

  unsigned PairedPhys = 0;
  if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys &&
      std::find(Order.begin(), Order.end(), PairedPhys) != Order.end())
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (unsigned I = 0, E = Order.size(); I != E; ++I) {
    unsigned Reg = Order[I];
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
}

void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one half of an even / odd register pair and it has now been
    // changed (e.g. coalesced) into a different register, the allocation hint
    // on the other half of the pair must be updated to reflect the new
    // relationship.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (TargetRegisterInfo::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == (unsigned)ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach the locals. If a function has a
    // smallish frame, it's less likely to have lots of spills and callee-saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If the estimate is wrong, the scavenger will still make the access work;
    // it just won't be optimal.
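    // (The 128-byte cutoff below is well inside Thumb2's 255-byte negative
    // ldr/str range, leaving roughly half of that range for spill and
    // callee-saved slots.)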
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  if (AFI->isThumb1OnlyFunction())
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() ||
         needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
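///
/// Illustrative (hypothetical) use, materializing 0x11223344 into r4 with no
/// predication:
///   emitLoadConstPool(MBB, MBBI, DL, ARM::R4, 0, 0x11223344, ARMCC::AL, 0, 0);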
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0).addImm(Pred).addReg(PredReg)
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}
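
// Illustrative decode for the AddrMode5 case above: a VLDRD whose offset
// operand encodes {sub, imm8 = 3} yields InstrOffs = -3 with Scale = 4, i.e.
// an effective byte offset of -12.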

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet), whether this offset is
  // likely to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7 and LR account for 8 bytes.
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  // (4 x 4 bytes + 8 x 8 bytes = 80 bytes).
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We don't
  // know for sure yet whether we'll need that, so we guess based on whether
  // there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}
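
// In short, the heuristic above: (1) only plain load/store opcodes get
// virtual base registers; (2) an FP-relative and an SP-relative offset are
// estimated under conservative spill assumptions; (3) a base register is
// requested only when neither estimate is likely to be encodable.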

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
/// to be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets.
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             unsigned BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 only positive ones, so
    // consider the appropriate instruction based on the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
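  // Thumb1: SP-relative slots get an 8-bit scaled immediate (tLDRspi/tSTRspi);
  // other base registers only get 5 bits.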
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest by providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
      ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
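    // (Load/store-multiple and NEON structure accesses take no immediate, so
    // the FI operand can simply be rewritten to the frame register.)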
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register, this should be OK because we shouldn't
  // need to split the register.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many expensive
  // registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
               << It->second << "\n");
  DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
               << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets these criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight-line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size() / 100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}