//===-- XCoreFrameLowering.cpp - Frame info for XCore Target --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains XCore frame information that doesn't fit anywhere else
// cleanly...
//
//===----------------------------------------------------------------------===//

#include "XCoreFrameLowering.h"
#include "XCore.h"
#include "XCoreInstrInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm> // std::sort

using namespace llvm;

static const unsigned FramePtr = XCore::R10;
static const int MaxImmU16 = (1<<16) - 1;

// Helper functions. FIXME: Eliminate.
static inline bool isImmU6(unsigned val) {
  return val < (1 << 6);
}

static inline bool isImmU16(unsigned val) {
  return val < (1 << 16);
}

// Helper structure with compare function for handling stack slots.
namespace {
struct StackSlotInfo {
  int FI;
  int Offset;
  unsigned Reg;
  StackSlotInfo(int f, int o, int r) : FI(f), Offset(o), Reg(r){};
};
} // end anonymous namespace

static bool CompareSSIOffset(const StackSlotInfo& a, const StackSlotInfo& b) {
  return a.Offset < b.Offset;
}

static void EmitDefCfaRegister(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI,
                               const DebugLoc &dl, const TargetInstrInfo &TII,
                               MachineFunction &MF, unsigned DRegNum) {
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createDefCfaRegister(nullptr, DRegNum));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitDefCfaOffset(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int Offset) {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitCfiOffset(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, unsigned DRegNum,
                          int Offset) {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createOffset(nullptr, DRegNum, Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// The SP register is moved in steps of 'MaxImmU16' towards the bottom of the
/// frame. During these steps, it may be necessary to spill registers.
/// IfNeededExtSP emits the necessary EXTSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an STWSP_lru6.
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] Adjusted the current SP offset from the top of the frame.
static void IfNeededExtSP(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, int OffsetFromTop,
                          int &Adjusted, int FrameSize, bool emitFrameMoves) {
  while (OffsetFromTop > Adjusted) {
    assert(Adjusted < FrameSize && "OffsetFromTop is beyond FrameSize");
    int remaining = FrameSize - Adjusted;
    int OpImm = (remaining > MaxImmU16) ? MaxImmU16 : remaining;
    int Opcode = isImmU6(OpImm) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(OpImm);
    Adjusted += OpImm;
    if (emitFrameMoves)
      EmitDefCfaOffset(MBB, MBBI, dl, TII, Adjusted*4);
  }
}

/// The SP register is moved in steps of 'MaxImmU16' towards the top of the
/// frame. During these steps, it may be necessary to re-load registers.
/// IfNeededLDAWSP emits the necessary LDAWSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an LDAWSP_lru6.
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] RemainingAdj the current SP offset from the top of the
/// frame.
static void IfNeededLDAWSP(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                           const TargetInstrInfo &TII, int OffsetFromTop,
                           int &RemainingAdj) {
  while (OffsetFromTop < RemainingAdj - MaxImmU16) {
    assert(RemainingAdj && "OffsetFromTop is beyond FrameSize");
    int OpImm = (RemainingAdj > MaxImmU16) ? MaxImmU16 : RemainingAdj;
    int Opcode = isImmU6(OpImm) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(OpImm);
    RemainingAdj -= OpImm;
  }
}
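
// Worked example of the stepping scheme above (illustrative numbers, assuming
// the plain EXTSP path with no ENTSP/RETSP folding): for a frame of 0x18000
// words with one register spilled at word offset 1 from the top, the prologue
// emits EXTSP_lu6 0xffff, stores the register with STWSP_lru6 at offset
// 0xfffe, and completes the adjustment with EXTSP_lu6 0x8001. The epilogue
// mirrors this: LDAWSP_lru6 0xffff brings the spill slot back in range,
// LDWSP_lru6 at offset 0x8000 reloads the register, and a final LDAWSP_lru6
// 0x8001 (or RETSP_lu6 0x8001) restores the SP.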

/// Creates an ordered list of registers that are spilled
/// during the emitPrologue/emitEpilogue.
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
static void GetSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                         MachineFrameInfo &MFI, XCoreFunctionInfo *XFI,
                         bool fetchLR, bool fetchFP) {
  if (fetchLR) {
    int Offset = MFI.getObjectOffset(XFI->getLRSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getLRSpillSlot(),
                                      Offset,
                                      XCore::LR));
  }
  if (fetchFP) {
    int Offset = MFI.getObjectOffset(XFI->getFPSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getFPSpillSlot(),
                                      Offset,
                                      FramePtr));
  }
  llvm::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
}

/// Creates an ordered list of EH info register 'spills'.
/// These slots are only used by the unwinder and calls to llvm.eh.return().
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                           MachineFrameInfo &MFI, XCoreFunctionInfo *XFI,
                           const Constant *PersonalityFn,
                           const TargetLowering *TL) {
  assert(XFI->hasEHSpillSlot() && "There are no EH register spill slots");
  const int *EHSlot = XFI->getEHSpillSlot();
  SpillList.push_back(
      StackSlotInfo(EHSlot[0], MFI.getObjectOffset(EHSlot[0]),
                    TL->getExceptionPointerRegister(PersonalityFn)));
  SpillList.push_back(
      StackSlotInfo(EHSlot[0], MFI.getObjectOffset(EHSlot[1]),
                    TL->getExceptionSelectorRegister(PersonalityFn)));
  llvm::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
}

static MachineMemOperand *getFrameIndexMMO(MachineBasicBlock &MBB,
                                           int FrameIndex,
                                           MachineMemOperand::Flags flags) {
  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FrameIndex), flags,
      MFI.getObjectSize(FrameIndex), MFI.getObjectAlignment(FrameIndex));
  return MMO;
}

/// Restore clobbered registers with their spill slot value.
/// The SP will be adjusted at the same time, thus the SpillList must be ordered
/// with the largest (negative) offsets first.
static void RestoreSpillList(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int &RemainingAdj,
                             SmallVectorImpl<StackSlotInfo> &SpillList) {
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededLDAWSP(MBB, MBBI, dl, TII, OffsetFromTop, RemainingAdj);
    int Offset = RemainingAdj - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), SpillList[i].Reg)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOLoad));
  }
}

//===----------------------------------------------------------------------===//
// XCoreFrameLowering:
//===----------------------------------------------------------------------===//

XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
  : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0) {
  // Do nothing
}

bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
         MF.getFrameInfo().hasVarSizedObjects();
}
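
// Stack arithmetic in the prologue/epilogue below is done in 4-byte words:
// MFI.getStackSize() and the MFI object offsets are byte quantities, so they
// are divided by 4 before being used as ENTSP/EXTSP/RETSP/LDAWSP/STWSP/LDWSP
// immediates, which count words. CFI directives take byte offsets, hence the
// multiplication by 4 when emitting the def_cfa_offset and the use of the raw
// MFI byte offset when emitting register offsets.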

void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = &MF.getMMI();
  const MCRegisterInfo *MRI = MMI->getContext().getRegisterInfo();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc dl;

  if (MFI.getMaxAlignment() > getStackAlignment())
    report_fatal_error("emitPrologue unsupported alignment: "
                       + Twine(MFI.getMaxAlignment()));

  const AttributeList &PAL = MF.getFunction().getAttributes();
  if (PAL.hasAttrSomewhere(Attribute::Nest))
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
    // FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  assert(MFI.getStackSize()%4 == 0 && "Misaligned frame size");
  const int FrameSize = MFI.getStackSize() / 4;
  int Adjusted = 0;

  bool saveLR = XFI->hasLRSpillSlot();
  bool UseENTSP = saveLR && FrameSize
                  && (MFI.getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseENTSP)
    saveLR = false;
  bool FP = hasFP(MF);
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);

  if (UseENTSP) {
    // Allocate space on the stack at the same time as saving LR.
    Adjusted = (FrameSize > MaxImmU16) ? MaxImmU16 : FrameSize;
    int Opcode = isImmU6(Adjusted) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
    MBB.addLiveIn(XCore::LR);
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode));
    MIB.addImm(Adjusted);
    MIB->addRegisterKilled(XCore::LR, MF.getSubtarget().getRegisterInfo(),
                           true);
    if (emitFrameMoves) {
      EmitDefCfaOffset(MBB, MBBI, dl, TII, Adjusted*4);
      unsigned DRegNum = MRI->getDwarfRegNum(XCore::LR, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, 0);
    }
  }

  // If necessary, save LR and FP to the stack, as we EXTSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, saveLR, FP);
  // We want the nearest (negative) offsets first, so reverse list.
  std::reverse(SpillList.begin(), SpillList.end());
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededExtSP(MBB, MBBI, dl, TII, OffsetFromTop, Adjusted, FrameSize,
                  emitFrameMoves);
    int Offset = Adjusted - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
    MBB.addLiveIn(SpillList[i].Reg);
    BuildMI(MBB, MBBI, dl, TII.get(Opcode))
        .addReg(SpillList[i].Reg, RegState::Kill)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOStore));
    if (emitFrameMoves) {
      unsigned DRegNum = MRI->getDwarfRegNum(SpillList[i].Reg, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, SpillList[i].Offset);
    }
  }

  // Complete any remaining stack adjustment.
  IfNeededExtSP(MBB, MBBI, dl, TII, FrameSize, Adjusted, FrameSize,
                emitFrameMoves);
  assert(Adjusted==FrameSize && "IfNeededExtSP has not completed adjustment");

  if (FP) {
    // Set the FP from the SP.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr).addImm(0);
    if (emitFrameMoves)
      EmitDefCfaRegister(MBB, MBBI, dl, TII, MF,
                         MRI->getDwarfRegNum(FramePtr, true));
  }

  if (emitFrameMoves) {
    // Frame moves for callee saved.
    for (const auto &SpillLabel : XFI->getSpillLabels()) {
      MachineBasicBlock::iterator Pos = SpillLabel.first;
      ++Pos;
      const CalleeSavedInfo &CSI = SpillLabel.second;
      int Offset = MFI.getObjectOffset(CSI.getFrameIdx());
      unsigned DRegNum = MRI->getDwarfRegNum(CSI.getReg(), true);
      EmitCfiOffset(MBB, Pos, dl, TII, DRegNum, Offset);
    }
    if (XFI->hasEHSpillSlot()) {
      // The unwinder requires stack slot & CFI offsets for the exception info.
      // We do not save/spill these registers.
      const Function *Fn = &MF.getFunction();
      const Constant *PersonalityFn =
          Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
      SmallVector<StackSlotInfo, 2> SpillList;
      GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                     MF.getSubtarget().getTargetLowering());
      assert(SpillList.size()==2 && "Unexpected SpillList size");
      EmitCfiOffset(MBB, MBBI, dl, TII,
                    MRI->getDwarfRegNum(SpillList[0].Reg, true),
                    SpillList[0].Offset);
      EmitCfiOffset(MBB, MBBI, dl, TII,
                    MRI->getDwarfRegNum(SpillList[1].Reg, true),
                    SpillList[1].Offset);
    }
  }
}
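
// emitEpilogue below handles two cases: an EH_RETURN terminator, where the
// exception info is reloaded from its spill slots, the SP is set from the
// EH_RETURN stack operand and control branches to the handler; and the normal
// return path, where any remaining deallocation is folded into RETSP whenever
// the LR spill slot sits at frame offset 0 (UseRETSP).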

void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  DebugLoc dl = MBBI->getDebugLoc();
  unsigned RetOpcode = MBBI->getOpcode();

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  int RemainingAdj = MFI.getStackSize();
  assert(RemainingAdj%4 == 0 && "Misaligned frame size");
  RemainingAdj /= 4;

  if (RetOpcode == XCore::EH_RETURN) {
    // 'Restore' the exception info the unwinder has placed into the stack
    // slots.
    const Function *Fn = &MF.getFunction();
    const Constant *PersonalityFn =
        Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
    SmallVector<StackSlotInfo, 2> SpillList;
    GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                   MF.getSubtarget().getTargetLowering());
    RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

    // Return to the landing pad.
    unsigned EhStackReg = MBBI->getOperand(0).getReg();
    unsigned EhHandlerReg = MBBI->getOperand(1).getReg();
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(EhStackReg);
    BuildMI(MBB, MBBI, dl, TII.get(XCore::BAU_1r)).addReg(EhHandlerReg);
    MBB.erase(MBBI); // Erase the previous return instruction.
    return;
  }

  bool restoreLR = XFI->hasLRSpillSlot();
  bool UseRETSP = restoreLR && RemainingAdj
                  && (MFI.getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseRETSP)
    restoreLR = false;
  bool FP = hasFP(MF);

  if (FP) // Restore the stack pointer.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(FramePtr);

  // If necessary, restore LR and FP from the stack, as we LDAWSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, restoreLR, FP);
  RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

  if (RemainingAdj) {
    // Complete all but one of the remaining stack adjustments.
    IfNeededLDAWSP(MBB, MBBI, dl, TII, 0, RemainingAdj);
    if (UseRETSP) {
      // Fold the remaining stack adjustment into the return instruction.
      assert(RetOpcode == XCore::RETSP_u6
             || RetOpcode == XCore::RETSP_lu6);
      int Opcode = isImmU6(RemainingAdj) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode))
                                    .addImm(RemainingAdj);
      for (unsigned i = 3, e = MBBI->getNumOperands(); i < e; ++i)
        MIB->addOperand(MBBI->getOperand(i)); // copy any variadic operands
      MBB.erase(MBBI); // Erase the previous return instruction.
    } else {
      int Opcode = isImmU6(RemainingAdj) ? XCore::LDAWSP_ru6 :
                                           XCore::LDAWSP_lru6;
      BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(RemainingAdj);
      // Don't erase the return instruction.
    }
  } // else Don't erase the return instruction.
}

bool XCoreFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>();
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF);

  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
       it != CSI.end(); ++it) {
    unsigned Reg = it->getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitPrologue");

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, it->getFrameIdx(), RC, TRI);
    if (emitFrameMoves) {
      auto Store = MI;
      --Store;
      XFI->getSpillLabels().push_back(std::make_pair(Store, *it));
    }
  }
  return true;
}

bool XCoreFrameLowering::
restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            std::vector<CalleeSavedInfo> &CSI,
                            const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  bool AtStart = MI == MBB.begin();
  MachineBasicBlock::iterator BeforeI = MI;
  if (!AtStart)
    --BeforeI;
  for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
       it != CSI.end(); ++it) {
    unsigned Reg = it->getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitEpilogue");

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, it->getFrameIdx(), RC, TRI);
    assert(MI != MBB.begin() &&
           "loadRegFromStackSlot didn't insert any code!");
    // Insert in reverse order. loadRegFromStackSlot can insert multiple
    // instructions.
    if (AtStart)
      MI = MBB.begin();
    else {
      MI = BeforeI;
      ++MI;
    }
  }
  return true;
}

// This function eliminates ADJCALLSTACKDOWN,
// ADJCALLSTACKUP pseudo instructions
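// (when the call frame is not reserved, they are lowered to 'extsp <amt>' /
// 'ldaw sp, sp[<amt>]', with the byte amount rounded up to the stack
// alignment and converted to words; e.g. a hypothetical 10-byte
// outgoing-argument area rounds up to 12 bytes and becomes 'extsp 3' /
// 'ldaw sp, sp[3]').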
MachineBasicBlock::iterator XCoreFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  if (!hasReservedCallFrame(MF)) {
    // Turn the adjcallstackdown instruction into 'extsp <amt>' and the
    // adjcallstackup instruction into 'ldaw sp, sp[<amt>]'
    MachineInstr &Old = *I;
    uint64_t Amount = Old.getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      assert(Amount%4 == 0);
      Amount /= 4;

      bool isU6 = isImmU6(Amount);
      if (!isU6 && !isImmU16(Amount)) {
        // FIX could emit multiple instructions in this case.
#ifndef NDEBUG
        errs() << "eliminateCallFramePseudoInstr size too big: "
               << Amount << "\n";
#endif
        llvm_unreachable(nullptr);
      }

      MachineInstr *New;
      if (Old.getOpcode() == XCore::ADJCALLSTACKDOWN) {
        int Opcode = isU6 ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
        New = BuildMI(MF, Old.getDebugLoc(), TII.get(Opcode)).addImm(Amount);
      } else {
        assert(Old.getOpcode() == XCore::ADJCALLSTACKUP);
        int Opcode = isU6 ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
        New = BuildMI(MF, Old.getDebugLoc(), TII.get(Opcode), XCore::SP)
                  .addImm(Amount);
      }

      // Replace the pseudo instruction with a new instruction...
      MBB.insert(I, New);
    }
  }

  return MBB.erase(I);
}

void XCoreFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  bool LRUsed = MRI.isPhysRegModified(XCore::LR);

  if (!LRUsed && !MF.getFunction().isVarArg() &&
      MF.getFrameInfo().estimateStackSize(MF))
    // If we need to extend the stack it is more efficient to use entsp / retsp.
    // We force the LR to be saved so these instructions are used.
    LRUsed = true;

  if (MF.callsUnwindInit() || MF.callsEHReturn()) {
    // The unwinder expects to find spill slots for the exception info regs R0
    // & R1. These are used during llvm.eh.return() to 'restore' the exception
    // info. N.B. we do not spill or restore R0, R1 during normal operation.
    XFI->createEHSpillSlot(MF);
    // As we will have a stack, we force the LR to be saved.
    LRUsed = true;
  }

  if (LRUsed) {
    // We will handle the LR in the prologue/epilogue
    // and allocate space on the stack ourselves.
    SavedRegs.reset(XCore::LR);
    XFI->createLRSpillSlot(MF);
  }

  if (hasFP(MF))
    // A callee save register is used to hold the FP.
    // This needs saving / restoring in the prologue / epilogue.
    XFI->createFPSpillSlot(MF);
}

void XCoreFrameLowering::
processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                    RegScavenger *RS) const {
  assert(RS && "requiresRegisterScavenging failed");
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterClass &RC = XCore::GRRegsRegClass;
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Reserve slots close to SP or frame pointer for scavenging spills.
  // When using SP for small frames, we don't need any scratch registers.
  // When using SP for large frames, we may need 2 scratch registers.
  // When using FP, for large or small frames, we may need 1 scratch register.
  unsigned Size = TRI.getSpillSize(RC);
  unsigned Align = TRI.getSpillAlignment(RC);
  if (XFI->isLargeFrame(MF) || hasFP(MF))
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
  if (XFI->isLargeFrame(MF) && !hasFP(MF))
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}