//===-- XCoreFrameLowering.cpp - Frame info for XCore Target --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains XCore frame information that doesn't fit anywhere else
// cleanly...
//
//===----------------------------------------------------------------------===//

#include "XCoreFrameLowering.h"
#include "XCore.h"
#include "XCoreInstrInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm> // std::sort

using namespace llvm;

static const unsigned FramePtr = XCore::R10;
static const int MaxImmU16 = (1 << 16) - 1;

// Helper functions. FIXME: Eliminate.
static inline bool isImmU6(unsigned val) {
  return val < (1 << 6);
}

static inline bool isImmU16(unsigned val) {
  return val < (1 << 16);
}

// Helper structure with compare function for handling stack slots.
namespace {
struct StackSlotInfo {
  int FI;
  int Offset;
  unsigned Reg;
  StackSlotInfo(int f, int o, int r) : FI(f), Offset(o), Reg(r){};
};
} // end anonymous namespace

static bool CompareSSIOffset(const StackSlotInfo& a, const StackSlotInfo& b) {
  return a.Offset < b.Offset;
}

static void EmitDefCfaRegister(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI,
                               const DebugLoc &dl, const TargetInstrInfo &TII,
                               MachineModuleInfo *MMI, unsigned DRegNum) {
  unsigned CFIIndex = MMI->addFrameInst(
      MCCFIInstruction::createDefCfaRegister(nullptr, DRegNum));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitDefCfaOffset(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             MachineModuleInfo *MMI, int Offset) {
  unsigned CFIIndex =
      MMI->addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitCfiOffset(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, MachineModuleInfo *MMI,
                          unsigned DRegNum, int Offset) {
  unsigned CFIIndex = MMI->addFrameInst(
      MCCFIInstruction::createOffset(nullptr, DRegNum, Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// The SP register is moved in steps of 'MaxImmU16' towards the bottom of the
/// frame. During these steps, it may be necessary to spill registers.
/// IfNeededExtSP emits the necessary EXTSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an STWSP_lru6.
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] Adjusted the current SP offset from the top of the frame.
static void IfNeededExtSP(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, MachineModuleInfo *MMI,
                          int OffsetFromTop, int &Adjusted, int FrameSize,
                          bool emitFrameMoves) {
  while (OffsetFromTop > Adjusted) {
    assert(Adjusted < FrameSize && "OffsetFromTop is beyond FrameSize");
    int remaining = FrameSize - Adjusted;
    int OpImm = (remaining > MaxImmU16) ? MaxImmU16 : remaining;
    int Opcode = isImmU6(OpImm) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(OpImm);
    Adjusted += OpImm;
    if (emitFrameMoves)
      EmitDefCfaOffset(MBB, MBBI, dl, TII, MMI, Adjusted*4);
  }
}

/// The SP register is moved in steps of 'MaxImmU16' towards the top of the
/// frame. During these steps, it may be necessary to re-load registers.
/// IfNeededLDAWSP emits the necessary LDAWSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an LDAWSP_lru6.
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] RemainingAdj the current SP offset from the top of the
/// frame.
static void IfNeededLDAWSP(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                           const TargetInstrInfo &TII, int OffsetFromTop,
                           int &RemainingAdj) {
  while (OffsetFromTop < RemainingAdj - MaxImmU16) {
    assert(RemainingAdj && "OffsetFromTop is beyond FrameSize");
    int OpImm = (RemainingAdj > MaxImmU16) ? MaxImmU16 : RemainingAdj;
    int Opcode = isImmU6(OpImm) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(OpImm);
    RemainingAdj -= OpImm;
  }
}

/// Creates an ordered list of registers that are spilled
/// during the emitPrologue/emitEpilogue.
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
static void GetSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                         MachineFrameInfo *MFI, XCoreFunctionInfo *XFI,
                         bool fetchLR, bool fetchFP) {
  if (fetchLR) {
    int Offset = MFI->getObjectOffset(XFI->getLRSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getLRSpillSlot(),
                                      Offset,
                                      XCore::LR));
  }
  if (fetchFP) {
    int Offset = MFI->getObjectOffset(XFI->getFPSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getFPSpillSlot(),
                                      Offset,
                                      FramePtr));
  }
  std::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
}

/// Creates an ordered list of EH info register 'spills'.
/// These slots are only used by the unwinder and calls to llvm.eh.return().
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                           MachineFrameInfo *MFI, XCoreFunctionInfo *XFI,
                           const Constant *PersonalityFn,
                           const TargetLowering *TL) {
  assert(XFI->hasEHSpillSlot() && "There are no EH register spill slots");
  const int *EHSlot = XFI->getEHSpillSlot();
  SpillList.push_back(
      StackSlotInfo(EHSlot[0], MFI->getObjectOffset(EHSlot[0]),
                    TL->getExceptionPointerRegister(PersonalityFn)));
  SpillList.push_back(
      StackSlotInfo(EHSlot[1], MFI->getObjectOffset(EHSlot[1]),
                    TL->getExceptionSelectorRegister(PersonalityFn)));
  std::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
}

static MachineMemOperand *
getFrameIndexMMO(MachineBasicBlock &MBB, int FrameIndex, unsigned flags) {
  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = *MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FrameIndex), flags,
      MFI.getObjectSize(FrameIndex), MFI.getObjectAlignment(FrameIndex));
  return MMO;
}

/// Restore clobbered registers with their spill slot value.
/// The SP will be adjusted at the same time, thus the SpillList must be ordered
/// with the largest (negative) offsets first.
static void RestoreSpillList(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int &RemainingAdj,
                             SmallVectorImpl<StackSlotInfo> &SpillList) {
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededLDAWSP(MBB, MBBI, dl, TII, OffsetFromTop, RemainingAdj);
    int Offset = RemainingAdj - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), SpillList[i].Reg)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOLoad));
  }
}

//===----------------------------------------------------------------------===//
// XCoreFrameLowering:
//===----------------------------------------------------------------------===//

XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0) {
  // Do nothing
}

bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
         MF.getFrameInfo()->hasVarSizedObjects();
}

void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = &MF.getMMI();
  const MCRegisterInfo *MRI = MMI->getContext().getRegisterInfo();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc dl;

  if (MFI->getMaxAlignment() > getStackAlignment())
    report_fatal_error("emitPrologue unsupported alignment: "
                       + Twine(MFI->getMaxAlignment()));

  const AttributeSet &PAL = MF.getFunction()->getAttributes();
  if (PAL.hasAttrSomewhere(Attribute::Nest))
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
    // FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  assert(MFI->getStackSize()%4 == 0 && "Misaligned frame size");
  const int FrameSize = MFI->getStackSize() / 4;
  int Adjusted = 0;

  bool saveLR = XFI->hasLRSpillSlot();
  bool UseENTSP = saveLR && FrameSize
                  && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseENTSP)
    saveLR = false;
  bool FP = hasFP(MF);
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);

  if (UseENTSP) {
    // Allocate space on the stack at the same time as saving LR.
    Adjusted = (FrameSize > MaxImmU16) ? MaxImmU16 : FrameSize;
    int Opcode = isImmU6(Adjusted) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
    MBB.addLiveIn(XCore::LR);
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode));
    MIB.addImm(Adjusted);
    MIB->addRegisterKilled(XCore::LR, MF.getSubtarget().getRegisterInfo(),
                           true);
    if (emitFrameMoves) {
      EmitDefCfaOffset(MBB, MBBI, dl, TII, MMI, Adjusted*4);
      unsigned DRegNum = MRI->getDwarfRegNum(XCore::LR, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, MMI, DRegNum, 0);
    }
  }

  // If necessary, save LR and FP to the stack, as we EXTSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, saveLR, FP);
  // We want the nearest (negative) offsets first, so reverse list.
  std::reverse(SpillList.begin(), SpillList.end());
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededExtSP(MBB, MBBI, dl, TII, MMI, OffsetFromTop, Adjusted, FrameSize,
                  emitFrameMoves);
    int Offset = Adjusted - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
    MBB.addLiveIn(SpillList[i].Reg);
    BuildMI(MBB, MBBI, dl, TII.get(Opcode))
        .addReg(SpillList[i].Reg, RegState::Kill)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOStore));
    if (emitFrameMoves) {
      unsigned DRegNum = MRI->getDwarfRegNum(SpillList[i].Reg, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, MMI, DRegNum, SpillList[i].Offset);
    }
  }

  // Complete any remaining stack adjustment.
  IfNeededExtSP(MBB, MBBI, dl, TII, MMI, FrameSize, Adjusted, FrameSize,
                emitFrameMoves);
  assert(Adjusted==FrameSize && "IfNeededExtSP has not completed adjustment");

  if (FP) {
    // Set the FP from the SP.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr).addImm(0);
    if (emitFrameMoves)
      EmitDefCfaRegister(MBB, MBBI, dl, TII, MMI,
                         MRI->getDwarfRegNum(FramePtr, true));
  }

  if (emitFrameMoves) {
    // Frame moves for callee saved.
    for (const auto &SpillLabel : XFI->getSpillLabels()) {
      MachineBasicBlock::iterator Pos = SpillLabel.first;
      ++Pos;
      const CalleeSavedInfo &CSI = SpillLabel.second;
      int Offset = MFI->getObjectOffset(CSI.getFrameIdx());
      unsigned DRegNum = MRI->getDwarfRegNum(CSI.getReg(), true);
      EmitCfiOffset(MBB, Pos, dl, TII, MMI, DRegNum, Offset);
    }
    if (XFI->hasEHSpillSlot()) {
      // The unwinder requires stack slot & CFI offsets for the exception info.
      // We do not save/spill these registers.
      const Function *Fn = MF.getFunction();
      const Constant *PersonalityFn =
          Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
      SmallVector<StackSlotInfo, 2> SpillList;
      GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                     MF.getSubtarget().getTargetLowering());
      assert(SpillList.size()==2 && "Unexpected SpillList size");
      EmitCfiOffset(MBB, MBBI, dl, TII, MMI,
                    MRI->getDwarfRegNum(SpillList[0].Reg, true),
                    SpillList[0].Offset);
      EmitCfiOffset(MBB, MBBI, dl, TII, MMI,
                    MRI->getDwarfRegNum(SpillList[1].Reg, true),
                    SpillList[1].Offset);
    }
  }
}

void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  DebugLoc dl = MBBI->getDebugLoc();
  unsigned RetOpcode = MBBI->getOpcode();

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  int RemainingAdj = MFI->getStackSize();
  assert(RemainingAdj%4 == 0 && "Misaligned frame size");
  RemainingAdj /= 4;

  if (RetOpcode == XCore::EH_RETURN) {
    // 'Restore' the exception info the unwinder has placed into the stack
    // slots.
    const Function *Fn = MF.getFunction();
    const Constant *PersonalityFn =
        Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
    SmallVector<StackSlotInfo, 2> SpillList;
    GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                   MF.getSubtarget().getTargetLowering());
    RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

    // Return to the landing pad.
    unsigned EhStackReg = MBBI->getOperand(0).getReg();
    unsigned EhHandlerReg = MBBI->getOperand(1).getReg();
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(EhStackReg);
    BuildMI(MBB, MBBI, dl, TII.get(XCore::BAU_1r)).addReg(EhHandlerReg);
    MBB.erase(MBBI); // Erase the previous return instruction.
    return;
  }

  bool restoreLR = XFI->hasLRSpillSlot();
  bool UseRETSP = restoreLR && RemainingAdj
                  && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseRETSP)
    restoreLR = false;
  bool FP = hasFP(MF);

  if (FP) // Restore the stack pointer.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(FramePtr);

  // If necessary, restore LR and FP from the stack, as we LDAWSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, restoreLR, FP);
  RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

  if (RemainingAdj) {
    // Complete all but one of the remaining stack adjustments.
    IfNeededLDAWSP(MBB, MBBI, dl, TII, 0, RemainingAdj);
    if (UseRETSP) {
      // Fold the epilogue into the return instruction.
      assert(RetOpcode == XCore::RETSP_u6 || RetOpcode == XCore::RETSP_lu6);
      int Opcode = isImmU6(RemainingAdj) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode))
                                    .addImm(RemainingAdj);
      for (unsigned i = 3, e = MBBI->getNumOperands(); i < e; ++i)
        MIB->addOperand(MBBI->getOperand(i)); // copy any variadic operands
      MBB.erase(MBBI); // Erase the previous return instruction.
    } else {
      int Opcode = isImmU6(RemainingAdj) ? XCore::LDAWSP_ru6 :
                                           XCore::LDAWSP_lru6;
      BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(RemainingAdj);
      // Don't erase the return instruction.
    }
  } // else Don't erase the return instruction.
}

bool XCoreFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>();
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF);

  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugValue())
    DL = MI->getDebugLoc();

  for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
       it != CSI.end(); ++it) {
    unsigned Reg = it->getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitPrologue");

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, it->getFrameIdx(), RC, TRI);
    if (emitFrameMoves) {
      auto Store = MI;
      --Store;
      XFI->getSpillLabels().push_back(std::make_pair(Store, *it));
    }
  }
  return true;
}

bool XCoreFrameLowering::
restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const std::vector<CalleeSavedInfo> &CSI,
                            const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  bool AtStart = MI == MBB.begin();
  MachineBasicBlock::iterator BeforeI = MI;
  if (!AtStart)
    --BeforeI;
  for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
       it != CSI.end(); ++it) {
    unsigned Reg = it->getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitEpilogue");

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, it->getFrameIdx(), RC, TRI);
    assert(MI != MBB.begin() &&
           "loadRegFromStackSlot didn't insert any code!");
    // Insert in reverse order. loadRegFromStackSlot can insert multiple
    // instructions.
    if (AtStart)
      MI = MBB.begin();
    else {
      MI = BeforeI;
      ++MI;
    }
  }
  return true;
}

// This function eliminates ADJCALLSTACKDOWN,
// ADJCALLSTACKUP pseudo instructions.
MachineBasicBlock::iterator XCoreFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  if (!hasReservedCallFrame(MF)) {
    // Turn the adjcallstackdown instruction into 'extsp <amt>' and the
    // adjcallstackup instruction into 'ldaw sp, sp[<amt>]'
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      assert(Amount%4 == 0);
      Amount /= 4;

      bool isU6 = isImmU6(Amount);
      if (!isU6 && !isImmU16(Amount)) {
        // FIX: could emit multiple instructions in this case.
#ifndef NDEBUG
        errs() << "eliminateCallFramePseudoInstr size too big: "
               << Amount << "\n";
#endif
        llvm_unreachable(nullptr);
      }

      MachineInstr *New;
      if (Old->getOpcode() == XCore::ADJCALLSTACKDOWN) {
        int Opcode = isU6 ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
        New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opcode))
                  .addImm(Amount);
      } else {
        assert(Old->getOpcode() == XCore::ADJCALLSTACKUP);
        int Opcode = isU6 ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
        New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opcode), XCore::SP)
                  .addImm(Amount);
      }

      // Replace the pseudo instruction with a new instruction...
      MBB.insert(I, New);
    }
  }

  return MBB.erase(I);
}

void XCoreFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  bool LRUsed = MRI.isPhysRegModified(XCore::LR);

  if (!LRUsed && !MF.getFunction()->isVarArg() &&
      MF.getFrameInfo()->estimateStackSize(MF))
    // If we need to extend the stack it is more efficient to use entsp / retsp.
    // We force the LR to be saved so these instructions are used.
    LRUsed = true;

  if (MF.getMMI().callsUnwindInit() || MF.getMMI().callsEHReturn()) {
    // The unwinder expects to find spill slots for the exception info regs R0
    // & R1. These are used during llvm.eh.return() to 'restore' the exception
    // info. N.B. we do not spill or restore R0, R1 during normal operation.
    XFI->createEHSpillSlot(MF);
    // As we will have a stack, we force the LR to be saved.
    LRUsed = true;
  }

  if (LRUsed) {
    // We will handle the LR in the prologue/epilogue
    // and allocate space on the stack ourselves.
    SavedRegs.reset(XCore::LR);
    XFI->createLRSpillSlot(MF);
  }

  if (hasFP(MF))
    // A callee-save register is used to hold the FP.
    // This needs saving / restoring in the prologue / epilogue.
    XFI->createFPSpillSlot(MF);
}

void XCoreFrameLowering::
processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                    RegScavenger *RS) const {
  assert(RS && "requiresRegisterScavenging failed");
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Reserve slots close to SP or frame pointer for Scavenging spills.
  // When using SP for small frames, we don't need any scratch registers.
  // When using SP for large frames, we may need 2 scratch registers.
  // When using FP, for large or small frames, we may need 1 scratch register.
  if (XFI->isLargeFrame(MF) || hasFP(MF))
    RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
                                                       RC->getAlignment(),
                                                       false));
  if (XFI->isLargeFrame(MF) && !hasFP(MF))
    RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
                                                       RC->getAlignment(),
                                                       false));
}