//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RegInfo = TM.getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MF.hasMSInlineAsm() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn());
}

static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
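// Note that the LEA forms compute the same adjustment as ADD/SUB but do not
// write EFLAGS. For illustration:
//
//   leaq -16(%rsp), %rsp   // adjusts RSP, EFLAGS preserved
//   subq $16, %rsp         // same adjustment, EFLAGS clobbered
//
// This is why emitSPUpdate below can pick LEA when the subtarget sets
// useLeaForSP().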
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worry about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const uint16_t CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const uint16_t CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}


/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64Bit, bool IsLP64, bool UseLEA,
                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc;
  if (UseLEA)
    Opc = getLEArOpcode(IsLP64);
  else
    Opc = isSub
      ? getSUBriOpcode(IsLP64, Offset)
      : getADDriOpcode(IsLP64, Offset);

  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r  : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI = NULL;

    if (UseLEA) {
      MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                        StackPtr, false, isSub ? -ThisVal : ThisVal);
    } else {
      MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    }

    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}
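// For illustration, the push/pop special case above turns a pointer-sized
// adjustment into a shorter instruction: on x86-64, "subq $8, %rsp" becomes
// "pushq %rax" (the stored value is irrelevant, so RAX is marked undef), and
// "addq $8, %rsp" becomes "popq REG" for a caller-saved REG that
// findDeadCallerSavedReg proved dead at the return.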
/// mergeSPUpdatesUp - If the instruction immediately before MBBI is an
/// ADD/SUB/LEA on the stack pointer, fold its adjustment into *NumBytes and
/// erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - If the instruction immediately after MBBI is an
/// ADD/SUB on the stack pointer, fold its adjustment into *NumBytes and
/// erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction, it is deleted and the
/// stack adjustment is returned as a positive value for ADD/LEA and a
/// negative value for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
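// Worked example: with tail-call optimization, a "subl $12, %esp" may already
// sit immediately before the prologue's allocation point. Calling
// mergeSPUpdates(MBB, MBBI, StackPtr, /*doMergeWithPrevious=*/true) erases
// that SUB and returns -12; emitPrologue then does "NumBytes -= -12", folding
// both adjustments into a single SP update.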
static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                 MCSymbol *Label,
                                                 unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = -RegInfo->getSlotSize();

  // FIXME: This is a dirty hack. The code itself is a mess right now. It
  // should be rewritten from scratch and generalized sometime.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an
    // extra "PUSH" of the frame pointer -- the "emitPrologue" method
    // automatically generates one when frame pointers are used. If we
    // generate a "machine move" for this extra "PUSH", the linker will lose
    // track of the fact that the frame pointer should have the value of the
    // first "PUSH" when it's trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering
    // up another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    MMI.addFrameInst(MCCFIInstruction::createOffset(Label, DwarfReg, Offset));
  }
}

/// getCompactUnwindRegNum - Get the compact unwind number for a given
/// register. The number corresponds to the enum lists in
/// compact_unwind_encoding.h.
static int getCompactUnwindRegNum(unsigned Reg, bool is64Bit) {
  static const uint16_t CU32BitRegs[] = {
    X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
  };
  static const uint16_t CU64BitRegs[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };
  const uint16_t *CURegs = is64Bit ? CU64BitRegs : CU32BitRegs;
  for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
    if (*CURegs == Reg)
      return Idx;

  return -1;
}

// Number of registers that can be saved in a compact unwind encoding.
#define CU_NUM_SAVED_REGS 6

/// encodeCompactUnwindRegistersWithoutFrame - Create the permutation encoding
/// used with frameless stacks. It is passed the number of registers to be
/// saved and an array of the registers saved.
static uint32_t
encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
                                         unsigned RegCount, bool Is64Bit) {
  // The saved registers are numbered from 1 to 6. In order to encode the
  // order in which they were saved, we re-number them according to their
  // place in the register order. The re-numbering is relative to the last
  // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
  // that order:
  //
  //    Orig  Re-Num
  //    ----  ------
  //     6       6
  //     2       2
  //     4       3
  //     5       3
  //
  for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
    int CUReg = getCompactUnwindRegNum(SavedRegs[i], Is64Bit);
    if (CUReg == -1) return ~0U;
    SavedRegs[i] = CUReg;
  }

  // Reverse the list.
  std::swap(SavedRegs[0], SavedRegs[5]);
  std::swap(SavedRegs[1], SavedRegs[4]);
  std::swap(SavedRegs[2], SavedRegs[3]);

  uint32_t RenumRegs[CU_NUM_SAVED_REGS];
  for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
    unsigned Countless = 0;
    for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
      if (SavedRegs[j] < SavedRegs[i])
        ++Countless;

    RenumRegs[i] = SavedRegs[i] - Countless - 1;
  }

  // Take the renumbered values and encode them into a 10-bit number.
  uint32_t permutationEncoding = 0;
  switch (RegCount) {
  case 6:
    permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                           + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                           +     RenumRegs[4];
    break;
  case 5:
    permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                           + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                           +     RenumRegs[5];
    break;
  case 4:
    permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                           + 3 * RenumRegs[4] +    RenumRegs[5];
    break;
  case 3:
    permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                           +    RenumRegs[5];
    break;
  case 2:
    permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
    break;
  case 1:
    permutationEncoding |= RenumRegs[5];
    break;
  }

  assert((permutationEncoding & 0x3FF) == permutationEncoding &&
         "Invalid compact register encoding!");
  return permutationEncoding;
}
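// The switch above is a mixed-radix (factorial number system) encoding: the
// weights 120, 24, 6, 2, 1 are 5!, 4!, 3!, 2!, 1!, so the 6! = 720 possible
// save orders map to distinct values. The largest RegCount == 6 encoding is
// 120*5 + 24*4 + 6*3 + 2*2 + 1 = 719, which fits the 10-bit field checked by
// the assert.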
/// encodeCompactUnwindRegistersWithFrame - Return the registers encoded for a
/// compact encoding with a frame pointer.
static uint32_t
encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
                                      bool Is64Bit) {
  // Encode the registers in the order they were saved, 3-bits per register.
  // The registers are numbered from 1 to CU_NUM_SAVED_REGS.
  uint32_t RegEnc = 0;
  for (int I = CU_NUM_SAVED_REGS - 1, Idx = 0; I != -1; --I) {
    unsigned Reg = SavedRegs[I];
    if (Reg == 0) continue;

    int CURegNum = getCompactUnwindRegNum(Reg, Is64Bit);
    if (CURegNum == -1) return ~0U;

    // Encode the 3-bit register number in order, skipping over 3-bits for
    // each register.
    RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
  }

  assert((RegEnc & 0x3FFFF) == RegEnc && "Invalid compact register encoding!");
  return RegEnc;
}

uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  bool Is64Bit = STI.is64Bit();
  bool HasFP = hasFP(MF);

  unsigned SavedRegs[CU_NUM_SAVED_REGS] = { 0, 0, 0, 0, 0, 0 };
  unsigned SavedRegIdx = 0;

  unsigned OffsetSize = (Is64Bit ? 8 : 4);

  unsigned PushInstr = (Is64Bit ? X86::PUSH64r : X86::PUSH32r);
  unsigned PushInstrSize = 1;
  unsigned MoveInstr = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
  unsigned MoveInstrSize = (Is64Bit ? 3 : 2);
  unsigned SubtractInstrIdx = (Is64Bit ? 3 : 2);

  unsigned StackDivide = (Is64Bit ? 8 : 4);

  unsigned InstrOffset = 0;
  unsigned StackAdjust = 0;
  unsigned StackSize = 0;

  MachineBasicBlock &MBB = MF.front(); // Prologue is in entry BB.
  bool ExpectEnd = false;
  for (MachineBasicBlock::iterator
         MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) {
    MachineInstr &MI = *MBBI;
    unsigned Opc = MI.getOpcode();
    if (Opc == X86::PROLOG_LABEL) continue;
    if (!MI.getFlag(MachineInstr::FrameSetup)) break;

    // We don't expect any more prologue instructions.
    if (ExpectEnd) return CU::UNWIND_MODE_DWARF;

    if (Opc == PushInstr) {
      // If there are too many saved registers, we cannot use compact
      // encoding.
      if (SavedRegIdx >= CU_NUM_SAVED_REGS) return CU::UNWIND_MODE_DWARF;

      unsigned Reg = MI.getOperand(0).getReg();
      if (Reg == (Is64Bit ? X86::RAX : X86::EAX)) {
        ExpectEnd = true;
        continue;
      }

      SavedRegs[SavedRegIdx++] = MI.getOperand(0).getReg();
      StackAdjust += OffsetSize;
      InstrOffset += PushInstrSize;
    } else if (Opc == MoveInstr) {
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();

      if (DstReg != FramePtr || SrcReg != StackPtr)
        return CU::UNWIND_MODE_DWARF;

      StackAdjust = 0;
      memset(SavedRegs, 0, sizeof(SavedRegs));
      SavedRegIdx = 0;
      InstrOffset += MoveInstrSize;
    } else if (Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
               Opc == X86::SUB32ri || Opc == X86::SUB32ri8) {
      if (StackSize)
        // We already have a stack size.
        return CU::UNWIND_MODE_DWARF;

      if (!MI.getOperand(0).isReg() ||
          MI.getOperand(0).getReg() != MI.getOperand(1).getReg() ||
          MI.getOperand(0).getReg() != StackPtr || !MI.getOperand(2).isImm())
        // We need this to be a stack adjustment instruction. Something like:
        //
        //   %RSP<def> = SUB64ri8 %RSP, 48
        return CU::UNWIND_MODE_DWARF;

      StackSize = MI.getOperand(2).getImm() / StackDivide;
      SubtractInstrIdx += InstrOffset;
      ExpectEnd = true;
    }
  }

  // Encode that we are using EBP/RBP as the frame pointer.
  uint32_t CompactUnwindEncoding = 0;
  StackAdjust /= StackDivide;
  if (HasFP) {
    if ((StackAdjust & 0xFF) != StackAdjust)
      // Offset was too big for compact encoding.
      return CU::UNWIND_MODE_DWARF;

    // Get the encoding of the saved registers when we have a frame pointer.
    uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit);
    if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

    CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
    CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
    CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
  } else {
    ++StackAdjust;
    uint32_t TotalStackSize = StackAdjust + StackSize;
    if ((TotalStackSize & 0xFF) == TotalStackSize) {
      // Frameless stack with a small stack size.
      CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

      // Encode the stack size.
      CompactUnwindEncoding |= (TotalStackSize & 0xFF) << 16;
    } else {
      if ((StackAdjust & 0x7) != StackAdjust)
        // The extra stack adjustments are too big for us to handle.
        return CU::UNWIND_MODE_DWARF;

      // Frameless stack with an offset too large for us to encode compactly.
      CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

      // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
      // instruction.
      CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

      // Encode any extra stack adjustments (done via push instructions).
      CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
    }

    // Encode the number of registers saved.
    CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

    // Get the encoding of the saved registers when we don't have a frame
    // pointer.
    uint32_t RegEnc =
      encodeCompactUnwindRegistersWithoutFrame(SavedRegs, SavedRegIdx,
                                               Is64Bit);
    if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

    // Encode the register encoding.
    CompactUnwindEncoding |=
      RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
  }

  return CompactUnwindEncoding;
}

/// usesTheStack - This function checks if any of the users of EFLAGS
/// copy the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_iterator ri = MRI.reg_begin(X86::EFLAGS),
       re = MRI.reg_end(); ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}
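// Background: X86InstrInfo::copyPhysReg lowers a COPY to or from EFLAGS with
// a push/pop sequence (e.g. pushfq followed by a pop), which stores below the
// current stack pointer. usesTheStack() therefore disqualifies a function
// from the red-zone optimization in emitPrologue below; skipping the stack
// adjustment there would let that push clobber the first frame slot.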
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the
/// exception handler to generate the exception handling frames.
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    Fn->needsUnwindTableEntry();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool IsWin64 = STI.isTargetWin64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();
  unsigned BasePtr = RegInfo->getBaseRegister();
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info; we need to know the ABI stack alignment as well in case we have a
  // call out. Otherwise just make sure we have some alignment -- we'll go
  // with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                   Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                     // No dynamic alloca.
      !MFI->adjustsStack() &&                           // No calls.
      !IsWin64 &&                                       // Win64 has no Red Zone
      !usesTheStack(MF) &&                              // Don't push and pop.
      !MF.getTarget().Options.EnableSegmentedStacks) {  // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }
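
  // For illustration: a 64-bit leaf function with 40 bytes of locals passes
  // every check above, so StackSize drops to max(MinSize, 0) and no SUB is
  // emitted; the locals live in the 128-byte red zone below RSP that the
  // SysV AMD64 ABI keeps safe from signal and interrupt handlers. With 160
  // bytes of locals, 32 bytes would still be allocated and the remaining 128
  // left in the red zone.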
  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(IsLP64, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers are pushed on stack before the stack
      // is realigned.
      FrameSize -= X86FI->getCalleeSavedFrameSize();
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
    }

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(FrameLabel, 2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      MMI.addFrameInst(MCCFIInstruction::createOffset(FrameLabel, DwarfFramePtr,
                                                      2 * stackGrowth));
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      MMI.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(FrameLabel, DwarfFramePtr));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }
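
  // On x86-64 the HasFP path above produces the classic sequence
  //
  //     pushq %rbp
  //     movq  %rsp, %rbp
  //
  // with a PROLOG_LABEL and CFI after each step so the unwinder first tracks
  // the CFA relative to RSP and then switches it to RBP.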
  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    MBBI->setFlag(MachineInstr::FrameSetup);
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(Label, StackOffset));
      StackOffset += stackGrowth;
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).

  // NOTE: We push the registers before realigning the stack, so
  // vector callee-saved (xmm) registers may be saved w/o proper
  // alignment in this way. However, currently these regs are saved in
  // stack slots (see X86FrameLowering::spillCalleeSavedRegisters()), so
  // this shouldn't be a problem.
  if (RegInfo->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
        .addReg(StackPtr)
        .addImm(-MaxAlign)
        .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when
  // allocating more than 4K bytes on the stack. Windows uses __chkstk and
  // cygwin/mingw uses __alloca. __alloca and the 32-bit version of __chkstk
  // will probe the stack and adjust the stack pointer in one go. The 64-bit
  // version of __chkstk is only responsible for probing the stack. The
  // 64-bit prologue is responsible for adjusting the stack pointer. Touching
  // the stack at 4K increments is necessary to ensure that the guard pages
  // used by the OS virtual memory manager are allocated in correct sequence.
  if (NumBytes >= 4096 && STI.isTargetCOFF() && !STI.isTargetEnvMacho()) {
    const char *StackProbeSymbol;
    bool isSPUpdateNeeded = false;

    if (Is64Bit) {
      if (STI.isTargetCygMing())
        StackProbeSymbol = "___chkstk";
      else {
        StackProbeSymbol = "__chkstk";
        isSPUpdateNeeded = true;
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";

    // Check whether EAX is live-in for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check that EAX is not live-in for this function.
      // It should not be, so throw an assert.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
      .addExternalSymbol(StackProbeSymbol)
      .addReg(StackPtr,    RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
      .setMIFlag(MachineInstr::FrameSetup);

    // MSVC x64's __chkstk does not adjust %rsp itself.
    // It also does not clobber %rax so we can reuse it when adjusting %rsp.
    if (isSPUpdateNeeded) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
        .addReg(StackPtr)
        .addReg(X86::RAX)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64,
                 UseLEA, TII, *RegInfo);

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (RegInfo->hasBasePointer(MF)) {
    // Update the frame pointer with the current stack pointer.
    unsigned Opc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (( (!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
      .addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
          Label, -StackSize + stackGrowth));
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }

  // Darwin 10.7 and greater has support for compact unwind encoding.
  if (STI.getTargetTriple().isMacOSX() &&
      !STI.getTargetTriple().isMacOSXVersionLT(10, 7))
    MMI.setCompactUnwindEncoding(getCompactUnwindEncoding(MF));
}
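// For reference, the frame that emitPrologue builds when a frame pointer is
// used (and which emitEpilogue below unwinds in reverse) looks like:
//
//   [ return address    ]  <-- RSP at function entry
//   [ saved RBP/EBP     ]  <-- RBP/EBP after the mov
//   [ callee-saved GPRs ]     (pushes; an AND may realign below this point)
//   [ locals and spills ]  <-- RSP after the SUB/LEA adjustment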
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info; we need to know the ABI stack alignment as well in case we have a
  // call out. Otherwise just make sure we have some alignment -- we'll go
  // with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers were pushed on stack before the stack
      // was realigned.
      FrameSize -= CSSize;
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - CSSize;
    }

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off. The same applies when the
  // stack was realigned.
  if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (RegInfo->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    if (CSSize != 0) {
      unsigned Opc = getLEArOpcode(IsLP64);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, -CSSize);
    } else {
      unsigned Opc = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, IsLP64, UseLEA,
                 TII, *RegInfo);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, IsLP64,
                   UseLEA, TII, *RegInfo);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                       ? X86::TAILJMPd : X86::TAILJMPd64));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    NewMI->copyImplicitOps(MF, MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1 * X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, IsLP64, UseLEA, TII,
                 *RegInfo);
  }
}

int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RegInfo->hasBasePointer(MF)) {
    assert(hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (RegInfo->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RegInfo->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             unsigned &FrameReg) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer. The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else if (RegInfo->needsStackRealignment(MF))
    FrameReg = RegInfo->getStackRegister();
  else
    FrameReg = RegInfo->getFrameRegister(MF);
  return getFrameIndexOffset(MF, FI);
}

bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();

  unsigned SlotSize = STI.is64Bit() ? 8 : 4;
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned CalleeFrameSize = 0;

  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    CalleeFrameSize += SlotSize;
    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);

  // Spill XMM registers. X86 has no push/pop instructions for XMM registers,
  // so they are stored to their stack slots instead. Note that only the
  // Win64 ABI might spill XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                            RC, TRI);
  }

  return true;
}

bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                             RC, TRI);
  }

  // POP GPRs.
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }
  return true;
}
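// Note the symmetry: spillCalleeSavedRegisters walks CSI backwards with
// PUSHes while restoreCalleeSavedRegisters walks it forwards with POPs, so
// GPRs leave the stack in the reverse of the order they entered. The XMM
// spills need no such ordering because each register has its own frame index.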
1336 // } 1337 // [EBP] 1338 MFI->CreateFixedObject(-TailCallReturnAddrDelta, 1339 TailCallReturnAddrDelta - SlotSize, true); 1340 } 1341 1342 if (hasFP(MF)) { 1343 assert((TailCallReturnAddrDelta <= 0) && 1344 "The Delta should always be zero or negative"); 1345 const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering(); 1346 1347 // Create a frame entry for the EBP register that must be saved. 1348 int FrameIdx = MFI->CreateFixedObject(SlotSize, 1349 -(int)SlotSize + 1350 TFI.getOffsetOfLocalArea() + 1351 TailCallReturnAddrDelta, 1352 true); 1353 assert(FrameIdx == MFI->getObjectIndexBegin() && 1354 "Slot for EBP register must be last in order to be found!"); 1355 (void)FrameIdx; 1356 } 1357 1358 // Spill the BasePtr if it's used. 1359 if (RegInfo->hasBasePointer(MF)) 1360 MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister()); 1361 } 1362 1363 static bool 1364 HasNestArgument(const MachineFunction *MF) { 1365 const Function *F = MF->getFunction(); 1366 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 1367 I != E; I++) { 1368 if (I->hasNestAttr()) 1369 return true; 1370 } 1371 return false; 1372 } 1373 1374 /// GetScratchRegister - Get a temp register for performing work in the 1375 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform 1376 /// and the properties of the function either one or two registers will be 1377 /// needed. Set primary to true for the first register, false for the second. 1378 static unsigned 1379 GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) { 1380 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv(); 1381 1382 // Erlang stuff. 1383 if (CallingConvention == CallingConv::HiPE) { 1384 if (Is64Bit) 1385 return Primary ? X86::R14 : X86::R13; 1386 else 1387 return Primary ? X86::EBX : X86::EDI; 1388 } 1389 1390 if (Is64Bit) 1391 return Primary ? X86::R11 : X86::R12; 1392 1393 bool IsNested = HasNestArgument(&MF); 1394 1395 if (CallingConvention == CallingConv::X86_FastCall || 1396 CallingConvention == CallingConv::Fast) { 1397 if (IsNested) 1398 report_fatal_error("Segmented stacks does not support fastcall with " 1399 "nested function."); 1400 return Primary ? X86::EAX : X86::ECX; 1401 } 1402 if (IsNested) 1403 return Primary ? X86::EDX : X86::EAX; 1404 return Primary ? X86::ECX : X86::EAX; 1405 } 1406 1407 // The stack limit in the TCB is set to this many bytes above the actual stack 1408 // limit. 
static const uint64_t kSplitStackAvailable = 256;

void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  uint64_t StackSize;
  bool Is64Bit = STI.is64Bit();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
      !STI.isTargetWin32() && !STI.isTargetFreeBSD())
    report_fatal_error("Segmented stacks not supported on this platform.");

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit
  // in allocMBB needs to be the last (terminating) instruction.

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
       e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(X86::R10);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI->getStackSize();

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = 0x70;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::RSP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {

      // TlsOffset doesn't fit into a mod r/m byte, so we need an extra
      // register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS
        // offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On
  // 64 bit, we pass the stack frame size in r10 and the argument size in
  // r11.
  if (Is64Bit) {
    // Functions with a nest argument use R10, so it needs to be saved across
    // the call to __morestack.
    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);

    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(X86::R10);
    MF.getRegInfo().setPhysRegUsed(X86::R11);
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc.
  if (Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack");
  else
    BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack");

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&prologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&prologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}

/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of a hybrid stack/heap
/// architecture. (For more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///   temp0 = sp - MaxStack
///   if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///   ...
/// IncStack:
///   call inc_stack   # doubles the stack space
///   temp0 = sp - MaxStack
///   if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const unsigned SlotSize = TM.getRegisterInfo()->getSlotSize();
  const bool Is64Bit = STI.is64Bit();
  DebugLoc DL;
  // HiPE-specific values.
  const unsigned HipeLeafWords = 24;
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
    MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");

  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
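  // Illustrative arithmetic only: on x86-64 (SlotSize == 8, CCRegisteredArgs
  // == 6), a function with a 40-byte frame and 7 arguments (so one caller
  // argument lives on the stack) starts out with
  // MaxStack = 40 + 1*8 + 8 = 56, before the per-callee adjustment below.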
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get the callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions,
        // i.e. those whose names either start with "erlang." or "bif_", or
        // contain neither a "." (as a regular <Module>.<Function>.<Arity>
        // name would) nor an "_" (as a BIF such as "suspend_0" would), since
        // they are executed on another stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed space, then
  // runtime checks and calls to the "inc_stack_0" BIF should be inserted
  // into the assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock &prologueMBB = MF.front();
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
           E = prologueMBB.livein_end(); I != E; ++I) {
      stackCheckMBB->addLiveIn(*I);
      incStackMBB->addLiveIn(*I);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }

    ScratchReg = GetScratchRegister(Is64Bit, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed to by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);

    // Create new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop))
      .addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);

    stackCheckMBB->addSuccessor(&prologueMBB, 99);
    stackCheckMBB->addSuccessor(incStackMBB, 1);
    incStackMBB->addSuccessor(&prologueMBB, 99);
    incStackMBB->addSuccessor(incStackMBB, 1);
  }
#ifdef XDEBUG
  MF.verify();
#endif
}

void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const X86InstrInfo &TII = *TM.getInstrInfo();
  const X86RegisterInfo &RegInfo = *TM.getRegisterInfo();
  unsigned StackPtr = RegInfo.getStackRegister();
  bool reserveCallFrame = hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  bool IsLP64 = STI.isTarget64BitLP64();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = NULL;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(IsLP64, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
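    // For instance, in a (hypothetical) post-call sequence such as
    //
    //   CALL foo
    //   MOV32mr ...              <-- spill of the call's return value
    //   ADJCALLSTACKUP 20, 4
    //
    // the scan below moves the insertion point back past the spill so that
    // the 'sub ESP, <CalleeAmt>' lands immediately after the CALL.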
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !llvm::prior(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}
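// Illustrative before/after for eliminateCallFramePseudoInstr when the call
// frame is not reserved (hypothetical numbers, assuming StackAlign == 16 and
// a callee that pops 4 bytes itself):
//
//   ADJCALLSTACKDOWN 20      ->   sub ESP, 32    ; 20 rounded up to 32
//   CALL callee                   CALL callee
//   ADJCALLSTACKUP 20, 4     ->   add ESP, 28    ; 32 minus the 4 bytes the
//                                                ; callee already popped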