//===-- X86FrameLowering.cpp - X86 Frame Information ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/SmallSet.h"

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

/// hasFP - Return true if the specified function should have a dedicated
/// frame pointer register. This is true if the function has variable sized
/// allocas or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RI = TM.getRegisterInfo();

  return (DisableFramePointerElim(MF) ||
          RI->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit());
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
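// Added example (commentary, not original code): these helpers pick the
// smallest instruction encoding for the immediate, e.g.
//
//   getSUBriOpcode(true, 8);     // X86::SUB64ri8  -- imm fits in 8 bits
//   getSUBriOpcode(true, 512);   // X86::SUB64ri32 -- needs a 32-bit immediate
//   getADDriOpcode(false, 512);  // X86::ADD32ri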
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const unsigned CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const unsigned CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<unsigned, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (const unsigned *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI)
        Uses.insert(*AsI);
    }

    const unsigned *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}


/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64Bit, const TargetInstrInfo &TII,
                  const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub ?
    getSUBriOpcode(Is64Bit, Offset) :
    getADDriOpcode(Is64Bit, Offset);
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr)
      .addImm(ThisVal);
    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    Offset -= ThisVal;
  }
}
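// Added worked example (commentary, not original code): a pointer-sized
// adjustment becomes a push/pop, while larger ones are split into chunks of
// at most 2^31 - 1 bytes, e.g. on x86-64:
//
//   emitSPUpdate(..., /*NumBytes=*/-8,  /*Is64Bit=*/true, ...); // pushq %rax
//   emitSPUpdate(..., /*NumBytes=*/-48, /*Is64Bit=*/true, ...); // subq $48, %rsp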
/// mergeSPUpdatesUp - Merge a stack adjustment immediately before the given
/// iterator into the running stack-pointer update.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Merge a stack adjustment immediately after the given
/// iterator into the running stack-pointer update.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = llvm::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}
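// Added illustration (commentary only): merging collapses adjacent stack
// adjustments, so a pending 32-byte allocation preceded by "subl $16, %esp"
// is emitted as a single "subl $48, %esp" instead of two SUBs.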
/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}
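// Added note (commentary only): the live-in scan above covers EAX and its
// aliases (AX/AH/AL) because emitPrologue below passes the allocation size to
// the 32-bit stack probe in EAX, and must save and restore it when live.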
void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                 MCSymbol *Label,
                                                 unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = TM.getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the amount of bytes used for return address storing.
  int stackGrowth = -TD->getPointerSize();

  // FIXME: This is a dirty hack. The code itself is pretty messy right now.
  // It should be rewritten from scratch and generalized someday.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an
    // extra "PUSH" of the frame pointer -- the "emitPrologue" method
    // automatically generates one when frame pointers are used. If we
    // generate a "machine move" for this extra "PUSH", the linker will lose
    // track of the fact that the frame pointer should have the value of the
    // first "PUSH" when it's trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering
    // up another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(Label, CSDst, CSSrc));
  }
}

/// getCompactUnwindRegNum - Get the compact unwind number for a given
/// register. The number corresponds to the enum lists in
/// compact_unwind_encoding.h.
static int getCompactUnwindRegNum(const unsigned *CURegs, unsigned Reg) {
  int Idx = 1;
  for (; *CURegs; ++CURegs, ++Idx)
    if (*CURegs == Reg)
      return Idx;

  return -1;
}

/// encodeCompactUnwindRegistersWithoutFrame - Create the permutation encoding
/// used with frameless stacks. It is passed the number of registers to be
/// saved and an array of the registers saved.
static uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[6],
                                                         unsigned RegCount,
                                                         bool Is64Bit) {
  // The saved registers are numbered from 1 to 6. In order to encode the
  // order in which they were saved, we re-number them according to their
  // place in the register order. The re-numbering is relative to the last
  // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
  // that order:
  //
  //    Orig  Re-Num
  //    ----  ------
  //     6      6
  //     2      2
  //     4      3
  //     5      3
  //
  static const unsigned CU32BitRegs[] = {
    X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
  };
  static const unsigned CU64BitRegs[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };
  const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);

  uint32_t RenumRegs[6];
  for (unsigned i = 6 - RegCount; i < 6; ++i) {
    int CUReg = getCompactUnwindRegNum(CURegs, SavedRegs[i]);
    if (CUReg == -1) return ~0U;
    SavedRegs[i] = CUReg;

    unsigned Countless = 0;
    for (unsigned j = 6 - RegCount; j < i; ++j)
      if (SavedRegs[j] < SavedRegs[i])
        ++Countless;

    RenumRegs[i] = SavedRegs[i] - Countless - 1;
  }

  // Take the renumbered values and encode them into a 10-bit number.
  uint32_t permutationEncoding = 0;
  switch (RegCount) {
  case 6:
    permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                           + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                           +     RenumRegs[4];
    break;
  case 5:
    permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                           + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                           +     RenumRegs[5];
    break;
  case 4:
    permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                           + 3 * RenumRegs[4] +     RenumRegs[5];
    break;
  case 3:
    permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                           +    RenumRegs[5];
    break;
  case 2:
    permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
    break;
  case 1:
    permutationEncoding |= RenumRegs[5];
    break;
  }

  assert((permutationEncoding & 0x3FF) == permutationEncoding &&
         "Invalid compact register encoding!");
  return permutationEncoding;
}
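// Added worked example (commentary only): using the {6, 2, 4, 5} save order
// from the comment above with RegCount == 4, the loop computes
// RenumRegs[2..5] == {5, 1, 2, 2}, so the permutation encoding is
// 60*5 + 12*1 + 3*2 + 2 == 320 (0x140), which fits in 10 bits.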
/// encodeCompactUnwindRegistersWithFrame - Return the registers encoded for a
/// compact encoding with a frame pointer.
static uint32_t encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[6],
                                                      bool Is64Bit) {
  static const unsigned CU32BitRegs[] = {
    X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
  };
  static const unsigned CU64BitRegs[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };
  const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);

  // Encode the registers in the order they were saved, 3 bits per register.
  // The registers are numbered from 1 to 6.
  uint32_t RegEnc = 0;
  for (int I = 5; I >= 0; --I) {
    unsigned Reg = SavedRegs[I];
    if (Reg == 0) break;
    int CURegNum = getCompactUnwindRegNum(CURegs, Reg);
    if (CURegNum == -1)
      return ~0U;
    RegEnc |= (CURegNum & 0x7) << ((5 - I) * 3);
  }

  assert((RegEnc & 0x7FFF) == RegEnc && "Invalid compact register encoding!");
  return RegEnc;
}
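// Added example (commentary only, assuming the 3-bits-per-register layout
// described above): if RBX is pushed first and R12 second, then
// getCompactUnwindEncoding below fills SavedRegs[5] == RBX (CU number 1) and
// SavedRegs[4] == R12 (CU number 2), giving RegEnc == 1 | (2 << 3) == 0x11.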
uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  bool Is64Bit = STI.is64Bit();
  bool HasFP = hasFP(MF);

  unsigned SavedRegs[6] = { 0, 0, 0, 0, 0, 0 };
  int SavedRegIdx = 6;

  unsigned OffsetSize = (Is64Bit ? 8 : 4);

  unsigned PushInstr = (Is64Bit ? X86::PUSH64r : X86::PUSH32r);
  unsigned PushInstrSize = 1;
  unsigned MoveInstr = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
  unsigned MoveInstrSize = (Is64Bit ? 3 : 2);
  unsigned SubtractInstr = getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta);
  unsigned SubtractInstrIdx = (Is64Bit ? 3 : 2);

  unsigned StackDivide = (Is64Bit ? 8 : 4);

  unsigned InstrOffset = 0;
  unsigned CFAOffset = 0;
  unsigned StackAdjust = 0;

  MachineBasicBlock &MBB = MF.front(); // Prologue is in entry BB.
  bool ExpectEnd = false;
  for (MachineBasicBlock::iterator
         MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) {
    MachineInstr &MI = *MBBI;
    unsigned Opc = MI.getOpcode();
    if (Opc == X86::PROLOG_LABEL) continue;
    if (!MI.getFlag(MachineInstr::FrameSetup)) break;

    // We don't expect any more prolog instructions.
    if (ExpectEnd) return 0;

    if (Opc == PushInstr) {
      // If there are too many saved registers, we cannot use compact encoding.
      if (--SavedRegIdx < 0) return 0;

      SavedRegs[SavedRegIdx] = MI.getOperand(0).getReg();
      CFAOffset += OffsetSize;
      InstrOffset += PushInstrSize;
    } else if (Opc == MoveInstr) {
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();

      if (DstReg != FramePtr || SrcReg != StackPtr)
        return 0;

      CFAOffset = 0;
      memset(SavedRegs, 0, sizeof(SavedRegs));
      InstrOffset += MoveInstrSize;
    } else if (Opc == SubtractInstr) {
      if (StackAdjust)
        // We already have a stack pointer adjustment.
        return 0;

      if (!MI.getOperand(0).isReg() ||
          MI.getOperand(0).getReg() != MI.getOperand(1).getReg() ||
          MI.getOperand(0).getReg() != StackPtr || !MI.getOperand(2).isImm())
        // We need this to be a stack pointer adjustment. Something like:
        //
        //   %RSP<def> = SUB64ri8 %RSP, 48
        return 0;

      StackAdjust = MI.getOperand(2).getImm() / StackDivide;
      SubtractInstrIdx += InstrOffset;
      ExpectEnd = true;
    }
  }

  // Encode that we are using EBP/RBP as the frame pointer.
  uint32_t CompactUnwindEncoding = 0;
  CFAOffset /= StackDivide;
  if (HasFP) {
    if ((CFAOffset & 0xFF) != CFAOffset)
      // Offset was too big for compact encoding.
      return 0;

    // Get the encoding of the saved registers when we have a frame pointer.
    uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit);
    if (RegEnc == ~0U)
      return 0;

    CompactUnwindEncoding |= 0x01000000;
    CompactUnwindEncoding |= (CFAOffset & 0xFF) << 16;
    CompactUnwindEncoding |= RegEnc & 0x7FFF;
  } else {
    unsigned FullOffset = CFAOffset + StackAdjust;
    if ((FullOffset & 0xFF) == FullOffset) {
      // Frameless stack.
      CompactUnwindEncoding |= 0x02000000;
      CompactUnwindEncoding |= (FullOffset & 0xFF) << 16;
    } else {
      if ((CFAOffset & 0x7) != CFAOffset)
        // The extra stack adjustments are too big for us to handle.
        return 0;

      // Frameless stack with an offset too large for us to encode compactly.
      CompactUnwindEncoding |= 0x03000000;

      // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
      // instruction.
      CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

      // Encode any extra stack changes (done via push instructions).
      CompactUnwindEncoding |= (CFAOffset & 0x7) << 13;
    }

    // Get the encoding of the saved registers when we don't have a frame
    // pointer.
    uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegs,
                                                               6 - SavedRegIdx,
                                                               Is64Bit);
    if (RegEnc == ~0U) return 0;
    CompactUnwindEncoding |= RegEnc & 0x3FF;
  }

  return CompactUnwindEncoding;
}
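// Added summary (commentary only, derived from the code above): the resulting
// encoding stores the mode in the high byte (0x01 frame-based, 0x02 frameless
// small, 0x03 frameless large), the scaled stack size or SUB-instruction
// offset in bits 16-23, and the saved-register encoding in the low bits.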
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    Fn->needsUnwindTableEntry();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  bool Is64Bit = STI.is64Bit();
  bool IsWin64 = STI.isTargetWin64();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64, the Red Zone is not disabled, we are a leaf function,
  // we use up to 128 bytes of stack space, and we don't have a frame pointer,
  // calls, or dynamic alloca, then we do not need to adjust the stack pointer
  // (we fit in the Red Zone).
  if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->adjustsStack() &&                      // No calls.
      !IsWin64 &&                                  // Win64 has no Red Zone
      !EnableSegmentedStacks) {                    // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  std::vector<MachineMove> &Moves = MMI.getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  uint64_t NumBytes = 0;
  int stackGrowth = -TD->getPointerSize();

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
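    // Added illustration (commentary only): together with the MOV and any
    // stack allocation emitted below, this frame-pointer path produces the
    // canonical sequence
    //
    //   pushq %rbp
    //   movq  %rsp, %rbp
    //   subq  $NNN, %rsp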
    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      } else {
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
        .addSym(FrameLabel);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign stack
    if (RegInfo->needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
          .addReg(StackPtr)
          .addImm(-MaxAlign)
          .setMIFlag(MachineInstr::FrameSetup);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    MBBI->setFlag(MachineInstr::FrameSetup);
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      MCSymbol *Label = MMI.getContext().CreateTempSymbol();
      BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);

      // Define the current CFA rule to use the provided offset.
      unsigned Ptr = StackSize ? MachineLocation::VirtualFP : StackPtr;
      MachineLocation SPDst(Ptr);
      MachineLocation SPSrc(Ptr, StackOffset);
      Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      StackOffset += stackGrowth;
    }
  }

  DL = MBB.findDebugLoc(MBBI);

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
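  // Added worked example (commentary only): with StackSize == 40,
  // SlotSize == 8 and a 16-byte callee-saved area on x86-64, the
  // frame-pointer path computed FrameSize == 32 and NumBytes == 16, so the
  // allocation below emits "subq $16, %rsp" after the callee-saved pushes.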
  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go. The 64-bit version of
  // __chkstk is only responsible for probing the stack. The 64-bit prologue is
  // responsible for adjusting the stack pointer. Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  if (NumBytes >= 4096 && STI.isTargetCOFF() && !STI.isTargetEnvMacho()) {
    const char *StackProbeSymbol;
    bool isSPUpdateNeeded = false;

    if (Is64Bit) {
      if (STI.isTargetCygMing())
        StackProbeSymbol = "___chkstk";
      else {
        StackProbeSymbol = "__chkstk";
        isSPUpdateNeeded = true;
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";

    // Check whether EAX is live-in for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // A live-in EAX is only handled in 32-bit mode; it should never happen
      // in the x64 case.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
      // For the large code model, we have to call through a register. Use R11,
      // as it is unused and clobbered by all probe functions.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(StackProbeSymbol);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALL64r))
        .addReg(X86::R11)
        .addReg(StackPtr, RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(STI.is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32))
        .addExternalSymbol(StackProbeSymbol)
        .addReg(StackPtr, RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    // MSVC x64's __chkstk needs to adjust %rsp.
    // FIXME: %rax preserves the offset and should be available.
    if (isSPUpdateNeeded)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
                   TII, *RegInfo);

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
                 TII, *RegInfo);
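  // Added illustration (commentary only): on 32-bit MSVC targets an 8 KiB
  // frame becomes
  //
  //   movl  $8192, %eax
  //   calll _chkstk        ; probes and adjusts %esp itself
  //
  // whereas MSVC x64's __chkstk only probes, so the emitSPUpdate call above
  // performs the actual subtraction.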
  if (((!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    MCSymbol *Label = MMI.getContext().CreateTempSymbol();
    BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
      .addSym(Label);

    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              -StackSize + stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      } else {
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(Label, SPDst, SPSrc));
      }
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
  }

  // Darwin 10.7 and greater have support for compact unwind encoding.
  if (STI.getTargetTriple().isMacOSX() &&
      !STI.getTargetTriple().isMacOSXVersionLT(10, 7))
    MMI.setCompactUnwindEncoding(getCompactUnwindEncoding(MF));
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  bool Is64Bit = STI.is64Bit();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok.
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }
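  // Added illustration (commentary only): together with the code below, a
  // frame-pointer-based 64-bit epilogue typically ends up as
  //
  //   addq $NNN, %rsp      ; or a MOV/LEA from %rbp when realigned/alloca'd
  //   popq %rbx            ; callee-saved restores
  //   popq %rbp
  //   retq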
  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->getDesc().isTerminator())
      break;

    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (RegInfo->needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI =
        addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                     FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else {
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII, *RegInfo);
    }
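    // Added worked example (commentary only): with StackAdj == 0 and a tail
    // call return-address delta of -8, Offset == 8, so an "addq $8, %rsp" is
    // emitted here to release the reserved return-address area before the
    // jump.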
    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                       ? X86::TAILJMPd : X86::TAILJMPd64));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = prior(MBBI);
    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
      NewMI->addOperand(MBBI->getOperand(i));

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII, *RegInfo);
  }
}

int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const X86RegisterInfo *RI =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RI->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      Offset += RI->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RI->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
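// Added note (commentary only): e.g. with a frame pointer and
// TailCallReturnAddrDelta == -8, the code above first skips the saved
// EBP/RBP slot (Offset += SlotSize) and then the RETADDR move area
// (Offset -= -8), yielding a frame-pointer-relative offset past both
// reserved slots.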
bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();

  unsigned SlotSize = STI.is64Bit() ? 8 : 4;
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned CalleeFrameSize = 0;

  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    CalleeFrameSize += SlotSize;
    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);

  // Spill XMM regs. X86 has no push/pop for XMM registers, so spill them to
  // the stack frame instead. Note that only the Win64 ABI might spill XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                            RC, TRI);
  }

  return true;
}

bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                             RC, TRI);
  }

  // POP GPRs.
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }
  return true;
}
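// Added note (commentary only): the spill loop walks CSI backwards while the
// restore loop walks it forwards, so GPRs are popped in the reverse of their
// push order; XMM registers are reloaded from their fixed frame-index slots
// before the pops, since the pops must sit immediately before the return.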
void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  unsigned SlotSize = RegInfo->getSlotSize();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           (-1U*SlotSize)+TailCallReturnAddrDelta, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    (void)FrameIdx;
  }
}

static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}

static unsigned
GetScratchRegister(bool Is64Bit, const MachineFunction &MF) {
  if (Is64Bit) {
    return X86::R11;
  } else {
    CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
    bool IsNested = HasNestArgument(&MF);

    if (CallingConvention == CallingConv::X86_FastCall) {
      if (IsNested) {
        report_fatal_error("Segmented stacks do not support fastcall with "
                           "nested functions.");
        return -1;
      } else {
        return X86::EAX;
      }
    } else {
      if (IsNested)
        return X86::EDX;
      else
        return X86::ECX;
    }
  }
}
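// Added note (commentary only): for example, a 32-bit fastcall function
// without a nest argument gets EAX as the scratch register (fastcall passes
// its arguments in ECX/EDX, which must be preserved), plain functions get
// ECX, or EDX when a nest parameter is present; 64-bit code always uses R11.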
void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  uint64_t StackSize;
  bool Is64Bit = STI.is64Bit();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;
  const X86Subtarget *ST = &MF.getTarget().getSubtarget<X86Subtarget>();

  unsigned ScratchReg = GetScratchRegister(Is64Bit, MF);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!ST->isTargetLinux())
    report_fatal_error("Segmented stacks supported only on linux.");

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64-bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit
  // in allocMBB needs to be the last (terminating) instruction.
  MachineBasicBlock *restoreR10MBB = NULL;
  if (IsNested)
    restoreR10MBB = MF.CreateMachineBasicBlock();

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
       e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);

    if (IsNested)
      restoreR10MBB->addLiveIn(*i);
  }

  if (IsNested) {
    allocMBB->addLiveIn(X86::R10);
    restoreR10MBB->addLiveIn(X86::RAX);
  }

  if (IsNested)
    MF.push_front(restoreR10MBB);
  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI->getStackSize();

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    TlsReg = X86::FS;
    TlsOffset = 0x70;

    BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
    BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
      .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    TlsReg = X86::GS;
    TlsOffset = 0x30;

    BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
    BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
      .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JG_4)).addMBB(&prologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to __morestack.
    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);

    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(X86::R10);
    MF.getRegInfo().setPhysRegUsed(X86::R11);
  } else {
    // Since we'll call __morestack, stack alignment needs to be preserved.
    BuildMI(allocMBB, DL, TII.get(X86::SUB32ri), X86::ESP).addReg(X86::ESP)
      .addImm(8);
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc.
  if (Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack");
  else
    BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack");
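  // Added illustration (commentary only): on x86-64 the check block above
  // compares the would-be stack pointer against the stacklet limit held in
  // TLS:
  //
  //   leaq -StackSize(%rsp), %r11
  //   cmpq %fs:0x70, %r11
  //   jg   <function body>       ; enough room in the current stacklet
  //
  // otherwise control falls through into allocMBB's __morestack call.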
  // __morestack only seems to remove 8 bytes off the stack. Add back the
  // additional 8 bytes we added before pushing the arguments.
  if (!Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::ADD32ri), X86::ESP).addReg(X86::ESP)
      .addImm(8);
  BuildMI(allocMBB, DL, TII.get(X86::RET));

  if (IsNested)
    BuildMI(restoreR10MBB, DL, TII.get(X86::MOV64rr), X86::R10)
      .addReg(X86::RAX);

  if (IsNested) {
    allocMBB->addSuccessor(restoreR10MBB);
    restoreR10MBB->addSuccessor(&prologueMBB);
  } else {
    allocMBB->addSuccessor(&prologueMBB);
  }

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&prologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}
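// Added summary (commentary only): the blocks created above give the function
// the shape
//
//   checkMBB:       compare the SP-to-be against the stacklet limit, jg body
//   allocMBB:       pass the frame/argument sizes, call __morestack, ret
//   restoreR10MBB:  movq %rax, %r10 (only when a nest argument is live)
//   body:           the original prologue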