//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
typedef MachineBasicBlock::iterator Iter;

static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

/// Helper class to expand pseudos.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};
}

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (auto &MBB : MF) {
    for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
      Expanded |= expandInstr(MBB, I++);
  }

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when the ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of whether the register is odd or
  // even.
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when the ABI is fpxx and mfhc1
/// is not available and the case where the ABI is FP64A. It is implemented
/// here because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    unsigned DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of whether the register is odd or
  // even.

  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.stackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI->getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI->adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  MachineLocation DstML, SrcML;

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MMI.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  if (CSI.size()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
         E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, 1), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.needsStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
      assert(isInt<16>(MFI->getMaxAlignment()) &&
             "Function's alignment size requirement is not supported.");
      int MaxAlign = -(int)MFI->getMaxAlignment();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {

  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support MIPS32R2 or later.
  // The epilogue relies on the use of the "ehb" to clear execution
  // hazards. Pre-R2 MIPS relies on an implementation-defined number
  // of "ssnop"s to clear the execution hazard. Support for ssnop hazard
  // clearing is not provided, so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp-relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if ((STI.getRelocationModel() != Reloc::Static))
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC
  StringRef IntKind =
      MF.getFunction()->getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and spill Status
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non-EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI->getDebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo);
    }
  }

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo
  uint64_t StackSize = MFI->getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {

  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable Interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

int MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI->isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return MFI->getObjectOffset(FI) + MFI->getStackSize() -
         getOffsetOfLocalArea() + MFI->getOffsetAdjustment();
}

bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *EntryBlock = &MF->front();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and return address is taken, because it has already been added in
    // method MipsTargetLowering::LowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and return address
    // is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo()->isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      EntryBlock->addLiveIn(Reg);

    // ISRs require HI/LO to be spilled into kernel registers to be then
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function *Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func->hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0_64) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Reserve call frame if the size of the maximum call frame fits into the
  // 16-bit immediate field and there are no variable sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI->hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $fp as used if function has dedicated frame pointer.
  if (hasFP(MF))
    setAliasRegs(MF, SavedRegs, FP);
  // Mark $s7 as used if function has dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Create spill slots for Coprocessor 0 registers if function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If target is
    // mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass *RC = STI.hasMips64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;
    int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                  RC->getAlignment(), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
                         estimateStackSize(MF);

  if (isInt<16>(MaxSPOffset))
    return;

  const TargetRegisterClass *RC =
      ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment(), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}