//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
typedef MachineBasicBlock::iterator Iter;

static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

/// Helper class to expand pseudos.
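/// It rewrites the accumulator load/store pseudos (LOAD_ACC*/STORE_ACC*), the
/// DSP condition-code spill pseudos, COPYs whose source is an accumulator, and
/// the BuildPairF64/ExtractElementF64 pseudos that need a spill slot.
/// determineCalleeSaves() runs it so that an emergency spill slot can be
/// reserved whenever an expansion actually happened.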
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};
}

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
       BB != BBEnd; ++BB)
    for (Iter I = BB->begin(), End = BB->end(); I != End;)
      Expanded |= expandInstr(*BB, I++);

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch (I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when the ABI is FPXX and mthc1 is
/// not available, and for the case where the ABI is FP64A. It is implemented
/// here because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For FPXX and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of whether the register is odd or
  // even.
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when the ABI is FPXX and mfhc1
/// is not available, and for the case where the ABI is FP64A. It is
/// implemented here because frame indexes are eliminated before
/// MipsSEInstrInfo::expandExtractElementF64 is called.
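///
/// The pseudo takes a GPR32 destination, an FGR64/AFGR64 source, and an
/// immediate selecting which 32-bit half of the double to extract
/// (0 = low word, 1 = high word).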
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    unsigned DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For FPXX and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of whether the register is odd or
  // even.

  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.stackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI->getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI->adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  MachineLocation DstML, SrcML;

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MMI.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  if (CSI.size()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
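    // One .cfi_offset entry per eh data register, so the unwinder can locate
    // the values spilled just above.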
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.needsStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
      assert(isInt<16>(MFI->getMaxAlignment()) &&
             "Function's alignment size requirement is not supported.");
      int MaxAlign = -(signed)MFI->getMaxAlignment();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {

  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support Mips32r2 or later.
  // The epilogue relies on the use of the "ehb" instruction to clear execution
  // hazards. Pre-R2 MIPS relies on an implementation-defined number of
  // "ssnop"s to clear the execution hazard. Support for ssnop hazard clearing
  // is not provided, so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp-relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if (STI.getRelocationModel() != Reloc::Static)
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC.
  StringRef IntKind =
      MF.getFunction()->getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
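    // Read Cause (COP0 register 13) and extract its RIPL field (bits 15..10)
    // into $k0; it is inserted into the Status IPL field further down to mask
    // lower-priority interrupts.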
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC.
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and spill Status.
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower-priority interrupts. Non-EIC
  // interrupts are masked off with zero; EIC interrupts take the mask from the
  // Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the Cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new Status value.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI->getDebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo);
    }
  }

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI->getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {

  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC.
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

int MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI->isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return MFI->getObjectOffset(FI) + MFI->getStackSize() -
         getOffsetOfLocalArea() + MFI->getOffsetAdjustment();
}

bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *EntryBlock = &MF->front();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add it if the register
    // is RA and the return address is taken, because it has already been added
    // in method MipsTargetLowering::LowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and the return
    // address is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo()->isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      EntryBlock->addLiveIn(Reg);

    // ISRs require HI/LO to be spilled into kernel registers to be then
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function *Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func->hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Reserve the call frame if the size of the maximum call frame fits into a
  // 16-bit immediate field and there are no variable sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI->hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF))
    setAliasRegs(MF, SavedRegs, FP);
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Create spill slots for Coprocessor 0 registers if the function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // is mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass *RC = STI.hasMips64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;
    int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                  RC->getAlignment(), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
                         estimateStackSize(MF);

  if (isInt<16>(MaxSPOffset))
    return;

  const TargetRegisterClass *RC =
      ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment(), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}