//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const X86Subtarget &STI)
    : X86GenRegisterInfo(
          (STI.is64Bit() ? X86::RIP : X86::EIP),
          X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), false),
          X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), true),
          (STI.is64Bit() ? X86::RIP : X86::EIP)),
      Subtarget(STI) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  Is64Bit = Subtarget.is64Bit();
  IsWin64 = Subtarget.isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to be in EBX before function calls via the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Subtarget.is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  assert(MF && "MachineFunction required");
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  default:
    break;
  }

  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported"
        " with this calling convention.");

    for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !Subtarget.hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer.
  // When we can't use both the SP and the FP, we need a separate base pointer
  // register.
  bool CantUseFP = needsStackRealignment(MF);
  bool CantUseSP =
      MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
  return CantUseFP && CantUseSP;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
      ((MFI->getMaxAlignment() > StackAlign) ||
       F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                       Attribute::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

namespace llvm {
// getX86SubSuperRegister - Return the sub- or super-register of Reg with the
// given size VT. For VT == MVT::i8, High selects the high 8-bit register
// (e.g. AH instead of AL) where one exists.
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

// get512BitSuperRegister - Return the 512-bit ZMM register that aliases the
// given XMM, YMM, or ZMM register.
unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

} // namespace llvm