//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                           " needed for the function."),
                 cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                       (tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP)),
                       TM(tm), TII(tii) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer.  These registers must
  // not conflict with any ABI requirements.  For example, in 32-bit mode PIC
  // code requires the GOT pointer in EBX before calls through the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}
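
// In the compact unwind format each saved callee-saved register occupies a
// 3-bit field, with 0 meaning "no register"; that is why only the values 1-6
// above (plus the -1 "not encodable" sentinel) are ever produced here.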

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // Only enable when post-RA scheduling is enabled and this is needed.
  return TM.getSubtargetImpl()->postRAScheduler();
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
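
// For example, in 32-bit mode only EAX/EBX/ECX/EDX have addressable byte
// sub-registers, so a query such as
//   getSubClassWithSubReg(&X86::GR32RegClass, X86::sub_8bit)
// is answered as if sub_8bit_hi had been requested and yields the GR32_ABCD
// class, whereas in 64-bit mode the full GR32 class is returned unchanged.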

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  // Don't allow super-classes of GR8_NOREX.  This class is only used after
  // extracting sub_8bit_hi sub-registers.  The H sub-registers cannot be
  // copied to the full GR8 register class in 64-bit mode, so we cannot allow
  // the register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}
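
// One concrete use of the Kind values: the index register of a memory operand
// can never be ESP/RSP for encoding reasons, so such operands are constrained
// with Kind == 1 (the GR32_NOSP/GR64_NOSP classes), while ordinary pointer
// values use Kind == 0 and tail-call address operands use Kind == 2.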

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}
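
// These limits are pressure heuristics for the schedulers rather than exact
// register counts: each class is credited with roughly the registers that are
// neither reserved nor awkward to allocate, and one fewer (FPDiff) when the
// frame pointer register is claimed for the frame.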

const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;

  case CallingConv::Intel_OCL_BI: {
    bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }

  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_MostRegs_64_SaveList;
    break;

  default:
    break;
  }

  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (CC == CallingConv::Intel_OCL_BI) {
    if (IsWin64 && HasAVX)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (Is64Bit && HasAVX)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
  }
  if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
    return CSR_NoRegs_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (CC == CallingConv::Cold)
    return CSR_MostRegs_64_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}
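
// For a plain C call in 64-bit SysV code this returns CSR_64_RegMask, which
// marks RBX, RBP, and R12-R15 as preserved; the register allocator treats
// every register not in the mask as clobbered across the call site.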

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    Reserved.set(getBaseRegister());
    for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are legacy 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      static const uint16_t GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
  //
  // This is also true if the function contains MS-style inline assembly.  We
  // do this because if any stack changes occur in the inline assembly, e.g.,
  // "pusha", then any C local variable or C argument references in the
  // inline assembly will be wrong because the SP is not properly tracked.
  if ((needsStackRealignment(MF) && MFI->hasVarSizedObjects()) ||
      MF.hasMSInlineAsm())
    return true;

  return false;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  if (!MF.getTarget().Options.RealignStack)
    return false;

  // Stack realignment requires a frame pointer.  If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}
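
// For example, a function with a 32-byte aligned local (say a spilled AVX
// __m256 value) on an ABI that only guarantees 16-byte stack alignment has
// MFI->getMaxAlignment() == 32 > StackAlign == 16, so it is realigned
// whenever canRealignStack(MF) also holds.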

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference.  Replace the
  // FrameIndex with the base register chosen above, then fold the frame
  // object offset into the displacement operand.
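  // Schematically, a frame access such as
  //   MOV32rm <dst>, <fi#0>, 1, <noreg>, 0, <noreg>
  // becomes
  //   MOV32rm <dst>, EBP, 1, <noreg>, -8, <noreg>
  // (the -8 displacement is illustrative) after the rewrites below.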
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}

namespace llvm {
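// Maps a register to its sub- or super-register of the width given by VT.
// For example, getX86SubSuperRegister(X86::EAX, MVT::i8) returns X86::AL,
// getX86SubSuperRegister(X86::AL, MVT::i64) returns X86::RAX, and passing
// High == true with MVT::i8 yields the high-byte register (e.g. X86::AH).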
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
}