      1 //===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file contains the implementation of the FastISel class.
     11 //
     12 // "Fast" instruction selection is designed to emit very poor code quickly.
     13 // Also, it is not designed to be able to do much lowering, so most illegal
     14 // types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
     15 // also not intended to be able to do much optimization, except in a few cases
     16 // where doing optimizations reduces overall compile time.  For example, folding
     17 // constants into immediate fields is often done, because it's cheap and it
     18 // reduces the number of instructions later phases have to examine.
     19 //
     20 // "Fast" instruction selection is able to fail gracefully and transfer
     21 // control to the SelectionDAG selector for operations that it doesn't
     22 // support.  In many cases, this allows us to avoid duplicating a lot of
     23 // the complicated lowering logic that SelectionDAG currently has.
     24 //
     25 // The intended use for "fast" instruction selection is "-O0" mode
     26 // compilation, where the quality of the generated code is irrelevant when
     27 // weighed against the speed at which the code can be generated.  Also,
     28 // at -O0, the LLVM optimizers are not running, and this makes the
     29 // compile time of codegen a much higher portion of the overall compile
     30 // time.  Despite its limitations, "fast" instruction selection is able to
     31 // handle enough code on its own to provide noticeable overall speedups
     32 // in -O0 compiles.
     33 //
     34 // Basic operations are supported in a target-independent way, by reading
     35 // the same instruction descriptions that the SelectionDAG selector reads,
     36 // and identifying simple arithmetic operations that can be directly selected
     37 // from simple operators.  More complicated operations currently require
     38 // target-specific code.
     39 //
     40 //===----------------------------------------------------------------------===//
     41 
     43 #include "llvm/ADT/Optional.h"
     44 #include "llvm/ADT/Statistic.h"
     45 #include "llvm/Analysis/BranchProbabilityInfo.h"
     46 #include "llvm/Analysis/Loads.h"
     47 #include "llvm/Analysis/TargetLibraryInfo.h"
     48 #include "llvm/CodeGen/Analysis.h"
     49 #include "llvm/CodeGen/FastISel.h"
     50 #include "llvm/CodeGen/FunctionLoweringInfo.h"
     51 #include "llvm/CodeGen/MachineFrameInfo.h"
     52 #include "llvm/CodeGen/MachineInstrBuilder.h"
     53 #include "llvm/CodeGen/MachineModuleInfo.h"
     54 #include "llvm/CodeGen/MachineRegisterInfo.h"
     55 #include "llvm/CodeGen/StackMaps.h"
     56 #include "llvm/IR/DataLayout.h"
     57 #include "llvm/IR/DebugInfo.h"
     58 #include "llvm/IR/Function.h"
     59 #include "llvm/IR/GlobalVariable.h"
     60 #include "llvm/IR/Instructions.h"
     61 #include "llvm/IR/IntrinsicInst.h"
     62 #include "llvm/IR/Mangler.h"
     63 #include "llvm/IR/Operator.h"
     64 #include "llvm/Support/Debug.h"
     65 #include "llvm/Support/ErrorHandling.h"
     66 #include "llvm/Support/raw_ostream.h"
     67 #include "llvm/Target/TargetInstrInfo.h"
     68 #include "llvm/Target/TargetLowering.h"
     69 #include "llvm/Target/TargetMachine.h"
     70 #include "llvm/Target/TargetSubtargetInfo.h"
     71 using namespace llvm;
     72 
     73 #define DEBUG_TYPE "isel"
     74 
     75 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
     76                                          "target-independent selector");
     77 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
     78                                     "target-specific selector");
     79 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
     80 
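         /// Copy the attributes of parameter AttrIdx from the call site CS into
         /// this argument list entry.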
     81 void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
     82                                            unsigned AttrIdx) {
     83   IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
     84   IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
     85   IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
     86   IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
     87   IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
     88   IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
     89   IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
     90   IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
     91   Alignment = CS->getParamAlignment(AttrIdx);
     92 }
     93 
     94 /// Set the current block to which generated machine instructions will be
     95 /// appended, and clear the local CSE map.
     96 void FastISel::startNewBlock() {
     97   LocalValueMap.clear();
     98 
     99   // Instructions are appended to FuncInfo.MBB. If the basic block already
    100   // contains labels or copies, use the last instruction as the last local
    101   // value.
    102   EmitStartPt = nullptr;
    103   if (!FuncInfo.MBB->empty())
    104     EmitStartPt = &FuncInfo.MBB->back();
    105   LastLocalValue = EmitStartPt;
    106 }
    107 
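         /// Lower the formal arguments of the current function in its entry block
         /// using the target-specific fastLowerArguments hook, then enter the
         /// resulting registers into FuncInfo.ValueMap so that non-entry blocks
         /// can use them. Returns false to fall back to SelectionDAG argument
         /// lowering.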
    108 bool FastISel::lowerArguments() {
    109   if (!FuncInfo.CanLowerReturn)
     110     // Fall back to the SDISel argument lowering code to deal with the sret
     111     // pointer parameter.
    112     return false;
    113 
    114   if (!fastLowerArguments())
    115     return false;
    116 
    117   // Enter arguments into ValueMap for uses in non-entry BBs.
    118   for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
    119                                     E = FuncInfo.Fn->arg_end();
    120        I != E; ++I) {
    121     DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    122     assert(VI != LocalValueMap.end() && "Missed an argument?");
    123     FuncInfo.ValueMap[&*I] = VI->second;
    124   }
    125   return true;
    126 }
    127 
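         /// Clear the per-block local value map and reset the local-value
         /// insertion state, so that values materialized earlier in the block are
         /// not reused past this point.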
    128 void FastISel::flushLocalValueMap() {
    129   LocalValueMap.clear();
    130   LastLocalValue = EmitStartPt;
    131   recomputeInsertPt();
    132   SavedInsertPt = FuncInfo.InsertPt;
    133 }
    134 
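         /// Decide whether the register holding V may be marked as killed at its
         /// use. Only an instruction with a single use in its own basic block
         /// qualifies; coalesced no-op casts and all-zero GEPs, pointer/int casts,
         /// and values with extra uses at the MachineInstr level do not.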
    135 bool FastISel::hasTrivialKill(const Value *V) {
    136   // Don't consider constants or arguments to have trivial kills.
    137   const Instruction *I = dyn_cast<Instruction>(V);
    138   if (!I)
    139     return false;
    140 
    141   // No-op casts are trivially coalesced by fast-isel.
    142   if (const auto *Cast = dyn_cast<CastInst>(I))
    143     if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
    144         !hasTrivialKill(Cast->getOperand(0)))
    145       return false;
    146 
     147   // Even if the value has only one use in the LLVM IR, it is possible that
     148   // FastISel might fold the use into another instruction and then there would
     149   // be more than one use at the Machine Instruction level.
    150   unsigned Reg = lookUpRegForValue(V);
    151   if (Reg && !MRI.use_empty(Reg))
    152     return false;
    153 
    154   // GEPs with all zero indices are trivially coalesced by fast-isel.
    155   if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    156     if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
    157       return false;
    158 
    159   // Only instructions with a single use in the same basic block are considered
    160   // to have trivial kills.
    161   return I->hasOneUse() &&
    162          !(I->getOpcode() == Instruction::BitCast ||
    163            I->getOpcode() == Instruction::PtrToInt ||
    164            I->getOpcode() == Instruction::IntToPtr) &&
    165          cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
    166 }
    167 
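         /// Return a virtual register holding the value V, creating or
         /// materializing one if necessary. Returns 0 if V has a type that
         /// FastISel cannot handle (non-simple, or illegal other than the common
         /// integer promotions).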
    168 unsigned FastISel::getRegForValue(const Value *V) {
    169   EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
    170   // Don't handle non-simple values in FastISel.
    171   if (!RealVT.isSimple())
    172     return 0;
    173 
    174   // Ignore illegal types. We must do this before looking up the value
    175   // in ValueMap because Arguments are given virtual registers regardless
    176   // of whether FastISel can handle them.
    177   MVT VT = RealVT.getSimpleVT();
    178   if (!TLI.isTypeLegal(VT)) {
    179     // Handle integer promotions, though, because they're common and easy.
    180     if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    181       VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    182     else
    183       return 0;
    184   }
    185 
    186   // Look up the value to see if we already have a register for it.
    187   unsigned Reg = lookUpRegForValue(V);
    188   if (Reg)
    189     return Reg;
    190 
    191   // In bottom-up mode, just create the virtual register which will be used
    192   // to hold the value. It will be materialized later.
    193   if (isa<Instruction>(V) &&
    194       (!isa<AllocaInst>(V) ||
    195        !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    196     return FuncInfo.InitializeRegForValue(V);
    197 
    198   SavePoint SaveInsertPt = enterLocalValueArea();
    199 
    200   // Materialize the value in a register. Emit any instructions in the
    201   // local value area.
    202   Reg = materializeRegForValue(V, VT);
    203 
    204   leaveLocalValueArea(SaveInsertPt);
    205 
    206   return Reg;
    207 }
    208 
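         /// Target-independent materialization of a constant-like value V of type
         /// VT: integer constants, static allocas, null pointers, FP constants
         /// (directly or via an integer constant plus a conversion), constant
         /// expressions, and undef. Returns 0 on failure.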
    209 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
    210   unsigned Reg = 0;
    211   if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    212     if (CI->getValue().getActiveBits() <= 64)
    213       Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
    214   } else if (isa<AllocaInst>(V))
    215     Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
    216   else if (isa<ConstantPointerNull>(V))
    217     // Translate this as an integer zero so that it can be
    218     // local-CSE'd with actual integer zeros.
    219     Reg = getRegForValue(
    220         Constant::getNullValue(DL.getIntPtrType(V->getContext())));
    221   else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    222     if (CF->isNullValue())
    223       Reg = fastMaterializeFloatZero(CF);
    224     else
    225       // Try to emit the constant directly.
    226       Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
    227 
    228     if (!Reg) {
    229       // Try to emit the constant by using an integer constant with a cast.
    230       const APFloat &Flt = CF->getValueAPF();
    231       EVT IntVT = TLI.getPointerTy(DL);
    232 
    233       uint64_t x[2];
    234       uint32_t IntBitWidth = IntVT.getSizeInBits();
    235       bool isExact;
    236       (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
    237                                  APFloat::rmTowardZero, &isExact);
    238       if (isExact) {
    239         APInt IntVal(IntBitWidth, x);
    240 
    241         unsigned IntegerReg =
    242             getRegForValue(ConstantInt::get(V->getContext(), IntVal));
    243         if (IntegerReg != 0)
    244           Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
    245                            /*Kill=*/false);
    246       }
    247     }
    248   } else if (const auto *Op = dyn_cast<Operator>(V)) {
    249     if (!selectOperator(Op, Op->getOpcode()))
    250       if (!isa<Instruction>(Op) ||
    251           !fastSelectInstruction(cast<Instruction>(Op)))
    252         return 0;
    253     Reg = lookUpRegForValue(Op);
    254   } else if (isa<UndefValue>(V)) {
    255     Reg = createResultReg(TLI.getRegClassFor(VT));
    256     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
    257             TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
    258   }
    259   return Reg;
    260 }
    261 
    262 /// Helper for getRegForValue. This function is called when the value isn't
    263 /// already available in a register and must be materialized with new
    264 /// instructions.
    265 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
    266   unsigned Reg = 0;
    267   // Give the target-specific code a try first.
    268   if (isa<Constant>(V))
    269     Reg = fastMaterializeConstant(cast<Constant>(V));
    270 
    271   // If target-specific code couldn't or didn't want to handle the value, then
    272   // give target-independent code a try.
    273   if (!Reg)
    274     Reg = materializeConstant(V, VT);
    275 
    276   // Don't cache constant materializations in the general ValueMap.
    277   // To do so would require tracking what uses they dominate.
    278   if (Reg) {
    279     LocalValueMap[V] = Reg;
    280     LastLocalValue = MRI.getVRegDef(Reg);
    281   }
    282   return Reg;
    283 }
    284 
    285 unsigned FastISel::lookUpRegForValue(const Value *V) {
    286   // Look up the value to see if we already have a register for it. We
    287   // cache values defined by Instructions across blocks, and other values
    288   // only locally. This is because Instructions already have the SSA
    289   // def-dominates-use requirement enforced.
    290   DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
    291   if (I != FuncInfo.ValueMap.end())
    292     return I->second;
    293   return LocalValueMap[V];
    294 }
    295 
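         /// Record that the value I now lives in Reg (spanning NumRegs registers).
         /// If I already had registers assigned, arrange for the old registers to
         /// be rewritten to the new ones via FuncInfo.RegFixups.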
    296 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
    297   if (!isa<Instruction>(I)) {
    298     LocalValueMap[I] = Reg;
    299     return;
    300   }
    301 
    302   unsigned &AssignedReg = FuncInfo.ValueMap[I];
    303   if (AssignedReg == 0)
    304     // Use the new register.
    305     AssignedReg = Reg;
    306   else if (Reg != AssignedReg) {
    307     // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    308     for (unsigned i = 0; i < NumRegs; i++)
    309       FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
    310 
    311     AssignedReg = Reg;
    312   }
    313 }
    314 
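         /// Materialize a GEP index operand in a register of pointer width,
         /// sign-extending or truncating as needed. Returns the register and
         /// whether it may be marked as killed.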
    315 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
    316   unsigned IdxN = getRegForValue(Idx);
    317   if (IdxN == 0)
    318     // Unhandled operand. Halt "fast" selection and bail.
    319     return std::pair<unsigned, bool>(0, false);
    320 
    321   bool IdxNIsKill = hasTrivialKill(Idx);
    322 
    323   // If the index is smaller or larger than intptr_t, truncate or extend it.
    324   MVT PtrVT = TLI.getPointerTy(DL);
    325   EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
    326   if (IdxVT.bitsLT(PtrVT)) {
    327     IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
    328                       IdxNIsKill);
    329     IdxNIsKill = true;
    330   } else if (IdxVT.bitsGT(PtrVT)) {
    331     IdxN =
    332         fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    333     IdxNIsKill = true;
    334   }
    335   return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
    336 }
    337 
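         /// Reset the insertion point to just past the last local value, or to the
         /// first non-PHI instruction if there are no local values, skipping any
         /// leading EH_LABELs.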
    338 void FastISel::recomputeInsertPt() {
    339   if (getLastLocalValue()) {
    340     FuncInfo.InsertPt = getLastLocalValue();
    341     FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    342     ++FuncInfo.InsertPt;
    343   } else
    344     FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
    345 
    346   // Now skip past any EH_LABELs, which must remain at the beginning.
    347   while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
    348          FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    349     ++FuncInfo.InsertPt;
    350 }
    351 
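         /// Erase the dead machine instructions in the range [I, E) and recompute
         /// the insertion point.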
    352 void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
    353                               MachineBasicBlock::iterator E) {
    354   assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
    355   while (I != E) {
    356     MachineInstr *Dead = &*I;
    357     ++I;
    358     Dead->eraseFromParent();
    359     ++NumFastIselDead;
    360   }
    361   recomputeInsertPt();
    362 }
    363 
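         /// Move the insertion point into the local value area (clearing the debug
         /// location), returning the previous position so that leaveLocalValueArea
         /// can restore it.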
    364 FastISel::SavePoint FastISel::enterLocalValueArea() {
    365   MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
    366   DebugLoc OldDL = DbgLoc;
    367   recomputeInsertPt();
    368   DbgLoc = DebugLoc();
    369   SavePoint SP = {OldInsertPt, OldDL};
    370   return SP;
    371 }
    372 
    373 void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
    374   if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    375     LastLocalValue = std::prev(FuncInfo.InsertPt);
    376 
    377   // Restore the previous insert position.
    378   FuncInfo.InsertPt = OldInsertPt.InsertPt;
    379   DbgLoc = OldInsertPt.DL;
    380 }
    381 
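         /// Select a binary operator, using immediate forms when one operand is a
         /// constant and applying two cheap strength reductions: exact sdiv by a
         /// power of two becomes a shift, and urem by a power of two becomes an
         /// and.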
    382 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
    383   EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
    384   if (VT == MVT::Other || !VT.isSimple())
    385     // Unhandled type. Halt "fast" selection and bail.
    386     return false;
    387 
    388   // We only handle legal types. For example, on x86-32 the instruction
    389   // selector contains all of the 64-bit instructions from x86-64,
    390   // under the assumption that i64 won't be used if the target doesn't
    391   // support it.
    392   if (!TLI.isTypeLegal(VT)) {
    393     // MVT::i1 is special. Allow AND, OR, or XOR because they
    394     // don't require additional zeroing, which makes them easy.
    395     if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
    396                           ISDOpcode == ISD::XOR))
    397       VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    398     else
    399       return false;
    400   }
    401 
    402   // Check if the first operand is a constant, and handle it as "ri".  At -O0,
    403   // we don't have anything that canonicalizes operand order.
    404   if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    405     if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
    406       unsigned Op1 = getRegForValue(I->getOperand(1));
    407       if (!Op1)
    408         return false;
    409       bool Op1IsKill = hasTrivialKill(I->getOperand(1));
    410 
    411       unsigned ResultReg =
    412           fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
    413                        CI->getZExtValue(), VT.getSimpleVT());
    414       if (!ResultReg)
    415         return false;
    416 
    417       // We successfully emitted code for the given LLVM Instruction.
    418       updateValueMap(I, ResultReg);
    419       return true;
    420     }
    421 
    422   unsigned Op0 = getRegForValue(I->getOperand(0));
    423   if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    424     return false;
    425   bool Op0IsKill = hasTrivialKill(I->getOperand(0));
    426 
    427   // Check if the second operand is a constant and handle it appropriately.
    428   if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    429     uint64_t Imm = CI->getSExtValue();
    430 
    431     // Transform "sdiv exact X, 8" -> "sra X, 3".
    432     if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
    433         cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
    434       Imm = Log2_64(Imm);
    435       ISDOpcode = ISD::SRA;
    436     }
    437 
    438     // Transform "urem x, pow2" -> "and x, pow2-1".
    439     if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
    440         isPowerOf2_64(Imm)) {
    441       --Imm;
    442       ISDOpcode = ISD::AND;
    443     }
    444 
    445     unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
    446                                       Op0IsKill, Imm, VT.getSimpleVT());
    447     if (!ResultReg)
    448       return false;
    449 
    450     // We successfully emitted code for the given LLVM Instruction.
    451     updateValueMap(I, ResultReg);
    452     return true;
    453   }
    454 
    455   // Check if the second operand is a constant float.
    456   if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    457     unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
    458                                      ISDOpcode, Op0, Op0IsKill, CF);
    459     if (ResultReg) {
    460       // We successfully emitted code for the given LLVM Instruction.
    461       updateValueMap(I, ResultReg);
    462       return true;
    463     }
    464   }
    465 
    466   unsigned Op1 = getRegForValue(I->getOperand(1));
    467   if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    468     return false;
    469   bool Op1IsKill = hasTrivialKill(I->getOperand(1));
    470 
    471   // Now we have both operands in registers. Emit the instruction.
    472   unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
    473                                    ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
    474   if (!ResultReg)
    475     // Target-specific code wasn't able to find a machine opcode for
    476     // the given ISD opcode and type. Halt "fast" selection and bail.
    477     return false;
    478 
    479   // We successfully emitted code for the given LLVM Instruction.
    480   updateValueMap(I, ResultReg);
    481   return true;
    482 }
    483 
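         /// Select a GEP by accumulating constant offsets into a running total and
         /// emitting pointer-sized adds (plus multiplies for variable indices),
         /// flushing the accumulated offset whenever it reaches MaxOffs.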
    484 bool FastISel::selectGetElementPtr(const User *I) {
    485   unsigned N = getRegForValue(I->getOperand(0));
    486   if (!N) // Unhandled operand. Halt "fast" selection and bail.
    487     return false;
    488   bool NIsKill = hasTrivialKill(I->getOperand(0));
    489 
    490   // Keep a running tab of the total offset to coalesce multiple N = N + Offset
    491   // into a single N = N + TotalOffset.
    492   uint64_t TotalOffs = 0;
    493   // FIXME: What's a good SWAG number for MaxOffs?
    494   uint64_t MaxOffs = 2048;
    495   Type *Ty = I->getOperand(0)->getType();
    496   MVT VT = TLI.getPointerTy(DL);
    497   for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
    498                                             E = I->op_end();
    499        OI != E; ++OI) {
    500     const Value *Idx = *OI;
    501     if (auto *StTy = dyn_cast<StructType>(Ty)) {
    502       uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
    503       if (Field) {
    504         // N = N + Offset
    505         TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
    506         if (TotalOffs >= MaxOffs) {
    507           N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    508           if (!N) // Unhandled operand. Halt "fast" selection and bail.
    509             return false;
    510           NIsKill = true;
    511           TotalOffs = 0;
    512         }
    513       }
    514       Ty = StTy->getElementType(Field);
    515     } else {
    516       Ty = cast<SequentialType>(Ty)->getElementType();
    517 
    518       // If this is a constant subscript, handle it quickly.
    519       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
    520         if (CI->isZero())
    521           continue;
    522         // N = N + Offset
    523         uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
    524         TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
    525         if (TotalOffs >= MaxOffs) {
    526           N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    527           if (!N) // Unhandled operand. Halt "fast" selection and bail.
    528             return false;
    529           NIsKill = true;
    530           TotalOffs = 0;
    531         }
    532         continue;
    533       }
    534       if (TotalOffs) {
    535         N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    536         if (!N) // Unhandled operand. Halt "fast" selection and bail.
    537           return false;
    538         NIsKill = true;
    539         TotalOffs = 0;
    540       }
    541 
    542       // N = N + Idx * ElementSize;
    543       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
    544       std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
    545       unsigned IdxN = Pair.first;
    546       bool IdxNIsKill = Pair.second;
    547       if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
    548         return false;
    549 
    550       if (ElementSize != 1) {
    551         IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
    552         if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
    553           return false;
    554         IdxNIsKill = true;
    555       }
    556       N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
    557       if (!N) // Unhandled operand. Halt "fast" selection and bail.
    558         return false;
    559     }
    560   }
    561   if (TotalOffs) {
    562     N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    563     if (!N) // Unhandled operand. Halt "fast" selection and bail.
    564       return false;
    565   }
    566 
    567   // We successfully emitted code for the given LLVM Instruction.
    568   updateValueMap(I, N);
    569   return true;
    570 }
    571 
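         /// Append the live-variable operands of a stackmap or patchpoint call to
         /// Ops, encoding constants with a ConstantOp prefix, static allocas as
         /// frame indices, and everything else as registers. Returns false if an
         /// operand cannot be handled.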
    572 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
    573                                    const CallInst *CI, unsigned StartIdx) {
    574   for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    575     Value *Val = CI->getArgOperand(i);
    576     // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    577     if (const auto *C = dyn_cast<ConstantInt>(Val)) {
    578       Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
    579       Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    580     } else if (isa<ConstantPointerNull>(Val)) {
    581       Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
    582       Ops.push_back(MachineOperand::CreateImm(0));
    583     } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
     584       // Values coming from a stack location also require a special encoding,
     585       // but that is added later on by the target-specific frame index
     586       // elimination implementation.
    587       auto SI = FuncInfo.StaticAllocaMap.find(AI);
    588       if (SI != FuncInfo.StaticAllocaMap.end())
    589         Ops.push_back(MachineOperand::CreateFI(SI->second));
    590       else
    591         return false;
    592     } else {
    593       unsigned Reg = getRegForValue(Val);
    594       if (!Reg)
    595         return false;
    596       Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    597     }
    598   }
    599   return true;
    600 }
    601 
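         /// Lower a call to the llvm.experimental.stackmap intrinsic directly to a
         /// STACKMAP machine instruction bracketed by CALLSEQ_START/CALLSEQ_END,
         /// without going through call lowering.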
    602 bool FastISel::selectStackmap(const CallInst *I) {
    603   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
    604   //                                  [live variables...])
    605   assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
    606          "Stackmap cannot return a value.");
    607 
    608   // The stackmap intrinsic only records the live variables (the arguments
     609   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
    610   // intrinsic, this won't be lowered to a function call. This means we don't
    611   // have to worry about calling conventions and target-specific lowering code.
    612   // Instead we perform the call lowering right here.
    613   //
    614   // CALLSEQ_START(0...)
    615   // STACKMAP(id, nbytes, ...)
    616   // CALLSEQ_END(0, 0)
    617   //
    618   SmallVector<MachineOperand, 32> Ops;
    619 
    620   // Add the <id> and <numBytes> constants.
    621   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
    622          "Expected a constant integer.");
    623   const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
    624   Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
    625 
    626   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
    627          "Expected a constant integer.");
    628   const auto *NumBytes =
    629       cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
    630   Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
    631 
    632   // Push live variables for the stack map (skipping the first two arguments
    633   // <id> and <numBytes>).
    634   if (!addStackMapLiveVars(Ops, I, 2))
    635     return false;
    636 
    637   // We are not adding any register mask info here, because the stackmap doesn't
    638   // clobber anything.
    639 
    640   // Add scratch registers as implicit def and early clobber.
    641   CallingConv::ID CC = I->getCallingConv();
    642   const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
    643   for (unsigned i = 0; ScratchRegs[i]; ++i)
    644     Ops.push_back(MachineOperand::CreateReg(
    645         ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
    646         /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
    647 
    648   // Issue CALLSEQ_START
    649   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
    650   auto Builder =
    651       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
    652   const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
    653   for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    654     Builder.addImm(0);
    655 
    656   // Issue STACKMAP.
    657   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
    658                                     TII.get(TargetOpcode::STACKMAP));
    659   for (auto const &MO : Ops)
    660     MIB.addOperand(MO);
    661 
    662   // Issue CALLSEQ_END
    663   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
    664   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
    665       .addImm(0)
    666       .addImm(0);
    667 
    668   // Inform the Frame Information that we have a stackmap in this function.
    669   FuncInfo.MF->getFrameInfo()->setHasStackMap();
    670 
    671   return true;
    672 }
    673 
    674 /// \brief Lower an argument list according to the target calling convention.
    675 ///
    676 /// This is a helper for lowering intrinsics that follow a target calling
    677 /// convention or require stack pointer adjustment. Only a subset of the
    678 /// intrinsic's operands need to participate in the calling convention.
    679 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
    680                                  unsigned NumArgs, const Value *Callee,
    681                                  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
    682   ArgListTy Args;
    683   Args.reserve(NumArgs);
    684 
    685   // Populate the argument list.
    686   // Attributes for args start at offset 1, after the return attribute.
    687   ImmutableCallSite CS(CI);
    688   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
    689        ArgI != ArgE; ++ArgI) {
    690     Value *V = CI->getOperand(ArgI);
    691 
    692     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
    693 
    694     ArgListEntry Entry;
    695     Entry.Val = V;
    696     Entry.Ty = V->getType();
    697     Entry.setAttributes(&CS, AttrI);
    698     Args.push_back(Entry);
    699   }
    700 
    701   Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
    702                                : CI->getType();
    703   CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
    704 
    705   return lowerCallTo(CLI);
    706 }
    707 
    708 FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    709     const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    710     const char *Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
    711   SmallString<32> MangledName;
    712   Mangler::getNameWithPrefix(MangledName, Target, DL);
    713   MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
    714   return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
    715 }
    716 
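         /// Lower a call to the llvm.experimental.patchpoint intrinsic: lower the
         /// actual call arguments (except under the anyreg calling convention),
         /// then replace the generated call with a single PATCHPOINT instruction
         /// carrying the id, shadow size, target, arguments, live variables, and
         /// scratch registers.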
    717 bool FastISel::selectPatchpoint(const CallInst *I) {
    718   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
    719   //                                                 i32 <numBytes>,
    720   //                                                 i8* <target>,
    721   //                                                 i32 <numArgs>,
    722   //                                                 [Args...],
    723   //                                                 [live variables...])
    724   CallingConv::ID CC = I->getCallingConv();
    725   bool IsAnyRegCC = CC == CallingConv::AnyReg;
    726   bool HasDef = !I->getType()->isVoidTy();
    727   Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
    728 
    729   // Get the real number of arguments participating in the call <numArgs>
    730   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
    731          "Expected a constant integer.");
    732   const auto *NumArgsVal =
    733       cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
    734   unsigned NumArgs = NumArgsVal->getZExtValue();
    735 
    736   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
    737   // This includes all meta-operands up to but not including CC.
    738   unsigned NumMetaOpers = PatchPointOpers::CCPos;
    739   assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
    740          "Not enough arguments provided to the patchpoint intrinsic");
    741 
    742   // For AnyRegCC the arguments are lowered later on manually.
    743   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
    744   CallLoweringInfo CLI;
    745   CLI.setIsPatchPoint();
    746   if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    747     return false;
    748 
    749   assert(CLI.Call && "No call instruction specified.");
    750 
    751   SmallVector<MachineOperand, 32> Ops;
    752 
    753   // Add an explicit result reg if we use the anyreg calling convention.
    754   if (IsAnyRegCC && HasDef) {
    755     assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    756     CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    757     CLI.NumResultRegs = 1;
    758     Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
    759   }
    760 
    761   // Add the <id> and <numBytes> constants.
    762   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
    763          "Expected a constant integer.");
    764   const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
    765   Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
    766 
    767   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
    768          "Expected a constant integer.");
    769   const auto *NumBytes =
    770       cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
    771   Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
    772 
    773   // Add the call target.
    774   if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    775     uint64_t CalleeConstAddr =
    776       cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    777     Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    778   } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    779     if (C->getOpcode() == Instruction::IntToPtr) {
    780       uint64_t CalleeConstAddr =
    781         cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    782       Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    783     } else
    784       llvm_unreachable("Unsupported ConstantExpr.");
    785   } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    786     Ops.push_back(MachineOperand::CreateGA(GV, 0));
    787   } else if (isa<ConstantPointerNull>(Callee))
    788     Ops.push_back(MachineOperand::CreateImm(0));
    789   else
    790     llvm_unreachable("Unsupported callee address.");
    791 
    792   // Adjust <numArgs> to account for any arguments that have been passed on
    793   // the stack instead.
    794   unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
    795   Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
    796 
    797   // Add the calling convention
    798   Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
    799 
    800   // Add the arguments we omitted previously. The register allocator should
    801   // place these in any free register.
    802   if (IsAnyRegCC) {
    803     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
    804       unsigned Reg = getRegForValue(I->getArgOperand(i));
    805       if (!Reg)
    806         return false;
    807       Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    808     }
    809   }
    810 
    811   // Push the arguments from the call instruction.
    812   for (auto Reg : CLI.OutRegs)
    813     Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    814 
    815   // Push live variables for the stack map.
    816   if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    817     return false;
    818 
    819   // Push the register mask info.
    820   Ops.push_back(MachineOperand::CreateRegMask(
    821       TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
    822 
    823   // Add scratch registers as implicit def and early clobber.
    824   const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
    825   for (unsigned i = 0; ScratchRegs[i]; ++i)
    826     Ops.push_back(MachineOperand::CreateReg(
    827         ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
    828         /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
    829 
    830   // Add implicit defs (return values).
    831   for (auto Reg : CLI.InRegs)
    832     Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
    833                                             /*IsImpl=*/true));
    834 
    835   // Insert the patchpoint instruction before the call generated by the target.
    836   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
    837                                     TII.get(TargetOpcode::PATCHPOINT));
    838 
    839   for (auto &MO : Ops)
    840     MIB.addOperand(MO);
    841 
    842   MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
    843 
    844   // Delete the original call instruction.
    845   CLI.Call->eraseFromParent();
    846 
    847   // Inform the Frame Information that we have a patchpoint in this function.
    848   FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
    849 
    850   if (CLI.NumResultRegs)
    851     updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
    852   return true;
    853 }
    854 
    855 /// Returns an AttributeSet representing the attributes applied to the return
    856 /// value of the given call.
    857 static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
    858   SmallVector<Attribute::AttrKind, 2> Attrs;
    859   if (CLI.RetSExt)
    860     Attrs.push_back(Attribute::SExt);
    861   if (CLI.RetZExt)
    862     Attrs.push_back(Attribute::ZExt);
    863   if (CLI.IsInReg)
    864     Attrs.push_back(Attribute::InReg);
    865 
    866   return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
    867                            Attrs);
    868 }
    869 
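         /// Lower a call to the external symbol SymName, taking the first NumArgs
         /// operands of CI as the arguments; the symbol name is mangled and the
         /// call forwarded to the MCSymbol overload below.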
    870 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
    871                            unsigned NumArgs) {
    872   MCContext &Ctx = MF->getContext();
    873   SmallString<32> MangledName;
    874   Mangler::getNameWithPrefix(MangledName, SymName, DL);
    875   MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
    876   return lowerCallTo(CI, Sym, NumArgs);
    877 }
    878 
    879 bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
    880                            unsigned NumArgs) {
    881   ImmutableCallSite CS(CI);
    882 
    883   PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
    884   FunctionType *FTy = cast<FunctionType>(PT->getElementType());
    885   Type *RetTy = FTy->getReturnType();
    886 
    887   ArgListTy Args;
    888   Args.reserve(NumArgs);
    889 
    890   // Populate the argument list.
    891   // Attributes for args start at offset 1, after the return attribute.
    892   for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    893     Value *V = CI->getOperand(ArgI);
    894 
    895     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
    896 
    897     ArgListEntry Entry;
    898     Entry.Val = V;
    899     Entry.Ty = V->getType();
    900     Entry.setAttributes(&CS, ArgI + 1);
    901     Args.push_back(Entry);
    902   }
    903 
    904   CallLoweringInfo CLI;
    905   CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
    906 
    907   return lowerCallTo(CLI);
    908 }
    909 
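         /// Target-independent part of call lowering: compute the return-value and
         /// argument flags for CLI, hand the call to the target's fastLowerCall
         /// hook, and record the result registers in the value map. Bails out if
         /// the return value would require sret demotion.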
    910 bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
    911   // Handle the incoming return values from the call.
    912   CLI.clearIns();
    913   SmallVector<EVT, 4> RetTys;
    914   ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
    915 
    916   SmallVector<ISD::OutputArg, 4> Outs;
    917   GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
    918 
    919   bool CanLowerReturn = TLI.CanLowerReturn(
    920       CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
    921 
    922   // FIXME: sret demotion isn't supported yet - bail out.
    923   if (!CanLowerReturn)
    924     return false;
    925 
    926   for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    927     EVT VT = RetTys[I];
    928     MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    929     unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    930     for (unsigned i = 0; i != NumRegs; ++i) {
    931       ISD::InputArg MyFlags;
    932       MyFlags.VT = RegisterVT;
    933       MyFlags.ArgVT = VT;
    934       MyFlags.Used = CLI.IsReturnValueUsed;
    935       if (CLI.RetSExt)
    936         MyFlags.Flags.setSExt();
    937       if (CLI.RetZExt)
    938         MyFlags.Flags.setZExt();
    939       if (CLI.IsInReg)
    940         MyFlags.Flags.setInReg();
    941       CLI.Ins.push_back(MyFlags);
    942     }
    943   }
    944 
    945   // Handle all of the outgoing arguments.
    946   CLI.clearOuts();
    947   for (auto &Arg : CLI.getArgs()) {
    948     Type *FinalType = Arg.Ty;
    949     if (Arg.IsByVal)
    950       FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    951     bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
    952         FinalType, CLI.CallConv, CLI.IsVarArg);
    953 
    954     ISD::ArgFlagsTy Flags;
    955     if (Arg.IsZExt)
    956       Flags.setZExt();
    957     if (Arg.IsSExt)
    958       Flags.setSExt();
    959     if (Arg.IsInReg)
    960       Flags.setInReg();
    961     if (Arg.IsSRet)
    962       Flags.setSRet();
    963     if (Arg.IsByVal)
    964       Flags.setByVal();
    965     if (Arg.IsInAlloca) {
    966       Flags.setInAlloca();
    967       // Set the byval flag for CCAssignFn callbacks that don't know about
    968       // inalloca. This way we can know how many bytes we should've allocated
    969       // and how many bytes a callee cleanup function will pop.  If we port
    970       // inalloca to more targets, we'll have to add custom inalloca handling in
    971       // the various CC lowering callbacks.
    972       Flags.setByVal();
    973     }
    974     if (Arg.IsByVal || Arg.IsInAlloca) {
    975       PointerType *Ty = cast<PointerType>(Arg.Ty);
    976       Type *ElementTy = Ty->getElementType();
    977       unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
     978       // For ByVal, alignment should come from the frontend. The backend will
     979       // guess if this info is not there, but there are cases it cannot get right.
    980       unsigned FrameAlign = Arg.Alignment;
    981       if (!FrameAlign)
    982         FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
    983       Flags.setByValSize(FrameSize);
    984       Flags.setByValAlign(FrameAlign);
    985     }
    986     if (Arg.IsNest)
    987       Flags.setNest();
    988     if (NeedsRegBlock)
    989       Flags.setInConsecutiveRegs();
    990     unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    991     Flags.setOrigAlign(OriginalAlignment);
    992 
    993     CLI.OutVals.push_back(Arg.Val);
    994     CLI.OutFlags.push_back(Flags);
    995   }
    996 
    997   if (!fastLowerCall(CLI))
    998     return false;
    999 
   1000   // Set all unused physreg defs as dead.
   1001   assert(CLI.Call && "No call instruction specified.");
   1002   CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
   1003 
   1004   if (CLI.NumResultRegs && CLI.CS)
   1005     updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
   1006 
   1007   return true;
   1008 }
   1009 
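         /// Build a CallLoweringInfo for an ordinary call instruction, including
         /// tail-call eligibility, and lower it through lowerCallTo.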
   1010 bool FastISel::lowerCall(const CallInst *CI) {
   1011   ImmutableCallSite CS(CI);
   1012 
   1013   PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
   1014   FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
   1015   Type *RetTy = FuncTy->getReturnType();
   1016 
   1017   ArgListTy Args;
   1018   ArgListEntry Entry;
   1019   Args.reserve(CS.arg_size());
   1020 
   1021   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
   1022        i != e; ++i) {
   1023     Value *V = *i;
   1024 
   1025     // Skip empty types
   1026     if (V->getType()->isEmptyTy())
   1027       continue;
   1028 
   1029     Entry.Val = V;
   1030     Entry.Ty = V->getType();
   1031 
   1032     // Skip the first return-type Attribute to get to params.
   1033     Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
   1034     Args.push_back(Entry);
   1035   }
   1036 
   1037   // Check if target-independent constraints permit a tail call here.
   1038   // Target-dependent constraints are checked within fastLowerCall.
   1039   bool IsTailCall = CI->isTailCall();
   1040   if (IsTailCall && !isInTailCallPosition(CS, TM))
   1041     IsTailCall = false;
   1042 
   1043   CallLoweringInfo CLI;
   1044   CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
   1045       .setTailCall(IsTailCall);
   1046 
   1047   return lowerCallTo(CLI);
   1048 }
   1049 
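         /// Select a call instruction: handle trivial inline asm directly,
         /// dispatch intrinsics to selectIntrinsicCall, and lower everything else
         /// as a normal call after flushing the local value map.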
   1050 bool FastISel::selectCall(const User *I) {
   1051   const CallInst *Call = cast<CallInst>(I);
   1052 
   1053   // Handle simple inline asms.
   1054   if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
   1055     // If the inline asm has side effects, then make sure that no local value
   1056     // lives across by flushing the local value map.
   1057     if (IA->hasSideEffects())
   1058       flushLocalValueMap();
   1059 
   1060     // Don't attempt to handle constraints.
   1061     if (!IA->getConstraintString().empty())
   1062       return false;
   1063 
   1064     unsigned ExtraInfo = 0;
   1065     if (IA->hasSideEffects())
   1066       ExtraInfo |= InlineAsm::Extra_HasSideEffects;
   1067     if (IA->isAlignStack())
   1068       ExtraInfo |= InlineAsm::Extra_IsAlignStack;
   1069 
   1070     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
   1071             TII.get(TargetOpcode::INLINEASM))
   1072         .addExternalSymbol(IA->getAsmString().c_str())
   1073         .addImm(ExtraInfo);
   1074     return true;
   1075   }
   1076 
   1077   MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
   1078   ComputeUsesVAFloatArgument(*Call, &MMI);
   1079 
   1080   // Handle intrinsic function calls.
   1081   if (const auto *II = dyn_cast<IntrinsicInst>(Call))
   1082     return selectIntrinsicCall(II);
   1083 
    1084   // Usually it does not make sense to initialize a value, make an
    1085   // unrelated function call, and then use the value, because the value
    1086   // tends to be spilled on the stack. So we move the pointer to the last
    1087   // local value back to the beginning of the block, so that all the
    1088   // values which have already been materialized appear after the call.
    1089   // It also makes sense to skip intrinsics, since they tend to be
    1090   // inlined.
   1091   flushLocalValueMap();
   1092 
   1093   return lowerCall(Call);
   1094 }
   1095 
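         /// Select the intrinsics FastISel handles itself (lifetime markers,
         /// donothing, debug info, objectsize, expect, stackmap, and patchpoint);
         /// everything else is passed to the target's fastLowerIntrinsicCall hook.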
   1096 bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
   1097   switch (II->getIntrinsicID()) {
   1098   default:
   1099     break;
   1100   // At -O0 we don't care about the lifetime intrinsics.
   1101   case Intrinsic::lifetime_start:
   1102   case Intrinsic::lifetime_end:
   1103   // The donothing intrinsic does, well, nothing.
   1104   case Intrinsic::donothing:
   1105     return true;
   1106   case Intrinsic::dbg_declare: {
   1107     const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
   1108     assert(DI->getVariable() && "Missing variable");
   1109     if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
   1110       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
   1111       return true;
   1112     }
   1113 
   1114     const Value *Address = DI->getAddress();
   1115     if (!Address || isa<UndefValue>(Address)) {
   1116       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
   1117       return true;
   1118     }
   1119 
   1120     unsigned Offset = 0;
   1121     Optional<MachineOperand> Op;
   1122     if (const auto *Arg = dyn_cast<Argument>(Address))
   1123       // Some arguments' frame index is recorded during argument lowering.
   1124       Offset = FuncInfo.getArgumentFrameIndex(Arg);
   1125     if (Offset)
   1126       Op = MachineOperand::CreateFI(Offset);
   1127     if (!Op)
   1128       if (unsigned Reg = lookUpRegForValue(Address))
   1129         Op = MachineOperand::CreateReg(Reg, false);
   1130 
   1131     // If we have a VLA that has a "use" in a metadata node that's then used
   1132     // here but it has no other uses, then we have a problem. E.g.,
   1133     //
   1134     //   int foo (const int *x) {
   1135     //     char a[*x];
   1136     //     return 0;
   1137     //   }
   1138     //
   1139     // If we assign 'a' a vreg and fast isel later on has to use the selection
   1140     // DAG isel, it will want to copy the value to the vreg. However, there are
   1141     // no uses, which goes counter to what selection DAG isel expects.
   1142     if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
   1143         (!isa<AllocaInst>(Address) ||
   1144          !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
   1145       Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
   1146                                      false);
   1147 
   1148     if (Op) {
   1149       assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
   1150              "Expected inlined-at fields to agree");
   1151       if (Op->isReg()) {
   1152         Op->setIsDebug(true);
   1153         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
   1154                 TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
   1155                 DI->getVariable(), DI->getExpression());
   1156       } else
   1157         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
   1158                 TII.get(TargetOpcode::DBG_VALUE))
   1159             .addOperand(*Op)
   1160             .addImm(0)
   1161             .addMetadata(DI->getVariable())
   1162             .addMetadata(DI->getExpression());
   1163     } else {
   1164       // We can't yet handle anything else here because it would require
   1165       // generating code, thus altering codegen because of debug info.
   1166       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
   1167     }
   1168     return true;
   1169   }
   1170   case Intrinsic::dbg_value: {
   1171     // This form of DBG_VALUE is target-independent.
   1172     const DbgValueInst *DI = cast<DbgValueInst>(II);
   1173     const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
   1174     const Value *V = DI->getValue();
   1175     assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
   1176            "Expected inlined-at fields to agree");
   1177     if (!V) {
   1178       // Currently the optimizer can produce this; insert an undef to
   1179       // help debugging.  Probably the optimizer should not do this.
   1180       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
   1181           .addReg(0U)
   1182           .addImm(DI->getOffset())
   1183           .addMetadata(DI->getVariable())
   1184           .addMetadata(DI->getExpression());
   1185     } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
   1186       if (CI->getBitWidth() > 64)
   1187         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
   1188             .addCImm(CI)
   1189             .addImm(DI->getOffset())
   1190             .addMetadata(DI->getVariable())
   1191             .addMetadata(DI->getExpression());
   1192       else
   1193         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
   1194             .addImm(CI->getZExtValue())
   1195             .addImm(DI->getOffset())
   1196             .addMetadata(DI->getVariable())
   1197             .addMetadata(DI->getExpression());
   1198     } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
   1199       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
   1200           .addFPImm(CF)
   1201           .addImm(DI->getOffset())
   1202           .addMetadata(DI->getVariable())
   1203           .addMetadata(DI->getExpression());
   1204     } else if (unsigned Reg = lookUpRegForValue(V)) {
   1205       // FIXME: This does not handle register-indirect values at offset 0.
   1206       bool IsIndirect = DI->getOffset() != 0;
   1207       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
   1208               DI->getOffset(), DI->getVariable(), DI->getExpression());
   1209     } else {
   1210       // We can't yet handle anything else here because it would require
   1211       // generating code, thus altering codegen because of debug info.
   1212       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
   1213     }
   1214     return true;
   1215   }
   1216   case Intrinsic::objectsize: {
   1217     ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
   1218     unsigned long long Res = CI->isZero() ? -1ULL : 0;
   1219     Constant *ResCI = ConstantInt::get(II->getType(), Res);
   1220     unsigned ResultReg = getRegForValue(ResCI);
   1221     if (!ResultReg)
   1222       return false;
   1223     updateValueMap(II, ResultReg);
   1224     return true;
   1225   }
   1226   case Intrinsic::expect: {
   1227     unsigned ResultReg = getRegForValue(II->getArgOperand(0));
   1228     if (!ResultReg)
   1229       return false;
   1230     updateValueMap(II, ResultReg);
   1231     return true;
   1232   }
   1233   case Intrinsic::experimental_stackmap:
   1234     return selectStackmap(II);
   1235   case Intrinsic::experimental_patchpoint_void:
   1236   case Intrinsic::experimental_patchpoint_i64:
   1237     return selectPatchpoint(II);
   1238   }
   1239 
   1240   return fastLowerIntrinsicCall(II);
   1241 }
   1242 
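         /// Select a cast with legal, simple source and destination types by
         /// emitting the corresponding ISD opcode on the operand register.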
bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand.  Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

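/// Select a bitcast.  A bitcast that doesn't change the IR type reuses the
/// operand's register; otherwise, when source and destination map to the same
/// MVT and register class, a plain COPY suffices, and failing that an
/// ISD::BITCAST is emitted.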
bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions, from the instruction after
// SavedLastLocalValue up to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

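/// Select the IR instruction I.  Target-independent selection is tried first
/// (unless SkipTargetIndependentISel is set); if that fails, any dead code it
/// emitted is removed and the target's fastSelectInstruction hook gets a
/// chance.  Returning false here defers the instruction to the SelectionDAG
/// selector.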
bool FastISel::selectInstruction(const Instruction *I) {
  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, let the target try to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I)) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // If the branch would be the only instruction in the block, emit it
    // anyway for more accurate line information; otherwise this is the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

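/// Finish lowering a conditional branch: record the edge to TrueMBB (with
/// branch probability when available) and emit the branch or fall-through to
/// FalseMBB.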
void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block appear
  // twice in the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
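  // (For f64, for instance, this XORs the value with 1 << 63, the IEEE sign
  // bit.)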
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

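/// Select an extractvalue instruction.  Aggregates are not given registers of
/// their own; the result register is computed as an offset from the
/// aggregate's base register using the linearized index of the extracted
/// member.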
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result.  But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

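/// Dispatch on the IR opcode to the appropriate select* helper.  Returning
/// false for any opcode fast-isel cannot handle halts "fast" selection for
/// the instruction and defers it to SelectionDAG.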
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() {}

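// The fastLower* hooks below are default no-ops; targets override them to
// fast-path argument, call, and intrinsic lowering.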
bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

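// Similarly, the fastEmit_* hooks default to failure (returning register 0);
// targets override them, typically via TableGen-generated FastISel code.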
unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
                                bool /*Op0IsKill*/, unsigned /*Op1*/,
                                bool /*Op1IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper around fastEmit_ri.  It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
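  // mul x, 8 -> shl x, 3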
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materialising constant expressions
    // grows down, not up, which means that any constant expressions we generate
    // later which also use 'Imm' could be after this instruction and therefore
    // after this kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}

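/// Create a fresh virtual register of the given register class to hold an
/// instruction's result.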
unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

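/// Constrain the virtual register Op to the register class required for
/// operand OpNum of II.  If the constraint cannot be applied in place, a COPY
/// into a fresh register of the required class is emitted instead.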
unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

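// The fastEmitInst_* helpers below build a MachineInstr from an opcode and
// operands.  When the instruction defines its result only in an implicit
// physical register (getNumDefs() == 0), an extra COPY moves it into a
// virtual register so callers always receive a virtual result register.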
unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input.  We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB.  As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

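/// Try to fold the single-use load LI into FoldInst, the instruction that
/// ultimately consumes its value, by asking the target to merge the load into
/// the user as a memory operand.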
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is.  If it
  // isn't one of the folded instructions, then we can't succeed here.  Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan down until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads.  Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into.  If there is no assigned vreg yet
  // then there actually was no reference to it.  Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use.  Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly.  Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

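/// Return true if Add is a same-block add of a constant whose result feeds
/// GEP, so the addition can be folded into the GEP's address computation.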
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

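/// Construct a MachineMemOperand describing the memory access performed by
/// the given load or store, including alignment, volatility, and metadata
/// flags; returns null for any other instruction.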
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  unsigned Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}

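/// If both operands of CI are the same value, fold the predicate: integer
/// compares become trivially true or false, and FP compares reduce to an
/// ordered/unordered check, since e.g. "fcmp oeq x, x" is true unless x is
/// NaN.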
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}