//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValues instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*
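
    // Taken together, these fields describe an x86 memory operand of the
    // general form Segment:[Base + Scale*Index + Disp]. For example
    // (illustrative), "movl %gs:8(%ebx,%esi,4), %eax" corresponds to
    // Segment = GS, Base_Reg = EBX, IndexReg = ESI, Scale = 4, Disp = 8.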

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
        JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    const char *getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &TM.getSubtarget<X86Subtarget>();
      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void EmitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign-extended field.
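    // For example, 0x000000007fffffff fits, while 0x0000000080000000 does
    // not: sign-extending its low 32 bits yields 0xffffffff80000000.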
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N) override;
    SDNode *SelectGather(SDNode *N, unsigned Opc);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectMOV64Imm32(SDValue N, SDValue &Imm);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      char ConstraintCode,
                                      std::vector<SDValue> &OutOps) override;

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
                                    getTargetLowering()->getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since the RIP-relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // If the block also has an access to a second TLS address, this will
      // save a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// isCalleeLoad - Return true if the call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not using alias analysis here, conservatively abort if the
  // chain writes to memory. It's not safe to move the callee (a load) across
  // a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register-indirect
        // calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to a store
    // and load through the stack.  This is a gross hack.  We would like to
    // simply mark these as being illegal, but when we do that, legalize
    // produces these when it expands calls, then expands these in the same
    // legalize pass.  We would like dag combine to be able to hack on these
    // between the call expansion and the node legalization.  As such this
    // pass basically does "really late" legalization of these inline with the
    // X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(getTargetLowering());
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM) {
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: this is always
  // true for X86-32, and true for X86-64 in -mcmodel=small mode.  In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr &&
      AM.IndexReg.getNode() == nullptr &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and mask into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
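// For example (illustrative), with C1 == 2 the pattern "(X >> 6) & 0x3fc"
// becomes "((X >> 8) & 0xff) << 2": the inner AND can be selected as an
// h-register extract and the SHL by 2 is absorbed as Scale = 4.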
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
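// For example (illustrative), "(X << 1) & 0xff00" becomes
// "(X & 0x7f80) << 1"; the SHL by 1 then folds into the addressing mode
// as Scale = 2 with Index = (X & 0x7f80).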
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // the isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
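  // For example (illustrative), Mask == 0x1f8 passes the check above:
  // MaskTZ == 3, Mask >> 3 == 0x3f has 6 trailing ones, and MaskLZ == 55,
  // so 6 + 3 + 55 == 64. A mask with a hole, such as 0x1e8, fails.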

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
    APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol addresses currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
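      // For example (illustrative), "x<<1" is first matched as
      // "leal (,%reg,2), %dst"; if the base stays unused, the post-processing
      // step rewrites it to "leal (%reg,%reg), %dst".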
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
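    // For example (illustrative), "x*5" can be selected as
    // "leal (%reg,%reg,4), %dst": Base = Index = x and Scale = 4.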
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address (leaving the
    // index field unused), use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if the
    // index register has other uses.
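    // For example (illustrative), for "(GV + x) - y" the LHS folds entirely
    // into the address (Disp = GV, Base = x), so the result can be computed
    // as "negq %y_reg" followed by "leaq GV(%x_reg,%y_reg), %dst" instead of
    // materializing GV + x and then subtracting y.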

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
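    // For example (illustrative), if X is known to have its low four bits
    // clear (say X = Y << 4), then "X | 3" equals "X + 3", so the 3 can be
    // folded into AM.Disp.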
   1231     if (CurDAG->isBaseWithConstantOffset(N)) {
   1232       X86ISelAddressMode Backup = AM;
   1233       ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
   1234 
   1235       // Start with the LHS as an addr mode.
   1236       if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
   1237           !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
   1238         return false;
   1239       AM = Backup;
   1240     }
   1241     break;
   1242 
   1243   case ISD::AND: {
   1244     // Perform some heroic transforms on an and of a constant-count shift
   1245     // with a constant to enable use of the scaled offset field.
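    // E.g. (illustrative) (and (shl x, 2), 0x3fc) can be rewritten as
    // (shl (and x, 0xff), 2), after which the outer shift folds into the
    // scale of the addressing mode.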
   1246 
   1247     // Scale must not be used already.
   1248     if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
   1249 
   1250     SDValue Shift = N.getOperand(0);
   1251     if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
   1252     SDValue X = Shift.getOperand(0);
   1253 
   1254     // We only handle up to 64-bit values here as those are what matter for
   1255     // addressing mode optimizations.
   1256     if (X.getSimpleValueType().getSizeInBits() > 64) break;
   1257 
   1258     if (!isa<ConstantSDNode>(N.getOperand(1)))
   1259       break;
   1260     uint64_t Mask = N.getConstantOperandVal(1);
   1261 
   1262     // Try to fold the mask and shift into an extract and scale.
   1263     if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
   1264       return false;
   1265 
   1266     // Try to fold the mask and shift directly into the scale.
   1267     if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
   1268       return false;
   1269 
   1270     // Try to swap the mask and shift to place shifts which can be done as
   1271     // a scale on the outside of the mask.
   1272     if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
   1273       return false;
   1274     break;
   1275   }
   1276   }
   1277 
   1278   return MatchAddressBase(N, AM);
   1279 }
   1280 
   1281 /// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
   1282 /// specified addressing mode without any further recursion.
   1283 bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
   1284   // Is the base register already occupied?
   1285   if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
   1286     // If so, check to see if the scaled index register is set.
   1287     if (!AM.IndexReg.getNode()) {
   1288       AM.IndexReg = N;
   1289       AM.Scale = 1;
   1290       return false;
   1291     }
   1292 
   1293     // Otherwise, we cannot select it.
   1294     return true;
   1295   }
   1296 
   1297   // Otherwise, use this node as the base register.
   1298   AM.BaseType = X86ISelAddressMode::RegBase;
   1299   AM.Base_Reg = N;
   1300   return false;
   1301 }
   1302 
   1303 /// SelectAddr - Returns true if it is able to pattern match an addressing
   1304 /// mode. It returns, by reference, the operands which make up the maximal
   1305 /// addressing mode it can match.
   1306 ///
   1307 /// Parent is the parent node of the addr operand that is being matched.  It
   1308 /// is always a load, store, atomic node, or null.  It is only null when
   1309 /// checking memory operands for inline asm nodes.
   1310 bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
   1311                                  SDValue &Scale, SDValue &Index,
   1312                                  SDValue &Disp, SDValue &Segment) {
   1313   X86ISelAddressMode AM;
   1314 
   1315   if (Parent &&
   1316       // These opcodes are all the nodes that have an "addr:$ptr" operand but
   1317       // are not a MemSDNode, and thus don't have proper addrspace info.
   1318       Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, FIXME
   1319       Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
   1320       Parent->getOpcode() != X86ISD::TLSCALL && // FIXME
   1321       Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
   1322       Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
   1323     unsigned AddrSpace =
   1324       cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
   1325     // AddrSpace 256 -> GS, 257 -> FS.
   1326     if (AddrSpace == 256)
   1327       AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
   1328     if (AddrSpace == 257)
   1329       AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
   1330   }
   1331 
   1332   if (MatchAddress(N, AM))
   1333     return false;
   1334 
   1335   MVT VT = N.getSimpleValueType();
   1336   if (AM.BaseType == X86ISelAddressMode::RegBase) {
   1337     if (!AM.Base_Reg.getNode())
   1338       AM.Base_Reg = CurDAG->getRegister(0, VT);
   1339   }
   1340 
   1341   if (!AM.IndexReg.getNode())
   1342     AM.IndexReg = CurDAG->getRegister(0, VT);
   1343 
   1344   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   1345   return true;
   1346 }
   1347 
   1348 /// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
   1349 /// match a load whose top elements are either undef or zeros.  The load flavor
   1350 /// is derived from the type of N, which is either v4f32 or v2f64.
   1351 ///
   1352 /// We also return:
   1353 ///   PatternNodeWithChain: this is the matched node that has a chain input
   1354 ///   and output.
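        ///
        /// For example (illustrative), (v4f32 (scalar_to_vector (load p)))
        /// can fold the load and be selected as MOVSSrm, which zeros the
        /// upper three elements as a side effect.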
   1355 bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
   1356                                           SDValue N, SDValue &Base,
   1357                                           SDValue &Scale, SDValue &Index,
   1358                                           SDValue &Disp, SDValue &Segment,
   1359                                           SDValue &PatternNodeWithChain) {
   1360   if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
   1361     PatternNodeWithChain = N.getOperand(0);
   1362     if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
   1363         PatternNodeWithChain.hasOneUse() &&
   1364         IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
   1365         IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
   1366       LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
   1367       if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
   1368         return false;
   1369       return true;
   1370     }
   1371   }
   1372 
   1373   // Also handle the case where we explicitly require zeros in the top
   1374   // elements.  This is a vector shuffle from the zero vector.
   1375   if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
   1376       // Check to see if the top elements are all zeros (or bitcast of zeros).
   1377       N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
   1378       N.getOperand(0).getNode()->hasOneUse() &&
   1379       ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
   1380       N.getOperand(0).getOperand(0).hasOneUse() &&
   1381       IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
   1382       IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
   1383     // Okay, this is a zero-extending load.  Fold it.
   1384     LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
   1385     if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
   1386       return false;
   1387     PatternNodeWithChain = SDValue(LD, 0);
   1388     return true;
   1389   }
   1390   return false;
   1391 }
   1392 
   1393 
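        /// SelectMOV64Imm32 - Select a 64-bit immediate that can be
        /// materialized with a zero-extending 32-bit 'movl': either a
        /// constant whose value fits in 32 bits, or (under the small code
        /// model) a wrapped symbolic address.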
   1394 bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
   1395   if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
   1396     uint64_t ImmVal = CN->getZExtValue();
   1397     if ((uint32_t)ImmVal != (uint64_t)ImmVal)
   1398       return false;
   1399 
   1400     Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64);
   1401     return true;
   1402   }
   1403 
   1404   // In static codegen with small code model, we can get the address of a label
   1405   // into a register with 'movl'. TableGen has already made sure we're looking
   1406   // at a label of some kind.
   1407   assert(N->getOpcode() == X86ISD::Wrapper &&
   1408          "Unexpected node type for MOV32ri64");
   1409   N = N.getOperand(0);
   1410 
   1411   if (N->getOpcode() != ISD::TargetConstantPool &&
   1412       N->getOpcode() != ISD::TargetJumpTable &&
   1413       N->getOpcode() != ISD::TargetGlobalAddress &&
   1414       N->getOpcode() != ISD::TargetExternalSymbol &&
   1415       N->getOpcode() != ISD::TargetBlockAddress)
   1416     return false;
   1417 
   1418   Imm = N;
   1419   return TM.getCodeModel() == CodeModel::Small;
   1420 }
   1421 
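        /// SelectLEA64_32Addr - Like SelectLEAAddr, but for a 64-bit LEA whose
        /// result is used as a 32-bit value (as in the x32 ABI). Any 32-bit
        /// base or index register is widened to 64 bits with SUBREG_TO_REG so
        /// that it is legal inside the 64-bit address.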
   1422 bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
   1423                                          SDValue &Scale, SDValue &Index,
   1424                                          SDValue &Disp, SDValue &Segment) {
   1425   if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment))
   1426     return false;
   1427 
   1428   SDLoc DL(N);
   1429   RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
   1430   if (RN && RN->getReg() == 0)
   1431     Base = CurDAG->getRegister(0, MVT::i64);
   1432   else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(N)) {
   1433     // Base could already be %rip, particularly in the x32 ABI.
   1434     Base = SDValue(CurDAG->getMachineNode(
   1435                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
   1436                        CurDAG->getTargetConstant(0, MVT::i64),
   1437                        Base,
   1438                        CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
   1439                    0);
   1440   }
   1441 
   1442   RN = dyn_cast<RegisterSDNode>(Index);
   1443   if (RN && RN->getReg() == 0)
   1444     Index = CurDAG->getRegister(0, MVT::i64);
   1445   else {
   1446     assert(Index.getValueType() == MVT::i32 &&
   1447            "Expect to be extending 32-bit registers for use in LEA");
   1448     Index = SDValue(CurDAG->getMachineNode(
   1449                         TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
   1450                         CurDAG->getTargetConstant(0, MVT::i64),
   1451                         Index,
   1452                         CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
   1453                     0);
   1454   }
   1455 
   1456   return true;
   1457 }
   1458 
   1459 /// SelectLEAAddr - Determines whether the maximal addressing mode it matches
   1460 /// can be cost-effectively emitted as an LEA instruction.
   1461 bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
   1462                                     SDValue &Base, SDValue &Scale,
   1463                                     SDValue &Index, SDValue &Disp,
   1464                                     SDValue &Segment) {
   1465   X86ISelAddressMode AM;
   1466 
   1467   // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
   1468   // segments.
   1469   SDValue Copy = AM.Segment;
   1470   SDValue T = CurDAG->getRegister(0, MVT::i32);
   1471   AM.Segment = T;
   1472   if (MatchAddress(N, AM))
   1473     return false;
   1474   assert(T == AM.Segment);
   1475   AM.Segment = Copy;
   1476 
   1477   MVT VT = N.getSimpleValueType();
   1478   unsigned Complexity = 0;
   1479   if (AM.BaseType == X86ISelAddressMode::RegBase) {
   1480     if (AM.Base_Reg.getNode())
   1481       Complexity = 1;
   1482     else
   1483       AM.Base_Reg = CurDAG->getRegister(0, VT);
   1484   } else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
   1485     Complexity = 4;
   1486 
   1487   if (AM.IndexReg.getNode())
   1488     Complexity++;
   1489   else
   1490     AM.IndexReg = CurDAG->getRegister(0, VT);
   1491 
   1492   // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg or a
   1493   // simple shift.
   1494   if (AM.Scale > 1)
   1495     Complexity++;
   1496 
   1497   // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
   1498   // into an LEA. This was determined by some experimentation but is by no
   1499   // means optimal (especially for code size considerations). LEA is nice
   1500   // because of its three-address nature. Tweak the cost function again when
   1501   // we can run convertToThreeAddress() at register allocation time.
   1502   if (AM.hasSymbolicDisplacement()) {
   1503     // For X86-64, we should always use LEA to materialize RIP-relative
   1504     // addresses.
   1505     if (Subtarget->is64Bit())
   1506       Complexity = 4;
   1507     else
   1508       Complexity += 2;
   1509   }
   1510 
   1511   if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
   1512     Complexity++;
   1513 
   1514   // If it isn't worth using an LEA, reject it.
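          // A complexity of 2 or less roughly corresponds to forms such as
          // "base + disp" that a plain two-operand ADD handles just as well.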
   1515   if (Complexity <= 2)
   1516     return false;
   1517 
   1518   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   1519   return true;
   1520 }
   1521 
   1522 /// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
   1523 bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
   1524                                         SDValue &Scale, SDValue &Index,
   1525                                         SDValue &Disp, SDValue &Segment) {
   1526   assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
   1527   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
   1528 
   1529   X86ISelAddressMode AM;
   1530   AM.GV = GA->getGlobal();
   1531   AM.Disp += GA->getOffset();
   1532   AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
   1533   AM.SymbolFlags = GA->getTargetFlags();
   1534 
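          // In 32-bit mode the TLSADDR lowering uses EBX (the GOT base) as
          // the index register, e.g. "leal sym@TLSGD(,%ebx,1), %eax"
          // (illustrative).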
   1535   if (N.getValueType() == MVT::i32) {
   1536     AM.Scale = 1;
   1537     AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
   1538   } else {
   1539     AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
   1540   }
   1541 
   1542   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   1543   return true;
   1544 }
   1545 
   1546 
   1547 bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
   1548                                   SDValue &Base, SDValue &Scale,
   1549                                   SDValue &Index, SDValue &Disp,
   1550                                   SDValue &Segment) {
   1551   if (!ISD::isNON_EXTLoad(N.getNode()) ||
   1552       !IsProfitableToFold(N, P, P) ||
   1553       !IsLegalToFold(N, P, P, OptLevel))
   1554     return false;
   1555 
   1556   return SelectAddr(N.getNode(),
   1557                     N.getOperand(1), Base, Scale, Index, Disp, Segment);
   1558 }
   1559 
   1560 /// getGlobalBaseReg - Return an SDNode that returns the value of
   1561 /// the global base register. Output instructions required to
   1562 /// initialize the global base register, if necessary.
   1563 ///
   1564 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
   1565   unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
   1566   return CurDAG->getRegister(GlobalBaseReg,
   1567                              getTargetLowering()->getPointerTy()).getNode();
   1568 }
   1569 
   1570 SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
   1571   SDValue Chain = Node->getOperand(0);
   1572   SDValue In1 = Node->getOperand(1);
   1573   SDValue In2L = Node->getOperand(2);
   1574   SDValue In2H = Node->getOperand(3);
   1575 
   1576   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   1577   if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
   1578     return nullptr;
   1579   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
   1580   MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
   1581   const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
   1582   SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node),
   1583                                            MVT::i32, MVT::i32, MVT::Other, Ops);
   1584   cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
   1585   return ResNode;
   1586 }
   1587 
   1588 /// Atomic opcode table
   1589 ///
   1590 enum AtomicOpc {
   1591   ADD,
   1592   SUB,
   1593   INC,
   1594   DEC,
   1595   OR,
   1596   AND,
   1597   XOR,
   1598   AtomicOpcEnd
   1599 };
   1600 
   1601 enum AtomicSz {
   1602   ConstantI8,
   1603   I8,
   1604   SextConstantI16,
   1605   ConstantI16,
   1606   I16,
   1607   SextConstantI32,
   1608   ConstantI32,
   1609   I32,
   1610   SextConstantI64,
   1611   ConstantI64,
   1612   I64,
   1613   AtomicSzEnd
   1614 };
   1615 
   1616 static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
   1617   {
   1618     X86::LOCK_ADD8mi,
   1619     X86::LOCK_ADD8mr,
   1620     X86::LOCK_ADD16mi8,
   1621     X86::LOCK_ADD16mi,
   1622     X86::LOCK_ADD16mr,
   1623     X86::LOCK_ADD32mi8,
   1624     X86::LOCK_ADD32mi,
   1625     X86::LOCK_ADD32mr,
   1626     X86::LOCK_ADD64mi8,
   1627     X86::LOCK_ADD64mi32,
   1628     X86::LOCK_ADD64mr,
   1629   },
   1630   {
   1631     X86::LOCK_SUB8mi,
   1632     X86::LOCK_SUB8mr,
   1633     X86::LOCK_SUB16mi8,
   1634     X86::LOCK_SUB16mi,
   1635     X86::LOCK_SUB16mr,
   1636     X86::LOCK_SUB32mi8,
   1637     X86::LOCK_SUB32mi,
   1638     X86::LOCK_SUB32mr,
   1639     X86::LOCK_SUB64mi8,
   1640     X86::LOCK_SUB64mi32,
   1641     X86::LOCK_SUB64mr,
   1642   },
   1643   {
   1644     0,
   1645     X86::LOCK_INC8m,
   1646     0,
   1647     0,
   1648     X86::LOCK_INC16m,
   1649     0,
   1650     0,
   1651     X86::LOCK_INC32m,
   1652     0,
   1653     0,
   1654     X86::LOCK_INC64m,
   1655   },
   1656   {
   1657     0,
   1658     X86::LOCK_DEC8m,
   1659     0,
   1660     0,
   1661     X86::LOCK_DEC16m,
   1662     0,
   1663     0,
   1664     X86::LOCK_DEC32m,
   1665     0,
   1666     0,
   1667     X86::LOCK_DEC64m,
   1668   },
   1669   {
   1670     X86::LOCK_OR8mi,
   1671     X86::LOCK_OR8mr,
   1672     X86::LOCK_OR16mi8,
   1673     X86::LOCK_OR16mi,
   1674     X86::LOCK_OR16mr,
   1675     X86::LOCK_OR32mi8,
   1676     X86::LOCK_OR32mi,
   1677     X86::LOCK_OR32mr,
   1678     X86::LOCK_OR64mi8,
   1679     X86::LOCK_OR64mi32,
   1680     X86::LOCK_OR64mr,
   1681   },
   1682   {
   1683     X86::LOCK_AND8mi,
   1684     X86::LOCK_AND8mr,
   1685     X86::LOCK_AND16mi8,
   1686     X86::LOCK_AND16mi,
   1687     X86::LOCK_AND16mr,
   1688     X86::LOCK_AND32mi8,
   1689     X86::LOCK_AND32mi,
   1690     X86::LOCK_AND32mr,
   1691     X86::LOCK_AND64mi8,
   1692     X86::LOCK_AND64mi32,
   1693     X86::LOCK_AND64mr,
   1694   },
   1695   {
   1696     X86::LOCK_XOR8mi,
   1697     X86::LOCK_XOR8mr,
   1698     X86::LOCK_XOR16mi8,
   1699     X86::LOCK_XOR16mi,
   1700     X86::LOCK_XOR16mr,
   1701     X86::LOCK_XOR32mi8,
   1702     X86::LOCK_XOR32mi,
   1703     X86::LOCK_XOR32mr,
   1704     X86::LOCK_XOR64mi8,
   1705     X86::LOCK_XOR64mi32,
   1706     X86::LOCK_XOR64mr,
   1707   }
   1708 };
   1709 
   1710 // Return the target constant operand for atomic-load-op and do simple
   1711 // translations, such as from atomic-load-add to lock-sub. The return value is
   1712 // one of the following three cases:
   1713 // + target-constant, the operand can be supported as a target constant.
   1714 // + empty, the operand is no longer needed with the new op selected.
   1715 // + non-empty, otherwise.
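        //
        // For example: an add of 1 or -1 is translated to INC/DEC and the
        // constant operand is dropped (empty); an add of -5 becomes a lock-sub
        // of 5 (target constant); a non-constant value is returned unchanged.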
   1716 static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
   1717                                                 SDLoc dl,
   1718                                                 enum AtomicOpc &Op, MVT NVT,
   1719                                                 SDValue Val) {
   1720   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
   1721     int64_t CNVal = CN->getSExtValue();
   1722     // Quit if not 32-bit imm.
   1723     if ((int32_t)CNVal != CNVal)
   1724       return Val;
   1725     // For atomic-load-add, we could do some optimizations.
   1726     if (Op == ADD) {
   1727       // Translate to INC/DEC if ADD by 1 or -1.
   1728       if ((CNVal == 1) || (CNVal == -1)) {
   1729         Op = (CNVal == 1) ? INC : DEC;
   1730         // No more constant operand after being translated into INC/DEC.
   1731         return SDValue();
   1732       }
   1733       // Translate to SUB if ADD by negative value.
   1734       if (CNVal < 0) {
   1735         Op = SUB;
   1736         CNVal = -CNVal;
   1737       }
   1738     }
   1739     return CurDAG->getTargetConstant(CNVal, NVT);
   1740   }
   1741 
   1742   // If the value operand has only one use, try to optimize it.
   1743   if (Op == ADD && Val.hasOneUse()) {
   1744     // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
   1745     if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
   1746       Op = SUB;
   1747       return Val.getOperand(1);
   1748     }
   1749     // A special case for i16, which needs truncating as, in most cases, it's
   1750     // promoted to i32. We will translate
   1751     // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x))
   1752     if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
   1753         Val.getOperand(0).getOpcode() == ISD::SUB &&
   1754         X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
   1755       Op = SUB;
   1756       Val = Val.getOperand(0);
   1757       return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
   1758                                             Val.getOperand(1));
   1759     }
   1760   }
   1761 
   1762   return Val;
   1763 }
   1764 
   1765 SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
   1766   if (Node->hasAnyUseOfValue(0))
   1767     return nullptr;
   1768 
   1769   SDLoc dl(Node);
   1770 
   1771   // Optimize common patterns for __sync_or_and_fetch and similar arith
   1772   // operations where the result is not used. This allows us to use the "lock"
   1773   // version of the arithmetic instruction.
   1774   SDValue Chain = Node->getOperand(0);
   1775   SDValue Ptr = Node->getOperand(1);
   1776   SDValue Val = Node->getOperand(2);
   1777   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   1778   if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
   1779     return nullptr;
   1780 
   1781   // Determine which index into the table to use.
   1782   enum AtomicOpc Op;
   1783   switch (Node->getOpcode()) {
   1784     default:
   1785       return nullptr;
   1786     case ISD::ATOMIC_LOAD_OR:
   1787       Op = OR;
   1788       break;
   1789     case ISD::ATOMIC_LOAD_AND:
   1790       Op = AND;
   1791       break;
   1792     case ISD::ATOMIC_LOAD_XOR:
   1793       Op = XOR;
   1794       break;
   1795     case ISD::ATOMIC_LOAD_ADD:
   1796       Op = ADD;
   1797       break;
   1798   }
   1799 
   1800   Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val);
   1801   bool isUnOp = !Val.getNode();
   1802   bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
   1803 
   1804   unsigned Opc = 0;
   1805   switch (NVT.SimpleTy) {
   1806     default: return nullptr;
   1807     case MVT::i8:
   1808       if (isCN)
   1809         Opc = AtomicOpcTbl[Op][ConstantI8];
   1810       else
   1811         Opc = AtomicOpcTbl[Op][I8];
   1812       break;
   1813     case MVT::i16:
   1814       if (isCN) {
   1815         if (immSext8(Val.getNode()))
   1816           Opc = AtomicOpcTbl[Op][SextConstantI16];
   1817         else
   1818           Opc = AtomicOpcTbl[Op][ConstantI16];
   1819       } else
   1820         Opc = AtomicOpcTbl[Op][I16];
   1821       break;
   1822     case MVT::i32:
   1823       if (isCN) {
   1824         if (immSext8(Val.getNode()))
   1825           Opc = AtomicOpcTbl[Op][SextConstantI32];
   1826         else
   1827           Opc = AtomicOpcTbl[Op][ConstantI32];
   1828       } else
   1829         Opc = AtomicOpcTbl[Op][I32];
   1830       break;
   1831     case MVT::i64:
   1832       Opc = AtomicOpcTbl[Op][I64];
   1833       if (isCN) {
   1834         if (immSext8(Val.getNode()))
   1835           Opc = AtomicOpcTbl[Op][SextConstantI64];
   1836         else if (i64immSExt32(Val.getNode()))
   1837           Opc = AtomicOpcTbl[Op][ConstantI64];
   1838       }
   1839       break;
   1840   }
   1841 
   1842   assert(Opc != 0 && "Invalid arith lock transform!");
   1843 
   1844   SDValue Ret;
   1845   SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
   1846                                                  dl, NVT), 0);
   1847   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
   1848   MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
   1849   if (isUnOp) {
   1850     SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
   1851     Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
   1852   } else {
   1853     SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
   1854     Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
   1855   }
   1856   cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
   1857   SDValue RetVals[] = { Undef, Ret };
   1858   return CurDAG->getMergeValues(RetVals, dl).getNode();
   1859 }
   1860 
   1861 /// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
   1862 /// any uses which require the SF or OF bits to be accurate.
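        /// Narrowing a compare (e.g. testl -> testb) changes which bit ends up
        /// in SF, so the transform is only safe when no user reads SF or OF.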
   1863 static bool HasNoSignedComparisonUses(SDNode *N) {
   1864   // Examine each user of the node.
   1865   for (SDNode::use_iterator UI = N->use_begin(),
   1866          UE = N->use_end(); UI != UE; ++UI) {
   1867     // Only examine CopyToReg uses.
   1868     if (UI->getOpcode() != ISD::CopyToReg)
   1869       return false;
   1870     // Only examine CopyToReg uses that copy to EFLAGS.
   1871     if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
   1872           X86::EFLAGS)
   1873       return false;
   1874     // Examine each user of the CopyToReg use.
   1875     for (SDNode::use_iterator FlagUI = UI->use_begin(),
   1876            FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
   1877       // Only examine the Flag result.
   1878       if (FlagUI.getUse().getResNo() != 1) continue;
   1879       // Anything unusual: assume conservatively.
   1880       if (!FlagUI->isMachineOpcode()) return false;
   1881       // Examine the opcode of the user.
   1882       switch (FlagUI->getMachineOpcode()) {
   1883       // These comparisons don't treat the most significant bit specially.
   1884       case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
   1885       case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
   1886       case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
   1887       case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
   1888       case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
   1889       case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
   1890       case X86::CMOVA16rr: case X86::CMOVA16rm:
   1891       case X86::CMOVA32rr: case X86::CMOVA32rm:
   1892       case X86::CMOVA64rr: case X86::CMOVA64rm:
   1893       case X86::CMOVAE16rr: case X86::CMOVAE16rm:
   1894       case X86::CMOVAE32rr: case X86::CMOVAE32rm:
   1895       case X86::CMOVAE64rr: case X86::CMOVAE64rm:
   1896       case X86::CMOVB16rr: case X86::CMOVB16rm:
   1897       case X86::CMOVB32rr: case X86::CMOVB32rm:
   1898       case X86::CMOVB64rr: case X86::CMOVB64rm:
   1899       case X86::CMOVBE16rr: case X86::CMOVBE16rm:
   1900       case X86::CMOVBE32rr: case X86::CMOVBE32rm:
   1901       case X86::CMOVBE64rr: case X86::CMOVBE64rm:
   1902       case X86::CMOVE16rr: case X86::CMOVE16rm:
   1903       case X86::CMOVE32rr: case X86::CMOVE32rm:
   1904       case X86::CMOVE64rr: case X86::CMOVE64rm:
   1905       case X86::CMOVNE16rr: case X86::CMOVNE16rm:
   1906       case X86::CMOVNE32rr: case X86::CMOVNE32rm:
   1907       case X86::CMOVNE64rr: case X86::CMOVNE64rm:
   1908       case X86::CMOVNP16rr: case X86::CMOVNP16rm:
   1909       case X86::CMOVNP32rr: case X86::CMOVNP32rm:
   1910       case X86::CMOVNP64rr: case X86::CMOVNP64rm:
   1911       case X86::CMOVP16rr: case X86::CMOVP16rm:
   1912       case X86::CMOVP32rr: case X86::CMOVP32rm:
   1913       case X86::CMOVP64rr: case X86::CMOVP64rm:
   1914         continue;
   1915       // Anything else: assume conservatively.
   1916       default: return false;
   1917       }
   1918     }
   1919   }
   1920   return true;
   1921 }
   1922 
   1923 /// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
   1924 /// is suitable for the {load; increment or decrement; store}
   1925 /// read-modify-write transformation.
   1926 static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
   1927                                 SDValue StoredVal, SelectionDAG *CurDAG,
   1928                                 LoadSDNode* &LoadNode, SDValue &InputChain) {
   1929 
   1930   // Is the stored value the result of a DEC or INC?
   1931   if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
   1932 
   1933   // Is the stored value result 0 of the INC/DEC node?
   1934   if (StoredVal.getResNo() != 0) return false;
   1935 
   1936   // Is the INC/DEC result used by anything other than the store?
   1937   if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
   1938 
   1939   // Is the store non-extending and non-indexed?
   1940   if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
   1941     return false;
   1942 
   1943   SDValue Load = StoredVal->getOperand(0);
   1944   // Is the stored value a non-extending and non-indexed load?
   1945   if (!ISD::isNormalLoad(Load.getNode())) return false;
   1946 
   1947   // Return LoadNode by reference.
   1948   LoadNode = cast<LoadSDNode>(Load);
   1949   // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
   1950   EVT LdVT = LoadNode->getMemoryVT();
   1951   if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
   1952       LdVT != MVT::i8)
   1953     return false;
   1954 
   1955   // Is the store the only use of the loaded value?
   1956   if (!Load.hasOneUse())
   1957     return false;
   1958 
   1959   // Is the address of the store the same as the load?
   1960   if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
   1961       LoadNode->getOffset() != StoreNode->getOffset())
   1962     return false;
   1963 
   1964   // Check if the chain is produced by the load or is a TokenFactor with
   1965   // the load output chain as an operand. Return InputChain by reference.
   1966   SDValue Chain = StoreNode->getChain();
   1967 
   1968   bool ChainCheck = false;
   1969   if (Chain == Load.getValue(1)) {
   1970     ChainCheck = true;
   1971     InputChain = LoadNode->getChain();
   1972   } else if (Chain.getOpcode() == ISD::TokenFactor) {
   1973     SmallVector<SDValue, 4> ChainOps;
   1974     for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
   1975       SDValue Op = Chain.getOperand(i);
   1976       if (Op == Load.getValue(1)) {
   1977         ChainCheck = true;
   1978         continue;
   1979       }
   1980 
   1981       // Make sure using Op as part of the chain would not cause a cycle here.
   1982       // In theory, we could check whether the chain node is a predecessor of
   1983       // the load. But that can be very expensive. Instead visit the uses and
   1984       // make sure they all have smaller node id than the load.
   1985       int LoadId = LoadNode->getNodeId();
   1986       for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
   1987              UE = Op.getNode()->use_end(); UI != UE; ++UI) {
   1988         if (UI.getUse().getResNo() != 0)
   1989           continue;
   1990         if (UI->getNodeId() > LoadId)
   1991           return false;
   1992       }
   1993 
   1994       ChainOps.push_back(Op);
   1995     }
   1996 
   1997     if (ChainCheck)
   1998       // Make a new TokenFactor with all the other input chains except
   1999       // for the load.
   2000       InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
   2001                                    MVT::Other, ChainOps);
   2002   }
   2003   if (!ChainCheck)
   2004     return false;
   2005 
   2006   return true;
   2007 }
   2008 
   2009 /// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
   2010 /// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
   2011 static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
   2012   if (Opc == X86ISD::DEC) {
   2013     if (LdVT == MVT::i64) return X86::DEC64m;
   2014     if (LdVT == MVT::i32) return X86::DEC32m;
   2015     if (LdVT == MVT::i16) return X86::DEC16m;
   2016     if (LdVT == MVT::i8)  return X86::DEC8m;
   2017   } else {
   2018     assert(Opc == X86ISD::INC && "unrecognized opcode");
   2019     if (LdVT == MVT::i64) return X86::INC64m;
   2020     if (LdVT == MVT::i32) return X86::INC32m;
   2021     if (LdVT == MVT::i16) return X86::INC16m;
   2022     if (LdVT == MVT::i8)  return X86::INC8m;
   2023   }
   2024   llvm_unreachable("unrecognized size for LdVT");
   2025 }
   2026 
   2027 /// SelectGather - Customized ISel for GATHER operations.
   2028 ///
   2029 SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
   2030   // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
   2031   SDValue Chain = Node->getOperand(0);
   2032   SDValue VSrc = Node->getOperand(2);
   2033   SDValue Base = Node->getOperand(3);
   2034   SDValue VIdx = Node->getOperand(4);
   2035   SDValue VMask = Node->getOperand(5);
   2036   ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
   2037   if (!Scale)
   2038     return nullptr;
   2039 
   2040   SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
   2041                                    MVT::Other);
   2042 
   2043   // Memory Operands: Base, Scale, Index, Disp, Segment
   2044   SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
   2045   SDValue Segment = CurDAG->getRegister(0, MVT::i32);
   2046   const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
   2047                           Disp, Segment, VMask, Chain};
   2048   SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops);
   2049   // Node has 2 outputs: VDst and MVT::Other.
   2050   // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
   2051   // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
   2052   // of ResNode.
   2053   ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
   2054   ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
   2055   return ResNode;
   2056 }
   2057 
   2058 SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
   2059   MVT NVT = Node->getSimpleValueType(0);
   2060   unsigned Opc, MOpc;
   2061   unsigned Opcode = Node->getOpcode();
   2062   SDLoc dl(Node);
   2063 
   2064   DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
   2065 
   2066   if (Node->isMachineOpcode()) {
   2067     DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
   2068     Node->setNodeId(-1);
   2069     return nullptr;   // Already selected.
   2070   }
   2071 
   2072   switch (Opcode) {
   2073   default: break;
   2074   case ISD::INTRINSIC_W_CHAIN: {
   2075     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
   2076     switch (IntNo) {
   2077     default: break;
   2078     case Intrinsic::x86_avx2_gather_d_pd:
   2079     case Intrinsic::x86_avx2_gather_d_pd_256:
   2080     case Intrinsic::x86_avx2_gather_q_pd:
   2081     case Intrinsic::x86_avx2_gather_q_pd_256:
   2082     case Intrinsic::x86_avx2_gather_d_ps:
   2083     case Intrinsic::x86_avx2_gather_d_ps_256:
   2084     case Intrinsic::x86_avx2_gather_q_ps:
   2085     case Intrinsic::x86_avx2_gather_q_ps_256:
   2086     case Intrinsic::x86_avx2_gather_d_q:
   2087     case Intrinsic::x86_avx2_gather_d_q_256:
   2088     case Intrinsic::x86_avx2_gather_q_q:
   2089     case Intrinsic::x86_avx2_gather_q_q_256:
   2090     case Intrinsic::x86_avx2_gather_d_d:
   2091     case Intrinsic::x86_avx2_gather_d_d_256:
   2092     case Intrinsic::x86_avx2_gather_q_d:
   2093     case Intrinsic::x86_avx2_gather_q_d_256: {
   2094       if (!Subtarget->hasAVX2())
   2095         break;
   2096       unsigned Opc;
   2097       switch (IntNo) {
   2098       default: llvm_unreachable("Impossible intrinsic");
   2099       case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
   2100       case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
   2101       case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
   2102       case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
   2103       case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
   2104       case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
   2105       case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
   2106       case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
   2107       case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
   2108       case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
   2109       case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
   2110       case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
   2111       case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
   2112       case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
   2113       case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
   2114       case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
   2115       }
   2116       SDNode *RetVal = SelectGather(Node, Opc);
   2117       if (RetVal)
   2118         // We already called ReplaceUses inside SelectGather.
   2119         return nullptr;
   2120       break;
   2121     }
   2122     }
   2123     break;
   2124   }
   2125   case X86ISD::GlobalBaseReg:
   2126     return getGlobalBaseReg();
   2127 
   2128 
   2129   case ISD::ATOMIC_LOAD_XOR:
   2130   case ISD::ATOMIC_LOAD_AND:
   2131   case ISD::ATOMIC_LOAD_OR:
   2132   case ISD::ATOMIC_LOAD_ADD: {
   2133     SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
   2134     if (RetVal)
   2135       return RetVal;
   2136     break;
   2137   }
   2138   case ISD::AND:
   2139   case ISD::OR:
   2140   case ISD::XOR: {
   2141     // For operations of the form (x << C1) op C2, check if we can use a smaller
   2142     // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
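            // E.g. "(x << 8) | 0x7f00" needs a 32-bit immediate, while the
            // equivalent "(x | 0x7f) << 8" only needs an 8-bit one.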
   2143     SDValue N0 = Node->getOperand(0);
   2144     SDValue N1 = Node->getOperand(1);
   2145 
   2146     if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
   2147       break;
   2148 
   2149     // i8 is unshrinkable; i16 should be promoted to i32.
   2150     if (NVT != MVT::i32 && NVT != MVT::i64)
   2151       break;
   2152 
   2153     ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
   2154     ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
   2155     if (!Cst || !ShlCst)
   2156       break;
   2157 
   2158     int64_t Val = Cst->getSExtValue();
   2159     uint64_t ShlVal = ShlCst->getZExtValue();
   2160 
   2161     // Make sure that we don't change the operation by removing bits.
   2162     // This only matters for OR and XOR; AND is unaffected.
   2163     uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
   2164     if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
   2165       break;
   2166 
   2167     unsigned ShlOp, Op;
   2168     MVT CstVT = NVT;
   2169 
   2170     // Check the minimum bitwidth for the new constant.
   2171     // TODO: AND32ri is the same as AND64ri32 with zext imm.
   2172     // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
   2173     // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
   2174     if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
   2175       CstVT = MVT::i8;
   2176     else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
   2177       CstVT = MVT::i32;
   2178 
   2179     // Bail if there is no smaller encoding.
   2180     if (NVT == CstVT)
   2181       break;
   2182 
   2183     switch (NVT.SimpleTy) {
   2184     default: llvm_unreachable("Unsupported VT!");
   2185     case MVT::i32:
   2186       assert(CstVT == MVT::i8);
   2187       ShlOp = X86::SHL32ri;
   2188 
   2189       switch (Opcode) {
   2190       default: llvm_unreachable("Impossible opcode");
   2191       case ISD::AND: Op = X86::AND32ri8; break;
   2192       case ISD::OR:  Op =  X86::OR32ri8; break;
   2193       case ISD::XOR: Op = X86::XOR32ri8; break;
   2194       }
   2195       break;
   2196     case MVT::i64:
   2197       assert(CstVT == MVT::i8 || CstVT == MVT::i32);
   2198       ShlOp = X86::SHL64ri;
   2199 
   2200       switch (Opcode) {
   2201       default: llvm_unreachable("Impossible opcode");
   2202       case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
   2203       case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
   2204       case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
   2205       }
   2206       break;
   2207     }
   2208 
   2209     // Emit the smaller op and the shift.
   2210     SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
   2211     SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
   2212     return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
   2213                                 getI8Imm(ShlVal));
   2214   }
   2215   case X86ISD::UMUL: {
   2216     SDValue N0 = Node->getOperand(0);
   2217     SDValue N1 = Node->getOperand(1);
   2218 
   2219     unsigned LoReg;
   2220     switch (NVT.SimpleTy) {
   2221     default: llvm_unreachable("Unsupported VT!");
   2222     case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r; break;
   2223     case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
   2224     case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
   2225     case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
   2226     }
   2227 
   2228     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
   2229                                           N0, SDValue()).getValue(1);
   2230 
   2231     SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
   2232     SDValue Ops[] = {N1, InFlag};
   2233     SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
   2234 
   2235     ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
   2236     ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
   2237     ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
   2238     return nullptr;
   2239   }
   2240 
   2241   case ISD::SMUL_LOHI:
   2242   case ISD::UMUL_LOHI: {
   2243     SDValue N0 = Node->getOperand(0);
   2244     SDValue N1 = Node->getOperand(1);
   2245 
   2246     bool isSigned = Opcode == ISD::SMUL_LOHI;
   2247     bool hasBMI2 = Subtarget->hasBMI2();
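            // MULX (BMI2) reads its implicit source from (E|R)DX, writes both
            // product halves to explicit registers, and leaves EFLAGS alone,
            // so prefer it for unsigned multiplies when available.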
   2248     if (!isSigned) {
   2249       switch (NVT.SimpleTy) {
   2250       default: llvm_unreachable("Unsupported VT!");
   2251       case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
   2252       case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
   2253       case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
   2254                      MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
   2255       case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
   2256                      MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
   2257       }
   2258     } else {
   2259       switch (NVT.SimpleTy) {
   2260       default: llvm_unreachable("Unsupported VT!");
   2261       case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
   2262       case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
   2263       case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
   2264       case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
   2265       }
   2266     }
   2267 
   2268     unsigned SrcReg, LoReg, HiReg;
   2269     switch (Opc) {
   2270     default: llvm_unreachable("Unknown MUL opcode!");
   2271     case X86::IMUL8r:
   2272     case X86::MUL8r:
   2273       SrcReg = LoReg = X86::AL; HiReg = X86::AH;
   2274       break;
   2275     case X86::IMUL16r:
   2276     case X86::MUL16r:
   2277       SrcReg = LoReg = X86::AX; HiReg = X86::DX;
   2278       break;
   2279     case X86::IMUL32r:
   2280     case X86::MUL32r:
   2281       SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
   2282       break;
   2283     case X86::IMUL64r:
   2284     case X86::MUL64r:
   2285       SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
   2286       break;
   2287     case X86::MULX32rr:
   2288       SrcReg = X86::EDX; LoReg = HiReg = 0;
   2289       break;
   2290     case X86::MULX64rr:
   2291       SrcReg = X86::RDX; LoReg = HiReg = 0;
   2292       break;
   2293     }
   2294 
   2295     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   2296     bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
   2297     // Multiply is commutative.
   2298     if (!foldedLoad) {
   2299       foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
   2300       if (foldedLoad)
   2301         std::swap(N0, N1);
   2302     }
   2303 
   2304     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
   2305                                           N0, SDValue()).getValue(1);
   2306     SDValue ResHi, ResLo;
   2307 
   2308     if (foldedLoad) {
   2309       SDValue Chain;
   2310       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
   2311                         InFlag };
   2312       if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
   2313         SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
   2314         SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
   2315         ResHi = SDValue(CNode, 0);
   2316         ResLo = SDValue(CNode, 1);
   2317         Chain = SDValue(CNode, 2);
   2318         InFlag = SDValue(CNode, 3);
   2319       } else {
   2320         SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
   2321         SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
   2322         Chain = SDValue(CNode, 0);
   2323         InFlag = SDValue(CNode, 1);
   2324       }
   2325 
   2326       // Update the chain.
   2327       ReplaceUses(N1.getValue(1), Chain);
   2328     } else {
   2329       SDValue Ops[] = { N1, InFlag };
   2330       if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
   2331         SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
   2332         SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
   2333         ResHi = SDValue(CNode, 0);
   2334         ResLo = SDValue(CNode, 1);
   2335         InFlag = SDValue(CNode, 2);
   2336       } else {
   2337         SDVTList VTs = CurDAG->getVTList(MVT::Glue);
   2338         SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
   2339         InFlag = SDValue(CNode, 0);
   2340       }
   2341     }
   2342 
   2343     // Prevent use of AH in a REX instruction by referencing AX instead.
   2344     if (HiReg == X86::AH && Subtarget->is64Bit() &&
   2345         !SDValue(Node, 1).use_empty()) {
   2346       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
   2347                                               X86::AX, MVT::i16, InFlag);
   2348       InFlag = Result.getValue(2);
   2349       // Get the low part if needed. Don't use getCopyFromReg for aliasing
   2350       // registers.
   2351       if (!SDValue(Node, 0).use_empty())
   2352         ReplaceUses(SDValue(Node, 0),
   2353           CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
   2354 
   2355       // Shift AX down 8 bits.
   2356       Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
   2357                                               Result,
   2358                                      CurDAG->getTargetConstant(8, MVT::i8)), 0);
   2359       // Then truncate it down to i8.
   2360       ReplaceUses(SDValue(Node, 1),
   2361         CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
   2362     }
   2363     // Copy the low half of the result, if it is needed.
   2364     if (!SDValue(Node, 0).use_empty()) {
   2365       if (!ResLo.getNode()) {
   2366         assert(LoReg && "Register for low half is not defined!");
   2367         ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
   2368                                        InFlag);
   2369         InFlag = ResLo.getValue(2);
   2370       }
   2371       ReplaceUses(SDValue(Node, 0), ResLo);
   2372       DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
   2373     }
   2374     // Copy the high half of the result, if it is needed.
   2375     if (!SDValue(Node, 1).use_empty()) {
   2376       if (!ResHi.getNode()) {
   2377         assert(HiReg && "Register for high half is not defined!");
   2378         ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
   2379                                        InFlag);
   2380         InFlag = ResHi.getValue(2);
   2381       }
   2382       ReplaceUses(SDValue(Node, 1), ResHi);
   2383       DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
   2384     }
   2385 
   2386     return nullptr;
   2387   }
   2388 
   2389   case ISD::SDIVREM:
   2390   case ISD::UDIVREM: {
   2391     SDValue N0 = Node->getOperand(0);
   2392     SDValue N1 = Node->getOperand(1);
   2393 
   2394     bool isSigned = Opcode == ISD::SDIVREM;
   2395     if (!isSigned) {
   2396       switch (NVT.SimpleTy) {
   2397       default: llvm_unreachable("Unsupported VT!");
   2398       case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
   2399       case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
   2400       case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
   2401       case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
   2402       }
   2403     } else {
   2404       switch (NVT.SimpleTy) {
   2405       default: llvm_unreachable("Unsupported VT!");
   2406       case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
   2407       case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
   2408       case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
   2409       case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
   2410       }
   2411     }
   2412 
   2413     unsigned LoReg, HiReg, ClrReg;
   2414     unsigned SExtOpcode;
   2415     switch (NVT.SimpleTy) {
   2416     default: llvm_unreachable("Unsupported VT!");
   2417     case MVT::i8:
   2418       LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
   2419       SExtOpcode = X86::CBW;
   2420       break;
   2421     case MVT::i16:
   2422       LoReg = X86::AX;  HiReg = X86::DX;
   2423       ClrReg = X86::DX;
   2424       SExtOpcode = X86::CWD;
   2425       break;
   2426     case MVT::i32:
   2427       LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
   2428       SExtOpcode = X86::CDQ;
   2429       break;
   2430     case MVT::i64:
   2431       LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
   2432       SExtOpcode = X86::CQO;
   2433       break;
   2434     }
   2435 
   2436     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   2437     bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
   2438     bool signBitIsZero = CurDAG->SignBitIsZero(N0);
   2439 
   2440     SDValue InFlag;
   2441     if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
   2442       // Special case for div8, just use a move with zero extension to AX to
   2443       // clear the upper 8 bits (AH).
   2444       SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
   2445       if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
   2446         SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
   2447         Move =
   2448           SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
   2449                                          MVT::Other, Ops), 0);
   2450         Chain = Move.getValue(1);
   2451         ReplaceUses(N0.getValue(1), Chain);
   2452       } else {
   2453         Move =
   2454           SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
   2455         Chain = CurDAG->getEntryNode();
   2456       }
   2457       Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
   2458       InFlag = Chain.getValue(1);
   2459     } else {
   2460       InFlag =
   2461         CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
   2462                              LoReg, N0, SDValue()).getValue(1);
   2463       if (isSigned && !signBitIsZero) {
   2464         // Sign extend the low part into the high part.
   2465         InFlag =
   2466           SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
   2467       } else {
   2468         // Zero out the high part, effectively zero extending the input.
   2469         SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
   2470         switch (NVT.SimpleTy) {
   2471         case MVT::i16:
   2472           ClrNode =
   2473               SDValue(CurDAG->getMachineNode(
   2474                           TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
   2475                           CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)),
   2476                       0);
   2477           break;
   2478         case MVT::i32:
   2479           break;
   2480         case MVT::i64:
   2481           ClrNode =
   2482               SDValue(CurDAG->getMachineNode(
   2483                           TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
   2484                           CurDAG->getTargetConstant(0, MVT::i64), ClrNode,
   2485                           CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
   2486                       0);
   2487           break;
   2488         default:
   2489           llvm_unreachable("Unexpected division source");
   2490         }
   2491 
   2492         InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
   2493                                       ClrNode, InFlag).getValue(1);
   2494       }
   2495     }
   2496 
   2497     if (foldedLoad) {
   2498       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
   2499                         InFlag };
   2500       SDNode *CNode =
   2501         CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
   2502       InFlag = SDValue(CNode, 1);
   2503       // Update the chain.
   2504       ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
   2505     } else {
   2506       InFlag =
   2507         SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
   2508     }
   2509 
   2510     // Prevent use of AH in a REX instruction by referencing AX instead.
   2511     // Shift it down 8 bits.
   2512     //
   2513     // The current assumption of the register allocator is that isel
   2514     // won't generate explicit references to the GPR8_NOREX registers. If
   2515     // the allocator and/or the backend get enhanced to be more robust in
   2516     // that regard, this can be, and should be, removed.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                         Result,
                                         CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return nullptr;
  }

  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform a comparison: if only the EFLAGS
    // result (value 1) of the SUB is used, it behaves exactly like a CMP.
    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // The arithmetic result is used, so this node is not a CMP.
      break;
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

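      // Narrowing the test preserves ZF, but SF is recomputed from the sign
      // bit of the narrower masked value, so each case below also requires
      // either that the immediate's new sign bit be clear or that no user
      // of this node inspects the signed flags.
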
      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers (EAX, EBX, ECX, EDX) have
        // 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          const TargetRegisterClass *TRC;
          switch (N0.getSimpleValueType().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
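          // COPY_TO_REGCLASS constrains the virtual register to the ABCD
          // class, so the allocator must pick one of EAX/EBX/ECX/EDX.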
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST; since SUB has two outputs while TEST
        // has one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register so an h-register is available.
        const TargetRegisterClass *TRC;
        switch (N0.getSimpleValueType().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.  The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
                                                 MVT::i32, Subreg, ShiftedImm);
        // Replace SUB|CMP with TEST; since SUB has two outputs while TEST
        // has one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST; since SUB has two outputs while TEST
        // has one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, convert "testq %rax, $268468232" to "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST; since SUB has two outputs while TEST
        // has one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }
    }
    break;
  }
  case ISD::STORE: {
    // Change a chain of {load; incr or dec; store} of the same value into
    // a simple increment or decrement through memory of that value, if the
    // uses of the modified value and its address are suitable.
    //
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)
    // We'll need to improve tablegen to allow flags to be transferred from
    // a node in the pattern to the result node, probably with a new keyword.
    // For example, we currently have this:
    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //   (implicit EFLAGS)]>;
    // but may need something like this:
    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //   (transferrable EFLAGS)]>;

    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = nullptr;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

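    // The fused read-modify-write instruction both loads and stores the
    // location, so it needs the memory operands of both original nodes.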
    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
                                                   SDLoc(Node),
                                                   MVT::i32, MVT::Other, Ops);
    Result->setMemRefs(MemOp, MemOp + 2);

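    // Result value 0 is EFLAGS (MVT::i32) and value 1 is the chain: the
    // store's chain users move to the new chain, and the inc/dec's flag
    // users move to the new EFLAGS value.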
    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }

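  // Hand anything not special-cased above to the TableGen-generated
  // pattern matcher.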
  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == nullptr || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

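  // A matched memory operand expands into the usual five x86 address
  // operands: base, scale, index, displacement, and segment.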
  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}